From cce936f4fff736927ffd53a61d7b2c6a1064e0c5 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 13 Dec 2021 13:41:05 +0200 Subject: drm/i915/cdclk: turn around i915_drv.h and intel_cdclk.h dependency MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit intel_cdclk.h only needs i915_drv.h for struct intel_cdclk_config. Move the definition to intel_cdclk.h and turn the includes around to avoid including i915_drv.h from other headers. The intel cdclk state macros in intel_cdclk.h still reference struct drm_i915_private, but as macros they don't strictly require the definition until they are used. v2: Expand on the commit message wrt cdclk state macros Reviewed-by: Ville Syrjälä Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20211213114106.296017-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_atomic.c | 1 + drivers/gpu/drm/i915/display/intel_cdclk.h | 6 +++++- drivers/gpu/drm/i915/i915_drv.h | 6 +----- 3 files changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_atomic.c b/drivers/gpu/drm/i915/display/intel_atomic.c index a62550711e98..1080741d1561 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic.c +++ b/drivers/gpu/drm/i915/display/intel_atomic.c @@ -34,6 +34,7 @@ #include #include +#include "i915_drv.h" #include "intel_atomic.h" #include "intel_cdclk.h" #include "intel_display_types.h" diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h b/drivers/gpu/drm/i915/display/intel_cdclk.h index fc638522e445..71dd84740ae3 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -8,7 +8,6 @@ #include -#include "i915_drv.h" #include "intel_display.h" #include "intel_global_state.h" @@ -16,6 +15,11 @@ struct drm_i915_private; struct intel_atomic_state; struct intel_crtc_state; +struct intel_cdclk_config { + unsigned int cdclk, vco, ref, bypass; + u8 voltage_level; +}; + struct intel_cdclk_state { struct intel_global_state base; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index e99996dfd43a..433c1387a137 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -65,6 +65,7 @@ #include "i915_utils.h" #include "display/intel_bios.h" +#include "display/intel_cdclk.h" #include "display/intel_display.h" #include "display/intel_display_power.h" #include "display/intel_dmc.h" @@ -627,11 +628,6 @@ struct i915_virtual_gpu { u32 caps; }; -struct intel_cdclk_config { - unsigned int cdclk, vco, ref, bypass; - u8 voltage_level; -}; - struct i915_selftest_stash { atomic_t counter; struct ida mock_region_instances; -- cgit From a908db6d98782e8d9a8d545dcc74937db5bfac04 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 13 Dec 2021 13:41:06 +0200 Subject: drm/i915/cdclk: move struct intel_cdclk_funcs to intel_cdclk.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The funcs struct can be opaque, make it internal to intel_cdclk.c. 
Suggested-by: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211213114106.296017-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 11 +++++++++++ drivers/gpu/drm/i915/i915_drv.h | 12 +----------- 2 files changed, 12 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index c30cf8d2b835..249f81a80eb7 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -63,6 +63,17 @@ * dividers can be programmed correctly. */ +struct intel_cdclk_funcs { + void (*get_cdclk)(struct drm_i915_private *i915, + struct intel_cdclk_config *cdclk_config); + void (*set_cdclk)(struct drm_i915_private *i915, + const struct intel_cdclk_config *cdclk_config, + enum pipe pipe); + int (*bw_calc_min_cdclk)(struct intel_atomic_state *state); + int (*modeset_calc_cdclk)(struct intel_cdclk_state *state); + u8 (*calc_voltage_level)(int cdclk); +}; + void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, struct intel_cdclk_config *cdclk_config) { diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 433c1387a137..9a4070988749 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -293,6 +293,7 @@ struct intel_connector; struct intel_encoder; struct intel_atomic_state; struct intel_cdclk_config; +struct intel_cdclk_funcs; struct intel_cdclk_state; struct intel_cdclk_vals; struct intel_initial_plane_config; @@ -341,17 +342,6 @@ struct intel_color_funcs { void (*read_luts)(struct intel_crtc_state *crtc_state); }; -struct intel_cdclk_funcs { - void (*get_cdclk)(struct drm_i915_private *dev_priv, - struct intel_cdclk_config *cdclk_config); - void (*set_cdclk)(struct drm_i915_private *dev_priv, - const struct intel_cdclk_config *cdclk_config, - enum pipe pipe); - int (*bw_calc_min_cdclk)(struct intel_atomic_state *state); - int (*modeset_calc_cdclk)(struct intel_cdclk_state *state); - u8 (*calc_voltage_level)(int cdclk); -}; - struct intel_hotplug_funcs { void (*hpd_irq_setup)(struct drm_i915_private *dev_priv); }; -- cgit From ae361eb07e9b498bc224db81113118fd28e35f6e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 14 Dec 2021 20:46:16 +0200 Subject: drm/i915/fbc: Parametrize FBC register offsets MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Parametrize ilk+ FBC register offsets based on the FBC instance. 
v2: More intel_ namespace (Jani) v3: Don't break gvt (Jani) Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211214184616.1410-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_fbc.c | 34 ++++++++++++++++++-------------- drivers/gpu/drm/i915/display/intel_fbc.h | 6 ++++++ drivers/gpu/drm/i915/gvt/handlers.c | 13 ++++++------ drivers/gpu/drm/i915/i915_reg.h | 34 ++++++++++++++++---------------- drivers/gpu/drm/i915/intel_pm.c | 31 +++++++++++++++++------------ 5 files changed, 67 insertions(+), 51 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index b33941c9e089..0ee71ca35286 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -85,6 +85,8 @@ struct intel_fbc { struct drm_mm_node compressed_fb; struct drm_mm_node compressed_llb; + enum intel_fbc_id id; + u8 limit; bool false_color; @@ -454,10 +456,10 @@ static void ilk_fbc_activate(struct intel_fbc *fbc) struct intel_fbc_state *fbc_state = &fbc->state; struct drm_i915_private *i915 = fbc->i915; - intel_de_write(i915, ILK_DPFC_FENCE_YOFF, + intel_de_write(i915, ILK_DPFC_FENCE_YOFF(fbc->id), fbc_state->fence_y_offset); - intel_de_write(i915, ILK_DPFC_CONTROL, + intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_EN | g4x_dpfc_ctl(fbc)); } @@ -467,28 +469,28 @@ static void ilk_fbc_deactivate(struct intel_fbc *fbc) u32 dpfc_ctl; /* Disable compression */ - dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL); + dpfc_ctl = intel_de_read(i915, ILK_DPFC_CONTROL(fbc->id)); if (dpfc_ctl & DPFC_CTL_EN) { dpfc_ctl &= ~DPFC_CTL_EN; - intel_de_write(i915, ILK_DPFC_CONTROL, dpfc_ctl); + intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), dpfc_ctl); } } static bool ilk_fbc_is_active(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, ILK_DPFC_CONTROL) & DPFC_CTL_EN; + return intel_de_read(fbc->i915, ILK_DPFC_CONTROL(fbc->id)) & DPFC_CTL_EN; } static bool ilk_fbc_is_compressing(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, ILK_DPFC_STATUS) & DPFC_COMP_SEG_MASK; + return intel_de_read(fbc->i915, ILK_DPFC_STATUS(fbc->id)) & DPFC_COMP_SEG_MASK; } static void ilk_fbc_program_cfb(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; - intel_de_write(i915, ILK_DPFC_CB_BASE, fbc->compressed_fb.start); + intel_de_write(i915, ILK_DPFC_CB_BASE(fbc->id), fbc->compressed_fb.start); } static const struct intel_fbc_funcs ilk_fbc_funcs = { @@ -524,8 +526,8 @@ static void snb_fbc_nuke(struct intel_fbc *fbc) { struct drm_i915_private *i915 = fbc->i915; - intel_de_write(i915, MSG_FBC_REND_STATE, FBC_REND_NUKE); - intel_de_posting_read(i915, MSG_FBC_REND_STATE); + intel_de_write(i915, MSG_FBC_REND_STATE(fbc->id), FBC_REND_NUKE); + intel_de_posting_read(i915, MSG_FBC_REND_STATE(fbc->id)); } static const struct intel_fbc_funcs snb_fbc_funcs = { @@ -547,7 +549,7 @@ static void glk_fbc_program_cfb_stride(struct intel_fbc *fbc) val |= FBC_STRIDE_OVERRIDE | FBC_STRIDE(fbc_state->override_cfb_stride / fbc->limit); - intel_de_write(i915, GLK_FBC_STRIDE, val); + intel_de_write(i915, GLK_FBC_STRIDE(fbc->id), val); } static void skl_fbc_program_cfb_stride(struct intel_fbc *fbc) @@ -598,19 +600,19 @@ static void ivb_fbc_activate(struct intel_fbc *fbc) if (i915->ggtt.num_fences) snb_fbc_program_fence(fbc); - intel_de_write(i915, ILK_DPFC_CONTROL, + intel_de_write(i915, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_EN | ivb_dpfc_ctl(fbc)); } static bool 
ivb_fbc_is_compressing(struct intel_fbc *fbc) { - return intel_de_read(fbc->i915, ILK_DPFC_STATUS2) & DPFC_COMP_SEG_MASK_IVB; + return intel_de_read(fbc->i915, ILK_DPFC_STATUS2(fbc->id)) & DPFC_COMP_SEG_MASK_IVB; } static void ivb_fbc_set_false_color(struct intel_fbc *fbc, bool enable) { - intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL, + intel_de_rmw(fbc->i915, ILK_DPFC_CONTROL(fbc->id), DPFC_CTL_FALSE_COLOR, enable ? DPFC_CTL_FALSE_COLOR : 0); } @@ -1620,7 +1622,8 @@ void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane) fbc->possible_framebuffer_bits |= plane->frontbuffer_bit; } -static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915) +static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915, + enum intel_fbc_id fbc_id) { struct intel_fbc *fbc; @@ -1628,6 +1631,7 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915) if (!fbc) return NULL; + fbc->id = fbc_id; fbc->i915 = i915; INIT_WORK(&fbc->underrun_work, intel_fbc_underrun_work_fn); mutex_init(&fbc->lock); @@ -1671,7 +1675,7 @@ void intel_fbc_init(struct drm_i915_private *i915) if (!HAS_FBC(i915)) return; - fbc = intel_fbc_create(i915); + fbc = intel_fbc_create(i915, INTEL_FBC_A); if (!fbc) return; diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h index 07ad0411fcc3..7b7631aec527 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -17,6 +17,12 @@ struct intel_fbc; struct intel_plane; struct intel_plane_state; +enum intel_fbc_id { + INTEL_FBC_A, + + I915_MAX_FBCS, +}; + int intel_fbc_atomic_check(struct intel_atomic_state *state); bool intel_fbc_pre_update(struct intel_atomic_state *state, struct intel_crtc *crtc); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index cde0a477fb49..3938df0db188 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -40,6 +40,7 @@ #include "gvt.h" #include "i915_pvinfo.h" #include "display/intel_display_types.h" +#include "display/intel_fbc.h" /* XXX FIXME i915 has changed PP_XXX definition */ #define PCH_PP_STATUS _MMIO(0xc7200) @@ -2647,12 +2648,12 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(_TRANSA_CHICKEN2), D_ALL); MMIO_D(_MMIO(_TRANSB_CHICKEN2), D_ALL); - MMIO_D(ILK_DPFC_CB_BASE, D_ALL); - MMIO_D(ILK_DPFC_CONTROL, D_ALL); - MMIO_D(ILK_DPFC_RECOMP_CTL, D_ALL); - MMIO_D(ILK_DPFC_STATUS, D_ALL); - MMIO_D(ILK_DPFC_FENCE_YOFF, D_ALL); - MMIO_D(ILK_DPFC_CHICKEN, D_ALL); + MMIO_D(ILK_DPFC_CB_BASE(INTEL_FBC_A), D_ALL); + MMIO_D(ILK_DPFC_CONTROL(INTEL_FBC_A), D_ALL); + MMIO_D(ILK_DPFC_RECOMP_CTL(INTEL_FBC_A), D_ALL); + MMIO_D(ILK_DPFC_STATUS(INTEL_FBC_A), D_ALL); + MMIO_D(ILK_DPFC_FENCE_YOFF(INTEL_FBC_A), D_ALL); + MMIO_D(ILK_DPFC_CHICKEN(INTEL_FBC_A), D_ALL); MMIO_D(ILK_FBC_RT_BASE, D_ALL); MMIO_D(IPS_CTL, D_ALL); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 9e5ccf86088c..8528db258827 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3353,10 +3353,10 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define FBC_LL_SIZE (1536) /* Framebuffer compression for GM45+ */ -#define DPFC_CB_BASE _MMIO(0x3200) -#define ILK_DPFC_CB_BASE _MMIO(0x43200) -#define DPFC_CONTROL _MMIO(0x3208) -#define ILK_DPFC_CONTROL _MMIO(0x43208) +#define DPFC_CB_BASE _MMIO(0x3200) +#define ILK_DPFC_CB_BASE(fbc_id) _MMIO_PIPE((fbc_id), 0x43200, 0x43240) +#define DPFC_CONTROL 
_MMIO(0x3208) +#define ILK_DPFC_CONTROL(fbc_id) _MMIO_PIPE((fbc_id), 0x43208, 0x43248) #define DPFC_CTL_EN REG_BIT(31) #define DPFC_CTL_PLANE_MASK_G4X REG_BIT(30) /* g4x-snb */ #define DPFC_CTL_PLANE_G4X(i9xx_plane) REG_FIELD_PREP(DPFC_CTL_PLANE_MASK_G4X, (i9xx_plane)) @@ -3374,28 +3374,28 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define DPFC_CTL_LIMIT_4X REG_FIELD_PREP(DPFC_CTL_LIMIT_MASK, 2) #define DPFC_CTL_FENCENO_MASK REG_GENMASK(3, 0) #define DPFC_CTL_FENCENO(fence) REG_FIELD_PREP(DPFC_CTL_FENCENO_MASK, (fence)) -#define DPFC_RECOMP_CTL _MMIO(0x320c) -#define ILK_DPFC_RECOMP_CTL _MMIO(0x4320c) +#define DPFC_RECOMP_CTL _MMIO(0x320c) +#define ILK_DPFC_RECOMP_CTL(fbc_id) _MMIO_PIPE((fbc_id), 0x4320c, 0x4324c) #define DPFC_RECOMP_STALL_EN REG_BIT(27) #define DPFC_RECOMP_STALL_WM_MASK REG_GENMASK(26, 16) #define DPFC_RECOMP_TIMER_COUNT_MASK REG_GENMASK(5, 0) -#define DPFC_STATUS _MMIO(0x3210) -#define ILK_DPFC_STATUS _MMIO(0x43210) +#define DPFC_STATUS _MMIO(0x3210) +#define ILK_DPFC_STATUS(fbc_id) _MMIO_PIPE((fbc_id), 0x43210, 0x43250) #define DPFC_INVAL_SEG_MASK REG_GENMASK(26, 16) #define DPFC_COMP_SEG_MASK REG_GENMASK(10, 0) -#define DPFC_STATUS2 _MMIO(0x3214) -#define ILK_DPFC_STATUS2 _MMIO(0x43214) +#define DPFC_STATUS2 _MMIO(0x3214) +#define ILK_DPFC_STATUS2(fbc_id) _MMIO_PIPE((fbc_id), 0x43214, 0x43254) #define DPFC_COMP_SEG_MASK_IVB REG_GENMASK(11, 0) -#define DPFC_FENCE_YOFF _MMIO(0x3218) -#define ILK_DPFC_FENCE_YOFF _MMIO(0x43218) -#define DPFC_CHICKEN _MMIO(0x3224) -#define ILK_DPFC_CHICKEN _MMIO(0x43224) +#define DPFC_FENCE_YOFF _MMIO(0x3218) +#define ILK_DPFC_FENCE_YOFF(fbc_id) _MMIO_PIPE((fbc_id), 0x43218, 0x43258) +#define DPFC_CHICKEN _MMIO(0x3224) +#define ILK_DPFC_CHICKEN(fbc_id) _MMIO_PIPE((fbc_id), 0x43224, 0x43264) #define DPFC_HT_MODIFY REG_BIT(31) /* pre-ivb */ #define DPFC_NUKE_ON_ANY_MODIFICATION REG_BIT(23) /* bdw+ */ #define DPFC_CHICKEN_COMP_DUMMY_PIXEL REG_BIT(14) /* glk+ */ #define DPFC_DISABLE_DUMMY0 REG_BIT(8) /* ivb+ */ -#define GLK_FBC_STRIDE _MMIO(0x43228) +#define GLK_FBC_STRIDE(fbc_id) _MMIO_PIPE((fbc_id), 0x43228, 0x43268) #define FBC_STRIDE_OVERRIDE REG_BIT(15) #define FBC_STRIDE_MASK REG_GENMASK(14, 0) #define FBC_STRIDE(x) REG_FIELD_PREP(FBC_STRIDE_MASK, (x)) @@ -3438,9 +3438,9 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define IPS_CTL _MMIO(0x43408) #define IPS_ENABLE (1 << 31) -#define MSG_FBC_REND_STATE _MMIO(0x50380) +#define MSG_FBC_REND_STATE(fbc_id) _MMIO_PIPE((fbc_id), 0x50380, 0x50384) #define FBC_REND_NUKE REG_BIT(2) -#define FBC_REND_CACHE_CLEAN REG_BIT(1) +#define FBC_REND_CACHE_CLEAN REG_BIT(1) /* * GPIO regs diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index fe1a83c02852..3714f96f17b3 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -160,8 +160,9 @@ static void bxt_init_clock_gating(struct drm_i915_private *dev_priv) * WaFbcHighMemBwCorruptionAvoidance:bxt * Display WA #0883: bxt */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_DISABLE_DUMMY0); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_DISABLE_DUMMY0); } static void glk_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7451,8 +7452,8 @@ static void gen8_set_l3sqc_credits(struct drm_i915_private *dev_priv, static void icl_init_clock_gating(struct drm_i915_private *dev_priv) { /* 
Wa_1409120013:icl,ehl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, - DPFC_CHICKEN_COMP_DUMMY_PIXEL); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + DPFC_CHICKEN_COMP_DUMMY_PIXEL); /*Wa_14010594013:icl, ehl */ intel_uncore_rmw(&dev_priv->uncore, GEN8_CHICKEN_DCPR_1, @@ -7464,7 +7465,7 @@ static void gen12lp_init_clock_gating(struct drm_i915_private *dev_priv) /* Wa_1409120013:tgl,rkl,adl-s,dg1,dg2 */ if (IS_TIGERLAKE(dev_priv) || IS_ROCKETLAKE(dev_priv) || IS_ALDERLAKE_S(dev_priv) || IS_DG1(dev_priv) || IS_DG2(dev_priv)) - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), DPFC_CHICKEN_COMP_DUMMY_PIXEL); /* Wa_1409825376:tgl (pre-prod)*/ @@ -7526,8 +7527,9 @@ static void cfl_init_clock_gating(struct drm_i915_private *dev_priv) * WaFbcNukeOnHostModify:cfl * Display WA #0873: cfl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_NUKE_ON_ANY_MODIFICATION); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_NUKE_ON_ANY_MODIFICATION); } static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7559,8 +7561,9 @@ static void kbl_init_clock_gating(struct drm_i915_private *dev_priv) * WaFbcNukeOnHostModify:kbl * Display WA #0873: kbl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_NUKE_ON_ANY_MODIFICATION); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_NUKE_ON_ANY_MODIFICATION); } static void skl_init_clock_gating(struct drm_i915_private *dev_priv) @@ -7586,15 +7589,17 @@ static void skl_init_clock_gating(struct drm_i915_private *dev_priv) * WaFbcNukeOnHostModify:skl * Display WA #0873: skl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_NUKE_ON_ANY_MODIFICATION); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_NUKE_ON_ANY_MODIFICATION); /* * WaFbcHighMemBwCorruptionAvoidance:skl * Display WA #0883: skl */ - intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN, intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN) | - DPFC_DISABLE_DUMMY0); + intel_uncore_write(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A), + intel_uncore_read(&dev_priv->uncore, ILK_DPFC_CHICKEN(INTEL_FBC_A)) | + DPFC_DISABLE_DUMMY0); } static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) -- cgit From c2a9682d2214e834b493c454e38809e571bb3045 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 13 Dec 2021 15:44:48 +0200 Subject: drm/i915/fbc: Loop through FBC instances in various places MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Convert i915->fbc into an array in preparation for multiple FBC instances, and loop through all instances in all places where the caller does not know which instance(s) (if any) are relevant. This is the case for eg. frontbuffer tracking and FIFO underrun hadling. 
v2: More intel_ namespace (Jani) Leave out debugfs for later Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211213134450.3082-3-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/i9xx_plane.c | 2 +- drivers/gpu/drm/i915/display/intel_fbc.c | 147 +++++++++++++-------- drivers/gpu/drm/i915/display/skl_universal_plane.c | 2 +- drivers/gpu/drm/i915/i915_drv.h | 3 +- 4 files changed, 94 insertions(+), 60 deletions(-) diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 85950ff67609..fc6f05146a9f 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -125,7 +125,7 @@ static struct intel_fbc *i9xx_plane_fbc(struct drm_i915_private *dev_priv, enum i9xx_plane_id i9xx_plane) { if (i9xx_plane_has_fbc(dev_priv, i9xx_plane)) - return dev_priv->fbc; + return dev_priv->fbc[INTEL_FBC_A]; else return NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 0ee71ca35286..a73256f22a1e 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -49,6 +49,13 @@ #include "intel_fbc.h" #include "intel_frontbuffer.h" +#define for_each_fbc_id(__fbc_id) \ + for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) + +#define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \ + for_each_fbc_id(__fbc_id) \ + for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)]) + struct intel_fbc_funcs { void (*activate)(struct intel_fbc *fbc); void (*deactivate)(struct intel_fbc *fbc); @@ -812,16 +819,16 @@ static void __intel_fbc_cleanup_cfb(struct intel_fbc *fbc) void intel_fbc_cleanup(struct drm_i915_private *i915) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; - mutex_lock(&fbc->lock); - __intel_fbc_cleanup_cfb(fbc); - mutex_unlock(&fbc->lock); + for_each_intel_fbc(i915, fbc, fbc_id) { + mutex_lock(&fbc->lock); + __intel_fbc_cleanup_cfb(fbc); + mutex_unlock(&fbc->lock); - kfree(fbc); + kfree(fbc); + } } static bool stride_is_valid(const struct intel_plane_state *plane_state) @@ -1307,15 +1314,10 @@ static unsigned int intel_fbc_get_frontbuffer_bit(struct intel_fbc *fbc) return fbc->possible_framebuffer_bits; } -void intel_fbc_invalidate(struct drm_i915_private *i915, - unsigned int frontbuffer_bits, - enum fb_op_origin origin) +static void __intel_fbc_invalidate(struct intel_fbc *fbc, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; - if (origin == ORIGIN_FLIP || origin == ORIGIN_CURSOR_UPDATE) return; @@ -1329,14 +1331,22 @@ void intel_fbc_invalidate(struct drm_i915_private *i915, mutex_unlock(&fbc->lock); } -void intel_fbc_flush(struct drm_i915_private *i915, - unsigned int frontbuffer_bits, enum fb_op_origin origin) +void intel_fbc_invalidate(struct drm_i915_private *i915, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) { - struct intel_fbc *fbc = i915->fbc; + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; - if (!fbc) - return; + for_each_intel_fbc(i915, fbc, fbc_id) + __intel_fbc_invalidate(fbc, frontbuffer_bits, origin); + +} +static void __intel_fbc_flush(struct intel_fbc *fbc, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ mutex_lock(&fbc->lock); fbc->busy_bits &= ~frontbuffer_bits; @@ -1356,6 +1366,17 @@ out: mutex_unlock(&fbc->lock); } +void intel_fbc_flush(struct 
drm_i915_private *i915, + unsigned int frontbuffer_bits, + enum fb_op_origin origin) +{ + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; + + for_each_intel_fbc(i915, fbc, fbc_id) + __intel_fbc_flush(fbc, frontbuffer_bits, origin); +} + int intel_fbc_atomic_check(struct intel_atomic_state *state) { struct intel_plane_state *plane_state; @@ -1483,15 +1504,15 @@ void intel_fbc_update(struct intel_atomic_state *state, */ void intel_fbc_global_disable(struct drm_i915_private *i915) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; - mutex_lock(&fbc->lock); - if (fbc->state.plane) - __intel_fbc_disable(fbc); - mutex_unlock(&fbc->lock); + for_each_intel_fbc(i915, fbc, fbc_id) { + mutex_lock(&fbc->lock); + if (fbc->state.plane) + __intel_fbc_disable(fbc); + mutex_unlock(&fbc->lock); + } } static void intel_fbc_underrun_work_fn(struct work_struct *work) @@ -1516,19 +1537,9 @@ out: mutex_unlock(&fbc->lock); } -/* - * intel_fbc_reset_underrun - reset FBC fifo underrun status. - * @i915: the i915 device - * - * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we - * want to re-enable FBC after an underrun to increase test coverage. - */ -void intel_fbc_reset_underrun(struct drm_i915_private *i915) +static void __intel_fbc_reset_underrun(struct intel_fbc *fbc) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; + struct drm_i915_private *i915 = fbc->i915; cancel_work_sync(&fbc->underrun_work); @@ -1544,6 +1555,38 @@ void intel_fbc_reset_underrun(struct drm_i915_private *i915) mutex_unlock(&fbc->lock); } +/* + * intel_fbc_reset_underrun - reset FBC fifo underrun status. + * @i915: the i915 device + * + * See intel_fbc_handle_fifo_underrun_irq(). For automated testing we + * want to re-enable FBC after an underrun to increase test coverage. + */ +void intel_fbc_reset_underrun(struct drm_i915_private *i915) +{ + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; + + for_each_intel_fbc(i915, fbc, fbc_id) + __intel_fbc_reset_underrun(fbc); +} + +static void __intel_fbc_handle_fifo_underrun_irq(struct intel_fbc *fbc) +{ + /* + * There's no guarantee that underrun_detected won't be set to true + * right after this check and before the work is scheduled, but that's + * not a problem since we'll check it again under the work function + * while FBC is locked. This check here is just to prevent us from + * unnecessarily scheduling the work, and it relies on the fact that we + * never switch underrun_detect back to false after it's true. + */ + if (READ_ONCE(fbc->underrun_detected)) + return; + + schedule_work(&fbc->underrun_work); +} + /** * intel_fbc_handle_fifo_underrun_irq - disable FBC when we get a FIFO underrun * @i915: i915 device @@ -1560,21 +1603,11 @@ void intel_fbc_reset_underrun(struct drm_i915_private *i915) */ void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915) { - struct intel_fbc *fbc = i915->fbc; - - if (!fbc) - return; - - /* There's no guarantee that underrun_detected won't be set to true - * right after this check and before the work is scheduled, but that's - * not a problem since we'll check it again under the work function - * while FBC is locked. This check here is just to prevent us from - * unnecessarily scheduling the work, and it relies on the fact that we - * never switch underrun_detect back to false after it's true. 
*/ - if (READ_ONCE(fbc->underrun_detected)) - return; + struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; - schedule_work(&fbc->underrun_work); + for_each_intel_fbc(i915, fbc, fbc_id) + __intel_fbc_handle_fifo_underrun_irq(fbc); } /* @@ -1685,7 +1718,7 @@ void intel_fbc_init(struct drm_i915_private *i915) if (intel_fbc_hw_is_active(fbc)) intel_fbc_hw_deactivate(fbc); - i915->fbc = fbc; + i915->fbc[fbc->id] = fbc; } static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) @@ -1778,7 +1811,7 @@ static void intel_fbc_debugfs_add(struct intel_fbc *fbc) void intel_fbc_debugfs_register(struct drm_i915_private *i915) { - struct intel_fbc *fbc = i915->fbc; + struct intel_fbc *fbc = i915->fbc[INTEL_FBC_A]; if (fbc) intel_fbc_debugfs_add(fbc); diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index d5359cf3d270..3db57cd7474b 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -1829,7 +1829,7 @@ static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { if (skl_plane_has_fbc(dev_priv, pipe, plane_id)) - return dev_priv->fbc; + return dev_priv->fbc[INTEL_FBC_A]; else return NULL; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 9a4070988749..5eaa6ac3eeeb 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -71,6 +71,7 @@ #include "display/intel_dmc.h" #include "display/intel_dpll_mgr.h" #include "display/intel_dsb.h" +#include "display/intel_fbc.h" #include "display/intel_frontbuffer.h" #include "display/intel_global_state.h" #include "display/intel_gmbus.h" @@ -737,7 +738,7 @@ struct drm_i915_private { u32 pipestat_irq_mask[I915_MAX_PIPES]; struct i915_hotplug hotplug; - struct intel_fbc *fbc; + struct intel_fbc *fbc[I915_MAX_FBCS]; struct i915_drrs drrs; struct intel_opregion opregion; struct intel_vbt_data vbt; -- cgit From b8ca477e51318d28f7514abfb5a369e11848a8cf Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 13 Dec 2021 15:44:49 +0200 Subject: drm/i915/fbc: Introduce device info fbc_mask MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Declare which FBC instances are present via a fbc_mask in device info. For the moment there is just the one. TODO: Need to figure out how to expose multiple FBC instances in debugs. Just different file names, or move the files under some subdirectory (per-crtc maybe), or something else? This will need igt changes as well. 
v2: Put the mask into device_info.display (Jani) Put the magic pipe->fbc thing into skl_fbc_id_for_pipe() (Jani) Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211213134450.3082-4-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_fbc.c | 38 ++++++++++++---------- drivers/gpu/drm/i915/display/skl_universal_plane.c | 17 +++++++--- drivers/gpu/drm/i915/i915_drv.h | 2 +- drivers/gpu/drm/i915/i915_pci.c | 22 ++++++------- drivers/gpu/drm/i915/intel_device_info.c | 4 ++- drivers/gpu/drm/i915/intel_device_info.h | 2 +- 6 files changed, 49 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index a73256f22a1e..796453e1c101 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -49,11 +49,12 @@ #include "intel_fbc.h" #include "intel_frontbuffer.h" -#define for_each_fbc_id(__fbc_id) \ - for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) +#define for_each_fbc_id(__dev_priv, __fbc_id) \ + for ((__fbc_id) = INTEL_FBC_A; (__fbc_id) < I915_MAX_FBCS; (__fbc_id)++) \ + for_each_if(INTEL_INFO(__dev_priv)->display.fbc_mask & BIT(__fbc_id)) #define for_each_intel_fbc(__dev_priv, __fbc, __fbc_id) \ - for_each_fbc_id(__fbc_id) \ + for_each_fbc_id((__dev_priv), (__fbc_id)) \ for_each_if((__fbc) = (__dev_priv)->fbc[(__fbc_id)]) struct intel_fbc_funcs { @@ -1693,32 +1694,35 @@ static struct intel_fbc *intel_fbc_create(struct drm_i915_private *i915, */ void intel_fbc_init(struct drm_i915_private *i915) { - struct intel_fbc *fbc; + enum intel_fbc_id fbc_id; if (!drm_mm_initialized(&i915->mm.stolen)) - mkwrite_device_info(i915)->display.has_fbc = false; + mkwrite_device_info(i915)->display.fbc_mask = 0; if (need_fbc_vtd_wa(i915)) - mkwrite_device_info(i915)->display.has_fbc = false; + mkwrite_device_info(i915)->display.fbc_mask = 0; i915->params.enable_fbc = intel_sanitize_fbc_option(i915); drm_dbg_kms(&i915->drm, "Sanitized enable_fbc value: %d\n", i915->params.enable_fbc); - if (!HAS_FBC(i915)) - return; + for_each_fbc_id(i915, fbc_id) { + struct intel_fbc *fbc; - fbc = intel_fbc_create(i915, INTEL_FBC_A); - if (!fbc) - return; + fbc = intel_fbc_create(i915, fbc_id); + if (!fbc) + continue; - /* We still don't have any sort of hardware state readout for FBC, so - * deactivate it in case the BIOS activated it to make sure software - * matches the hardware state. */ - if (intel_fbc_hw_is_active(fbc)) - intel_fbc_hw_deactivate(fbc); + /* + * We still don't have any sort of hardware state readout + * for FBC, so deactivate it in case the BIOS activated it + * to make sure software matches the hardware state. 
+ */ + if (intel_fbc_hw_is_active(fbc)) + intel_fbc_hw_deactivate(fbc); - i915->fbc[fbc->id] = fbc; + i915->fbc[fbc->id] = fbc; + } } static int intel_fbc_debugfs_status_show(struct seq_file *m, void *unused) diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 3db57cd7474b..158d89b8d490 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -1816,20 +1816,27 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, return 0; } +static enum intel_fbc_id skl_fbc_id_for_pipe(enum pipe pipe) +{ + return pipe - PIPE_A + INTEL_FBC_A; +} + static bool skl_plane_has_fbc(struct drm_i915_private *dev_priv, - enum pipe pipe, enum plane_id plane_id) + enum intel_fbc_id fbc_id, enum plane_id plane_id) { - if (!HAS_FBC(dev_priv)) + if ((INTEL_INFO(dev_priv)->display.fbc_mask & BIT(fbc_id)) == 0) return false; - return pipe == PIPE_A && plane_id == PLANE_PRIMARY; + return plane_id == PLANE_PRIMARY; } static struct intel_fbc *skl_plane_fbc(struct drm_i915_private *dev_priv, enum pipe pipe, enum plane_id plane_id) { - if (skl_plane_has_fbc(dev_priv, pipe, plane_id)) - return dev_priv->fbc[INTEL_FBC_A]; + enum intel_fbc_id fbc_id = skl_fbc_id_for_pipe(pipe); + + if (skl_plane_has_fbc(dev_priv, fbc_id, plane_id)) + return dev_priv->fbc[fbc_id]; else return NULL; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 5eaa6ac3eeeb..23e2a1bcc257 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1478,7 +1478,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) #define HAS_FW_BLC(dev_priv) (GRAPHICS_VER(dev_priv) > 2) -#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.has_fbc) +#define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0) #define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && GRAPHICS_VER(dev_priv) >= 7) #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index eeee028a5ad7..12e331f5fa57 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -214,13 +214,13 @@ static const struct intel_device_info i845g_info = { static const struct intel_device_info i85x_info = { I830_FEATURES, PLATFORM(INTEL_I85X), - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), }; static const struct intel_device_info i865g_info = { I845_FEATURES, PLATFORM(INTEL_I865G), - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN3_FEATURES \ @@ -258,7 +258,7 @@ static const struct intel_device_info i915gm_info = { .display.has_overlay = 1, .display.overlay_needs_physical = 1, .display.supports_tv = 1, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -283,7 +283,7 @@ static const struct intel_device_info i945gm_info = { .display.has_overlay = 1, .display.overlay_needs_physical = 1, .display.supports_tv = 1, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), .hws_needs_physical = 1, .unfenced_needs_alignment = 1, }; @@ -342,7 +342,7 @@ static const struct intel_device_info i965gm_info = { GEN4_FEATURES, PLATFORM(INTEL_I965GM), .is_mobile = 1, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), .display.has_overlay = 1, .display.supports_tv = 1, 
.hws_needs_physical = 1, @@ -360,7 +360,7 @@ static const struct intel_device_info gm45_info = { GEN4_FEATURES, PLATFORM(INTEL_GM45), .is_mobile = 1, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), .display.supports_tv = 1, .platform_engine_mask = BIT(RCS0) | BIT(VCS0), .gpu_reset_clobbers_display = false, @@ -393,7 +393,7 @@ static const struct intel_device_info ilk_m_info = { PLATFORM(INTEL_IRONLAKE), .is_mobile = 1, .has_rps = true, - .display.has_fbc = 1, + .display.fbc_mask = BIT(INTEL_FBC_A), }; #define GEN6_FEATURES \ @@ -401,7 +401,7 @@ static const struct intel_device_info ilk_m_info = { .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B), \ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B), \ .display.has_hotplug = 1, \ - .display.has_fbc = 1, \ + .display.fbc_mask = BIT(INTEL_FBC_A), \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -452,7 +452,7 @@ static const struct intel_device_info snb_m_gt2_info = { .display.pipe_mask = BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), \ .display.cpu_transcoder_mask = BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), \ .display.has_hotplug = 1, \ - .display.has_fbc = 1, \ + .display.fbc_mask = BIT(INTEL_FBC_A), \ .platform_engine_mask = BIT(RCS0) | BIT(VCS0) | BIT(BCS0), \ .has_coherent_ggtt = true, \ .has_llc = 1, \ @@ -693,7 +693,7 @@ static const struct intel_device_info skl_gt4_info = { .has_64bit_reloc = 1, \ .display.has_ddi = 1, \ .display.has_fpga_dbg = 1, \ - .display.has_fbc = 1, \ + .display.fbc_mask = BIT(INTEL_FBC_A), \ .display.has_hdcp = 1, \ .display.has_psr = 1, \ .display.has_psr_hw_tracking = 1, \ @@ -948,7 +948,7 @@ static const struct intel_device_info adl_s_info = { .display.has_dp_mst = 1, \ .display.has_dsb = 1, \ .display.has_dsc = 1, \ - .display.has_fbc = 1, \ + .display.fbc_mask = BIT(INTEL_FBC_A), \ .display.has_fpga_dbg = 1, \ .display.has_hdcp = 1, \ .display.has_hotplug = 1, \ diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index cbe9972478ac..bb7d37b70626 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -335,6 +335,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) "Display fused off, disabling\n"); info->display.pipe_mask = 0; info->display.cpu_transcoder_mask = 0; + info->display.fbc_mask = 0; } else if (fuse_strap & IVB_PIPE_C_DISABLE) { drm_info(&dev_priv->drm, "PipeC fused off\n"); info->display.pipe_mask &= ~BIT(PIPE_C); @@ -346,6 +347,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) if (dfsm & SKL_DFSM_PIPE_A_DISABLE) { info->display.pipe_mask &= ~BIT(PIPE_A); info->display.cpu_transcoder_mask &= ~BIT(TRANSCODER_A); + info->display.fbc_mask &= ~BIT(INTEL_FBC_A); } if (dfsm & SKL_DFSM_PIPE_B_DISABLE) { info->display.pipe_mask &= ~BIT(PIPE_B); @@ -366,7 +368,7 @@ void intel_device_info_runtime_init(struct drm_i915_private *dev_priv) info->display.has_hdcp = 0; if (dfsm & SKL_DFSM_DISPLAY_PM_DISABLE) - info->display.has_fbc = 0; + info->display.fbc_mask = 0; if (DISPLAY_VER(dev_priv) >= 11 && (dfsm & ICL_DFSM_DMC_DISABLE)) info->display.has_dmc = 0; diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index c121d7309dd2..7cef02f5ce65 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -155,7 +155,6 @@ enum intel_ppgtt_type { func(has_dp_mst); \ func(has_dsb); \ 
func(has_dsc); \ - func(has_fbc); \ func(has_fpga_dbg); \ func(has_gmch); \ func(has_hdcp); \ @@ -201,6 +200,7 @@ struct intel_device_info { u8 pipe_mask; u8 cpu_transcoder_mask; + u8 fbc_mask; u8 abox_mask; #define DEFINE_FLAG(name) u8 name:1 -- cgit From e74c6aa955caedd06b5ade58e31e33338e4efde6 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 13 Dec 2021 17:14:35 +0200 Subject: drm/i915/fbc: Register per-crtc debugfs files MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Expose FBC debugfs files for each crtc. These may or may not point to the same FBC instance depending on the platform. We leave the old global debugfs files in place until igt catches up to the new per-crtc approach. v2: Take a trip via intel_crtc_debugfs_add() (Jani) Cc: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211213151435.9700-1-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- .../gpu/drm/i915/display/intel_display_debugfs.c | 7 +++-- drivers/gpu/drm/i915/display/intel_fbc.c | 31 +++++++++++++--------- drivers/gpu/drm/i915/display/intel_fbc.h | 1 + 3 files changed, 25 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index 572445299b04..f4de004d470f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -2402,6 +2402,9 @@ void intel_connector_debugfs_add(struct intel_connector *intel_connector) */ void intel_crtc_debugfs_add(struct drm_crtc *crtc) { - if (crtc->debugfs_entry) - crtc_updates_add(crtc); + if (!crtc->debugfs_entry) + return; + + crtc_updates_add(crtc); + intel_fbc_crtc_debugfs_add(to_intel_crtc(crtc)); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 796453e1c101..8b9acedcdfc1 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1798,25 +1798,32 @@ DEFINE_SIMPLE_ATTRIBUTE(intel_fbc_debugfs_false_color_fops, intel_fbc_debugfs_false_color_set, "%llu\n"); -static void intel_fbc_debugfs_add(struct intel_fbc *fbc) +static void intel_fbc_debugfs_add(struct intel_fbc *fbc, + struct dentry *parent) { - struct drm_i915_private *i915 = fbc->i915; - struct drm_minor *minor = i915->drm.primary; - - debugfs_create_file("i915_fbc_status", 0444, - minor->debugfs_root, fbc, - &intel_fbc_debugfs_status_fops); + debugfs_create_file("i915_fbc_status", 0444, parent, + fbc, &intel_fbc_debugfs_status_fops); if (fbc->funcs->set_false_color) - debugfs_create_file("i915_fbc_false_color", 0644, - minor->debugfs_root, fbc, - &intel_fbc_debugfs_false_color_fops); + debugfs_create_file("i915_fbc_false_color", 0644, parent, + fbc, &intel_fbc_debugfs_false_color_fops); } +void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc) +{ + struct intel_plane *plane = to_intel_plane(crtc->base.primary); + + if (plane->fbc) + intel_fbc_debugfs_add(plane->fbc, crtc->base.debugfs_entry); +} + +/* FIXME: remove this once igt is on board with per-crtc stuff */ void intel_fbc_debugfs_register(struct drm_i915_private *i915) { - struct intel_fbc *fbc = i915->fbc[INTEL_FBC_A]; + struct drm_minor *minor = i915->drm.primary; + struct intel_fbc *fbc; + fbc = i915->fbc[INTEL_FBC_A]; if (fbc) - intel_fbc_debugfs_add(fbc); + intel_fbc_debugfs_add(fbc, minor->debugfs_root); } diff --git a/drivers/gpu/drm/i915/display/intel_fbc.h b/drivers/gpu/drm/i915/display/intel_fbc.h 
index 7b7631aec527..8c5a7339a27f 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.h +++ b/drivers/gpu/drm/i915/display/intel_fbc.h @@ -42,6 +42,7 @@ void intel_fbc_flush(struct drm_i915_private *dev_priv, void intel_fbc_add_plane(struct intel_fbc *fbc, struct intel_plane *plane); void intel_fbc_handle_fifo_underrun_irq(struct drm_i915_private *i915); void intel_fbc_reset_underrun(struct drm_i915_private *i915); +void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc); void intel_fbc_debugfs_register(struct drm_i915_private *i915); #endif /* __INTEL_FBC_H__ */ -- cgit From 637088a21e204b129a03dbd59bc0cd80d0292651 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 21 Nov 2021 12:00:32 +0100 Subject: drm/i915/backlight: Make ext_pwm_disable_backlight() call intel_backlight_set_pwm_level() At least the Bay Trail LPSS PWM controller used with DSI panels on many Bay Trail tablets seems to leave the PWM pin in whatever state it was (high or low) ATM that the PWM gets disabled. Combined with some panels not having a separate backlight-enable pin this leads to the backlight sometimes staying on while it should not (when the pin was high during PWM-disabling). First calling intel_backlight_set_pwm_level() will ensure that the pin is always low (or high for inverted brightness panels) since the passed in duty-cycle is 0% (or 100%) when the PWM gets disabled fixing the backlight sometimes staying on. With the exception of ext_pwm_disable_backlight() all other foo_disable_backlight() functions call intel_backlight_set_pwm_level() already before disabling the backlight, so this change also aligns ext_pwm_disable_backlight() with all the other disable() functions. Signed-off-by: Hans de Goede Acked-by: Jani Nikula Reviewed-by: Lyude Paul Link: https://patchwork.freedesktop.org/patch/msgid/20211121110032.4720-2-hdegoede@redhat.com --- drivers/gpu/drm/i915/display/intel_backlight.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 9523411cddd8..2db3b792aca6 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -433,6 +433,8 @@ static void ext_pwm_disable_backlight(const struct drm_connector_state *old_conn struct intel_connector *connector = to_intel_connector(old_conn_state->connector); struct intel_panel *panel = &connector->panel; + intel_backlight_set_pwm_level(old_conn_state, level); + panel->backlight.pwm_state.enabled = false; pwm_apply_state(panel->backlight.pwm, &panel->backlight.pwm_state); } -- cgit From d0c0cf22d7071e9ba8d30be91723e1d997a07970 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Sun, 21 Nov 2021 20:10:01 +0100 Subject: drm/i915: Remove unused intel_gmbus_set_speed() function The intel_gmbus_set_speed() function is not used anywhere, remove it. 
Note drivers/gpu/drm/gma500 has its own copy called gma_intel_gmbus_set_speed() which is used, the intel_gmbus_set_speed() version in the i915 code is not used at all Signed-off-by: Hans de Goede Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20211121191001.252076-1-hdegoede@redhat.com --- drivers/gpu/drm/i915/display/intel_gmbus.c | 7 ------- drivers/gpu/drm/i915/display/intel_gmbus.h | 1 - 2 files changed, 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 3b8b84177085..6ce8c10fe975 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -931,13 +931,6 @@ struct i2c_adapter *intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, return &dev_priv->gmbus[pin].adapter; } -void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) -{ - struct intel_gmbus *bus = to_intel_gmbus(adapter); - - bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | speed; -} - void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.h b/drivers/gpu/drm/i915/display/intel_gmbus.h index b96212b85425..8edc2e99cf53 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.h +++ b/drivers/gpu/drm/i915/display/intel_gmbus.h @@ -41,7 +41,6 @@ int intel_gmbus_output_aksv(struct i2c_adapter *adapter); struct i2c_adapter * intel_gmbus_get_adapter(struct drm_i915_private *dev_priv, unsigned int pin); -void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter); void intel_gmbus_reset(struct drm_i915_private *dev_priv); -- cgit From a36e7dc0af1cc7e5eaa89136c35a5305fd693731 Mon Sep 17 00:00:00 2001 From: Clint Taylor Date: Wed, 15 Dec 2021 22:26:45 -0800 Subject: drm/i915/dg1: Read OPROM via SPI controller MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Read OPROM SPI through MMIO and find VBT entry since we can't use OpRegion and PCI mapping may not work on some systems due to most BIOSes not leaving the Option ROM mapped. 
v2: Remove message with allocation failure Cc: Ville Syrjälä Cc: Tomas Winkler Signed-off-by: Clint Taylor Signed-off-by: Lucas De Marchi Signed-off-by: Jani Nikula Acked-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20211216062645.3477854-1-lucas.demarchi@intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 77 ++++++++++++++++++++++++++++--- drivers/gpu/drm/i915/i915_reg.h | 8 ++++ 2 files changed, 79 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 9d989c9f5da4..76a8f001f4c4 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2335,6 +2335,63 @@ bool intel_bios_is_valid_vbt(const void *buf, size_t size) return vbt; } +static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) +{ + u32 count, data, found, store = 0; + u32 static_region, oprom_offset; + u32 oprom_size = 0x200000; + u16 vbt_size; + u32 *vbt; + + static_region = intel_uncore_read(&i915->uncore, SPI_STATIC_REGIONS); + static_region &= OPTIONROM_SPI_REGIONID_MASK; + intel_uncore_write(&i915->uncore, PRIMARY_SPI_REGIONID, static_region); + + oprom_offset = intel_uncore_read(&i915->uncore, OROM_OFFSET); + oprom_offset &= OROM_OFFSET_MASK; + + for (count = 0; count < oprom_size; count += 4) { + intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, oprom_offset + count); + data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); + + if (data == *((const u32 *)"$VBT")) { + found = oprom_offset + count; + break; + } + } + + if (count >= oprom_size) + goto err_not_found; + + /* Get VBT size and allocate space for the VBT */ + intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found + + offsetof(struct vbt_header, vbt_size)); + vbt_size = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); + vbt_size &= 0xffff; + + vbt = kzalloc(vbt_size, GFP_KERNEL); + if (!vbt) + goto err_not_found; + + for (count = 0; count < vbt_size; count += 4) { + intel_uncore_write(&i915->uncore, PRIMARY_SPI_ADDRESS, found + count); + data = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); + *(vbt + store++) = data; + } + + if (!intel_bios_is_valid_vbt(vbt, vbt_size)) + goto err_free_vbt; + + drm_dbg_kms(&i915->drm, "Found valid VBT in SPI flash\n"); + + return (struct vbt_header *)vbt; + +err_free_vbt: + kfree(vbt); +err_not_found: + return NULL; +} + static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915) { struct pci_dev *pdev = to_pci_dev(i915->drm.dev); @@ -2384,6 +2441,8 @@ static struct vbt_header *oprom_get_vbt(struct drm_i915_private *i915) pci_unmap_rom(pdev, oprom); + drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n"); + return vbt; err_free_vbt: @@ -2418,17 +2477,23 @@ void intel_bios_init(struct drm_i915_private *i915) init_vbt_defaults(i915); - /* If the OpRegion does not have VBT, look in PCI ROM. 
*/ + /* + * If the OpRegion does not have VBT, look in SPI flash through MMIO or + * PCI mapping + */ + if (!vbt && IS_DGFX(i915)) { + oprom_vbt = spi_oprom_get_vbt(i915); + vbt = oprom_vbt; + } + if (!vbt) { oprom_vbt = oprom_get_vbt(i915); - if (!oprom_vbt) - goto out; - vbt = oprom_vbt; - - drm_dbg_kms(&i915->drm, "Found valid VBT in PCI ROM\n"); } + if (!vbt) + goto out; + bdb = get_bdb_header(vbt); i915->vbt.version = bdb->version; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8528db258827..5b502c8f0cfb 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -12853,6 +12853,14 @@ enum skl_power_gate { #define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1) #define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0) +#define PRIMARY_SPI_TRIGGER _MMIO(0x102040) +#define PRIMARY_SPI_ADDRESS _MMIO(0x102080) +#define PRIMARY_SPI_REGIONID _MMIO(0x102084) +#define SPI_STATIC_REGIONS _MMIO(0x102090) +#define OPTIONROM_SPI_REGIONID_MASK REG_GENMASK(7, 0) +#define OROM_OFFSET _MMIO(0x1020c0) +#define OROM_OFFSET_MASK REG_GENMASK(20, 16) + /* This register controls the Display State Buffer (DSB) engines. */ #define _DSBSL_INSTANCE_BASE 0x70B00 #define DSBSL_INSTANCE(pipe, id) (_DSBSL_INSTANCE_BASE + \ -- cgit From 80c469a0a03763f814715f3d12b6f3964c7423e8 Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Thu, 14 Oct 2021 04:57:19 -0400 Subject: ARM: OMAP2+: hwmod: Add of_node_put() before break Fix following coccicheck warning: ./arch/arm/mach-omap2/omap_hwmod.c:753:1-23: WARNING: Function for_each_matching_node should have of_node_put() before break Early exits from for_each_matching_node should decrement the node reference counter. Signed-off-by: Wan Jiabing Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/omap_hwmod.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c index ccb0e3732c0d..31d1a21f6041 100644 --- a/arch/arm/mach-omap2/omap_hwmod.c +++ b/arch/arm/mach-omap2/omap_hwmod.c @@ -752,8 +752,10 @@ static int __init _init_clkctrl_providers(void) for_each_matching_node(np, ti_clkctrl_match_table) { ret = _setup_clkctrl_provider(np); - if (ret) + if (ret) { + of_node_put(np); break; + } } return ret; -- cgit From 34596ba380b03d181e24efd50e2f21045bde3696 Mon Sep 17 00:00:00 2001 From: Ye Guojin Date: Tue, 16 Nov 2021 06:27:26 +0000 Subject: ARM: OMAP2+: adjust the location of put_device() call in omapdss_init_of This was found by coccicheck: ./arch/arm/mach-omap2/display.c, 272, 1-7, ERROR missing put_device; call of_find_device_by_node on line 258, but without a corresponding object release within this function. Move the put_device() call before the if judgment. 
Reported-by: Zeal Robot Signed-off-by: Ye Guojin Signed-off-by: Tony Lindgren --- arch/arm/mach-omap2/display.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c index 6daaa645ae5d..21413a9b7b6c 100644 --- a/arch/arm/mach-omap2/display.c +++ b/arch/arm/mach-omap2/display.c @@ -263,9 +263,9 @@ static int __init omapdss_init_of(void) } r = of_platform_populate(node, NULL, NULL, &pdev->dev); + put_device(&pdev->dev); if (r) { pr_err("Unable to populate DSS submodule devices\n"); - put_device(&pdev->dev); return r; } -- cgit From 29a5e8496b3ac0d400dfe32288c26c774beb8cc8 Mon Sep 17 00:00:00 2001 From: Jayesh Choudhary Date: Thu, 25 Nov 2021 16:23:26 +0530 Subject: ARM: dts: am335x-wega: Fix typo in mcasp property rx-num-evt Fix the property name 'rx-num-evt'. Signed-off-by: Jayesh Choudhary Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/am335x-wega.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/am335x-wega.dtsi b/arch/arm/boot/dts/am335x-wega.dtsi index 673159d93a6a..f957fea8208e 100644 --- a/arch/arm/boot/dts/am335x-wega.dtsi +++ b/arch/arm/boot/dts/am335x-wega.dtsi @@ -55,7 +55,7 @@ 2 1 0 0 /* # 0: INACTIVE, 1: TX, 2: RX */ >; tx-num-evt = <16>; - rt-num-evt = <16>; + rx-num-evt = <16>; status = "okay"; }; -- cgit From 23885389dbbbbc698986e77a45c1fc44a6e3632e Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Thu, 25 Nov 2021 16:48:34 +0200 Subject: ARM: dts: Fix timer regression for beagleboard revision c Commit e428e250fde6 ("ARM: dts: Configure system timers for omap3") caused a timer regression for beagleboard revision c where the system clockevent stops working if omap3isp module is unloaded. Turns out we still have beagleboard revisions a-b4 capacitor c70 quirks applied that limit the usable timers for no good reason. This also affects the power management as we use the system clock instead of the 32k clock source. Let's fix the issue by adding a new omap3-beagle-ab4.dts for the old timer quirks. This allows us to remove the timer quirks for later beagleboard revisions. We also need to update the related timer quirk check for the correct compatible property. 
Fixes: e428e250fde6 ("ARM: dts: Configure system timers for omap3") Cc: linux-kernel@vger.kernel.org Cc: Daniel Lezcano Cc: Thomas Gleixner Cc: Rob Herring Reported-by: Jarkko Nikula Tested-by: Jarkko Nikula Signed-off-by: Tony Lindgren --- .../devicetree/bindings/arm/omap/omap.txt | 3 ++ arch/arm/boot/dts/Makefile | 1 + arch/arm/boot/dts/omap3-beagle-ab4.dts | 47 ++++++++++++++++++++++ arch/arm/boot/dts/omap3-beagle.dts | 33 --------------- drivers/clocksource/timer-ti-dm-systimer.c | 2 +- 5 files changed, 52 insertions(+), 34 deletions(-) create mode 100644 arch/arm/boot/dts/omap3-beagle-ab4.dts diff --git a/Documentation/devicetree/bindings/arm/omap/omap.txt b/Documentation/devicetree/bindings/arm/omap/omap.txt index e77635c5422c..fa8b31660cad 100644 --- a/Documentation/devicetree/bindings/arm/omap/omap.txt +++ b/Documentation/devicetree/bindings/arm/omap/omap.txt @@ -119,6 +119,9 @@ Boards (incomplete list of examples): - OMAP3 BeagleBoard : Low cost community board compatible = "ti,omap3-beagle", "ti,omap3430", "ti,omap3" +- OMAP3 BeagleBoard A to B4 : Early BeagleBoard revisions A to B4 with a timer quirk + compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3" + - OMAP3 Tobi with Overo : Commercial expansion board with daughter board compatible = "gumstix,omap3-overo-tobi", "gumstix,omap3-overo", "ti,omap3430", "ti,omap3" diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile index 0de64f237cd8..a387ebe8919b 100644 --- a/arch/arm/boot/dts/Makefile +++ b/arch/arm/boot/dts/Makefile @@ -794,6 +794,7 @@ dtb-$(CONFIG_ARCH_OMAP3) += \ logicpd-som-lv-37xx-devkit.dtb \ omap3430-sdp.dtb \ omap3-beagle.dtb \ + omap3-beagle-ab4.dtb \ omap3-beagle-xm.dtb \ omap3-beagle-xm-ab.dtb \ omap3-cm-t3517.dtb \ diff --git a/arch/arm/boot/dts/omap3-beagle-ab4.dts b/arch/arm/boot/dts/omap3-beagle-ab4.dts new file mode 100644 index 000000000000..990ff2d84686 --- /dev/null +++ b/arch/arm/boot/dts/omap3-beagle-ab4.dts @@ -0,0 +1,47 @@ +// SPDX-License-Identifier: GPL-2.0-only +/dts-v1/; + +#include "omap3-beagle.dts" + +/ { + model = "TI OMAP3 BeagleBoard A to B4"; + compatible = "ti,omap3-beagle-ab4", "ti,omap3-beagle", "ti,omap3430", "ti,omap3"; +}; + +/* + * Workaround for capacitor C70 issue, see "Boards revision A and < B5" + * section at https://elinux.org/BeagleBoard_Community + */ + +/* Unusable as clocksource because of unreliable oscillator */ +&counter32k { + status = "disabled"; +}; + +/* Unusable as clockevent because of unreliable oscillator, allow to idle */ +&timer1_target { + /delete-property/ti,no-reset-on-init; + /delete-property/ti,no-idle; + timer@0 { + /delete-property/ti,timer-alwon; + }; +}; + +/* Preferred always-on timer for clocksource */ +&timer12_target { + ti,no-reset-on-init; + ti,no-idle; + timer@0 { + /* Always clocked by secure_32k_fck */ + }; +}; + +/* Preferred timer for clockevent */ +&timer2_target { + ti,no-reset-on-init; + ti,no-idle; + timer@0 { + assigned-clocks = <&gpt2_fck>; + assigned-clock-parents = <&sys_ck>; + }; +}; diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts index f9f34b8458e9..0548b391334f 100644 --- a/arch/arm/boot/dts/omap3-beagle.dts +++ b/arch/arm/boot/dts/omap3-beagle.dts @@ -304,39 +304,6 @@ phys = <0 &hsusb2_phy>; }; -/* Unusable as clocksource because of unreliable oscillator */ -&counter32k { - status = "disabled"; -}; - -/* Unusable as clockevent because if unreliable oscillator, allow to idle */ -&timer1_target { - /delete-property/ti,no-reset-on-init; - 
/delete-property/ti,no-idle; - timer@0 { - /delete-property/ti,timer-alwon; - }; -}; - -/* Preferred always-on timer for clocksource */ -&timer12_target { - ti,no-reset-on-init; - ti,no-idle; - timer@0 { - /* Always clocked by secure_32k_fck */ - }; -}; - -/* Preferred timer for clockevent */ -&timer2_target { - ti,no-reset-on-init; - ti,no-idle; - timer@0 { - assigned-clocks = <&gpt2_fck>; - assigned-clock-parents = <&sys_ck>; - }; -}; - &twl_gpio { ti,use-leds; /* pullups: BIT(1) */ diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index b6f97960d8ee..5c40ca1d4740 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -241,7 +241,7 @@ static void __init dmtimer_systimer_assign_alwon(void) bool quirk_unreliable_oscillator = false; /* Quirk unreliable 32 KiHz oscillator with incomplete dts */ - if (of_machine_is_compatible("ti,omap3-beagle") || + if (of_machine_is_compatible("ti,omap3-beagle-ab4") || of_machine_is_compatible("timll,omap3-devkit8000")) { quirk_unreliable_oscillator = true; counter_32k = -ENODEV; -- cgit From 7e28d0b26759846485978ada860ef4a427e06c8f Mon Sep 17 00:00:00 2001 From: Tejas Upadhyay Date: Fri, 10 Dec 2021 10:48:02 +0530 Subject: drm/i915/adl-n: Enable ADL-N platform Adding PCI device ids and enabling ADL-N platform. ADL-N from i915 point of view is subplatform of ADL-P. BSpec: 68397 Changes since V2: - Added version log history Changes since V1: - replace IS_ALDERLAKE_N with IS_ADLP_N - Jani Nikula Signed-off-by: Tejas Upadhyay Reviewed-by: Anusha Srivatsa Acked-by: Thomas Gleixner Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20211210051802.4063958-1-tejaskumarx.surendrakumar.upadhyay@intel.com --- arch/x86/kernel/early-quirks.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 2 ++ drivers/gpu/drm/i915/i915_pci.c | 1 + drivers/gpu/drm/i915/intel_device_info.c | 7 +++++++ drivers/gpu/drm/i915/intel_device_info.h | 3 +++ include/drm/i915_pciids.h | 6 ++++++ 6 files changed, 20 insertions(+) diff --git a/arch/x86/kernel/early-quirks.c b/arch/x86/kernel/early-quirks.c index fd2d3ab38ebb..1ca3a56fdc2d 100644 --- a/arch/x86/kernel/early-quirks.c +++ b/arch/x86/kernel/early-quirks.c @@ -554,6 +554,7 @@ static const struct pci_device_id intel_early_ids[] __initconst = { INTEL_RKL_IDS(&gen11_early_ops), INTEL_ADLS_IDS(&gen11_early_ops), INTEL_ADLP_IDS(&gen11_early_ops), + INTEL_ADLN_IDS(&gen11_early_ops), INTEL_RPLS_IDS(&gen11_early_ops), }; diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 23e2a1bcc257..b7d6402ef6d6 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1267,6 +1267,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, IS_SUBPLATFORM(dev_priv, INTEL_DG2, INTEL_SUBPLATFORM_G11) #define IS_ADLS_RPLS(dev_priv) \ IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_S, INTEL_SUBPLATFORM_RPL_S) +#define IS_ADLP_N(dev_priv) \ + IS_SUBPLATFORM(dev_priv, INTEL_ALDERLAKE_P, INTEL_SUBPLATFORM_N) #define IS_HSW_EARLY_SDV(dev_priv) (IS_HASWELL(dev_priv) && \ (INTEL_DEVID(dev_priv) & 0xFF00) == 0x0C00) #define IS_BDW_ULT(dev_priv) \ diff --git a/drivers/gpu/drm/i915/i915_pci.c b/drivers/gpu/drm/i915/i915_pci.c index 12e331f5fa57..960c358990bc 100644 --- a/drivers/gpu/drm/i915/i915_pci.c +++ b/drivers/gpu/drm/i915/i915_pci.c @@ -1129,6 +1129,7 @@ static const struct pci_device_id pciidlist[] = { INTEL_RKL_IDS(&rkl_info), INTEL_ADLS_IDS(&adl_s_info), INTEL_ADLP_IDS(&adl_p_info), + 
INTEL_ADLN_IDS(&adl_p_info), INTEL_DG1_IDS(&dg1_info), INTEL_RPLS_IDS(&adl_s_info), {0, 0, 0} diff --git a/drivers/gpu/drm/i915/intel_device_info.c b/drivers/gpu/drm/i915/intel_device_info.c index bb7d37b70626..24e05f1ef486 100644 --- a/drivers/gpu/drm/i915/intel_device_info.c +++ b/drivers/gpu/drm/i915/intel_device_info.c @@ -177,6 +177,10 @@ static const u16 subplatform_portf_ids[] = { INTEL_ICL_PORT_F_IDS(0), }; +static const u16 subplatform_n_ids[] = { + INTEL_ADLN_IDS(0), +}; + static const u16 subplatform_rpls_ids[] = { INTEL_RPLS_IDS(0), }; @@ -217,6 +221,9 @@ void intel_device_info_subplatform_init(struct drm_i915_private *i915) } else if (find_devid(devid, subplatform_portf_ids, ARRAY_SIZE(subplatform_portf_ids))) { mask = BIT(INTEL_SUBPLATFORM_PORTF); + } else if (find_devid(devid, subplatform_n_ids, + ARRAY_SIZE(subplatform_n_ids))) { + mask = BIT(INTEL_SUBPLATFORM_N); } else if (find_devid(devid, subplatform_rpls_ids, ARRAY_SIZE(subplatform_rpls_ids))) { mask = BIT(INTEL_SUBPLATFORM_RPL_S); diff --git a/drivers/gpu/drm/i915/intel_device_info.h b/drivers/gpu/drm/i915/intel_device_info.h index 7cef02f5ce65..2a4e32b4ebfd 100644 --- a/drivers/gpu/drm/i915/intel_device_info.h +++ b/drivers/gpu/drm/i915/intel_device_info.h @@ -113,6 +113,9 @@ enum intel_platform { /* ADL-S */ #define INTEL_SUBPLATFORM_RPL_S 0 +/* ADL-P */ +#define INTEL_SUBPLATFORM_N 0 + enum intel_ppgtt_type { INTEL_PPGTT_NONE = I915_GEM_PPGTT_NONE, INTEL_PPGTT_ALIASING = I915_GEM_PPGTT_ALIASING, diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h index baf3d1d3d566..533890dc9da1 100644 --- a/include/drm/i915_pciids.h +++ b/include/drm/i915_pciids.h @@ -666,6 +666,12 @@ INTEL_VGA_DEVICE(0x46C2, info), \ INTEL_VGA_DEVICE(0x46C3, info) +/* ADL-N */ +#define INTEL_ADLN_IDS(info) \ + INTEL_VGA_DEVICE(0x46D0, info), \ + INTEL_VGA_DEVICE(0x46D1, info), \ + INTEL_VGA_DEVICE(0x46D2, info) + /* RPL-S */ #define INTEL_RPLS_IDS(info) \ INTEL_VGA_DEVICE(0xA780, info), \ -- cgit From 9206a3af4fc0cebbefca2d79876d279bdd8d582b Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 17 Dec 2021 13:55:58 +0200 Subject: clk: ti: Move dra7 clock devices out of the legacy section I accidentally added some dra7 clock defines to the legacy section that we want to stop using. Let's move the defines to the right location. Note that this is just a cosmetic fix. 
Cc: linux-clk@vger.kernel.org Cc: Stephen Boyd Cc: Tero Kristo Acked-by: Rob Herring Signed-off-by: Tony Lindgren --- include/dt-bindings/clock/dra7.h | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/include/dt-bindings/clock/dra7.h b/include/dt-bindings/clock/dra7.h index 7d57063b8a65..29ff6b895848 100644 --- a/include/dt-bindings/clock/dra7.h +++ b/include/dt-bindings/clock/dra7.h @@ -84,17 +84,10 @@ #define DRA7_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) #define DRA7_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) -/* iva clocks */ -#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) -#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) - /* dss clocks */ #define DRA7_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) #define DRA7_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) -/* gpu clocks */ -#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) - /* l3init clocks */ #define DRA7_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) #define DRA7_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) @@ -267,10 +260,17 @@ #define DRA7_L3INSTR_L3_MAIN_2_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) #define DRA7_L3INSTR_L3_INSTR_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) +/* iva clocks */ +#define DRA7_IVA_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) +#define DRA7_SL2IF_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) + /* dss clocks */ #define DRA7_DSS_DSS_CORE_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) #define DRA7_DSS_BB2D_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) +/* gpu clocks */ +#define DRA7_GPU_CLKCTRL DRA7_CLKCTRL_INDEX(0x20) + /* l3init clocks */ #define DRA7_L3INIT_MMC1_CLKCTRL DRA7_CLKCTRL_INDEX(0x28) #define DRA7_L3INIT_MMC2_CLKCTRL DRA7_CLKCTRL_INDEX(0x30) -- cgit From 31aa7056bbec0259e2ec91db7d3571f66b14f93f Mon Sep 17 00:00:00 2001 From: Tony Lindgren Date: Fri, 17 Dec 2021 13:55:59 +0200 Subject: ARM: dts: Don't use legacy clock defines for dra7 clkctrl Looks like we are still using legacy clock defines for dra7. We want to stop using these as it prevents dropping the legacy clocks. Note that this is just a cosmetic fix. 
Cc: Tero Kristo Signed-off-by: Tony Lindgren --- arch/arm/boot/dts/dra7.dtsi | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi index 6b485cbed8d5..42bff117656c 100644 --- a/arch/arm/boot/dts/dra7.dtsi +++ b/arch/arm/boot/dts/dra7.dtsi @@ -160,7 +160,7 @@ target-module@48210000 { compatible = "ti,sysc-omap4-simple", "ti,sysc"; power-domains = <&prm_mpu>; - clocks = <&mpu_clkctrl DRA7_MPU_CLKCTRL 0>; + clocks = <&mpu_clkctrl DRA7_MPU_MPU_CLKCTRL 0>; clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; @@ -875,10 +875,10 @@ <0x58000014 4>; reg-names = "rev", "syss"; ti,syss-mask = <1>; - clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 0>, - <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>, - <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 10>, - <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 11>; + clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 0>, + <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>, + <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 10>, + <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 11>; clock-names = "fck", "hdmi_clk", "sys_clk", "tv_clk"; #address-cells = <1>; #size-cells = <1>; @@ -912,7 +912,7 @@ SYSC_OMAP2_SOFTRESET | SYSC_OMAP2_AUTOIDLE)>; ti,syss-mask = <1>; - clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>; + clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>; clock-names = "fck"; #address-cells = <1>; #size-cells = <1>; @@ -939,8 +939,8 @@ , ; ti,sysc-mask = <(SYSC_OMAP4_SOFTRESET)>; - clocks = <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 9>, - <&dss_clkctrl DRA7_DSS_CORE_CLKCTRL 8>; + clocks = <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 9>, + <&dss_clkctrl DRA7_DSS_DSS_CORE_CLKCTRL 8>; clock-names = "fck", "dss_clk"; #address-cells = <1>; #size-cells = <1>; @@ -979,7 +979,7 @@ compatible = "vivante,gc"; reg = <0x0 0x700>; interrupts = ; - clocks = <&dss_clkctrl DRA7_BB2D_CLKCTRL 0>; + clocks = <&dss_clkctrl DRA7_DSS_BB2D_CLKCTRL 0>; clock-names = "core"; }; }; @@ -1333,7 +1333,7 @@ ti,no-reset-on-init; ti,no-idle; timer@0 { - assigned-clocks = <&wkupaon_clkctrl DRA7_TIMER1_CLKCTRL 24>; + assigned-clocks = <&wkupaon_clkctrl DRA7_WKUPAON_TIMER1_CLKCTRL 24>; assigned-clock-parents = <&sys_32k_ck>; }; }; -- cgit From 825ca9ed1c9f5516b30292bb1c7ab648c2a01b92 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 21 Dec 2021 21:37:53 +0200 Subject: drm: Always include the debugfs dentry in drm_crtc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the counterproductive CONFIG_DEBUG_FS ifdef and just include the debugfs dentry in drm_crtc always. This way we don't need annoying ifdefs in the actual code with DEBUGFS=n. Also we don't have these ifdefs around any of the other debugfs dentries either so can't see why drm_crtc should be special. This fixes the i915 DEBUGFS=n build because I assumed the dentry would always be there. 
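To illustrate why the ifdef was counterproductive: with the dentry always present, callers can reference it unconditionally, and with CONFIG_DEBUG_FS=n the debugfs_create_*() helpers are already inline stubs that do nothing. A hedged sketch (the file name and fops are made up, not actual i915 code):

#include <linux/debugfs.h>
#include <drm/drm_crtc.h>

static const struct file_operations example_fops; /* illustrative */

static void example_crtc_debugfs_add(struct drm_crtc *crtc)
{
	/* No #ifdef CONFIG_DEBUG_FS needed around this user */
	debugfs_create_file("example", 0444, crtc->debugfs_entry,
			    crtc, &example_fops);
}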
Cc: Jani Nikula Reported-by: Nathan Chancellor Tested-by: Nathan Chancellor Fixes: e74c6aa955ca ("drm/i915/fbc: Register per-crtc debugfs files") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211221193754.12287-1-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula Acked-by: Daniel Vetter --- include/drm/drm_crtc.h | 2 -- 1 file changed, 2 deletions(-) diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h index 13eeba2a750a..4d01b4d89775 100644 --- a/include/drm/drm_crtc.h +++ b/include/drm/drm_crtc.h @@ -1135,14 +1135,12 @@ struct drm_crtc { */ spinlock_t commit_lock; -#ifdef CONFIG_DEBUG_FS /** * @debugfs_entry: * * Debugfs directory for this CRTC. */ struct dentry *debugfs_entry; -#endif /** * @crc: -- cgit From 980f42e7d57464af190d05b9cc0bc21846734f48 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 22 Dec 2021 10:16:54 +0200 Subject: drm/i915/bios: fix slab-out-of-bounds access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If VBT size is not a multiple of 4, the last 4-byte store will be out of bounds of the allocated buffer. Spotted with KASAN. Round up the allocation size. v2: Use round_up() intead of roundup() as it's a power of 2 (Thomas) Reported-by: Thomas Hellström Fixes: a36e7dc0af1c ("drm/i915/dg1: Read OPROM via SPI controller") Cc: Clint Taylor Cc: Lucas De Marchi Reviewed-by: Thomas Hellström Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20211222081654.1843211-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 76a8f001f4c4..c7a8d517ce81 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2369,7 +2369,7 @@ static struct vbt_header *spi_oprom_get_vbt(struct drm_i915_private *i915) vbt_size = intel_uncore_read(&i915->uncore, PRIMARY_SPI_TRIGGER); vbt_size &= 0xffff; - vbt = kzalloc(vbt_size, GFP_KERNEL); + vbt = kzalloc(round_up(vbt_size, 4), GFP_KERNEL); if (!vbt) goto err_not_found; -- cgit From 798c5daf3cddff3f39c5542a50a2dbd83879b05d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 16 Dec 2021 13:08:22 +0200 Subject: drm/i915/fbc: Remember to update FBC state even when not reallocating CFB MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We mustn't forget to update our FBC state even if we don't have to reallocate the CFB. Otherwise we won't refresh our notion of what eg. the new fence or the new override CFB stride should be. Using the wrong CFB stride in particular can cause underruns and could even corrupt other stuff in stolen. 
Fixes: f4cfdbb02ca8 ("drm/i915/fbc: Nuke state_cache") Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/4774 Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211216110822.8461-1-ville.syrjala@linux.intel.com Reviewed-by: Mika Kahola --- drivers/gpu/drm/i915/display/intel_fbc.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 8b9acedcdfc1..7fd11d735ca4 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1408,8 +1408,10 @@ static void __intel_fbc_enable(struct intel_atomic_state *state, if (fbc->state.plane != plane) return; - if (intel_fbc_is_ok(plane_state)) + if (intel_fbc_is_ok(plane_state)) { + intel_fbc_update_state(state, crtc, plane); return; + } __intel_fbc_disable(fbc); } -- cgit From f7747be1410321de8a92e340c5ca6c18a59770e9 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 22 Dec 2021 17:40:33 +0200 Subject: drm/i915/dsi: Drop double check ACPI companion device for NULL acpi_dev_get_resources() does perform the NULL pointer check against ACPI companion device which is given as function parameter. Thus, there is no need to duplicate this check in the caller. Signed-off-by: Andy Shevchenko Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20211222154033.6770-1-andriy.shevchenko@linux.intel.com --- drivers/gpu/drm/i915/display/intel_dsi_vbt.c | 28 ++++++++++------------------ 1 file changed, 10 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index 0da91849efde..da0bd056f3d3 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -426,24 +426,16 @@ static void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, const u16 slave_addr) { struct drm_device *drm_dev = intel_dsi->base.base.dev; - struct device *dev = drm_dev->dev; - struct acpi_device *acpi_dev; - struct list_head resource_list; - struct i2c_adapter_lookup lookup; - - acpi_dev = ACPI_COMPANION(dev); - if (acpi_dev) { - memset(&lookup, 0, sizeof(lookup)); - lookup.slave_addr = slave_addr; - lookup.intel_dsi = intel_dsi; - lookup.dev_handle = acpi_device_handle(acpi_dev); - - INIT_LIST_HEAD(&resource_list); - acpi_dev_get_resources(acpi_dev, &resource_list, - i2c_adapter_lookup, - &lookup); - acpi_dev_free_resource_list(&resource_list); - } + struct acpi_device *adev = ACPI_COMPANION(drm_dev->dev); + struct i2c_adapter_lookup lookup = { + .slave_addr = slave_addr, + .intel_dsi = intel_dsi, + .dev_handle = acpi_device_handle(adev), + }; + LIST_HEAD(resource_list); + + acpi_dev_get_resources(adev, &resource_list, i2c_adapter_lookup, &lookup); + acpi_dev_free_resource_list(&resource_list); } #else static inline void i2c_acpi_find_adapter(struct intel_dsi *intel_dsi, -- cgit From e35d8762b04f89f9f5a188d0c440d3a2c1d010ed Mon Sep 17 00:00:00 2001 From: Anisse Astier Date: Wed, 29 Dec 2021 23:21:59 +0100 Subject: drm/i915/opregion: add support for mailbox #5 EDID MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ACPI OpRegion Mailbox #5 ASLE extension may contain an EDID to be used for the embedded display. Add support for using it via by adding the EDID to the list of available modes on the connector, and use it for eDP when available. 
If a panel's EDID is broken, there may be an override EDID set in the ACPI OpRegion mailbox #5. Use it if available. Fixes the GPD Win Max display. Based on original patch series by: Jani Nikula https://patchwork.kernel.org/project/intel-gfx/patch/20200828061941.17051-1-jani.nikula@intel.com/ Changes: - EDID is copied and validated with drm_edid_is_valid - EDID is now only used as a fallback. - squashed the two patches Cc: Jani Nikula Cc: Uma Shankar Cc: Ville Syrjälä Co-developed-by: Jani Nikula Signed-off-by: Jani Nikula Signed-off-by: Anisse Astier Link: https://patchwork.freedesktop.org/patch/msgid/20211229222200.53128-2-anisse@astier.eu --- drivers/gpu/drm/i915/display/intel_dp.c | 8 ++++ drivers/gpu/drm/i915/display/intel_opregion.c | 55 ++++++++++++++++++++++++++- drivers/gpu/drm/i915/display/intel_opregion.h | 10 +++++ 3 files changed, 72 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index b5e2508db1cf..d6d8c9922feb 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -4974,6 +4974,14 @@ static bool intel_edp_init_connector(struct intel_dp *intel_dp, mutex_lock(&dev->mode_config.mutex); edid = drm_get_edid(connector, &intel_dp->aux.ddc); + if (!edid) { + /* Fallback to EDID from ACPI OpRegion, if any */ + edid = intel_opregion_get_edid(intel_connector); + if (edid) + drm_dbg_kms(&dev_priv->drm, + "[CONNECTOR:%d:%s] Using OpRegion EDID\n", + connector->base.id, connector->name); + } if (edid) { if (drm_add_edid_modes(connector, edid)) { drm_connector_update_edid_property(connector, edid); diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 0065111593a6..985790a66a4d 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -195,6 +195,8 @@ struct opregion_asle_ext { #define ASLE_IUER_WINDOWS_BTN (1 << 1) #define ASLE_IUER_POWER_BTN (1 << 0) +#define ASLE_PHED_EDID_VALID_MASK 0x3 + /* Software System Control Interrupt (SWSCI) */ #define SWSCI_SCIC_INDICATOR (1 << 0) #define SWSCI_SCIC_MAIN_FUNCTION_SHIFT 1 @@ -908,8 +910,10 @@ int intel_opregion_setup(struct drm_i915_private *dev_priv) opregion->asle->ardy = ASLE_ARDY_NOT_READY; } - if (mboxes & MBOX_ASLE_EXT) + if (mboxes & MBOX_ASLE_EXT) { drm_dbg(&dev_priv->drm, "ASLE extension supported\n"); + opregion->asle_ext = base + OPREGION_ASLE_EXT_OFFSET; + } if (intel_load_vbt_firmware(dev_priv) == 0) goto out; @@ -1036,6 +1040,54 @@ intel_opregion_get_panel_type(struct drm_i915_private *dev_priv) return ret - 1; } +/** + * intel_opregion_get_edid - Fetch EDID from ACPI OpRegion mailbox #5 + * @intel_connector: eDP connector + * + * This reads the ACPI Opregion mailbox #5 to extract the EDID that is passed + * to it. + * + * Returns: + * The EDID in the OpRegion, or NULL if there is none or it's invalid. 
+ * + */ +struct edid *intel_opregion_get_edid(struct intel_connector *intel_connector) +{ + struct drm_connector *connector = &intel_connector->base; + struct drm_i915_private *i915 = to_i915(connector->dev); + struct intel_opregion *opregion = &i915->opregion; + const void *in_edid; + const struct edid *edid; + struct edid *new_edid; + int len; + + if (!opregion->asle_ext) + return NULL; + + in_edid = opregion->asle_ext->bddc; + + /* Validity corresponds to number of 128-byte blocks */ + len = (opregion->asle_ext->phed & ASLE_PHED_EDID_VALID_MASK) * 128; + if (!len || !memchr_inv(in_edid, 0, len)) + return NULL; + + edid = in_edid; + + if (len < EDID_LENGTH * (1 + edid->extensions)) { + drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5): too short\n"); + return NULL; + } + new_edid = drm_edid_duplicate(edid); + if (!new_edid) + return NULL; + if (!drm_edid_is_valid(new_edid)) { + kfree(new_edid); + drm_dbg_kms(&i915->drm, "Invalid EDID in ACPI OpRegion (Mailbox #5)\n"); + return NULL; + } + return new_edid; +} + void intel_opregion_register(struct drm_i915_private *i915) { struct intel_opregion *opregion = &i915->opregion; @@ -1129,6 +1181,7 @@ void intel_opregion_unregister(struct drm_i915_private *i915) opregion->acpi = NULL; opregion->swsci = NULL; opregion->asle = NULL; + opregion->asle_ext = NULL; opregion->vbt = NULL; opregion->lid_state = NULL; } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.h b/drivers/gpu/drm/i915/display/intel_opregion.h index 4aa68ffbd30e..82cc0ba34af7 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.h +++ b/drivers/gpu/drm/i915/display/intel_opregion.h @@ -29,12 +29,14 @@ #include struct drm_i915_private; +struct intel_connector; struct intel_encoder; struct opregion_header; struct opregion_acpi; struct opregion_swsci; struct opregion_asle; +struct opregion_asle_ext; struct intel_opregion { struct opregion_header *header; @@ -43,6 +45,7 @@ struct intel_opregion { u32 swsci_gbda_sub_functions; u32 swsci_sbcb_sub_functions; struct opregion_asle *asle; + struct opregion_asle_ext *asle_ext; void *rvda; void *vbt_firmware; const void *vbt; @@ -71,6 +74,7 @@ int intel_opregion_notify_encoder(struct intel_encoder *intel_encoder, int intel_opregion_notify_adapter(struct drm_i915_private *dev_priv, pci_power_t state); int intel_opregion_get_panel_type(struct drm_i915_private *dev_priv); +struct edid *intel_opregion_get_edid(struct intel_connector *connector); #else /* CONFIG_ACPI*/ @@ -117,6 +121,12 @@ static inline int intel_opregion_get_panel_type(struct drm_i915_private *dev) return -ENODEV; } +static inline struct edid * +intel_opregion_get_edid(struct intel_connector *connector) +{ + return NULL; +} + #endif /* CONFIG_ACPI */ #endif -- cgit From 0b464ca3e0dd3cec65f28bc6d396d82f19080f69 Mon Sep 17 00:00:00 2001 From: Anisse Astier Date: Wed, 29 Dec 2021 23:22:00 +0100 Subject: drm: Add orientation quirk for GPD Win Max Panel is 800x1280, but mounted on a laptop form factor, sideways. 
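For context, a sketch of how the driver side typically consumes this table when setting up a connector; the wrapper function is an assumption for illustration, only drm_get_panel_orientation_quirk() itself is real:

#include <drm/drm_connector.h>
#include <drm/drm_utils.h>

static int example_panel_orientation(void)
{
	/* 800x1280 is the native mode of the panel handled above */
	return drm_get_panel_orientation_quirk(800, 1280);
}

On the GPD Win Max this should resolve to DRM_MODE_PANEL_ORIENTATION_RIGHT_UP via the new entry; without a matching quirk it stays DRM_MODE_PANEL_ORIENTATION_UNKNOWN.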
Signed-off-by: Anisse Astier Reviewed-by: Hans de Goede Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20211229222200.53128-3-anisse@astier.eu --- drivers/gpu/drm/drm_panel_orientation_quirks.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/drm_panel_orientation_quirks.c b/drivers/gpu/drm/drm_panel_orientation_quirks.c index 042bb80383c9..3dc383b1e2ba 100644 --- a/drivers/gpu/drm/drm_panel_orientation_quirks.c +++ b/drivers/gpu/drm/drm_panel_orientation_quirks.c @@ -174,6 +174,12 @@ static const struct dmi_system_id orientation_data[] = { DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "MicroPC"), }, .driver_data = (void *)&lcd720x1280_rightside_up, + }, { /* GPD Win Max */ + .matches = { + DMI_EXACT_MATCH(DMI_SYS_VENDOR, "GPD"), + DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "G1619-01"), + }, + .driver_data = (void *)&lcd800x1280_rightside_up, }, { /* * GPD Pocket, note that the the DMI data is less generic then * it seems, devices with a board-vendor of "AMI Corporation" -- cgit From 80dfdeb75028084f42a81a4151a986c56aeec1c1 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 5 Jan 2022 12:21:31 +0200 Subject: drm/i915: stop including i915_irq.h from i915_drv.h Only include i915_irq.h where actually needed. Signed-off-by: Jani Nikula Acked-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20220105102131.988791-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_crtc.c | 1 + drivers/gpu/drm/i915/display/intel_display_trace.h | 1 + drivers/gpu/drm/i915/gt/intel_rps.c | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc.c | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc_log.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 1 - 6 files changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_crtc.c b/drivers/gpu/drm/i915/display/intel_crtc.c index 16c3ca66d9f0..08ee3e17ee5c 100644 --- a/drivers/gpu/drm/i915/display/intel_crtc.c +++ b/drivers/gpu/drm/i915/display/intel_crtc.c @@ -12,6 +12,7 @@ #include #include +#include "i915_irq.h" #include "i915_vgpu.h" #include "i9xx_plane.h" #include "icl_dsi.h" diff --git a/drivers/gpu/drm/i915/display/intel_display_trace.h b/drivers/gpu/drm/i915/display/intel_display_trace.h index 4043e1276383..f05f0f9b5103 100644 --- a/drivers/gpu/drm/i915/display/intel_display_trace.h +++ b/drivers/gpu/drm/i915/display/intel_display_trace.h @@ -13,6 +13,7 @@ #include #include "i915_drv.h" +#include "i915_irq.h" #include "intel_crtc.h" #include "intel_display_types.h" diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 5e275f8dda8c..8f5bce298574 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -6,6 +6,7 @@ #include #include "i915_drv.h" +#include "i915_irq.h" #include "intel_breadcrumbs.h" #include "intel_gt.h" #include "intel_gt_clock_utils.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.c b/drivers/gpu/drm/i915/gt/uc/intel_guc.c index 6e228343e8cb..0c52d1652e8b 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.c @@ -12,6 +12,7 @@ #include "intel_guc_ads.h" #include "intel_guc_submission.h" #include "i915_drv.h" +#include "i915_irq.h" /** * DOC: GuC diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c index ac0931f0374b..7b0b43e87244 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_log.c @@ -7,6 +7,7 @@ #include "gt/intel_gt.h" #include "i915_drv.h" +#include 
"i915_irq.h" #include "i915_memcpy.h" #include "intel_guc_log.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b7d6402ef6d6..c7ce23da6ffa 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -106,7 +106,6 @@ #include "i915_scheduler.h" #include "gt/intel_timeline.h" #include "i915_vma.h" -#include "i915_irq.h" /* General customization: -- cgit From 814d5c51f8966895bb20b51c886bd3961f76f3f4 Mon Sep 17 00:00:00 2001 From: Harish Chegondi Date: Fri, 17 Dec 2021 08:02:55 -0800 Subject: drm/i915: Fix possible NULL pointer dereferences in i9xx_update_wm() Check return pointer from intel_crtc_for_plane() before dereferencing it, as it can be NULL. v2: Moved the NULL check into intel_crtc_active(). Cc: Jani Nikula Cc: Caz Yokoyama Cc: Radhakrishna Sripada Signed-off-by: Harish Chegondi Reviewed-by: Jani Nikula Signed-off-by: Matt Roper Link: https://patchwork.freedesktop.org/patch/msgid/20211217160255.1300348-1-harish.chegondi@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 3714f96f17b3..161d064e0768 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -877,7 +877,7 @@ static bool intel_crtc_active(struct intel_crtc *crtc) * crtc->state->active once we have proper CRTC states wired up * for atomic. */ - return crtc->active && crtc->base.primary->state->fb && + return crtc && crtc->active && crtc->base.primary->state->fb && crtc->config->hw.adjusted_mode.crtc_clock; } -- cgit From 6dc701ee9fabfc929cae2d7acc957bf38e4c3264 Mon Sep 17 00:00:00 2001 From: Sven Peter Date: Sat, 11 Dec 2021 13:36:32 +0100 Subject: MAINTAINERS: Add Apple watchdog to ARM/APPLE MACHINE SUPPORT Add apple_wdt.c under the ARM/APPLE MACHINE SUPPORT entry. Signed-off-by: Sven Peter Signed-off-by: Hector Martin --- MAINTAINERS | 1 + 1 file changed, 1 insertion(+) diff --git a/MAINTAINERS b/MAINTAINERS index 03ffcf49f5cf..95cd7274cb61 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -1760,6 +1760,7 @@ F: drivers/irqchip/irq-apple-aic.c F: drivers/mailbox/apple-mailbox.c F: drivers/pinctrl/pinctrl-apple-gpio.c F: drivers/soc/apple/* +F: drivers/watchdog/apple_wdt.c F: include/dt-bindings/interrupt-controller/apple-aic.h F: include/dt-bindings/pinctrl/apple.h F: include/linux/apple-mailbox.h -- cgit From 5225e1b87432dcf0d0fc3440824b91d04c1d6cc1 Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 27 Dec 2021 19:00:24 +0100 Subject: ARM: dts: meson: Fix the UART compatible strings The dt-bindings for the UART controller only allow the following values for Meson6 SoCs: - "amlogic,meson6-uart", "amlogic,meson-ao-uart" - "amlogic,meson6-uart" Use the correct fallback compatible string "amlogic,meson-ao-uart" for AO UART. Drop the "amlogic,meson-uart" compatible string from the EE domain UART controllers. 
Fixes: ec9b59162fd831 ("ARM: dts: meson6: use stable UART bindings") Signed-off-by: Martin Blumenstingl Signed-off-by: Neil Armstrong Link: https://lore.kernel.org/r/20211227180026.4068352-2-martin.blumenstingl@googlemail.com --- arch/arm/boot/dts/meson.dtsi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi index 3be7cba603d5..26eaba3fa96f 100644 --- a/arch/arm/boot/dts/meson.dtsi +++ b/arch/arm/boot/dts/meson.dtsi @@ -59,7 +59,7 @@ }; uart_A: serial@84c0 { - compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart"; reg = <0x84c0 0x18>; interrupts = ; fifo-size = <128>; @@ -67,7 +67,7 @@ }; uart_B: serial@84dc { - compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart"; reg = <0x84dc 0x18>; interrupts = ; status = "disabled"; @@ -105,7 +105,7 @@ }; uart_C: serial@8700 { - compatible = "amlogic,meson6-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart"; reg = <0x8700 0x18>; interrupts = ; status = "disabled"; @@ -228,7 +228,7 @@ }; uart_AO: serial@4c0 { - compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart", "amlogic,meson-uart"; + compatible = "amlogic,meson6-uart", "amlogic,meson-ao-uart"; reg = <0x4c0 0x18>; interrupts = ; status = "disabled"; -- cgit From 57007bfb5469ba31cacf69d52195e8b75f43e32d Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 27 Dec 2021 19:00:25 +0100 Subject: ARM: dts: meson8: Fix the UART device-tree schema validation The dt-bindings for the UART controller only allow the following values for Meson8 SoCs: - "amlogic,meson8-uart", "amlogic,meson-ao-uart" - "amlogic,meson8-uart" Use the correct fallback compatible string "amlogic,meson-ao-uart" for AO UART. Drop the "amlogic,meson-uart" compatible string from the EE domain UART controllers. Also update the order of the clocks to match the order defined in the yaml schema. 
Fixes: 6ca77502050eff ("ARM: dts: meson8: use stable UART bindings with correct gate clock") Signed-off-by: Martin Blumenstingl Signed-off-by: Neil Armstrong Link: https://lore.kernel.org/r/20211227180026.4068352-3-martin.blumenstingl@googlemail.com --- arch/arm/boot/dts/meson8.dtsi | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/arm/boot/dts/meson8.dtsi b/arch/arm/boot/dts/meson8.dtsi index f80ddc98d3a2..9997a5d0333a 100644 --- a/arch/arm/boot/dts/meson8.dtsi +++ b/arch/arm/boot/dts/meson8.dtsi @@ -736,27 +736,27 @@ }; &uart_AO { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart", "amlogic,meson-ao-uart"; + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_A { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_B { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_C { - compatible = "amlogic,meson8-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &usb0 { -- cgit From 3375aa77135f6aeb1107ed839a2050a4118444bc Mon Sep 17 00:00:00 2001 From: Martin Blumenstingl Date: Mon, 27 Dec 2021 19:00:26 +0100 Subject: ARM: dts: meson8b: Fix the UART device-tree schema validation The dt-bindings for the UART controller only allow the following values for Meson8 SoCs: - "amlogic,meson8b-uart", "amlogic,meson-ao-uart" - "amlogic,meson8b-uart" Use the correct fallback compatible string "amlogic,meson-ao-uart" for AO UART. Drop the "amlogic,meson-uart" compatible string from the EE domain UART controllers. Also update the order of the clocks to match the order defined in the yaml bindings. 
Fixes: b02d6e73f5fc96 ("ARM: dts: meson8b: use stable UART bindings with correct gate clock") Signed-off-by: Martin Blumenstingl Signed-off-by: Neil Armstrong Link: https://lore.kernel.org/r/20211227180026.4068352-4-martin.blumenstingl@googlemail.com --- arch/arm/boot/dts/meson8b.dtsi | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/arch/arm/boot/dts/meson8b.dtsi b/arch/arm/boot/dts/meson8b.dtsi index b49b7cbaed4e..94f1c03decce 100644 --- a/arch/arm/boot/dts/meson8b.dtsi +++ b/arch/arm/boot/dts/meson8b.dtsi @@ -724,27 +724,27 @@ }; &uart_AO { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_CLK81>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart", "amlogic,meson-ao-uart"; + clocks = <&xtal>, <&clkc CLKID_CLK81>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_A { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART0>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_B { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART1>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &uart_C { - compatible = "amlogic,meson8b-uart", "amlogic,meson-uart"; - clocks = <&clkc CLKID_CLK81>, <&xtal>, <&clkc CLKID_UART2>; - clock-names = "baud", "xtal", "pclk"; + compatible = "amlogic,meson8b-uart"; + clocks = <&xtal>, <&clkc CLKID_UART0>, <&clkc CLKID_CLK81>; + clock-names = "xtal", "pclk", "baud"; }; &usb0 { -- cgit From 2564c35df5b81a88efce965bbfdcf32c1a1bc834 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 7 Jan 2022 15:20:43 +0200 Subject: drm/i915: split out i915_getparam.h from i915_drv.h We already have the i915_getparam.c file. 
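The pattern these split-out headers follow is that they avoid including i915_drv.h entirely: function prototypes only need incomplete, forward-declared types for their pointer parameters. A generic sketch of the shape (all names are placeholders, not an actual i915 header):

#ifndef __EXAMPLE_SPLIT_H__
#define __EXAMPLE_SPLIT_H__

struct drm_device;	/* forward declarations are enough... */
struct drm_file;	/* ...for pointer-only parameters */

int example_ioctl(struct drm_device *dev, void *data,
		  struct drm_file *file_priv);

#endif /* __EXAMPLE_SPLIT_H__ */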
Acked-by: Tvrtko Ursulin Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/27f3af2298c3cdd3cb2839c2a9a52237248e087a.1641561552.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_driver.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 4 ---- drivers/gpu/drm/i915/i915_getparam.c | 1 + drivers/gpu/drm/i915/i915_getparam.h | 15 +++++++++++++++ drivers/gpu/drm/i915/i915_ioc32.c | 1 + 5 files changed, 18 insertions(+), 4 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_getparam.h diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index ca3b599f6c38..f7e674e229a1 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -74,6 +74,7 @@ #include "i915_debugfs.h" #include "i915_driver.h" #include "i915_drv.h" +#include "i915_getparam.h" #include "i915_ioc32.h" #include "i915_irq.h" #include "i915_memcpy.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index c7ce23da6ffa..f4a3ac37c9a3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1587,10 +1587,6 @@ intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) return IS_CHERRYVIEW(i915) || intel_ggtt_update_needs_vtd_wa(i915); } -/* i915_getparam.c */ -int i915_getparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv); - /* i915_gem.c */ int i915_gem_init_userptr(struct drm_i915_private *dev_priv); void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 77490cb5ff9c..051b2acc1b3e 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -6,6 +6,7 @@ #include "gt/intel_engine_user.h" #include "i915_drv.h" +#include "i915_getparam.h" #include "i915_perf.h" int i915_getparam_ioctl(struct drm_device *dev, void *data, diff --git a/drivers/gpu/drm/i915/i915_getparam.h b/drivers/gpu/drm/i915/i915_getparam.h new file mode 100644 index 000000000000..18e4752e8f70 --- /dev/null +++ b/drivers/gpu/drm/i915/i915_getparam.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_GETPARAM_H__ +#define __I915_GETPARAM_H__ + +struct drm_device; +struct drm_file; + +int i915_getparam_ioctl(struct drm_device *dev, void *data, + struct drm_file *file_priv); + +#endif /* __I915_GETPARAM_H__ */ diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c index 55b97c3a3dde..33348960f623 100644 --- a/drivers/gpu/drm/i915/i915_ioc32.c +++ b/drivers/gpu/drm/i915/i915_ioc32.c @@ -31,6 +31,7 @@ #include #include "i915_drv.h" +#include "i915_getparam.h" #include "i915_ioc32.h" struct drm_i915_getparam32 { -- cgit From 23d639d7b6df487d59ed23b0c9c04dfd3f909fc3 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 7 Jan 2022 15:20:44 +0200 Subject: drm/i915: split out i915_cmd_parser.h from i915_drv.h We already have the i915_cmd_parser.c file. 
Acked-by: Tvrtko Ursulin Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/1a02b8788266f4f2fd4de12808b55c4a66179e98.1641561552.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 1 + drivers/gpu/drm/i915/gt/intel_engine_cs.c | 2 +- drivers/gpu/drm/i915/i915_cmd_parser.c | 1 + drivers/gpu/drm/i915/i915_cmd_parser.h | 26 ++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 12 ------------ drivers/gpu/drm/i915/i915_getparam.c | 1 + 6 files changed, 30 insertions(+), 13 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_cmd_parser.h diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 4d7da07442f2..333bb30e4a32 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -23,6 +23,7 @@ #include "pxp/intel_pxp.h" +#include "i915_cmd_parser.h" #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index ff6753ccb129..0ad1f594f636 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -7,8 +7,8 @@ #include "gem/i915_gem_context.h" +#include "i915_cmd_parser.h" #include "i915_drv.h" - #include "intel_breadcrumbs.h" #include "intel_context.h" #include "intel_engine.h" diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index e0403ce9ce69..9c90740520a9 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -28,6 +28,7 @@ #include "gt/intel_engine.h" #include "gt/intel_gpu_commands.h" +#include "i915_cmd_parser.h" #include "i915_drv.h" #include "i915_memcpy.h" diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.h b/drivers/gpu/drm/i915/i915_cmd_parser.h new file mode 100644 index 000000000000..ba70ac6c97cd --- /dev/null +++ b/drivers/gpu/drm/i915/i915_cmd_parser.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_CMD_PARSER_H__ +#define __I915_CMD_PARSER_H__ + +#include + +struct drm_i915_private; +struct intel_engine_cs; +struct i915_vma; + +int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); +int intel_engine_init_cmd_parser(struct intel_engine_cs *engine); +void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); +int intel_engine_cmd_parser(struct intel_engine_cs *engine, + struct i915_vma *batch, + unsigned long batch_offset, + unsigned long batch_length, + struct i915_vma *shadow, + bool trampoline); +#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8 + +#endif /* __I915_CMD_PARSER_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f4a3ac37c9a3..cff38a82b659 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1735,18 +1735,6 @@ u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size, const char *i915_cache_level_str(struct drm_i915_private *i915, int type); -/* i915_cmd_parser.c */ -int i915_cmd_parser_get_version(struct drm_i915_private *dev_priv); -int intel_engine_init_cmd_parser(struct intel_engine_cs *engine); -void intel_engine_cleanup_cmd_parser(struct intel_engine_cs *engine); -int intel_engine_cmd_parser(struct intel_engine_cs *engine, - struct i915_vma *batch, - unsigned long batch_offset, - unsigned long batch_length, - struct i915_vma *shadow, - bool trampoline); 
-#define I915_CMD_PARSER_TRAMPOLINE_SIZE 8 - /* intel_device_info.c */ static inline struct intel_device_info * mkwrite_device_info(struct drm_i915_private *dev_priv) diff --git a/drivers/gpu/drm/i915/i915_getparam.c b/drivers/gpu/drm/i915/i915_getparam.c index 051b2acc1b3e..da6c041c17ad 100644 --- a/drivers/gpu/drm/i915/i915_getparam.c +++ b/drivers/gpu/drm/i915/i915_getparam.c @@ -5,6 +5,7 @@ #include "gem/i915_gem_mman.h" #include "gt/intel_engine_user.h" +#include "i915_cmd_parser.h" #include "i915_drv.h" #include "i915_getparam.h" #include "i915_perf.h" -- cgit From 2ef97818d3aae3c89a6cb1e6b8cd204156434aae Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 7 Jan 2022 15:20:45 +0200 Subject: drm/i915: split out i915_gem_evict.h from i915_drv.h We already have the i915_gem_evict.c file. v2: Fixed commit message (Tvrtko) Acked-by: Tvrtko Ursulin Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/ec666853171d04daeb21a93083940df36907c343.1641561552.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 1 + drivers/gpu/drm/i915/gt/selftest_hangcheck.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 11 ----------- drivers/gpu/drm/i915/i915_gem_evict.c | 1 + drivers/gpu/drm/i915/i915_gem_evict.h | 24 ++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_gem_gtt.c | 1 + drivers/gpu/drm/i915/i915_vma.c | 1 + 7 files changed, 29 insertions(+), 11 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_gem_evict.h diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 333bb30e4a32..1ff1b76d5206 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -27,6 +27,7 @@ #include "i915_drv.h" #include "i915_gem_clflush.h" #include "i915_gem_context.h" +#include "i915_gem_evict.h" #include "i915_gem_ioctls.h" #include "i915_trace.h" #include "i915_user_extensions.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c index 7e2d99dd012d..32f8b4f96cfa 100644 --- a/drivers/gpu/drm/i915/gt/selftest_hangcheck.c +++ b/drivers/gpu/drm/i915/gt/selftest_hangcheck.c @@ -7,6 +7,7 @@ #include "gem/i915_gem_context.h" +#include "i915_gem_evict.h" #include "intel_gt.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index cff38a82b659..faff3a17fcd7 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1703,17 +1703,6 @@ i915_gem_vm_lookup(struct drm_i915_file_private *file_priv, u32 id) return vm; } -/* i915_gem_evict.c */ -int __must_check i915_gem_evict_something(struct i915_address_space *vm, - u64 min_size, u64 alignment, - unsigned long color, - u64 start, u64 end, - unsigned flags); -int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, - struct drm_mm_node *node, - unsigned int flags); -int i915_gem_evict_vm(struct i915_address_space *vm); - /* i915_gem_internal.c */ struct drm_i915_gem_object * i915_gem_object_create_internal(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/i915_gem_evict.c b/drivers/gpu/drm/i915/i915_gem_evict.c index 2b73ddb11c66..24eee0c2055f 100644 --- a/drivers/gpu/drm/i915/i915_gem_evict.c +++ b/drivers/gpu/drm/i915/i915_gem_evict.c @@ -31,6 +31,7 @@ #include "gt/intel_gt_requests.h" #include "i915_drv.h" +#include "i915_gem_evict.h" #include "i915_trace.h" I915_SELFTEST_DECLARE(static struct igt_evict_ctl { 
diff --git a/drivers/gpu/drm/i915/i915_gem_evict.h b/drivers/gpu/drm/i915/i915_gem_evict.h new file mode 100644 index 000000000000..d4478b6ad11b --- /dev/null +++ b/drivers/gpu/drm/i915/i915_gem_evict.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_GEM_EVICT_H__ +#define __I915_GEM_EVICT_H__ + +#include + +struct drm_mm_node; +struct i915_address_space; + +int __must_check i915_gem_evict_something(struct i915_address_space *vm, + u64 min_size, u64 alignment, + unsigned long color, + u64 start, u64 end, + unsigned flags); +int __must_check i915_gem_evict_for_node(struct i915_address_space *vm, + struct drm_mm_node *node, + unsigned int flags); +int i915_gem_evict_vm(struct i915_address_space *vm); + +#endif /* __I915_GEM_EVICT_H__ */ diff --git a/drivers/gpu/drm/i915/i915_gem_gtt.c b/drivers/gpu/drm/i915/i915_gem_gtt.c index cd5f2348a187..8a7f0d92b56f 100644 --- a/drivers/gpu/drm/i915/i915_gem_gtt.c +++ b/drivers/gpu/drm/i915/i915_gem_gtt.c @@ -20,6 +20,7 @@ #include "gt/intel_gt_requests.h" #include "i915_drv.h" +#include "i915_gem_evict.h" #include "i915_scatterlist.h" #include "i915_trace.h" #include "i915_vgpu.h" diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index bef795e265a6..84ecaa59badd 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -34,6 +34,7 @@ #include "gt/intel_gt_requests.h" #include "i915_drv.h" +#include "i915_gem_evict.h" #include "i915_sw_fence_work.h" #include "i915_trace.h" #include "i915_vma.h" -- cgit From db583eea5a820ab4afce6420aae61be9be55d05c Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 7 Jan 2022 15:20:46 +0200 Subject: drm/i915: split out gem/i915_gem_userptr.h from i915_drv.h We already have the gem/i915_gem_userptr.c file. 
Acked-by: Tvrtko Ursulin Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/c29f66604ebd973b8eff1cce7d7c53615a26480f.1641561552.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_userptr.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_userptr.h | 14 ++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 2 -- drivers/gpu/drm/i915/i915_gem.c | 1 + 4 files changed, 16 insertions(+), 2 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_userptr.h diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c index 3173c9f9a040..2cb51e3dbb62 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_userptr.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.c @@ -42,6 +42,7 @@ #include "i915_drv.h" #include "i915_gem_ioctls.h" #include "i915_gem_object.h" +#include "i915_gem_userptr.h" #include "i915_scatterlist.h" #ifdef CONFIG_MMU_NOTIFIER diff --git a/drivers/gpu/drm/i915/gem/i915_gem_userptr.h b/drivers/gpu/drm/i915/gem/i915_gem_userptr.h new file mode 100644 index 000000000000..8dadb2f8436d --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_userptr.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2021 Intel Corporation + */ + +#ifndef __I915_GEM_USERPTR_H__ +#define __I915_GEM_USERPTR_H__ + +struct drm_i915_private; + +int i915_gem_init_userptr(struct drm_i915_private *dev_priv); +void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); + +#endif /* __I915_GEM_USERPTR_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index faff3a17fcd7..d34c6117b6f9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1588,8 +1588,6 @@ intel_vm_no_concurrent_access_wa(struct drm_i915_private *i915) } /* i915_gem.c */ -int i915_gem_init_userptr(struct drm_i915_private *dev_priv); -void i915_gem_cleanup_userptr(struct drm_i915_private *dev_priv); void i915_gem_init_early(struct drm_i915_private *dev_priv); void i915_gem_cleanup_early(struct drm_i915_private *dev_priv); diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c index 981e383d1a5d..5e8ed2419c83 100644 --- a/drivers/gpu/drm/i915/i915_gem.c +++ b/drivers/gpu/drm/i915/i915_gem.c @@ -45,6 +45,7 @@ #include "gem/i915_gem_ioctls.h" #include "gem/i915_gem_mman.h" #include "gem/i915_gem_region.h" +#include "gem/i915_gem_userptr.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" -- cgit From 386e75a41478d8d70889f0d1856e782d610353c0 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 7 Jan 2022 15:20:47 +0200 Subject: drm/i915: split out gem/i915_gem_tiling.h from i915_drv.h We already have the gem/i915_gem_tiling.c file. 
Acked-by: Tvrtko Ursulin Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/8073a429ed1f8ade9c0cc8a6ed1a0f82183100c5.1641561552.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/gem/i915_gem_tiling.c | 1 + drivers/gpu/drm/i915/gem/i915_gem_tiling.h | 18 ++++++++++++++++++ drivers/gpu/drm/i915/i915_drv.h | 5 ----- drivers/gpu/drm/i915/i915_vma.c | 2 +- 4 files changed, 20 insertions(+), 6 deletions(-) create mode 100644 drivers/gpu/drm/i915/gem/i915_gem_tiling.h diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c index ef4d0f7dc118..cf324329703f 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_tiling.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.c @@ -12,6 +12,7 @@ #include "i915_gem_ioctls.h" #include "i915_gem_mman.h" #include "i915_gem_object.h" +#include "i915_gem_tiling.h" /** * DOC: buffer object tiling diff --git a/drivers/gpu/drm/i915/gem/i915_gem_tiling.h b/drivers/gpu/drm/i915/gem/i915_gem_tiling.h new file mode 100644 index 000000000000..9924196a8139 --- /dev/null +++ b/drivers/gpu/drm/i915/gem/i915_gem_tiling.h @@ -0,0 +1,18 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_GEM_TILING_H__ +#define __I915_GEM_TILING_H__ + +#include + +struct drm_i915_private; + +u32 i915_gem_fence_size(struct drm_i915_private *i915, u32 size, + unsigned int tiling, unsigned int stride); +u32 i915_gem_fence_alignment(struct drm_i915_private *i915, u32 size, + unsigned int tiling, unsigned int stride); + +#endif /* __I915_GEM_TILING_H__ */ diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index d34c6117b6f9..b102457bfa51 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1715,11 +1715,6 @@ static inline bool i915_gem_object_needs_bit17_swizzle(struct drm_i915_gem_objec i915_gem_object_is_tiled(obj); } -u32 i915_gem_fence_size(struct drm_i915_private *dev_priv, u32 size, - unsigned int tiling, unsigned int stride); -u32 i915_gem_fence_alignment(struct drm_i915_private *dev_priv, u32 size, - unsigned int tiling, unsigned int stride); - const char *i915_cache_level_str(struct drm_i915_private *i915, int type); /* intel_device_info.c */ diff --git a/drivers/gpu/drm/i915/i915_vma.c b/drivers/gpu/drm/i915/i915_vma.c index 84ecaa59badd..c837888dd542 100644 --- a/drivers/gpu/drm/i915/i915_vma.c +++ b/drivers/gpu/drm/i915/i915_vma.c @@ -26,8 +26,8 @@ #include #include "display/intel_frontbuffer.h" - #include "gem/i915_gem_lmem.h" +#include "gem/i915_gem_tiling.h" #include "gt/intel_engine.h" #include "gt/intel_engine_heartbeat.h" #include "gt/intel_gt.h" -- cgit From 7e470f103d7579836a536c15862b70118379f7f4 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 10 Jan 2022 11:57:37 +0200 Subject: drm/i915: split out PCI config space registers from i915_reg.h The PCI config space registers don't really belong next to the MMIO register definitions. 
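One way to see why the two groups don't belong together: they are reached through completely different interfaces, MMIO offsets through the uncore helpers and PCI config offsets through the pci_*_config_*() accessors. A hedged sketch of the two access styles (the wrapper function and its arguments are assumptions for illustration):

#include <linux/pci.h>

static void example_reads(struct drm_i915_private *i915, struct pci_dev *pdev)
{
	u32 gdsr;
	u16 gcfgc;

	/* MMIO register, read through the uncore */
	gdsr = intel_uncore_read(&i915->uncore, ILK_GDSR);

	/* PCI configuration space register on the same device */
	pci_read_config_word(pdev, GCFGC, &gcfgc);
}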
v2: Fix copyright year (Matt) Cc: Matt Roper Reviewed-by: Matt Roper Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220110095740.166078-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_backlight.c | 1 + drivers/gpu/drm/i915/display/intel_cdclk.c | 1 + drivers/gpu/drm/i915/display/intel_opregion.c | 1 + drivers/gpu/drm/i915/display/intel_overlay.c | 1 + drivers/gpu/drm/i915/gt/intel_reset.c | 1 + drivers/gpu/drm/i915/i915_driver.c | 1 + drivers/gpu/drm/i915/i915_reg.h | 78 ----------------------- drivers/gpu/drm/i915/i915_suspend.c | 1 + drivers/gpu/drm/i915/intel_pci_config.h | 85 ++++++++++++++++++++++++++ 9 files changed, 92 insertions(+), 78 deletions(-) create mode 100644 drivers/gpu/drm/i915/intel_pci_config.h diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index 2db3b792aca6..98f7ea44042f 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -13,6 +13,7 @@ #include "intel_dp_aux_backlight.h" #include "intel_dsi_dcs_backlight.h" #include "intel_panel.h" +#include "intel_pci_config.h" /** * scale - scale values from one range to another diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 249f81a80eb7..1f13398e8ac2 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -31,6 +31,7 @@ #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_types.h" +#include "intel_pci_config.h" #include "intel_pcode.h" #include "intel_psr.h" #include "vlv_sideband.h" diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 985790a66a4d..af9d30f56cc1 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -35,6 +35,7 @@ #include "intel_backlight.h" #include "intel_display_types.h" #include "intel_opregion.h" +#include "intel_pci_config.h" #define OPREGION_HEADER_OFFSET 0 #define OPREGION_ACPI_OFFSET 0x100 diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 7e3f5c6ca484..23f30fdb3519 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -38,6 +38,7 @@ #include "intel_display_types.h" #include "intel_frontbuffer.h" #include "intel_overlay.h" +#include "intel_pci_config.h" /* Limits for overlay size. 
According to intel doc, the real limits are: * Y width: 4095, UV width (planar): 2047, Y height: 2047, diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index d8fe35f2281d..c5bfcbe56890 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -19,6 +19,7 @@ #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" +#include "intel_pci_config.h" #include "intel_reset.h" #include "uc/intel_guc.h" diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index f7e674e229a1..6a7aac069b18 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -87,6 +87,7 @@ #include "intel_dram.h" #include "intel_gvt.h" #include "intel_memory_region.h" +#include "intel_pci_config.h" #include "intel_pcode.h" #include "intel_pm.h" #include "intel_region_ttm.h" diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5b502c8f0cfb..da310fde3fb3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -275,84 +275,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define _MASKED_BIT_ENABLE(a) ({ typeof(a) _a = (a); _MASKED_FIELD(_a, _a); }) #define _MASKED_BIT_DISABLE(a) (_MASKED_FIELD((a), 0)) -/* PCI config space */ - -#define MCHBAR_I915 0x44 -#define MCHBAR_I965 0x48 -#define MCHBAR_SIZE (4 * 4096) - -#define DEVEN 0x54 -#define DEVEN_MCHBAR_EN (1 << 28) - -/* BSM in include/drm/i915_drm.h */ - -#define HPLLCC 0xc0 /* 85x only */ -#define GC_CLOCK_CONTROL_MASK (0x7 << 0) -#define GC_CLOCK_133_200 (0 << 0) -#define GC_CLOCK_100_200 (1 << 0) -#define GC_CLOCK_100_133 (2 << 0) -#define GC_CLOCK_133_266 (3 << 0) -#define GC_CLOCK_133_200_2 (4 << 0) -#define GC_CLOCK_133_266_2 (5 << 0) -#define GC_CLOCK_166_266 (6 << 0) -#define GC_CLOCK_166_250 (7 << 0) - -#define I915_GDRST 0xc0 /* PCI config register */ -#define GRDOM_FULL (0 << 2) -#define GRDOM_RENDER (1 << 2) -#define GRDOM_MEDIA (3 << 2) -#define GRDOM_MASK (3 << 2) -#define GRDOM_RESET_STATUS (1 << 1) -#define GRDOM_RESET_ENABLE (1 << 0) - -/* BSpec only has register offset, PCI device and bit found empirically */ -#define I830_CLOCK_GATE 0xc8 /* device 0 */ -#define I830_L2_CACHE_CLOCK_GATE_DISABLE (1 << 2) - -#define GCDGMBUS 0xcc - -#define GCFGC2 0xda -#define GCFGC 0xf0 /* 915+ only */ -#define GC_LOW_FREQUENCY_ENABLE (1 << 7) -#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) -#define GC_DISPLAY_CLOCK_333_320_MHZ (4 << 4) -#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4) -#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4) -#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4) -#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4) -#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4) -#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4) -#define GC_DISPLAY_CLOCK_MASK (7 << 4) -#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) -#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) -#define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0) -#define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0) -#define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0) -#define I965_GC_RENDER_CLOCK_MASK (0xf << 0) -#define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0) -#define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0) -#define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0) -#define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0) -#define I945_GC_RENDER_CLOCK_MASK (7 << 0) -#define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0) -#define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0) -#define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0) -#define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0) 
-#define I915_GC_RENDER_CLOCK_MASK (7 << 0) -#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 0) -#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) -#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) - -#define ASLE 0xe4 -#define ASLS 0xfc - -#define SWSCI 0xe8 -#define SWSCI_SCISEL (1 << 15) -#define SWSCI_GSSCIE (1 << 0) - -#define LBPC 0xf4 /* legacy/combination backlight modes, also called LBB */ - - #define ILK_GDSR _MMIO(MCHBAR_MIRROR_BASE + 0x2ca4) #define ILK_GRDOM_FULL (0 << 1) #define ILK_GRDOM_RENDER (1 << 1) diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c index f7b55f34dba8..889f5b7dc78e 100644 --- a/drivers/gpu/drm/i915/i915_suspend.c +++ b/drivers/gpu/drm/i915/i915_suspend.c @@ -32,6 +32,7 @@ #include "i915_drv.h" #include "i915_reg.h" #include "i915_suspend.h" +#include "intel_pci_config.h" static void intel_save_swf(struct drm_i915_private *dev_priv) { diff --git a/drivers/gpu/drm/i915/intel_pci_config.h b/drivers/gpu/drm/i915/intel_pci_config.h new file mode 100644 index 000000000000..12cd9d4f23de --- /dev/null +++ b/drivers/gpu/drm/i915/intel_pci_config.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_PCI_CONFIG_H__ +#define __INTEL_PCI_CONFIG_H__ + +/* BSM in include/drm/i915_drm.h */ + +#define MCHBAR_I915 0x44 +#define MCHBAR_I965 0x48 +#define MCHBAR_SIZE (4 * 4096) + +#define DEVEN 0x54 +#define DEVEN_MCHBAR_EN (1 << 28) + +#define HPLLCC 0xc0 /* 85x only */ +#define GC_CLOCK_CONTROL_MASK (0x7 << 0) +#define GC_CLOCK_133_200 (0 << 0) +#define GC_CLOCK_100_200 (1 << 0) +#define GC_CLOCK_100_133 (2 << 0) +#define GC_CLOCK_133_266 (3 << 0) +#define GC_CLOCK_133_200_2 (4 << 0) +#define GC_CLOCK_133_266_2 (5 << 0) +#define GC_CLOCK_166_266 (6 << 0) +#define GC_CLOCK_166_250 (7 << 0) + +#define I915_GDRST 0xc0 +#define GRDOM_FULL (0 << 2) +#define GRDOM_RENDER (1 << 2) +#define GRDOM_MEDIA (3 << 2) +#define GRDOM_MASK (3 << 2) +#define GRDOM_RESET_STATUS (1 << 1) +#define GRDOM_RESET_ENABLE (1 << 0) + +/* BSpec only has register offset, PCI device and bit found empirically */ +#define I830_CLOCK_GATE 0xc8 /* device 0 */ +#define I830_L2_CACHE_CLOCK_GATE_DISABLE (1 << 2) + +#define GCDGMBUS 0xcc + +#define GCFGC2 0xda +#define GCFGC 0xf0 /* 915+ only */ +#define GC_LOW_FREQUENCY_ENABLE (1 << 7) +#define GC_DISPLAY_CLOCK_190_200_MHZ (0 << 4) +#define GC_DISPLAY_CLOCK_333_320_MHZ (4 << 4) +#define GC_DISPLAY_CLOCK_267_MHZ_PNV (0 << 4) +#define GC_DISPLAY_CLOCK_333_MHZ_PNV (1 << 4) +#define GC_DISPLAY_CLOCK_444_MHZ_PNV (2 << 4) +#define GC_DISPLAY_CLOCK_200_MHZ_PNV (5 << 4) +#define GC_DISPLAY_CLOCK_133_MHZ_PNV (6 << 4) +#define GC_DISPLAY_CLOCK_167_MHZ_PNV (7 << 4) +#define GC_DISPLAY_CLOCK_MASK (7 << 4) +#define GM45_GC_RENDER_CLOCK_MASK (0xf << 0) +#define GM45_GC_RENDER_CLOCK_266_MHZ (8 << 0) +#define GM45_GC_RENDER_CLOCK_320_MHZ (9 << 0) +#define GM45_GC_RENDER_CLOCK_400_MHZ (0xb << 0) +#define GM45_GC_RENDER_CLOCK_533_MHZ (0xc << 0) +#define I965_GC_RENDER_CLOCK_MASK (0xf << 0) +#define I965_GC_RENDER_CLOCK_267_MHZ (2 << 0) +#define I965_GC_RENDER_CLOCK_333_MHZ (3 << 0) +#define I965_GC_RENDER_CLOCK_444_MHZ (4 << 0) +#define I965_GC_RENDER_CLOCK_533_MHZ (5 << 0) +#define I945_GC_RENDER_CLOCK_MASK (7 << 0) +#define I945_GC_RENDER_CLOCK_166_MHZ (0 << 0) +#define I945_GC_RENDER_CLOCK_200_MHZ (1 << 0) +#define I945_GC_RENDER_CLOCK_250_MHZ (3 << 0) +#define I945_GC_RENDER_CLOCK_400_MHZ (5 << 0) +#define I915_GC_RENDER_CLOCK_MASK (7 << 0) +#define I915_GC_RENDER_CLOCK_166_MHZ (0 << 
0) +#define I915_GC_RENDER_CLOCK_200_MHZ (1 << 0) +#define I915_GC_RENDER_CLOCK_333_MHZ (4 << 0) + +#define ASLE 0xe4 +#define ASLS 0xfc + +#define SWSCI 0xe8 +#define SWSCI_SCISEL (1 << 15) +#define SWSCI_GSSCIE (1 << 0) + +/* legacy/combination backlight modes, also called LBB */ +#define LBPC 0xf4 + +#endif /* __INTEL_PCI_CONFIG_H__ */ -- cgit From b4435717f53b776b770e7a025fd84688e53dcd5f Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Mon, 10 Jan 2022 11:57:38 +0200 Subject: drm/i915: split out vlv sideband registers from i915_reg.h Add a dedicated file vlv_sideband_reg.h for the VLV/CHV sideband registers. The sideband registers macros are needed by the same files that need vlv_sideband.h, so include the definitions from there. v2: Fix copyright year (Matt) Cc: Matt Roper Reviewed-by: Matt Roper Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220110095740.166078-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 171 ------------------------------ drivers/gpu/drm/i915/vlv_sideband.h | 2 + drivers/gpu/drm/i915/vlv_sideband_reg.h | 180 ++++++++++++++++++++++++++++++++ 3 files changed, 182 insertions(+), 171 deletions(-) create mode 100644 drivers/gpu/drm/i915/vlv_sideband_reg.h diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index da310fde3fb3..e17d982f67f3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1148,177 +1148,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define VLV_IOSF_DATA _MMIO(VLV_DISPLAY_BASE + 0x2104) #define VLV_IOSF_ADDR _MMIO(VLV_DISPLAY_BASE + 0x2108) -/* See configdb bunit SB addr map */ -#define BUNIT_REG_BISOC 0x11 - -/* PUNIT_REG_*SSPM0 */ -#define _SSPM0_SSC(val) ((val) << 0) -#define SSPM0_SSC_MASK _SSPM0_SSC(0x3) -#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0) -#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1) -#define SSPM0_SSC_RESET _SSPM0_SSC(0x2) -#define SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3) -#define _SSPM0_SSS(val) ((val) << 24) -#define SSPM0_SSS_MASK _SSPM0_SSS(0x3) -#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0) -#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1) -#define SSPM0_SSS_RESET _SSPM0_SSS(0x2) -#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3) - -/* PUNIT_REG_*SSPM1 */ -#define SSPM1_FREQSTAT_SHIFT 24 -#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT) -#define SSPM1_FREQGUAR_SHIFT 8 -#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT) -#define SSPM1_FREQ_SHIFT 0 -#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT) - -#define PUNIT_REG_VEDSSPM0 0x32 -#define PUNIT_REG_VEDSSPM1 0x33 - -#define PUNIT_REG_DSPSSPM 0x36 -#define DSPFREQSTAT_SHIFT_CHV 24 -#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV) -#define DSPFREQGUAR_SHIFT_CHV 8 -#define DSPFREQGUAR_MASK_CHV (0x1f << DSPFREQGUAR_SHIFT_CHV) -#define DSPFREQSTAT_SHIFT 30 -#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) -#define DSPFREQGUAR_SHIFT 14 -#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) -#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */ -#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */ -#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */ -#define _DP_SSC(val, pipe) ((val) << (2 * (pipe))) -#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe)) -#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe)) -#define DP_SSC_CLK_GATE(pipe) _DP_SSC(0x1, (pipe)) -#define DP_SSC_RESET(pipe) _DP_SSC(0x2, (pipe)) -#define DP_SSC_PWR_GATE(pipe) _DP_SSC(0x3, (pipe)) -#define _DP_SSS(val, pipe) ((val) << (2 * (pipe) + 16)) -#define DP_SSS_MASK(pipe) _DP_SSS(0x3, (pipe)) -#define 
DP_SSS_PWR_ON(pipe) _DP_SSS(0x0, (pipe)) -#define DP_SSS_CLK_GATE(pipe) _DP_SSS(0x1, (pipe)) -#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe)) -#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe)) - -#define PUNIT_REG_ISPSSPM0 0x39 -#define PUNIT_REG_ISPSSPM1 0x3a - -#define PUNIT_REG_PWRGT_CTRL 0x60 -#define PUNIT_REG_PWRGT_STATUS 0x61 -#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2)) -#define PUNIT_PWRGT_PWR_ON(pw_idx) (0 << ((pw_idx) * 2)) -#define PUNIT_PWRGT_CLK_GATE(pw_idx) (1 << ((pw_idx) * 2)) -#define PUNIT_PWRGT_RESET(pw_idx) (2 << ((pw_idx) * 2)) -#define PUNIT_PWRGT_PWR_GATE(pw_idx) (3 << ((pw_idx) * 2)) - -#define PUNIT_PWGT_IDX_RENDER 0 -#define PUNIT_PWGT_IDX_MEDIA 1 -#define PUNIT_PWGT_IDX_DISP2D 3 -#define PUNIT_PWGT_IDX_DPIO_CMN_BC 5 -#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01 6 -#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23 7 -#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01 8 -#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23 9 -#define PUNIT_PWGT_IDX_DPIO_RX0 10 -#define PUNIT_PWGT_IDX_DPIO_RX1 11 -#define PUNIT_PWGT_IDX_DPIO_CMN_D 12 - -#define PUNIT_REG_GPU_LFM 0xd3 -#define PUNIT_REG_GPU_FREQ_REQ 0xd4 -#define PUNIT_REG_GPU_FREQ_STS 0xd8 -#define GPLLENABLE (1 << 4) -#define GENFREQSTATUS (1 << 0) -#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc -#define PUNIT_REG_CZ_TIMESTAMP 0xce - -#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ -#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ - -#define FB_GFX_FMAX_AT_VMAX_FUSE 0x136 -#define FB_GFX_FREQ_FUSE_MASK 0xff -#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT 24 -#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT 16 -#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT 8 - -#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137 -#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8 - -#define PUNIT_REG_DDR_SETUP2 0x139 -#define FORCE_DDR_FREQ_REQ_ACK (1 << 8) -#define FORCE_DDR_LOW_FREQ (1 << 1) -#define FORCE_DDR_HIGH_FREQ (1 << 0) - -#define PUNIT_GPU_STATUS_REG 0xdb -#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 -#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff -#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8 -#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff - -#define PUNIT_GPU_DUTYCYCLE_REG 0xdf -#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8 -#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff - -#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c -#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3 -#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8 -#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11 -#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800 -#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34 -#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007 -#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30 -#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 -#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 - -#define VLV_TURBO_SOC_OVERRIDE 0x04 -#define VLV_OVERRIDE_EN 1 -#define VLV_SOC_TDP_EN (1 << 1) -#define VLV_BIAS_CPU_125_SOC_875 (6 << 2) -#define CHV_BIAS_CPU_50_SOC_50 (3 << 2) - -/* vlv2 north clock has */ -#define CCK_FUSE_REG 0x8 -#define CCK_FUSE_HPLL_FREQ_MASK 0x3 -#define CCK_REG_DSI_PLL_FUSE 0x44 -#define CCK_REG_DSI_PLL_CONTROL 0x48 -#define DSI_PLL_VCO_EN (1 << 31) -#define DSI_PLL_LDO_GATE (1 << 30) -#define DSI_PLL_P1_POST_DIV_SHIFT 17 -#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17) -#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13) -#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12) -#define DSI_PLL_MUX_MASK (3 << 9) -#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10) -#define DSI_PLL_MUX_DSI0_CCK (1 << 10) -#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9) -#define DSI_PLL_MUX_DSI1_CCK (1 << 9) -#define DSI_PLL_CLK_GATE_MASK (0xf << 5) -#define DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8) -#define 
DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7) -#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6) -#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5) -#define DSI_PLL_LOCK (1 << 0) -#define CCK_REG_DSI_PLL_DIVIDER 0x4c -#define DSI_PLL_LFSR (1 << 31) -#define DSI_PLL_FRACTION_EN (1 << 30) -#define DSI_PLL_FRAC_COUNTER_SHIFT 27 -#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27) -#define DSI_PLL_USYNC_CNT_SHIFT 18 -#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18) -#define DSI_PLL_N1_DIV_SHIFT 16 -#define DSI_PLL_N1_DIV_MASK (3 << 16) -#define DSI_PLL_M1_DIV_SHIFT 0 -#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) -#define CCK_CZ_CLOCK_CONTROL 0x62 -#define CCK_GPLL_CLOCK_CONTROL 0x67 -#define CCK_DISPLAY_CLOCK_CONTROL 0x6b -#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c -#define CCK_TRUNK_FORCE_ON (1 << 17) -#define CCK_TRUNK_FORCE_OFF (1 << 16) -#define CCK_FREQUENCY_STATUS (0x1f << 8) -#define CCK_FREQUENCY_STATUS_SHIFT 8 -#define CCK_FREQUENCY_VALUES (0x1f << 0) - /* DPIO registers */ #define DPIO_DEVFN 0 diff --git a/drivers/gpu/drm/i915/vlv_sideband.h b/drivers/gpu/drm/i915/vlv_sideband.h index d7732f612e7f..9ce283d96b80 100644 --- a/drivers/gpu/drm/i915/vlv_sideband.h +++ b/drivers/gpu/drm/i915/vlv_sideband.h @@ -9,6 +9,8 @@ #include #include +#include "vlv_sideband_reg.h" + enum pipe; struct drm_i915_private; diff --git a/drivers/gpu/drm/i915/vlv_sideband_reg.h b/drivers/gpu/drm/i915/vlv_sideband_reg.h new file mode 100644 index 000000000000..b7fbff3d0409 --- /dev/null +++ b/drivers/gpu/drm/i915/vlv_sideband_reg.h @@ -0,0 +1,180 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef _VLV_SIDEBAND_REG_H_ +#define _VLV_SIDEBAND_REG_H_ + +/* See configdb bunit SB addr map */ +#define BUNIT_REG_BISOC 0x11 + +/* PUNIT_REG_*SSPM0 */ +#define _SSPM0_SSC(val) ((val) << 0) +#define SSPM0_SSC_MASK _SSPM0_SSC(0x3) +#define SSPM0_SSC_PWR_ON _SSPM0_SSC(0x0) +#define SSPM0_SSC_CLK_GATE _SSPM0_SSC(0x1) +#define SSPM0_SSC_RESET _SSPM0_SSC(0x2) +#define SSPM0_SSC_PWR_GATE _SSPM0_SSC(0x3) +#define _SSPM0_SSS(val) ((val) << 24) +#define SSPM0_SSS_MASK _SSPM0_SSS(0x3) +#define SSPM0_SSS_PWR_ON _SSPM0_SSS(0x0) +#define SSPM0_SSS_CLK_GATE _SSPM0_SSS(0x1) +#define SSPM0_SSS_RESET _SSPM0_SSS(0x2) +#define SSPM0_SSS_PWR_GATE _SSPM0_SSS(0x3) + +/* PUNIT_REG_*SSPM1 */ +#define SSPM1_FREQSTAT_SHIFT 24 +#define SSPM1_FREQSTAT_MASK (0x1f << SSPM1_FREQSTAT_SHIFT) +#define SSPM1_FREQGUAR_SHIFT 8 +#define SSPM1_FREQGUAR_MASK (0x1f << SSPM1_FREQGUAR_SHIFT) +#define SSPM1_FREQ_SHIFT 0 +#define SSPM1_FREQ_MASK (0x1f << SSPM1_FREQ_SHIFT) + +#define PUNIT_REG_VEDSSPM0 0x32 +#define PUNIT_REG_VEDSSPM1 0x33 + +#define PUNIT_REG_DSPSSPM 0x36 +#define DSPFREQSTAT_SHIFT_CHV 24 +#define DSPFREQSTAT_MASK_CHV (0x1f << DSPFREQSTAT_SHIFT_CHV) +#define DSPFREQGUAR_SHIFT_CHV 8 +#define DSPFREQGUAR_MASK_CHV (0x1f << DSPFREQGUAR_SHIFT_CHV) +#define DSPFREQSTAT_SHIFT 30 +#define DSPFREQSTAT_MASK (0x3 << DSPFREQSTAT_SHIFT) +#define DSPFREQGUAR_SHIFT 14 +#define DSPFREQGUAR_MASK (0x3 << DSPFREQGUAR_SHIFT) +#define DSP_MAXFIFO_PM5_STATUS (1 << 22) /* chv */ +#define DSP_AUTO_CDCLK_GATE_DISABLE (1 << 7) /* chv */ +#define DSP_MAXFIFO_PM5_ENABLE (1 << 6) /* chv */ +#define _DP_SSC(val, pipe) ((val) << (2 * (pipe))) +#define DP_SSC_MASK(pipe) _DP_SSC(0x3, (pipe)) +#define DP_SSC_PWR_ON(pipe) _DP_SSC(0x0, (pipe)) +#define DP_SSC_CLK_GATE(pipe) _DP_SSC(0x1, (pipe)) +#define DP_SSC_RESET(pipe) _DP_SSC(0x2, (pipe)) +#define DP_SSC_PWR_GATE(pipe) _DP_SSC(0x3, (pipe)) +#define _DP_SSS(val, pipe) ((val) << (2 * (pipe) + 16)) +#define DP_SSS_MASK(pipe) 
_DP_SSS(0x3, (pipe)) +#define DP_SSS_PWR_ON(pipe) _DP_SSS(0x0, (pipe)) +#define DP_SSS_CLK_GATE(pipe) _DP_SSS(0x1, (pipe)) +#define DP_SSS_RESET(pipe) _DP_SSS(0x2, (pipe)) +#define DP_SSS_PWR_GATE(pipe) _DP_SSS(0x3, (pipe)) + +#define PUNIT_REG_ISPSSPM0 0x39 +#define PUNIT_REG_ISPSSPM1 0x3a + +#define PUNIT_REG_PWRGT_CTRL 0x60 +#define PUNIT_REG_PWRGT_STATUS 0x61 +#define PUNIT_PWRGT_MASK(pw_idx) (3 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_PWR_ON(pw_idx) (0 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_CLK_GATE(pw_idx) (1 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_RESET(pw_idx) (2 << ((pw_idx) * 2)) +#define PUNIT_PWRGT_PWR_GATE(pw_idx) (3 << ((pw_idx) * 2)) + +#define PUNIT_PWGT_IDX_RENDER 0 +#define PUNIT_PWGT_IDX_MEDIA 1 +#define PUNIT_PWGT_IDX_DISP2D 3 +#define PUNIT_PWGT_IDX_DPIO_CMN_BC 5 +#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_01 6 +#define PUNIT_PWGT_IDX_DPIO_TX_B_LANES_23 7 +#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_01 8 +#define PUNIT_PWGT_IDX_DPIO_TX_C_LANES_23 9 +#define PUNIT_PWGT_IDX_DPIO_RX0 10 +#define PUNIT_PWGT_IDX_DPIO_RX1 11 +#define PUNIT_PWGT_IDX_DPIO_CMN_D 12 + +#define PUNIT_REG_GPU_LFM 0xd3 +#define PUNIT_REG_GPU_FREQ_REQ 0xd4 +#define PUNIT_REG_GPU_FREQ_STS 0xd8 +#define GPLLENABLE (1 << 4) +#define GENFREQSTATUS (1 << 0) +#define PUNIT_REG_MEDIA_TURBO_FREQ_REQ 0xdc +#define PUNIT_REG_CZ_TIMESTAMP 0xce + +#define PUNIT_FUSE_BUS2 0xf6 /* bits 47:40 */ +#define PUNIT_FUSE_BUS1 0xf5 /* bits 55:48 */ + +#define FB_GFX_FMAX_AT_VMAX_FUSE 0x136 +#define FB_GFX_FREQ_FUSE_MASK 0xff +#define FB_GFX_FMAX_AT_VMAX_2SS4EU_FUSE_SHIFT 24 +#define FB_GFX_FMAX_AT_VMAX_2SS6EU_FUSE_SHIFT 16 +#define FB_GFX_FMAX_AT_VMAX_2SS8EU_FUSE_SHIFT 8 + +#define FB_GFX_FMIN_AT_VMIN_FUSE 0x137 +#define FB_GFX_FMIN_AT_VMIN_FUSE_SHIFT 8 + +#define PUNIT_REG_DDR_SETUP2 0x139 +#define FORCE_DDR_FREQ_REQ_ACK (1 << 8) +#define FORCE_DDR_LOW_FREQ (1 << 1) +#define FORCE_DDR_HIGH_FREQ (1 << 0) + +#define PUNIT_GPU_STATUS_REG 0xdb +#define PUNIT_GPU_STATUS_MAX_FREQ_SHIFT 16 +#define PUNIT_GPU_STATUS_MAX_FREQ_MASK 0xff +#define PUNIT_GPU_STATIS_GFX_MIN_FREQ_SHIFT 8 +#define PUNIT_GPU_STATUS_GFX_MIN_FREQ_MASK 0xff + +#define PUNIT_GPU_DUTYCYCLE_REG 0xdf +#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_SHIFT 8 +#define PUNIT_GPU_DUTYCYCLE_RPE_FREQ_MASK 0xff + +#define IOSF_NC_FB_GFX_FREQ_FUSE 0x1c +#define FB_GFX_MAX_FREQ_FUSE_SHIFT 3 +#define FB_GFX_MAX_FREQ_FUSE_MASK 0x000007f8 +#define FB_GFX_FGUARANTEED_FREQ_FUSE_SHIFT 11 +#define FB_GFX_FGUARANTEED_FREQ_FUSE_MASK 0x0007f800 +#define IOSF_NC_FB_GFX_FMAX_FUSE_HI 0x34 +#define FB_FMAX_VMIN_FREQ_HI_MASK 0x00000007 +#define IOSF_NC_FB_GFX_FMAX_FUSE_LO 0x30 +#define FB_FMAX_VMIN_FREQ_LO_SHIFT 27 +#define FB_FMAX_VMIN_FREQ_LO_MASK 0xf8000000 + +#define VLV_TURBO_SOC_OVERRIDE 0x04 +#define VLV_OVERRIDE_EN 1 +#define VLV_SOC_TDP_EN (1 << 1) +#define VLV_BIAS_CPU_125_SOC_875 (6 << 2) +#define CHV_BIAS_CPU_50_SOC_50 (3 << 2) + +/* vlv2 north clock has */ +#define CCK_FUSE_REG 0x8 +#define CCK_FUSE_HPLL_FREQ_MASK 0x3 +#define CCK_REG_DSI_PLL_FUSE 0x44 +#define CCK_REG_DSI_PLL_CONTROL 0x48 +#define DSI_PLL_VCO_EN (1 << 31) +#define DSI_PLL_LDO_GATE (1 << 30) +#define DSI_PLL_P1_POST_DIV_SHIFT 17 +#define DSI_PLL_P1_POST_DIV_MASK (0x1ff << 17) +#define DSI_PLL_P2_MUX_DSI0_DIV2 (1 << 13) +#define DSI_PLL_P3_MUX_DSI1_DIV2 (1 << 12) +#define DSI_PLL_MUX_MASK (3 << 9) +#define DSI_PLL_MUX_DSI0_DSIPLL (0 << 10) +#define DSI_PLL_MUX_DSI0_CCK (1 << 10) +#define DSI_PLL_MUX_DSI1_DSIPLL (0 << 9) +#define DSI_PLL_MUX_DSI1_CCK (1 << 9) +#define DSI_PLL_CLK_GATE_MASK (0xf << 5) +#define 
DSI_PLL_CLK_GATE_DSI0_DSIPLL (1 << 8) +#define DSI_PLL_CLK_GATE_DSI1_DSIPLL (1 << 7) +#define DSI_PLL_CLK_GATE_DSI0_CCK (1 << 6) +#define DSI_PLL_CLK_GATE_DSI1_CCK (1 << 5) +#define DSI_PLL_LOCK (1 << 0) +#define CCK_REG_DSI_PLL_DIVIDER 0x4c +#define DSI_PLL_LFSR (1 << 31) +#define DSI_PLL_FRACTION_EN (1 << 30) +#define DSI_PLL_FRAC_COUNTER_SHIFT 27 +#define DSI_PLL_FRAC_COUNTER_MASK (7 << 27) +#define DSI_PLL_USYNC_CNT_SHIFT 18 +#define DSI_PLL_USYNC_CNT_MASK (0x1ff << 18) +#define DSI_PLL_N1_DIV_SHIFT 16 +#define DSI_PLL_N1_DIV_MASK (3 << 16) +#define DSI_PLL_M1_DIV_SHIFT 0 +#define DSI_PLL_M1_DIV_MASK (0x1ff << 0) +#define CCK_CZ_CLOCK_CONTROL 0x62 +#define CCK_GPLL_CLOCK_CONTROL 0x67 +#define CCK_DISPLAY_CLOCK_CONTROL 0x6b +#define CCK_DISPLAY_REF_CLOCK_CONTROL 0x6c +#define CCK_TRUNK_FORCE_ON (1 << 17) +#define CCK_TRUNK_FORCE_OFF (1 << 16) +#define CCK_FREQUENCY_STATUS (0x1f << 8) +#define CCK_FREQUENCY_STATUS_SHIFT 8 +#define CCK_FREQUENCY_VALUES (0x1f << 0) + +#endif /* _VLV_SIDEBAND_REG_H_ */ -- cgit From e958b5884725dac86d36c1e7afe5a55f31feb0b2 Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Fri, 7 Jan 2022 15:47:06 -0600 Subject: ASoC: xilinx: xlnx_formatter_pcm: Make buffer bytes multiple of period bytes This patch is based on one in the Xilinx kernel tree, "ASoc: xlnx: Make buffer bytes multiple of period bytes" by Devarsh Thakkar. The same issue exists in the mainline version of the driver. The original patch description is as follows: "The Xilinx Audio Formatter IP has a constraint on period bytes to be multiple of 64. This leads to driver changing the period size to suitable frames such that period bytes are multiple of 64. Now since period bytes and period size are updated but not the buffer bytes, this may make the buffer bytes unaligned and not multiple of period bytes. When this happens we hear popping noise as while DMA is being done the buffer bytes are not enough to complete DMA access for last period of frame within the application buffer boundary. To avoid this, align buffer bytes too as multiple of 64, and set another constraint to always enforce number of periods as integer. Now since, there is already a rule in alsa core to enforce Buffer size = Number of Periods * Period Size this automatically aligns buffer bytes as multiple of period bytes." 
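To make the arithmetic concrete, here is a small stand-alone sketch; the
frame size, period size and period count are made-up numbers for
illustration only and are not taken from this patch:

  #include <stdio.h>

  int main(void)
  {
          unsigned int frame_bytes = 4;             /* e.g. 2ch S16_LE */
          unsigned int period_frames = 1000;        /* requested period */
          unsigned int periods = 4;

          unsigned int period_bytes = period_frames * frame_bytes; /* 4000 */
          unsigned int buffer_bytes = periods * period_bytes;      /* 16000 */

          /* the formatter IP needs period bytes in steps of 64 */
          unsigned int aligned_period = (period_bytes + 63) & ~63u; /* 4032 */

          /*
           * If buffer bytes are left untouched, the buffer is no longer a
           * multiple of the rounded-up period, so the DMA for the last
           * period would cross the application buffer boundary.
           */
          printf("bytes left for last period: %u (need %u)\n",
                 buffer_bytes % aligned_period, aligned_period); /* 3904 vs 4032 */
          return 0;
  }

Constraining buffer bytes to the same 64-byte step and forcing an integer
number of periods lets the ALSA core keep buffer bytes equal to periods
times period bytes, which is what the hunk below adds.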
Fixes: 6f6c3c36f091 ("ASoC: xlnx: add pcm formatter platform driver") Cc: Devarsh Thakkar Signed-off-by: Robert Hancock Link: https://lore.kernel.org/r/20220107214711.1100162-2-robert.hancock@calian.com Signed-off-by: Mark Brown --- sound/soc/xilinx/xlnx_formatter_pcm.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/sound/soc/xilinx/xlnx_formatter_pcm.c b/sound/soc/xilinx/xlnx_formatter_pcm.c index 91afea9d5de6..ce19a6058b27 100644 --- a/sound/soc/xilinx/xlnx_formatter_pcm.c +++ b/sound/soc/xilinx/xlnx_formatter_pcm.c @@ -37,6 +37,7 @@ #define XLNX_AUD_XFER_COUNT 0x28 #define XLNX_AUD_CH_STS_START 0x2C #define XLNX_BYTES_PER_CH 0x44 +#define XLNX_AUD_ALIGN_BYTES 64 #define AUD_STS_IOC_IRQ_MASK BIT(31) #define AUD_STS_CH_STS_MASK BIT(29) @@ -368,12 +369,32 @@ static int xlnx_formatter_pcm_open(struct snd_soc_component *component, snd_soc_set_runtime_hwparams(substream, &xlnx_pcm_hardware); runtime->private_data = stream_data; - /* Resize the period size divisible by 64 */ + /* Resize the period bytes as divisible by 64 */ err = snd_pcm_hw_constraint_step(runtime, 0, - SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 64); + SNDRV_PCM_HW_PARAM_PERIOD_BYTES, + XLNX_AUD_ALIGN_BYTES); if (err) { dev_err(component->dev, - "unable to set constraint on period bytes\n"); + "Unable to set constraint on period bytes\n"); + return err; + } + + /* Resize the buffer bytes as divisible by 64 */ + err = snd_pcm_hw_constraint_step(runtime, 0, + SNDRV_PCM_HW_PARAM_BUFFER_BYTES, + XLNX_AUD_ALIGN_BYTES); + if (err) { + dev_err(component->dev, + "Unable to set constraint on buffer bytes\n"); + return err; + } + + /* Set periods as integer multiple */ + err = snd_pcm_hw_constraint_integer(runtime, + SNDRV_PCM_HW_PARAM_PERIODS); + if (err < 0) { + dev_err(component->dev, + "Unable to set constraint on periods to be integer\n"); return err; } -- cgit From 583998c5e8cb3e7a151dca22303b68cbe65c64b5 Mon Sep 17 00:00:00 2001 From: Clint Taylor Date: Mon, 10 Jan 2022 15:45:20 -0800 Subject: drm/i915/snps: vswing value refined for SNPS phys MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Updated new values from BSPEC. 
Bspec: 53920 Cc: Jani Nikula Cc: José Roberto de Souza Cc: Imre Deak Signed-off-by: Clint Taylor Signed-off-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220110234520.6836-1-clinton.a.taylor@intel.com --- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 42 +++++++++++----------- 1 file changed, 21 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index 1e689d573512..09d6ab13536c 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -985,15 +985,15 @@ static const struct intel_ddi_buf_trans adlp_dkl_phy_trans_dp_hbr2_hbr3 = { }; static const union intel_ddi_buf_trans_entry _dg2_snps_trans[] = { - { .snps = { 26, 0, 0 } }, /* VS 0, pre-emph 0 */ - { .snps = { 33, 0, 6 } }, /* VS 0, pre-emph 1 */ - { .snps = { 38, 0, 12 } }, /* VS 0, pre-emph 2 */ - { .snps = { 43, 0, 19 } }, /* VS 0, pre-emph 3 */ - { .snps = { 39, 0, 0 } }, /* VS 1, pre-emph 0 */ - { .snps = { 44, 0, 8 } }, /* VS 1, pre-emph 1 */ - { .snps = { 47, 0, 15 } }, /* VS 1, pre-emph 2 */ - { .snps = { 52, 0, 0 } }, /* VS 2, pre-emph 0 */ - { .snps = { 51, 0, 10 } }, /* VS 2, pre-emph 1 */ + { .snps = { 25, 0, 0 } }, /* VS 0, pre-emph 0 */ + { .snps = { 32, 0, 6 } }, /* VS 0, pre-emph 1 */ + { .snps = { 35, 0, 10 } }, /* VS 0, pre-emph 2 */ + { .snps = { 43, 0, 17 } }, /* VS 0, pre-emph 3 */ + { .snps = { 35, 0, 0 } }, /* VS 1, pre-emph 0 */ + { .snps = { 45, 0, 8 } }, /* VS 1, pre-emph 1 */ + { .snps = { 48, 0, 14 } }, /* VS 1, pre-emph 2 */ + { .snps = { 47, 0, 0 } }, /* VS 2, pre-emph 0 */ + { .snps = { 55, 0, 7 } }, /* VS 2, pre-emph 1 */ { .snps = { 62, 0, 0 } }, /* VS 3, pre-emph 0 */ }; @@ -1005,21 +1005,21 @@ static const struct intel_ddi_buf_trans dg2_snps_trans = { static const union intel_ddi_buf_trans_entry _dg2_snps_trans_uhbr[] = { { .snps = { 62, 0, 0 } }, /* preset 0 */ - { .snps = { 56, 0, 6 } }, /* preset 1 */ - { .snps = { 51, 0, 11 } }, /* preset 2 */ - { .snps = { 48, 0, 14 } }, /* preset 3 */ - { .snps = { 43, 0, 19 } }, /* preset 4 */ + { .snps = { 55, 0, 7 } }, /* preset 1 */ + { .snps = { 50, 0, 12 } }, /* preset 2 */ + { .snps = { 44, 0, 18 } }, /* preset 3 */ + { .snps = { 35, 0, 21 } }, /* preset 4 */ { .snps = { 59, 3, 0 } }, /* preset 5 */ { .snps = { 53, 3, 6 } }, /* preset 6 */ - { .snps = { 49, 3, 10 } }, /* preset 7 */ - { .snps = { 45, 3, 14 } }, /* preset 8 */ - { .snps = { 42, 3, 17 } }, /* preset 9 */ + { .snps = { 48, 3, 11 } }, /* preset 7 */ + { .snps = { 42, 5, 15 } }, /* preset 8 */ + { .snps = { 37, 5, 20 } }, /* preset 9 */ { .snps = { 56, 6, 0 } }, /* preset 10 */ - { .snps = { 50, 6, 6 } }, /* preset 11 */ - { .snps = { 47, 6, 9 } }, /* preset 12 */ - { .snps = { 42, 6, 14 } }, /* preset 13 */ - { .snps = { 46, 8, 8 } }, /* preset 14 */ - { .snps = { 56, 3, 3 } }, /* preset 15 */ + { .snps = { 48, 7, 7 } }, /* preset 11 */ + { .snps = { 45, 7, 10 } }, /* preset 12 */ + { .snps = { 39, 8, 15 } }, /* preset 13 */ + { .snps = { 48, 14, 0 } }, /* preset 14 */ + { .snps = { 45, 4, 4 } }, /* preset 15 */ }; static const struct intel_ddi_buf_trans dg2_snps_trans_uhbr = { -- cgit From a64067f4cecaaa4deed8e33d3266bc0bcc189142 Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Fri, 7 Jan 2022 15:47:10 -0600 Subject: ASoC: simple-card: fix probe failure on platform component A previous change to simple-card resulted in asoc_simple_parse_dai attempting to retrieve the dai_name for platform components, 
which are unlikely to have a valid DAI name. This caused simple-card to fail to probe when using the xlnx_formatter_pcm as the platform component, since it does not register any DAI components. Since the dai_name is not used for platform components, just skip trying to retrieve it for those. Fixes: f107294c6422 ("ASoC: simple-card: support snd_soc_dai_link_component style for cpu") Signed-off-by: Robert Hancock Link: https://lore.kernel.org/r/20220107214711.1100162-6-robert.hancock@calian.com Signed-off-by: Mark Brown --- sound/soc/generic/simple-card.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c index a89d1cfdda32..78419e18717d 100644 --- a/sound/soc/generic/simple-card.c +++ b/sound/soc/generic/simple-card.c @@ -28,6 +28,30 @@ static const struct snd_soc_ops simple_ops = { .hw_params = asoc_simple_hw_params, }; +static int asoc_simple_parse_platform(struct device_node *node, + struct snd_soc_dai_link_component *dlc) +{ + struct of_phandle_args args; + int ret; + + if (!node) + return 0; + + /* + * Get node via "sound-dai = <&phandle port>" + * it will be used as xxx_of_node on soc_bind_dai_link() + */ + ret = of_parse_phandle_with_args(node, DAI, CELL, 0, &args); + if (ret) + return ret; + + /* dai_name is not required and may not exist for plat component */ + + dlc->of_node = args.np; + + return 0; +} + static int asoc_simple_parse_dai(struct device_node *node, struct snd_soc_dai_link_component *dlc, int *is_single_link) @@ -289,7 +313,7 @@ static int simple_dai_link_of(struct asoc_simple_priv *priv, if (ret < 0) goto dai_link_of_err; - ret = asoc_simple_parse_dai(plat, platforms, NULL); + ret = asoc_simple_parse_platform(plat, platforms); if (ret < 0) goto dai_link_of_err; -- cgit From 879cf8006475642b747aaaa4d06f7044ab2de794 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 11 Jan 2022 10:26:58 +0300 Subject: regulator: max20086: fix error code in max20086_parse_regulators_dt() This code accidentally returns PTR_ERR(NULL) which is success. It should return a negative error code. Fixes: bfff546aae50 ("regulator: Add MAX20086-MAX20089 driver") Signed-off-by: Dan Carpenter Reviewed-by: Laurent Pinchart Link: https://lore.kernel.org/r/20220111072657.GK11243@kili Signed-off-by: Mark Brown --- drivers/regulator/max20086-regulator.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c index fbc56b043071..63aa6ec3254a 100644 --- a/drivers/regulator/max20086-regulator.c +++ b/drivers/regulator/max20086-regulator.c @@ -140,7 +140,7 @@ static int max20086_parse_regulators_dt(struct max20086 *chip, bool *boot_on) node = of_get_child_by_name(chip->dev->of_node, "regulators"); if (!node) { dev_err(chip->dev, "regulators node not found\n"); - return PTR_ERR(node); + return -ENODEV; } for (i = 0; i < chip->info->num_outputs; ++i) -- cgit From e9f9bcd598e2b6f3cfa617f8e38f83a59738d19c Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:50 -0800 Subject: drm/i915: Use parameterized GPR register definitions everywhere Since we have an engine-parameterized macro GEN8_RING_CS_GPR, let's use that in place of the HSW_CS_GPR and BCS_GPR register definitions. 
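As a quick cross-check of the substitution, the old fixed offsets and the
parameterized form expand to the same addresses. The sketch below drops
_MMIO() and hard-codes the ring bases and the +0x600 expansion of
GEN8_RING_CS_GPR, so treat those values as assumptions for illustration;
the 0x2600 and 0x22600 offsets match the definitions removed in the hunk
below. It compiles stand-alone (e.g. gcc -std=c11 -c):

  /* compile-time check that the parameterized macro covers both old forms */
  #define RENDER_RING_BASE              0x02000   /* assumed */
  #define BLT_RING_BASE                 0x22000   /* assumed */

  #define HSW_CS_GPR(n)                 (0x2600 + (n) * 8)
  #define BCS_GPR(n)                    (0x22600 + (n) * 8)
  #define GEN8_RING_CS_GPR(base, n)     ((base) + 0x600 + (n) * 8)  /* assumed expansion */

  _Static_assert(GEN8_RING_CS_GPR(RENDER_RING_BASE, 15) == HSW_CS_GPR(15),
                 "render CS GPRs line up");
  _Static_assert(GEN8_RING_CS_GPR(BLT_RING_BASE, 15) == BCS_GPR(15),
                 "blitter GPRs line up");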
Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-2-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/i915_cmd_parser.c | 68 ++++++++++++++++++---------------- drivers/gpu/drm/i915/i915_reg.h | 8 ---- 2 files changed, 36 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index 9c90740520a9..a804373bcd17 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -592,6 +592,10 @@ struct drm_i915_reg_descriptor { { .addr = _reg(idx) }, \ { .addr = _reg ## _UDW(idx) } +#define REG64_BASE_IDX(_reg, base, idx) \ + { .addr = _reg(base, idx) }, \ + { .addr = _reg ## _UDW(base, idx) } + static const struct drm_i915_reg_descriptor gen7_render_regs[] = { REG64(GPGPU_THREADS_DISPATCHED), REG64(HS_INVOCATION_COUNT), @@ -637,22 +641,22 @@ static const struct drm_i915_reg_descriptor gen7_render_regs[] = { }; static const struct drm_i915_reg_descriptor hsw_render_regs[] = { - REG64_IDX(HSW_CS_GPR, 0), - REG64_IDX(HSW_CS_GPR, 1), - REG64_IDX(HSW_CS_GPR, 2), - REG64_IDX(HSW_CS_GPR, 3), - REG64_IDX(HSW_CS_GPR, 4), - REG64_IDX(HSW_CS_GPR, 5), - REG64_IDX(HSW_CS_GPR, 6), - REG64_IDX(HSW_CS_GPR, 7), - REG64_IDX(HSW_CS_GPR, 8), - REG64_IDX(HSW_CS_GPR, 9), - REG64_IDX(HSW_CS_GPR, 10), - REG64_IDX(HSW_CS_GPR, 11), - REG64_IDX(HSW_CS_GPR, 12), - REG64_IDX(HSW_CS_GPR, 13), - REG64_IDX(HSW_CS_GPR, 14), - REG64_IDX(HSW_CS_GPR, 15), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 0), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 1), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 2), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 3), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 4), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 5), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 6), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 7), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 8), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 9), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 10), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 11), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 12), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 13), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 14), + REG64_BASE_IDX(GEN8_RING_CS_GPR, RENDER_RING_BASE, 15), REG32(HSW_SCRATCH1, .mask = ~HSW_SCRATCH1_L3_DATA_ATOMICS_DISABLE, .value = 0), @@ -675,22 +679,22 @@ static const struct drm_i915_reg_descriptor gen9_blt_regs[] = { REG32(BCS_SWCTRL), REG64_IDX(RING_TIMESTAMP, BLT_RING_BASE), REG32_IDX(RING_CTX_TIMESTAMP, BLT_RING_BASE), - REG64_IDX(BCS_GPR, 0), - REG64_IDX(BCS_GPR, 1), - REG64_IDX(BCS_GPR, 2), - REG64_IDX(BCS_GPR, 3), - REG64_IDX(BCS_GPR, 4), - REG64_IDX(BCS_GPR, 5), - REG64_IDX(BCS_GPR, 6), - REG64_IDX(BCS_GPR, 7), - REG64_IDX(BCS_GPR, 8), - REG64_IDX(BCS_GPR, 9), - REG64_IDX(BCS_GPR, 10), - REG64_IDX(BCS_GPR, 11), - REG64_IDX(BCS_GPR, 12), - REG64_IDX(BCS_GPR, 13), - REG64_IDX(BCS_GPR, 14), - REG64_IDX(BCS_GPR, 15), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 0), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 1), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 2), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 3), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 4), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 5), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 6), + REG64_BASE_IDX(GEN8_RING_CS_GPR, 
BLT_RING_BASE, 7), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 8), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 9), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 10), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 11), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 12), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 13), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 14), + REG64_BASE_IDX(GEN8_RING_CS_GPR, BLT_RING_BASE, 15), }; #undef REG64 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index e17d982f67f3..ff9b9a1db8ac 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -509,10 +509,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define BCS_SRC_Y REG_BIT(0) #define BCS_DST_Y REG_BIT(1) -/* There are 16 GPR registers */ -#define BCS_GPR(n) _MMIO(0x22600 + (n) * 8) -#define BCS_GPR_UDW(n) _MMIO(0x22600 + (n) * 8 + 4) - #define GPGPU_THREADS_DISPATCHED _MMIO(0x2290) #define GPGPU_THREADS_DISPATCHED_UDW _MMIO(0x2290 + 4) #define HS_INVOCATION_COUNT _MMIO(0x2300) @@ -556,10 +552,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN7_GPGPU_DISPATCHDIMY _MMIO(0x2504) #define GEN7_GPGPU_DISPATCHDIMZ _MMIO(0x2508) -/* There are the 16 64-bit CS General Purpose Registers */ -#define HSW_CS_GPR(n) _MMIO(0x2600 + (n) * 8) -#define HSW_CS_GPR_UDW(n) _MMIO(0x2600 + (n) * 8 + 4) - #define GEN7_OACONTROL _MMIO(0x2360) #define GEN7_OACONTROL_CTX_MASK 0xFFFFF000 #define GEN7_OACONTROL_TIMER_PERIOD_MASK 0x3F -- cgit From cd5d2fdb045fb31a152cbb257e10da78fa4f06ac Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:51 -0800 Subject: drm/i915: Parameterize PWRCTX_MAXCNT Rather than having separate definitions for each engine, create a single parameterized macro that takes the engine base offset. This will also ensure we get to the proper offset if we ever need to use these registers on newer platforms (where the media engine offsets have changed). 
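The expansion can be checked at compile time; a minimal sketch, assuming
the usual ring MMIO bases (0x2000 render, 0x12000 VCS0, 0x22000 blitter,
0x1a000 VECS -- the bases are not taken from this patch, while the +0x54
offset and the old per-unit addresses are visible in the hunks below):

  #define RENDER_RING_BASE      0x02000   /* assumed */
  #define GEN6_BSD_RING_BASE    0x12000   /* assumed */
  #define BLT_RING_BASE         0x22000   /* assumed */
  #define VEBOX_RING_BASE       0x1a000   /* assumed */

  #define PWRCTX_MAXCNT(base)   ((base) + 0x54)

  _Static_assert(PWRCTX_MAXCNT(RENDER_RING_BASE)   == 0x2054,  "RCSUNIT");
  _Static_assert(PWRCTX_MAXCNT(GEN6_BSD_RING_BASE) == 0x12054, "VCSUNIT0");
  _Static_assert(PWRCTX_MAXCNT(BLT_RING_BASE)      == 0x22054, "BCSUNIT");
  _Static_assert(PWRCTX_MAXCNT(VEBOX_RING_BASE)    == 0x1a054, "VECSUNIT");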
Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-3-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/gt/intel_rc6.c | 8 ++++---- drivers/gpu/drm/i915/i915_reg.h | 6 +----- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 43093dd2d0c9..68ad99ac83e5 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -442,10 +442,10 @@ static bool bxt_check_bios_rc6_setup(struct intel_rc6 *rc6) enable_rc6 = false; } - if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT_RCSUNIT) & IDLE_TIME_MASK) > 1 && - (intel_uncore_read(uncore, PWRCTX_MAXCNT_VCSUNIT0) & IDLE_TIME_MASK) > 1 && - (intel_uncore_read(uncore, PWRCTX_MAXCNT_BCSUNIT) & IDLE_TIME_MASK) > 1 && - (intel_uncore_read(uncore, PWRCTX_MAXCNT_VECSUNIT) & IDLE_TIME_MASK) > 1)) { + if (!((intel_uncore_read(uncore, PWRCTX_MAXCNT(RENDER_RING_BASE)) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT(GEN6_BSD_RING_BASE)) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT(BLT_RING_BASE)) & IDLE_TIME_MASK) > 1 && + (intel_uncore_read(uncore, PWRCTX_MAXCNT(VEBOX_RING_BASE)) & IDLE_TIME_MASK) > 1)) { drm_dbg(&i915->drm, "Engine Idle wait time not set properly.\n"); enable_rc6 = false; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ff9b9a1db8ac..bb3f843052a9 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8954,11 +8954,7 @@ enum { #define RC6_CTX_IN_DRAM (1 << 0) #define RC6_CTX_BASE _MMIO(0xD48) #define RC6_CTX_BASE_MASK 0xFFFFFFF0 -#define PWRCTX_MAXCNT_RCSUNIT _MMIO(0x2054) -#define PWRCTX_MAXCNT_VCSUNIT0 _MMIO(0x12054) -#define PWRCTX_MAXCNT_BCSUNIT _MMIO(0x22054) -#define PWRCTX_MAXCNT_VECSUNIT _MMIO(0x1A054) -#define PWRCTX_MAXCNT_VCSUNIT1 _MMIO(0x1C054) +#define PWRCTX_MAXCNT(base) _MMIO((base) + 0x54) #define IDLE_TIME_MASK 0xFFFFF #define FORCEWAKE _MMIO(0xA18C) #define FORCEWAKE_VLV _MMIO(0x1300b0) -- cgit From e0d47fcff1f4df458b9c8824a5204adcbf624ae7 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:52 -0800 Subject: drm/i915: Parameterize ECOSKPD Combine the separate render and blitter register definitions into a single definition. We already know we have some workarounds on an upcoming platform that will need to update the ECOSKPD register for other engines too, so this helps pave the way for that. Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-4-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- drivers/gpu/drm/i915/gvt/handlers.c | 4 ++-- drivers/gpu/drm/i915/i915_reg.h | 14 ++++++-------- drivers/gpu/drm/i915/intel_pm.c | 6 ++++-- 4 files changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index e1f362530889..7d87282024f5 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -2126,7 +2126,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * they are already accustomed to from before contexts were * enabled. 
*/ - wa_add(wal, ECOSKPD, + wa_add(wal, ECOSKPD(RENDER_RING_BASE), 0, _MASKED_BIT_ENABLE(ECO_CONSTANT_BUFFER_SR_DISABLE), 0 /* XXX bit doesn't stick on Broadwater */, true); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 3938df0db188..329d30a36f4f 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -2877,9 +2877,9 @@ static int init_generic_mmio_info(struct intel_gvt *gvt) MMIO_D(_MMIO(0x3c), D_ALL); MMIO_D(_MMIO(0x860), D_ALL); - MMIO_D(ECOSKPD, D_ALL); + MMIO_D(ECOSKPD(RENDER_RING_BASE), D_ALL); MMIO_D(_MMIO(0x121d0), D_ALL); - MMIO_D(GEN6_BLITTER_ECOSKPD, D_ALL); + MMIO_D(ECOSKPD(BLT_RING_BASE), D_ALL); MMIO_D(_MMIO(0x41d0), D_ALL); MMIO_D(GAC_ECO_BITS, D_ALL); MMIO_D(_MMIO(0x6200), D_ALL); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index bb3f843052a9..8bdda89bca36 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2826,10 +2826,12 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */ #define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008) #define GFX_FLSH_CNTL_EN (1 << 0) -#define ECOSKPD _MMIO(0x21d0) -#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4) -#define ECO_GATING_CX_ONLY (1 << 3) -#define ECO_FLIP_DONE (1 << 0) +#define ECOSKPD(base) _MMIO((base) + 0x1d0) +#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4) +#define ECO_GATING_CX_ONLY REG_BIT(3) +#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3) +#define ECO_FLIP_DONE REG_BIT(0) +#define GEN6_BLITTER_LOCK_SHIFT 16 #define CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */ #define RC_OP_FLUSH_ENABLE (1 << 0) @@ -2839,10 +2841,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6) #define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1) -#define GEN6_BLITTER_ECOSKPD _MMIO(0x221d0) -#define GEN6_BLITTER_LOCK_SHIFT 16 -#define GEN6_BLITTER_FBC_NOTIFY (1 << 3) - #define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050) #define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0) #define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 161d064e0768..a0aefebe611f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7845,10 +7845,12 @@ static void gen3_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, D_STATE, dstate); if (IS_PINEVIEW(dev_priv)) - intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); + intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE), + _MASKED_BIT_ENABLE(ECO_GATING_CX_ONLY)); /* IIR "flip pending" means done if this bit is set */ - intel_uncore_write(&dev_priv->uncore, ECOSKPD, _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); + intel_uncore_write(&dev_priv->uncore, ECOSKPD(RENDER_RING_BASE), + _MASKED_BIT_DISABLE(ECO_FLIP_DONE)); /* interrupts should cause a wake up from C3 */ intel_uncore_write(&dev_priv->uncore, INSTPM, _MASKED_BIT_ENABLE(INSTPM_AGPBUSY_INT_EN)); -- cgit From 3e5cbecb9aa88f00016b61200d4126f727fc71e6 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:53 -0800 Subject: drm/i915: Use RING_PSMI_CTL rather than per-engine macros We have a parameterized macro for RING_PSMI_CTL; let's use that instead of the per-engine definitions where possible. 
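One thing worth keeping in mind when reading the hunks below: PSMI control
is a masked register, so writes go through _MASKED_BIT_ENABLE() and
_MASKED_BIT_DISABLE(), which place the affected bits in the upper 16 bits
of the written value as a write mask. The stand-alone sketch below uses
simplified stand-ins for those helpers (the shift-by-16 encoding is
assumed from the usual _MASKED_FIELD semantics, not quoted from the
header):

  #include <assert.h>

  /* simplified stand-ins; the real helpers add build-time checks */
  #define MASKED_FIELD(mask, value)   (((mask) << 16) | (value))
  #define MASKED_BIT_ENABLE(a)        MASKED_FIELD((a), (a))
  #define MASKED_BIT_DISABLE(a)       MASKED_FIELD((a), 0)

  #define GEN6_PSMI_SLEEP_MSG_DISABLE (1u << 0)   /* REG_BIT(0) */

  int main(void)
  {
          /* the upper half tells the hardware which bits the write may touch */
          assert(MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)  == 0x00010001u);
          assert(MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE) == 0x00010000u);
          return 0;
  }

Because the mask travels with the value, a bit can be set or cleared
without a read-modify-write, regardless of which engine base the register
is instantiated at.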
Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-5-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/gt/intel_ring_submission.c | 10 +++++----- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 19 +++++++------------ drivers/gpu/drm/i915/intel_pm.c | 4 ++-- 4 files changed, 15 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 586dca1731ce..5408bc18a58e 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -1002,15 +1002,15 @@ static void gen6_bsd_submit_request(struct i915_request *request) /* Disable notification that the ring is IDLE. The GT * will then assume that it is busy and bring it out of rc6. */ - intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_ENABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE), + _MASKED_BIT_ENABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); /* Clear the context id. Here be magic! */ intel_uncore_write64_fw(uncore, GEN6_BSD_RNCID, 0x0); /* Wait for the ring not to be idle, i.e. for it to wake up. */ if (__intel_wait_for_register_fw(uncore, - GEN6_BSD_SLEEP_PSMI_CONTROL, + RING_PSMI_CTL(GEN6_BSD_RING_BASE), GEN6_BSD_SLEEP_INDICATOR, 0, 1000, 0, NULL)) @@ -1023,8 +1023,8 @@ static void gen6_bsd_submit_request(struct i915_request *request) /* Let the ring send IDLE messages to the GT again, * and so let it sleep to conserve power when idle. */ - intel_uncore_write_fw(uncore, GEN6_BSD_SLEEP_PSMI_CONTROL, - _MASKED_BIT_DISABLE(GEN6_BSD_SLEEP_MSG_DISABLE)); + intel_uncore_write_fw(uncore, RING_PSMI_CTL(GEN6_BSD_RING_BASE), + _MASKED_BIT_DISABLE(GEN6_PSMI_SLEEP_MSG_DISABLE)); intel_uncore_forcewake_put(uncore, FORCEWAKE_ALL); } diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 7d87282024f5..23cd4fd568c5 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -1789,7 +1789,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) * For DG1 this only applies to A0. 
*/ wa_masked_en(wal, - GEN6_RC_SLEEP_PSMI_CONTROL, + RING_PSMI_CTL(RENDER_RING_BASE), GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE | GEN8_RC_SEMA_IDLE_MSG_DISABLE); } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8bdda89bca36..93e0c9bf2880 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2296,6 +2296,13 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) #define GEN6_NOSYNC INVALID_MMIO_REG #define RING_PSMI_CTL(base) _MMIO((base) + 0x50) +#define GEN8_RC_SEMA_IDLE_MSG_DISABLE REG_BIT(12) +#define GEN8_FF_DOP_CLOCK_GATE_DISABLE REG_BIT(10) +#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7) +#define GEN6_BSD_GO_INDICATOR REG_BIT(4) +#define GEN6_BSD_SLEEP_INDICATOR REG_BIT(3) +#define GEN6_BSD_SLEEP_FLUSH_DISABLE REG_BIT(2) +#define GEN6_PSMI_SLEEP_MSG_DISABLE REG_BIT(0) #define RING_MAX_IDLE(base) _MMIO((base) + 0x54) #define RING_HWS_PGA(base) _MMIO((base) + 0x80) #define RING_ID(base) _MMIO((base) + 0x8c) @@ -2841,12 +2848,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define GEN8_4x4_STC_OPTIMIZATION_DISABLE (1 << 6) #define GEN9_PARTIAL_RESOLVE_IN_VC_DISABLE (1 << 1) -#define GEN6_RC_SLEEP_PSMI_CONTROL _MMIO(0x2050) -#define GEN6_PSMI_SLEEP_MSG_DISABLE (1 << 0) -#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7) -#define GEN8_RC_SEMA_IDLE_MSG_DISABLE (1 << 12) -#define GEN8_FF_DOP_CLOCK_GATE_DISABLE (1 << 10) - #define GEN6_RCS_PWR_FSM _MMIO(0x22ac) #define GEN9_RCS_FE_FSM2 _MMIO(0x22a4) @@ -2931,12 +2932,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define XEHP_EU_ENABLE _MMIO(0x9134) #define XEHP_EU_ENA_MASK 0xFF -#define GEN6_BSD_SLEEP_PSMI_CONTROL _MMIO(0x12050) -#define GEN6_BSD_SLEEP_MSG_DISABLE (1 << 0) -#define GEN6_BSD_SLEEP_FLUSH_DISABLE (1 << 2) -#define GEN6_BSD_SLEEP_INDICATOR (1 << 3) -#define GEN6_BSD_GO_INDICATOR (1 << 4) - /* On modern GEN architectures interrupt control consists of two sets * of registers. The first set pertains to the ring generating the * interrupt. The second control is for the functional block generating the diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a0aefebe611f..fd622f959a2a 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7631,7 +7631,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *dev_priv) intel_uncore_read(&dev_priv->uncore, GEN7_FF_THREAD_MODE) & ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); - intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL, + intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE), _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); /* WaDisableSDEUnitClockGating:bdw */ @@ -7772,7 +7772,7 @@ static void chv_init_clock_gating(struct drm_i915_private *dev_priv) ~(GEN8_FF_DS_REF_CNT_FFME | GEN7_FF_VS_REF_CNT_FFME)); /* WaDisableSemaphoreAndSyncFlipWait:chv */ - intel_uncore_write(&dev_priv->uncore, GEN6_RC_SLEEP_PSMI_CONTROL, + intel_uncore_write(&dev_priv->uncore, RING_PSMI_CTL(RENDER_RING_BASE), _MASKED_BIT_ENABLE(GEN8_RC_SEMA_IDLE_MSG_DISABLE)); /* WaDisableCSUnitClockGating:chv */ -- cgit From ab076d8d79e1e5eb3960e0a489f7a11d729c03bd Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:54 -0800 Subject: drm/i915: Replace GFX_MODE_GEN7 with RING_MODE_GEN7 It's preferable to use parameterized register macros where possible. 
Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-6-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/gt/intel_workarounds.c | 2 +- drivers/gpu/drm/i915/gvt/mmio_context.c | 4 ++-- drivers/gpu/drm/i915/i915_reg.h | 1 - 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index 23cd4fd568c5..f5ccc21761c3 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -2013,7 +2013,7 @@ rcs_engine_wa_init(struct intel_engine_cs *engine, struct i915_wa_list *wal) if (GRAPHICS_VER(i915) == 7) { /* WaBCSVCSTlbInvalidationMode:ivb,vlv,hsw */ wa_masked_en(wal, - GFX_MODE_GEN7, + RING_MODE_GEN7(RENDER_RING_BASE), GFX_TLB_INVALIDATE_EXPLICIT | GFX_REPLAY_MODE); /* WaDisable_RenderCache_OperationalFlush:ivb,vlv,hsw */ diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index f776c470914d..abc81cdc9e5d 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -44,7 +44,7 @@ /* Raw offset is appened to each line for convenience. */ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { - {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ @@ -76,7 +76,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = { }; static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = { - {RCS0, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */ + {RCS0, RING_MODE_GEN7(RENDER_RING_BASE), 0xffff, false}, /* 0x229c */ {RCS0, GEN9_CTX_PREEMPT_REG, 0x0, false}, /* 0x2248 */ {RCS0, HWSTAM, 0x0, false}, /* 0x2098 */ {RCS0, INSTPM, 0xffff, true}, /* 0x20c0 */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 93e0c9bf2880..d0483b9da632 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2637,7 +2637,6 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0) #define GFX_MODE _MMIO(0x2520) -#define GFX_MODE_GEN7 _MMIO(0x229c) #define RING_MODE_GEN7(base) _MMIO((base) + 0x29c) #define GFX_RUN_LIST_ENABLE (1 << 15) #define GFX_INTERRUPT_STEERING (1 << 14) -- cgit From 2b25a93bf07c6b68dd9e2ee427e228cb961f7961 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:55 -0800 Subject: drm/i915: Introduce i915_reg_defs.h We'd like to start splitting i915_reg.h into various domain-specific register files and cleaning them up. Let's move the basic macros and type definitions to their own header file that can be including in each of the new split headers. 
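For readers who do not have these helpers memorized, a short usage sketch
(the register offset, field layout and values are invented for
illustration, and the snippet assumes i915_reg_defs.h and kernel types are
available):

  /* a hypothetical register with an enable bit and a 3-bit mode field */
  #define EXAMPLE_REG           _MMIO(0x1234)
  #define   EXAMPLE_ENABLE      REG_BIT(0)
  #define   EXAMPLE_MODE_MASK   REG_GENMASK(6, 4)
  #define   EXAMPLE_MODE(x)     REG_FIELD_PREP(EXAMPLE_MODE_MASK, (x))

  static inline u32 example_pack(void)
  {
          return EXAMPLE_ENABLE | EXAMPLE_MODE(5);        /* 0x00000051 */
  }

  static inline u32 example_unpack(u32 val)
  {
          return REG_FIELD_GET(EXAMPLE_MODE_MASK, val);   /* 5 for the value above */
  }

The compile-time checks built into REG_BIT(), REG_GENMASK() and
REG_FIELD_PREP() catch out-of-range bits and oversized field values, which
is part of what the split headers get by including this one file.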
Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-7-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/i915_reg.h | 88 +------------------------------- drivers/gpu/drm/i915/i915_reg_defs.h | 97 ++++++++++++++++++++++++++++++++++++ 2 files changed, 98 insertions(+), 87 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_reg_defs.h diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d0483b9da632..ca815da75380 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -25,8 +25,7 @@ #ifndef _I915_REG_H_ #define _I915_REG_H_ -#include -#include +#include "i915_reg_defs.h" /** * DOC: The i915 register macro definition style guide @@ -116,91 +115,6 @@ * #define GEN8_BAR _MMIO(0xb888) */ -/** - * REG_BIT() - Prepare a u32 bit value - * @__n: 0-based bit number - * - * Local wrapper for BIT() to force u32, with compile time checks. - * - * @return: Value with bit @__n set. - */ -#define REG_BIT(__n) \ - ((u32)(BIT(__n) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ - ((__n) < 0 || (__n) > 31)))) - -/** - * REG_GENMASK() - Prepare a continuous u32 bitmask - * @__high: 0-based high bit - * @__low: 0-based low bit - * - * Local wrapper for GENMASK() to force u32, with compile time checks. - * - * @return: Continuous bitmask from @__high to @__low, inclusive. - */ -#define REG_GENMASK(__high, __low) \ - ((u32)(GENMASK(__high, __low) + \ - BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ - __is_constexpr(__low) && \ - ((__low) < 0 || (__high) > 31 || (__low) > (__high))))) - -/* - * Local integer constant expression version of is_power_of_2(). - */ -#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0)) - -/** - * REG_FIELD_PREP() - Prepare a u32 bitfield value - * @__mask: shifted mask defining the field's length and position - * @__val: value to put in the field - * - * Local copy of FIELD_PREP() to generate an integer constant expression, force - * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK(). - * - * @return: @__val masked and shifted into the field defined by @__mask. - */ -#define REG_FIELD_PREP(__mask, __val) \ - ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \ - BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \ - BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \ - BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \ - BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0)))) - -/** - * REG_FIELD_GET() - Extract a u32 bitfield value - * @__mask: shifted mask defining the field's length and position - * @__val: value to extract the bitfield value from - * - * Local wrapper for FIELD_GET() to force u32 and for consistency with - * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK(). - * - * @return: Masked and shifted value of the field defined by @__mask in @__val. 
- */ -#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val)) - -typedef struct { - u32 reg; -} i915_reg_t; - -#define _MMIO(r) ((const i915_reg_t){ .reg = (r) }) - -#define INVALID_MMIO_REG _MMIO(0) - -static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg) -{ - return reg.reg; -} - -static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b) -{ - return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b); -} - -static inline bool i915_mmio_reg_valid(i915_reg_t reg) -{ - return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); -} - #define VLV_DISPLAY_BASE 0x180000 #define VLV_MIPI_BASE VLV_DISPLAY_BASE #define BXT_MIPI_BASE 0x60000 diff --git a/drivers/gpu/drm/i915/i915_reg_defs.h b/drivers/gpu/drm/i915/i915_reg_defs.h new file mode 100644 index 000000000000..6ee51d4a233a --- /dev/null +++ b/drivers/gpu/drm/i915/i915_reg_defs.h @@ -0,0 +1,97 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_REG_DEFS__ +#define __I915_REG_DEFS__ + +#include +#include + +/** + * REG_BIT() - Prepare a u32 bit value + * @__n: 0-based bit number + * + * Local wrapper for BIT() to force u32, with compile time checks. + * + * @return: Value with bit @__n set. + */ +#define REG_BIT(__n) \ + ((u32)(BIT(__n) + \ + BUILD_BUG_ON_ZERO(__is_constexpr(__n) && \ + ((__n) < 0 || (__n) > 31)))) + +/** + * REG_GENMASK() - Prepare a continuous u32 bitmask + * @__high: 0-based high bit + * @__low: 0-based low bit + * + * Local wrapper for GENMASK() to force u32, with compile time checks. + * + * @return: Continuous bitmask from @__high to @__low, inclusive. + */ +#define REG_GENMASK(__high, __low) \ + ((u32)(GENMASK(__high, __low) + \ + BUILD_BUG_ON_ZERO(__is_constexpr(__high) && \ + __is_constexpr(__low) && \ + ((__low) < 0 || (__high) > 31 || (__low) > (__high))))) + +/* + * Local integer constant expression version of is_power_of_2(). + */ +#define IS_POWER_OF_2(__x) ((__x) && (((__x) & ((__x) - 1)) == 0)) + +/** + * REG_FIELD_PREP() - Prepare a u32 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to put in the field + * + * Local copy of FIELD_PREP() to generate an integer constant expression, force + * u32 and for consistency with REG_FIELD_GET(), REG_BIT() and REG_GENMASK(). + * + * @return: @__val masked and shifted into the field defined by @__mask. + */ +#define REG_FIELD_PREP(__mask, __val) \ + ((u32)((((typeof(__mask))(__val) << __bf_shf(__mask)) & (__mask)) + \ + BUILD_BUG_ON_ZERO(!__is_constexpr(__mask)) + \ + BUILD_BUG_ON_ZERO((__mask) == 0 || (__mask) > U32_MAX) + \ + BUILD_BUG_ON_ZERO(!IS_POWER_OF_2((__mask) + (1ULL << __bf_shf(__mask)))) + \ + BUILD_BUG_ON_ZERO(__builtin_choose_expr(__is_constexpr(__val), (~((__mask) >> __bf_shf(__mask)) & (__val)), 0)))) + +/** + * REG_FIELD_GET() - Extract a u32 bitfield value + * @__mask: shifted mask defining the field's length and position + * @__val: value to extract the bitfield value from + * + * Local wrapper for FIELD_GET() to force u32 and for consistency with + * REG_FIELD_PREP(), REG_BIT() and REG_GENMASK(). + * + * @return: Masked and shifted value of the field defined by @__mask in @__val. 
+ */ +#define REG_FIELD_GET(__mask, __val) ((u32)FIELD_GET(__mask, __val)) + +typedef struct { + u32 reg; +} i915_reg_t; + +#define _MMIO(r) ((const i915_reg_t){ .reg = (r) }) + +#define INVALID_MMIO_REG _MMIO(0) + +static __always_inline u32 i915_mmio_reg_offset(i915_reg_t reg) +{ + return reg.reg; +} + +static inline bool i915_mmio_reg_equal(i915_reg_t a, i915_reg_t b) +{ + return i915_mmio_reg_offset(a) == i915_mmio_reg_offset(b); +} + +static inline bool i915_mmio_reg_valid(i915_reg_t reg) +{ + return !i915_mmio_reg_equal(reg, INVALID_MMIO_REG); +} + +#endif /* __I915_REG_DEFS__ */ -- cgit From 202b1f4c1234b34c15e51acc9c43e613f509f587 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:56 -0800 Subject: drm/i915/gt: Move engine registers to their own header Let's continue breaking up and cleaning up the massive i915_reg.h file by moving all registers that are defined in relation to an engine base to their own header. There are probably a bunch of other "engine registers" that we haven't moved yet (especially those that belong to the render engine in the 0x2??? range), but this is a relatively straightforward first step. Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-8-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/gt/gen2_engine_cs.c | 1 + drivers/gpu/drm/i915/gt/gen6_engine_cs.c | 1 + drivers/gpu/drm/i915/gt/gen6_ppgtt.c | 1 + drivers/gpu/drm/i915/gt/intel_engine_cs.c | 1 + drivers/gpu/drm/i915/gt/intel_engine_regs.h | 197 +++++++++++++++++++++ .../gpu/drm/i915/gt/intel_execlists_submission.c | 1 + drivers/gpu/drm/i915/gt/intel_gt.c | 1 + drivers/gpu/drm/i915/gt/intel_lrc.c | 1 + drivers/gpu/drm/i915/gt/intel_lrc_reg.h | 15 -- drivers/gpu/drm/i915/gt/intel_rc6.c | 1 + drivers/gpu/drm/i915/gt/intel_reset.c | 1 + drivers/gpu/drm/i915/gt/intel_ring.c | 1 + drivers/gpu/drm/i915/gt/intel_ring_submission.c | 1 + drivers/gpu/drm/i915/gt/intel_workarounds.c | 1 + drivers/gpu/drm/i915/gt/selftest_engine_pm.c | 1 + drivers/gpu/drm/i915/gt/selftest_gt_pm.c | 1 + drivers/gpu/drm/i915/gt/selftest_rps.c | 1 + drivers/gpu/drm/i915/gt/selftest_timeline.c | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c | 1 + drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 3 +- drivers/gpu/drm/i915/gvt/cmd_parser.c | 1 + drivers/gpu/drm/i915/gvt/mmio_context.c | 1 + drivers/gpu/drm/i915/gvt/mmio_context.h | 1 + drivers/gpu/drm/i915/i915_cmd_parser.c | 1 + drivers/gpu/drm/i915/i915_gpu_error.c | 1 + drivers/gpu/drm/i915/i915_perf.c | 1 + drivers/gpu/drm/i915/i915_pmu.c | 1 + drivers/gpu/drm/i915/i915_reg.h | 176 +----------------- drivers/gpu/drm/i915/i915_request.c | 1 + drivers/gpu/drm/i915/intel_pm.c | 1 + drivers/gpu/drm/i915/intel_uncore.c | 2 +- 31 files changed, 228 insertions(+), 191 deletions(-) create mode 100644 drivers/gpu/drm/i915/gt/intel_engine_regs.h diff --git a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c index 61383830505e..e0e8d228b31f 100644 --- a/drivers/gpu/drm/i915/gt/gen2_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/gen2_engine_cs.c @@ -6,6 +6,7 @@ #include "gen2_engine_cs.h" #include "i915_drv.h" #include "intel_engine.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_irq.h" diff --git a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c index b388ceeeb1c9..5e65550b4dfb 100644 --- a/drivers/gpu/drm/i915/gt/gen6_engine_cs.c +++ 
b/drivers/gpu/drm/i915/gt/gen6_engine_cs.c @@ -5,6 +5,7 @@ #include "gen6_engine_cs.h" #include "intel_engine.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_irq.h" diff --git a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c index 890191f286e3..bc995f41058d 100644 --- a/drivers/gpu/drm/i915/gt/gen6_ppgtt.c +++ b/drivers/gpu/drm/i915/gt/gen6_ppgtt.c @@ -9,6 +9,7 @@ #include "i915_scatterlist.h" #include "i915_trace.h" #include "i915_vgpu.h" +#include "intel_engine_regs.h" #include "intel_gt.h" /* Write pde (index) from the page directory @pd to the page table @pt */ diff --git a/drivers/gpu/drm/i915/gt/intel_engine_cs.c b/drivers/gpu/drm/i915/gt/intel_engine_cs.c index 0ad1f594f636..d70fc19ec60b 100644 --- a/drivers/gpu/drm/i915/gt/intel_engine_cs.c +++ b/drivers/gpu/drm/i915/gt/intel_engine_cs.c @@ -13,6 +13,7 @@ #include "intel_context.h" #include "intel_engine.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_engine_user.h" #include "intel_execlists_submission.h" #include "intel_gt.h" diff --git a/drivers/gpu/drm/i915/gt/intel_engine_regs.h b/drivers/gpu/drm/i915/gt/intel_engine_regs.h new file mode 100644 index 000000000000..60511f310767 --- /dev/null +++ b/drivers/gpu/drm/i915/gt/intel_engine_regs.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_ENGINE_REGS__ +#define __INTEL_ENGINE_REGS__ + +#include "i915_reg_defs.h" + +#define RING_TAIL(base) _MMIO((base) + 0x30) +#define TAIL_ADDR 0x001FFFF8 +#define RING_HEAD(base) _MMIO((base) + 0x34) +#define HEAD_WRAP_COUNT 0xFFE00000 +#define HEAD_WRAP_ONE 0x00200000 +#define HEAD_ADDR 0x001FFFFC +#define RING_START(base) _MMIO((base) + 0x38) +#define RING_CTL(base) _MMIO((base) + 0x3c) +#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ +#define RING_NR_PAGES 0x001FF000 +#define RING_REPORT_MASK 0x00000006 +#define RING_REPORT_64K 0x00000002 +#define RING_REPORT_128K 0x00000004 +#define RING_NO_REPORT 0x00000000 +#define RING_VALID_MASK 0x00000001 +#define RING_VALID 0x00000001 +#define RING_INVALID 0x00000000 +#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ +#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ +#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ +#define RING_SYNC_0(base) _MMIO((base) + 0x40) +#define RING_SYNC_1(base) _MMIO((base) + 0x44) +#define RING_SYNC_2(base) _MMIO((base) + 0x48) +#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) +#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) +#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE)) +#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE)) +#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) +#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE)) +#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) +#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) +#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE)) +#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE)) +#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) +#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) +#define RING_PSMI_CTL(base) _MMIO((base) + 0x50) +#define GEN8_RC_SEMA_IDLE_MSG_DISABLE REG_BIT(12) +#define GEN8_FF_DOP_CLOCK_GATE_DISABLE REG_BIT(10) +#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7) +#define GEN6_BSD_GO_INDICATOR REG_BIT(4) +#define GEN6_BSD_SLEEP_INDICATOR REG_BIT(3) +#define GEN6_BSD_SLEEP_FLUSH_DISABLE REG_BIT(2) +#define GEN6_PSMI_SLEEP_MSG_DISABLE REG_BIT(0) +#define 
RING_MAX_IDLE(base) _MMIO((base) + 0x54) +#define PWRCTX_MAXCNT(base) _MMIO((base) + 0x54) +#define IDLE_TIME_MASK 0xFFFFF +#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c) +#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */ +#define RING_IPEIR(base) _MMIO((base) + 0x64) +#define RING_IPEHR(base) _MMIO((base) + 0x68) +#define RING_INSTDONE(base) _MMIO((base) + 0x6c) +#define RING_INSTPS(base) _MMIO((base) + 0x70) +#define RING_DMA_FADD(base) _MMIO((base) + 0x78) +#define RING_ACTHD(base) _MMIO((base) + 0x74) +#define RING_HWS_PGA(base) _MMIO((base) + 0x80) +#define RING_CMD_BUF_CCTL(base) _MMIO((base) + 0x84) +#define IPEIR(base) _MMIO((base) + 0x88) +#define IPEHR(base) _MMIO((base) + 0x8c) +#define RING_ID(base) _MMIO((base) + 0x8c) +#define RING_NOPID(base) _MMIO((base) + 0x94) +#define RING_HWSTAM(base) _MMIO((base) + 0x98) +#define RING_MI_MODE(base) _MMIO((base) + 0x9c) +#define RING_IMR(base) _MMIO((base) + 0xa8) +#define RING_EIR(base) _MMIO((base) + 0xb0) +#define RING_EMR(base) _MMIO((base) + 0xb4) +#define RING_ESR(base) _MMIO((base) + 0xb8) +#define RING_INSTPM(base) _MMIO((base) + 0xc0) +#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4) +#define ACTHD(base) _MMIO((base) + 0xc8) +#define RING_RESET_CTL(base) _MMIO((base) + 0xd0) +#define RESET_CTL_CAT_ERROR REG_BIT(2) +#define RESET_CTL_READY_TO_RESET REG_BIT(1) +#define RESET_CTL_REQUEST_RESET REG_BIT(0) +#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0) +#define RING_BBSTATE(base) _MMIO((base) + 0x110) +#define RING_BB_PPGTT (1 << 5) +#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */ +#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */ +#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */ +#define RING_BBADDR(base) _MMIO((base) + 0x140) +#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */ +#define CCID(base) _MMIO((base) + 0x180) +#define CCID_EN BIT(0) +#define CCID_EXTENDED_STATE_RESTORE BIT(2) +#define CCID_EXTENDED_STATE_SAVE BIT(3) +#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */ +#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */ +#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */ +#define ECOSKPD(base) _MMIO((base) + 0x1d0) +#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4) +#define ECO_GATING_CX_ONLY REG_BIT(3) +#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3) +#define ECO_FLIP_DONE REG_BIT(0) +#define GEN6_BLITTER_LOCK_SHIFT 16 + +#define BLIT_CCTL(base) _MMIO((base) + 0x204) +#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8) +#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0) +#define BLIT_CCTL_MASK (BLIT_CCTL_DST_MOCS_MASK | \ + BLIT_CCTL_SRC_MOCS_MASK) +#define BLIT_CCTL_MOCS(dst, src) \ + (REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \ + REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1)) + +/* + * CMD_CCTL read/write fields take a MOCS value and _not_ a table index. + * The lsb of each can be considered a separate enabling bit for encryption. + * 6:0 == default MOCS value for reads => 6:1 == table index for reads. + * 13:7 == default MOCS value for writes => 13:8 == table index for writes. + * 15:14 == Reserved => 31:30 are set to 0. 
+ */ +#define CMD_CCTL_WRITE_OVERRIDE_MASK REG_GENMASK(13, 7) +#define CMD_CCTL_READ_OVERRIDE_MASK REG_GENMASK(6, 0) +#define CMD_CCTL_MOCS_MASK (CMD_CCTL_WRITE_OVERRIDE_MASK | \ + CMD_CCTL_READ_OVERRIDE_MASK) +#define CMD_CCTL_MOCS_OVERRIDE(write, read) \ + (REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \ + REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1)) + +#define RING_PP_DIR_DCLV(base) _MMIO((base) + 0x220) +#define PP_DIR_DCLV_2G 0xffffffff +#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228) +#define RING_ELSP(base) _MMIO((base) + 0x230) +#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234) +#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4) +#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244) +#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0) +#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1) +#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2) +#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3) +#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8) +#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c) +#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4) +#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8) +#define RING_MODE_GEN7(base) _MMIO((base) + 0x29c) +#define GFX_RUN_LIST_ENABLE (1 << 15) +#define GFX_INTERRUPT_STEERING (1 << 14) +#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13) +#define GFX_SURFACE_FAULT_ENABLE (1 << 12) +#define GFX_REPLAY_MODE (1 << 11) +#define GFX_PSMI_GRANULARITY (1 << 10) +#define GFX_PPGTT_ENABLE (1 << 9) +#define GEN8_GFX_PPGTT_48B (1 << 7) +#define GFX_FORWARD_VBLANK_MASK (3 << 5) +#define GFX_FORWARD_VBLANK_NEVER (0 << 5) +#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5) +#define GFX_FORWARD_VBLANK_COND (2 << 5) +#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3) +#define RING_TIMESTAMP(base) _MMIO((base) + 0x358) +#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4) +#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) +#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */ +#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) +#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2) +#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ +#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28) +#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28) +#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */ +#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0) +#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0) +#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0) +#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0) +#define RING_FORCE_TO_NONPRIV_MASK_VALID \ + (RING_FORCE_TO_NONPRIV_RANGE_MASK | RING_FORCE_TO_NONPRIV_ACCESS_MASK) +#define RING_MAX_NONPRIV_SLOTS 12 + +#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) +#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518) +#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) +#define EL_CTRL_LOAD REG_BIT(0) + +/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ +#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) +#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) + +#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080) + +#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10) +#define IECPUNIT_CLKGATE_DIS REG_BIT(22) + +#define VDBOX_CGCTL3F18(base) _MMIO((base) + 0x3f18) +#define 
ALNUNIT_CLKGATE_DIS REG_BIT(13) + + +#endif /* __INTEL_ENGINE_REGS__ */ diff --git a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c index bedb80057046..ea8291361d65 100644 --- a/drivers/gpu/drm/i915/gt/intel_execlists_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_execlists_submission.c @@ -116,6 +116,7 @@ #include "intel_context.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_engine_stats.h" #include "intel_execlists_submission.h" #include "intel_gt.h" diff --git a/drivers/gpu/drm/i915/gt/intel_gt.c b/drivers/gpu/drm/i915/gt/intel_gt.c index f2422d48be32..4814453ab5ab 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt.c +++ b/drivers/gpu/drm/i915/gt/intel_gt.c @@ -10,6 +10,7 @@ #include "gem/i915_gem_lmem.h" #include "i915_drv.h" #include "intel_context.h" +#include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_buffer_pool.h" #include "intel_gt_clock_utils.h" diff --git a/drivers/gpu/drm/i915/gt/intel_lrc.c b/drivers/gpu/drm/i915/gt/intel_lrc.c index 56156cf18c41..1530227c4b91 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc.c +++ b/drivers/gpu/drm/i915/gt/intel_lrc.c @@ -9,6 +9,7 @@ #include "i915_drv.h" #include "i915_perf.h" #include "intel_engine.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_lrc.h" diff --git a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h index f785d0ed238f..304000c7e345 100644 --- a/drivers/gpu/drm/i915/gt/intel_lrc_reg.h +++ b/drivers/gpu/drm/i915/gt/intel_lrc_reg.h @@ -53,21 +53,6 @@ #define GEN8_EXECLISTS_STATUS_BUF 0x370 #define GEN11_EXECLISTS_STATUS_BUF2 0x3c0 -/* Execlists regs */ -#define RING_ELSP(base) _MMIO((base) + 0x230) -#define RING_EXECLIST_STATUS_LO(base) _MMIO((base) + 0x234) -#define RING_EXECLIST_STATUS_HI(base) _MMIO((base) + 0x234 + 4) -#define RING_CONTEXT_CONTROL(base) _MMIO((base) + 0x244) -#define CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT REG_BIT(0) -#define CTX_CTRL_RS_CTX_ENABLE REG_BIT(1) -#define CTX_CTRL_ENGINE_CTX_SAVE_INHIBIT REG_BIT(2) -#define CTX_CTRL_INHIBIT_SYN_CTX_SWITCH REG_BIT(3) -#define GEN12_CTX_CTRL_OAR_CONTEXT_ENABLE REG_BIT(8) -#define RING_CONTEXT_STATUS_PTR(base) _MMIO((base) + 0x3a0) -#define RING_EXECLIST_SQ_CONTENTS(base) _MMIO((base) + 0x510) -#define RING_EXECLIST_CONTROL(base) _MMIO((base) + 0x550) -#define EL_CTRL_LOAD REG_BIT(0) - /* * The docs specify that the write pointer wraps around after 5h, "After status * is written out to the last available status QW at offset 5h, this pointer diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 68ad99ac83e5..8be1d005d53b 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -7,6 +7,7 @@ #include "i915_drv.h" #include "i915_vgpu.h" +#include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_pcode.h" diff --git a/drivers/gpu/drm/i915/gt/intel_reset.c b/drivers/gpu/drm/i915/gt/intel_reset.c index c5bfcbe56890..5000608189da 100644 --- a/drivers/gpu/drm/i915/gt/intel_reset.c +++ b/drivers/gpu/drm/i915/gt/intel_reset.c @@ -16,6 +16,7 @@ #include "i915_irq.h" #include "intel_breadcrumbs.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_pm.h" #include "intel_gt_requests.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ring.c b/drivers/gpu/drm/i915/gt/intel_ring.c index 
2fdd52b62092..723055340c9b 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring.c +++ b/drivers/gpu/drm/i915/gt/intel_ring.c @@ -9,6 +9,7 @@ #include "i915_drv.h" #include "i915_vma.h" #include "intel_engine.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_ring.h" #include "intel_timeline.h" diff --git a/drivers/gpu/drm/i915/gt/intel_ring_submission.c b/drivers/gpu/drm/i915/gt/intel_ring_submission.c index 5408bc18a58e..0f1aa1c275b2 100644 --- a/drivers/gpu/drm/i915/gt/intel_ring_submission.c +++ b/drivers/gpu/drm/i915/gt/intel_ring_submission.c @@ -11,6 +11,7 @@ #include "i915_mitigations.h" #include "intel_breadcrumbs.h" #include "intel_context.h" +#include "intel_engine_regs.h" #include "intel_gt.h" #include "intel_gt_irq.h" #include "intel_reset.h" diff --git a/drivers/gpu/drm/i915/gt/intel_workarounds.c b/drivers/gpu/drm/i915/gt/intel_workarounds.c index f5ccc21761c3..a7a0a3acbacb 100644 --- a/drivers/gpu/drm/i915/gt/intel_workarounds.c +++ b/drivers/gpu/drm/i915/gt/intel_workarounds.c @@ -6,6 +6,7 @@ #include "i915_drv.h" #include "intel_context.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_ring.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c index 75569666105d..0035be4bf58b 100644 --- a/drivers/gpu/drm/i915/gt/selftest_engine_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_engine_pm.c @@ -6,6 +6,7 @@ #include #include "i915_selftest.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt_clock_utils.h" #include "selftest_engine.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c index 55c5cdb99f45..3dec126fb910 100644 --- a/drivers/gpu/drm/i915/gt/selftest_gt_pm.c +++ b/drivers/gpu/drm/i915/gt/selftest_gt_pm.c @@ -5,6 +5,7 @@ #include +#include "intel_engine_regs.h" #include "intel_gt_clock_utils.h" #include "selftest_llc.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index 7ee2513e15f9..bd170ba1cf00 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -8,6 +8,7 @@ #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt_clock_utils.h" #include "intel_gt_pm.h" diff --git a/drivers/gpu/drm/i915/gt/selftest_timeline.c b/drivers/gpu/drm/i915/gt/selftest_timeline.c index d0b6a3afcf44..72a04a1a1678 100644 --- a/drivers/gpu/drm/i915/gt/selftest_timeline.c +++ b/drivers/gpu/drm/i915/gt/selftest_timeline.c @@ -8,6 +8,7 @@ #include "intel_context.h" #include "intel_engine_heartbeat.h" #include "intel_engine_pm.h" +#include "intel_engine_regs.h" #include "intel_gpu_commands.h" #include "intel_gt.h" #include "intel_gt_requests.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c index 621c893a009f..4d5611291e28 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_ads.c @@ -5,6 +5,7 @@ #include +#include "gt/intel_engine_regs.h" #include "gt/intel_gt.h" #include "gt/intel_lrc.h" #include "gt/shmem_utils.h" diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index c48557dfa04c..4333d139b090 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c 
@@ -9,8 +9,9 @@ #include "gt/gen8_engine_cs.h" #include "gt/intel_breadcrumbs.h" #include "gt/intel_context.h" -#include "gt/intel_engine_pm.h" #include "gt/intel_engine_heartbeat.h" +#include "gt/intel_engine_pm.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_gt.h" #include "gt/intel_gt_irq.h" diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index c4118b808268..733e68ea210a 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -37,6 +37,7 @@ #include #include "i915_drv.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_lrc.h" #include "gt/intel_ring.h" diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c index abc81cdc9e5d..99d3534d2bd8 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.c +++ b/drivers/gpu/drm/i915/gvt/mmio_context.c @@ -35,6 +35,7 @@ #include "i915_drv.h" #include "gt/intel_context.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_ring.h" #include "gvt.h" diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.h b/drivers/gpu/drm/i915/gvt/mmio_context.h index b6b69777af49..128fd7f4d509 100644 --- a/drivers/gpu/drm/i915/gvt/mmio_context.h +++ b/drivers/gpu/drm/i915/gvt/mmio_context.h @@ -38,6 +38,7 @@ #include +#include "gt/intel_engine_regs.h" #include "gt/intel_engine_types.h" #include "gt/intel_lrc_reg.h" #include "i915_reg.h" diff --git a/drivers/gpu/drm/i915/i915_cmd_parser.c b/drivers/gpu/drm/i915/i915_cmd_parser.c index a804373bcd17..96c398051084 100644 --- a/drivers/gpu/drm/i915/i915_cmd_parser.c +++ b/drivers/gpu/drm/i915/i915_cmd_parser.c @@ -26,6 +26,7 @@ */ #include "gt/intel_engine.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "i915_cmd_parser.h" diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 2a2d7643b551..b3fc8917598a 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -41,6 +41,7 @@ #include "gem/i915_gem_context.h" #include "gem/i915_gem_lmem.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gt.h" #include "gt/intel_gt_pm.h" diff --git a/drivers/gpu/drm/i915/i915_perf.c b/drivers/gpu/drm/i915/i915_perf.c index 2f01b8c0284c..aa21e9fe3c78 100644 --- a/drivers/gpu/drm/i915/i915_perf.c +++ b/drivers/gpu/drm/i915/i915_perf.c @@ -197,6 +197,7 @@ #include "gem/i915_gem_context.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_engine_user.h" #include "gt/intel_execlists_submission.h" #include "gt/intel_gpu_commands.h" diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c index 0b488d49694c..290505b432bc 100644 --- a/drivers/gpu/drm/i915/i915_pmu.c +++ b/drivers/gpu/drm/i915/i915_pmu.c @@ -8,6 +8,7 @@ #include "gt/intel_engine.h" #include "gt/intel_engine_pm.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_engine_user.h" #include "gt/intel_gt_pm.h" #include "gt/intel_rc6.h" diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index ca815da75380..1a7ffdeb8df1 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -272,14 +272,6 @@ #define GEN12_SFC_DONE(n) _MMIO(0x1cc000 + (n) * 0x1000) #define GEN12_SFC_DONE_MAX 4 -#define RING_PP_DIR_BASE(base) _MMIO((base) + 0x228) -#define RING_PP_DIR_BASE_READ(base) _MMIO((base) + 0x518) -#define RING_PP_DIR_DCLV(base) 
_MMIO((base) + 0x220) -#define PP_DIR_DCLV_2G 0xffffffff - -#define GEN8_RING_PDP_UDW(base, n) _MMIO((base) + 0x270 + (n) * 8 + 4) -#define GEN8_RING_PDP_LDW(base, n) _MMIO((base) + 0x270 + (n) * 8) - #define GEN8_R_PWR_CLK_STATE _MMIO(0x20C8) #define GEN8_RPCS_ENABLE (1 << 31) #define GEN8_RPCS_S_CNT_ENABLE (1 << 18) @@ -2188,71 +2180,8 @@ #define XEHP_VEBOX3_RING_BASE 0x1e8000 #define XEHP_VEBOX4_RING_BASE 0x1f8000 #define BLT_RING_BASE 0x22000 -#define RING_TAIL(base) _MMIO((base) + 0x30) -#define RING_HEAD(base) _MMIO((base) + 0x34) -#define RING_START(base) _MMIO((base) + 0x38) -#define RING_CTL(base) _MMIO((base) + 0x3c) -#define RING_CTL_SIZE(size) ((size) - PAGE_SIZE) /* in bytes -> pages */ -#define RING_SYNC_0(base) _MMIO((base) + 0x40) -#define RING_SYNC_1(base) _MMIO((base) + 0x44) -#define RING_SYNC_2(base) _MMIO((base) + 0x48) -#define GEN6_RVSYNC (RING_SYNC_0(RENDER_RING_BASE)) -#define GEN6_RBSYNC (RING_SYNC_1(RENDER_RING_BASE)) -#define GEN6_RVESYNC (RING_SYNC_2(RENDER_RING_BASE)) -#define GEN6_VBSYNC (RING_SYNC_0(GEN6_BSD_RING_BASE)) -#define GEN6_VRSYNC (RING_SYNC_1(GEN6_BSD_RING_BASE)) -#define GEN6_VVESYNC (RING_SYNC_2(GEN6_BSD_RING_BASE)) -#define GEN6_BRSYNC (RING_SYNC_0(BLT_RING_BASE)) -#define GEN6_BVSYNC (RING_SYNC_1(BLT_RING_BASE)) -#define GEN6_BVESYNC (RING_SYNC_2(BLT_RING_BASE)) -#define GEN6_VEBSYNC (RING_SYNC_0(VEBOX_RING_BASE)) -#define GEN6_VERSYNC (RING_SYNC_1(VEBOX_RING_BASE)) -#define GEN6_VEVSYNC (RING_SYNC_2(VEBOX_RING_BASE)) -#define GEN6_NOSYNC INVALID_MMIO_REG -#define RING_PSMI_CTL(base) _MMIO((base) + 0x50) -#define GEN8_RC_SEMA_IDLE_MSG_DISABLE REG_BIT(12) -#define GEN8_FF_DOP_CLOCK_GATE_DISABLE REG_BIT(10) -#define GEN12_WAIT_FOR_EVENT_POWER_DOWN_DISABLE REG_BIT(7) -#define GEN6_BSD_GO_INDICATOR REG_BIT(4) -#define GEN6_BSD_SLEEP_INDICATOR REG_BIT(3) -#define GEN6_BSD_SLEEP_FLUSH_DISABLE REG_BIT(2) -#define GEN6_PSMI_SLEEP_MSG_DISABLE REG_BIT(0) -#define RING_MAX_IDLE(base) _MMIO((base) + 0x54) -#define RING_HWS_PGA(base) _MMIO((base) + 0x80) -#define RING_ID(base) _MMIO((base) + 0x8c) -#define RING_HWS_PGA_GEN6(base) _MMIO((base) + 0x2080) - -#define RING_CMD_CCTL(base) _MMIO((base) + 0xc4) -/* - * CMD_CCTL read/write fields take a MOCS value and _not_ a table index. - * The lsb of each can be considered a separate enabling bit for encryption. - * 6:0 == default MOCS value for reads => 6:1 == table index for reads. - * 13:7 == default MOCS value for writes => 13:8 == table index for writes. - * 15:14 == Reserved => 31:30 are set to 0. 
- */ -#define CMD_CCTL_WRITE_OVERRIDE_MASK REG_GENMASK(13, 7) -#define CMD_CCTL_READ_OVERRIDE_MASK REG_GENMASK(6, 0) -#define CMD_CCTL_MOCS_MASK (CMD_CCTL_WRITE_OVERRIDE_MASK | \ - CMD_CCTL_READ_OVERRIDE_MASK) -#define CMD_CCTL_MOCS_OVERRIDE(write, read) \ - (REG_FIELD_PREP(CMD_CCTL_WRITE_OVERRIDE_MASK, (write) << 1) | \ - REG_FIELD_PREP(CMD_CCTL_READ_OVERRIDE_MASK, (read) << 1)) - -#define BLIT_CCTL(base) _MMIO((base) + 0x204) -#define BLIT_CCTL_DST_MOCS_MASK REG_GENMASK(14, 8) -#define BLIT_CCTL_SRC_MOCS_MASK REG_GENMASK(6, 0) -#define BLIT_CCTL_MASK (BLIT_CCTL_DST_MOCS_MASK | \ - BLIT_CCTL_SRC_MOCS_MASK) -#define BLIT_CCTL_MOCS(dst, src) \ - (REG_FIELD_PREP(BLIT_CCTL_DST_MOCS_MASK, (dst) << 1) | \ - REG_FIELD_PREP(BLIT_CCTL_SRC_MOCS_MASK, (src) << 1)) - -#define RING_RESET_CTL(base) _MMIO((base) + 0xd0) -#define RESET_CTL_CAT_ERROR REG_BIT(2) -#define RESET_CTL_READY_TO_RESET REG_BIT(1) -#define RESET_CTL_REQUEST_RESET REG_BIT(0) - -#define RING_SEMA_WAIT_POLL(base) _MMIO((base) + 0x24c) + + #define HSW_GTT_CACHE_EN _MMIO(0x4024) #define GTT_CACHE_EN_ALL 0xF0007FFF @@ -2307,49 +2236,6 @@ #define AUX_INV REG_BIT(0) #define BLT_HWS_PGA_GEN7 _MMIO(0x04280) #define VEBOX_HWS_PGA_GEN7 _MMIO(0x04380) -#define RING_ACTHD(base) _MMIO((base) + 0x74) -#define RING_ACTHD_UDW(base) _MMIO((base) + 0x5c) -#define RING_NOPID(base) _MMIO((base) + 0x94) -#define RING_IMR(base) _MMIO((base) + 0xa8) -#define RING_HWSTAM(base) _MMIO((base) + 0x98) -#define RING_TIMESTAMP(base) _MMIO((base) + 0x358) -#define RING_TIMESTAMP_UDW(base) _MMIO((base) + 0x358 + 4) -#define TAIL_ADDR 0x001FFFF8 -#define HEAD_WRAP_COUNT 0xFFE00000 -#define HEAD_WRAP_ONE 0x00200000 -#define HEAD_ADDR 0x001FFFFC -#define RING_NR_PAGES 0x001FF000 -#define RING_REPORT_MASK 0x00000006 -#define RING_REPORT_64K 0x00000002 -#define RING_REPORT_128K 0x00000004 -#define RING_NO_REPORT 0x00000000 -#define RING_VALID_MASK 0x00000001 -#define RING_VALID 0x00000001 -#define RING_INVALID 0x00000000 -#define RING_WAIT_I8XX (1 << 0) /* gen2, PRBx_HEAD */ -#define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ -#define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ - -/* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ -#define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) -#define GEN8_RING_CS_GPR_UDW(base, n) _MMIO((base) + 0x600 + (n) * 8 + 4) - -#define RING_FORCE_TO_NONPRIV(base, i) _MMIO(((base) + 0x4D0) + (i) * 4) -#define RING_FORCE_TO_NONPRIV_ADDRESS_MASK REG_GENMASK(25, 2) -#define RING_FORCE_TO_NONPRIV_ACCESS_RW (0 << 28) /* CFL+ & Gen11+ */ -#define RING_FORCE_TO_NONPRIV_ACCESS_RD (1 << 28) -#define RING_FORCE_TO_NONPRIV_ACCESS_WR (2 << 28) -#define RING_FORCE_TO_NONPRIV_ACCESS_INVALID (3 << 28) -#define RING_FORCE_TO_NONPRIV_ACCESS_MASK (3 << 28) -#define RING_FORCE_TO_NONPRIV_RANGE_1 (0 << 0) /* CFL+ & Gen11+ */ -#define RING_FORCE_TO_NONPRIV_RANGE_4 (1 << 0) -#define RING_FORCE_TO_NONPRIV_RANGE_16 (2 << 0) -#define RING_FORCE_TO_NONPRIV_RANGE_64 (3 << 0) -#define RING_FORCE_TO_NONPRIV_RANGE_MASK (3 << 0) -#define RING_FORCE_TO_NONPRIV_MASK_VALID \ - (RING_FORCE_TO_NONPRIV_RANGE_MASK \ - | RING_FORCE_TO_NONPRIV_ACCESS_MASK) -#define RING_MAX_NONPRIV_SLOTS 12 #define GEN7_TLB_RD_ADDR _MMIO(0x4700) @@ -2394,23 +2280,11 @@ #define GEN11_MCR_SLICE_MASK GEN11_MCR_SLICE(0xf) #define GEN11_MCR_SUBSLICE(subslice) (((subslice) & 0x7) << 24) #define GEN11_MCR_SUBSLICE_MASK GEN11_MCR_SUBSLICE(0x7) -#define RING_IPEIR(base) _MMIO((base) + 0x64) -#define RING_IPEHR(base) _MMIO((base) + 0x68) -#define RING_EIR(base) 
_MMIO((base) + 0xb0) -#define RING_EMR(base) _MMIO((base) + 0xb4) -#define RING_ESR(base) _MMIO((base) + 0xb8) /* * On GEN4, only the render ring INSTDONE exists and has a different * layout than the GEN7+ version. * The GEN2 counterpart of this register is GEN2_INSTDONE. */ -#define RING_INSTDONE(base) _MMIO((base) + 0x6c) -#define RING_INSTPS(base) _MMIO((base) + 0x70) -#define RING_DMA_FADD(base) _MMIO((base) + 0x78) -#define RING_DMA_FADD_UDW(base) _MMIO((base) + 0x60) /* gen8+ */ -#define RING_INSTPM(base) _MMIO((base) + 0xc0) -#define RING_MI_MODE(base) _MMIO((base) + 0x9c) -#define RING_CMD_BUF_CCTL(base) _MMIO((base) + 0x84) #define INSTPS _MMIO(0x2070) /* 965+ only */ #define GEN4_INSTDONE1 _MMIO(0x207c) /* 965+ only, aka INSTDONE_2 on SNB */ #define ACTHD_I965 _MMIO(0x2074) @@ -2419,26 +2293,9 @@ #define HWS_START_ADDRESS_SHIFT 4 #define PWRCTXA _MMIO(0x2088) /* 965GM+ only */ #define PWRCTX_EN (1 << 0) -#define IPEIR(base) _MMIO((base) + 0x88) -#define IPEHR(base) _MMIO((base) + 0x8c) #define GEN2_INSTDONE _MMIO(0x2090) #define NOPID _MMIO(0x2094) #define HWSTAM _MMIO(0x2098) -#define DMA_FADD_I8XX(base) _MMIO((base) + 0xd0) -#define RING_BBSTATE(base) _MMIO((base) + 0x110) -#define RING_BB_PPGTT (1 << 5) -#define RING_SBBADDR(base) _MMIO((base) + 0x114) /* hsw+ */ -#define RING_SBBSTATE(base) _MMIO((base) + 0x118) /* hsw+ */ -#define RING_SBBADDR_UDW(base) _MMIO((base) + 0x11c) /* gen8+ */ -#define RING_BBADDR(base) _MMIO((base) + 0x140) -#define RING_BBADDR_UDW(base) _MMIO((base) + 0x168) /* gen8+ */ -#define RING_BB_PER_CTX_PTR(base) _MMIO((base) + 0x1c0) /* gen8+ */ -#define RING_INDIRECT_CTX(base) _MMIO((base) + 0x1c4) /* gen8+ */ -#define RING_INDIRECT_CTX_OFFSET(base) _MMIO((base) + 0x1c8) /* gen8+ */ -#define RING_CTX_TIMESTAMP(base) _MMIO((base) + 0x3a8) /* gen8+ */ - -#define VDBOX_CGCTL3F10(base) _MMIO((base) + 0x3f10) -#define IECPUNIT_CLKGATE_DIS REG_BIT(22) #define ERROR_GEN6 _MMIO(0x40a0) #define GEN7_ERR_INT _MMIO(0x44040) @@ -2551,22 +2408,6 @@ GEN9_STATE_ACK_TDL1 | GEN9_STATE_ACK_TDL0) #define GFX_MODE _MMIO(0x2520) -#define RING_MODE_GEN7(base) _MMIO((base) + 0x29c) -#define GFX_RUN_LIST_ENABLE (1 << 15) -#define GFX_INTERRUPT_STEERING (1 << 14) -#define GFX_TLB_INVALIDATE_EXPLICIT (1 << 13) -#define GFX_SURFACE_FAULT_ENABLE (1 << 12) -#define GFX_REPLAY_MODE (1 << 11) -#define GFX_PSMI_GRANULARITY (1 << 10) -#define GFX_PPGTT_ENABLE (1 << 9) -#define GEN8_GFX_PPGTT_48B (1 << 7) - -#define GFX_FORWARD_VBLANK_MASK (3 << 5) -#define GFX_FORWARD_VBLANK_NEVER (0 << 5) -#define GFX_FORWARD_VBLANK_ALWAYS (1 << 5) -#define GFX_FORWARD_VBLANK_COND (2 << 5) - -#define GEN11_GFX_DISABLE_LEGACY_MODE (1 << 3) #define VLV_GU_CTL0 _MMIO(VLV_DISPLAY_BASE + 0x2030) #define VLV_GU_CTL1 _MMIO(VLV_DISPLAY_BASE + 0x2034) @@ -2607,7 +2448,6 @@ #define INSTPM_FORCE_ORDERING (1 << 7) /* GEN6+ */ #define INSTPM_TLB_INVALIDATE (1 << 9) #define INSTPM_SYNC_FLUSH (1 << 5) -#define ACTHD(base) _MMIO((base) + 0xc8) #define MEM_MODE _MMIO(0x20cc) #define MEM_DISPLAY_B_TRICKLE_FEED_DISABLE (1 << 3) /* 830 only */ #define MEM_DISPLAY_A_TRICKLE_FEED_DISABLE (1 << 2) /* 830/845 only */ @@ -2746,12 +2586,6 @@ #define GFX_FLSH_CNTL _MMIO(0x2170) /* 915+ only */ #define GFX_FLSH_CNTL_GEN6 _MMIO(0x101008) #define GFX_FLSH_CNTL_EN (1 << 0) -#define ECOSKPD(base) _MMIO((base) + 0x1d0) -#define ECO_CONSTANT_BUFFER_SR_DISABLE REG_BIT(4) -#define ECO_GATING_CX_ONLY REG_BIT(3) -#define GEN6_BLITTER_FBC_NOTIFY REG_BIT(3) -#define ECO_FLIP_DONE REG_BIT(0) -#define GEN6_BLITTER_LOCK_SHIFT 16 #define 
CACHE_MODE_0_GEN7 _MMIO(0x7000) /* IVB+ */ #define RC_OP_FLUSH_ENABLE (1 << 0) @@ -3813,10 +3647,6 @@ /* * Logical Context regs */ -#define CCID(base) _MMIO((base) + 0x180) -#define CCID_EN BIT(0) -#define CCID_EXTENDED_STATE_RESTORE BIT(2) -#define CCID_EXTENDED_STATE_SAVE BIT(3) /* * Notes on SNB/IVB/VLV context size: * - Power context is saved elsewhere (LLC or stolen) @@ -8860,8 +8690,6 @@ enum { #define RC6_CTX_IN_DRAM (1 << 0) #define RC6_CTX_BASE _MMIO(0xD48) #define RC6_CTX_BASE_MASK 0xFFFFFFF0 -#define PWRCTX_MAXCNT(base) _MMIO((base) + 0x54) -#define IDLE_TIME_MASK 0xFFFFF #define FORCEWAKE _MMIO(0xA18C) #define FORCEWAKE_VLV _MMIO(0x1300b0) #define FORCEWAKE_ACK_VLV _MMIO(0x1300b4) diff --git a/drivers/gpu/drm/i915/i915_request.c b/drivers/gpu/drm/i915/i915_request.c index 42cd17357771..55934129a6be 100644 --- a/drivers/gpu/drm/i915/i915_request.c +++ b/drivers/gpu/drm/i915/i915_request.c @@ -35,6 +35,7 @@ #include "gt/intel_context.h" #include "gt/intel_engine.h" #include "gt/intel_engine_heartbeat.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_gpu_commands.h" #include "gt/intel_reset.h" #include "gt/intel_ring.h" diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index fd622f959a2a..76e1da70f4ad 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -43,6 +43,7 @@ #include "display/intel_sprite.h" #include "display/skl_universal_plane.h" +#include "gt/intel_engine_regs.h" #include "gt/intel_llc.h" #include "i915_drv.h" diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index 722910d02b5f..fefaf63dfb88 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -23,7 +23,7 @@ #include -#include "gt/intel_lrc_reg.h" /* for shadow reg list */ +#include "gt/intel_engine_regs.h" #include "i915_drv.h" #include "i915_iosf_mbi.h" -- cgit From aa1d6068a460dcb21e69f6d65fa7d3ab483d547a Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:57 -0800 Subject: drm/i915: Move SNPS PHY registers to their own header These registers are only needed in a couple files and on specific platforms; let's keep them separate from the general register pool. 
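The header added just below spells every SNPS PHY register out once, PHY-A-relative, and derives the other instance by rebasing: _SNPS_PHY(phy) picks the per-PHY base and _SNPS2() subtracts the PHY A base back out of the literal offset. A minimal standalone sketch of that arithmetic, assuming _PHY() expands to the usual linear _PICK_EVEN()-style interpolation from i915_reg.h and with _MMIO() reduced to a raw offset:

/*
 * Userspace sketch (not kernel code) of the per-PHY offset math used by
 * intel_snps_phy_regs.h.  _PHY() is assumed to interpolate linearly
 * between the two bases, like the other _PICK_EVEN()-style helpers in
 * i915_reg.h; _MMIO() is dropped so the result is a plain offset.
 */
#include <stdio.h>

#define _SNPS_PHY_A_BASE	0x168000
#define _SNPS_PHY_B_BASE	0x169000
#define _PHY(phy, a, b)		((a) + (phy) * ((b) - (a)))	/* assumed expansion */
#define _SNPS_PHY(phy)		_PHY(phy, _SNPS_PHY_A_BASE, _SNPS_PHY_B_BASE)
#define _SNPS2(phy, reg)	(_SNPS_PHY(phy) - _SNPS_PHY_A_BASE + (reg))

#define SNPS_PHY_MPLLB_DIV(phy)	_SNPS2(phy, 0x168004)

int main(void)
{
	/* PHY A (index 0) keeps the literal offset, PHY B (index 1) is rebased. */
	printf("MPLLB_DIV: PHY A 0x%x, PHY B 0x%x\n",
	       SNPS_PHY_MPLLB_DIV(0), SNPS_PHY_MPLLB_DIV(1));
	return 0;
}

Built with any C compiler, this prints 0x168004 for PHY A and 0x169004 for PHY B, i.e. the same MPLLB_DIV register at the two instance bases, which is why each register only needs to be written down once in the new header.
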
v2: - Don't forget to include i915_reg_defs.h (Jani) - Ensure include guard matches header name (Jani) Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-9-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_snps_phy.c | 1 + drivers/gpu/drm/i915/display/intel_snps_phy_regs.h | 75 ++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 67 ------------------- 3 files changed, 76 insertions(+), 67 deletions(-) create mode 100644 drivers/gpu/drm/i915/display/intel_snps_phy_regs.h diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 09f405e4d363..718bfdbae9c8 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -10,6 +10,7 @@ #include "intel_de.h" #include "intel_display_types.h" #include "intel_snps_phy.h" +#include "intel_snps_phy_regs.h" /** * DOC: Synopsis PHY support diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy_regs.h b/drivers/gpu/drm/i915/display/intel_snps_phy_regs.h new file mode 100644 index 000000000000..0543465aaf14 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_snps_phy_regs.h @@ -0,0 +1,75 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_SNPS_PHY_REGS__ +#define __INTEL_SNPS_PHY_REGS__ + +#include "i915_reg_defs.h" + +#define _SNPS_PHY_A_BASE 0x168000 +#define _SNPS_PHY_B_BASE 0x169000 +#define _SNPS_PHY(phy) _PHY(phy, \ + _SNPS_PHY_A_BASE, \ + _SNPS_PHY_B_BASE) +#define _SNPS2(phy, reg) (_SNPS_PHY(phy) - \ + _SNPS_PHY_A_BASE + (reg)) +#define _MMIO_SNPS(phy, reg) _MMIO(_SNPS2(phy, reg)) +#define _MMIO_SNPS_LN(ln, phy, reg) _MMIO(_SNPS2(phy, \ + (reg) + (ln) * 0x10)) + +#define SNPS_PHY_MPLLB_CP(phy) _MMIO_SNPS(phy, 0x168000) +#define SNPS_PHY_MPLLB_CP_INT REG_GENMASK(31, 25) +#define SNPS_PHY_MPLLB_CP_INT_GS REG_GENMASK(23, 17) +#define SNPS_PHY_MPLLB_CP_PROP REG_GENMASK(15, 9) +#define SNPS_PHY_MPLLB_CP_PROP_GS REG_GENMASK(7, 1) + +#define SNPS_PHY_MPLLB_DIV(phy) _MMIO_SNPS(phy, 0x168004) +#define SNPS_PHY_MPLLB_FORCE_EN REG_BIT(31) +#define SNPS_PHY_MPLLB_DIV_CLK_EN REG_BIT(30) +#define SNPS_PHY_MPLLB_DIV5_CLK_EN REG_BIT(29) +#define SNPS_PHY_MPLLB_V2I REG_GENMASK(27, 26) +#define SNPS_PHY_MPLLB_FREQ_VCO REG_GENMASK(25, 24) +#define SNPS_PHY_MPLLB_DIV_MULTIPLIER REG_GENMASK(23, 16) +#define SNPS_PHY_MPLLB_PMIX_EN REG_BIT(10) +#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9) +#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8) +#define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5) +#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0) + +#define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008) +#define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31) +#define SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN REG_BIT(30) +#define SNPS_PHY_MPLLB_FRACN_DEN REG_GENMASK(15, 0) + +#define SNPS_PHY_MPLLB_FRACN2(phy) _MMIO_SNPS(phy, 0x16800C) +#define SNPS_PHY_MPLLB_FRACN_REM REG_GENMASK(31, 16) +#define SNPS_PHY_MPLLB_FRACN_QUOT REG_GENMASK(15, 0) + +#define SNPS_PHY_MPLLB_SSCEN(phy) _MMIO_SNPS(phy, 0x168014) +#define SNPS_PHY_MPLLB_SSC_EN REG_BIT(31) +#define SNPS_PHY_MPLLB_SSC_UP_SPREAD REG_BIT(30) +#define SNPS_PHY_MPLLB_SSC_PEAK REG_GENMASK(29, 10) + +#define SNPS_PHY_MPLLB_SSCSTEP(phy) _MMIO_SNPS(phy, 0x168018) +#define SNPS_PHY_MPLLB_SSC_STEPSIZE REG_GENMASK(31, 11) + +#define SNPS_PHY_MPLLB_DIV2(phy) _MMIO_SNPS(phy, 0x16801C) +#define SNPS_PHY_MPLLB_HDMI_PIXEL_CLK_DIV REG_GENMASK(19, 18) +#define 
SNPS_PHY_MPLLB_HDMI_DIV REG_GENMASK(17, 15) +#define SNPS_PHY_MPLLB_REF_CLK_DIV REG_GENMASK(14, 12) +#define SNPS_PHY_MPLLB_MULTIPLIER REG_GENMASK(11, 0) + +#define SNPS_PHY_REF_CONTROL(phy) _MMIO_SNPS(phy, 0x168188) +#define SNPS_PHY_REF_CONTROL_REF_RANGE REG_GENMASK(31, 27) + +#define SNPS_PHY_TX_REQ(phy) _MMIO_SNPS(phy, 0x168200) +#define SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR REG_GENMASK(31, 30) + +#define SNPS_PHY_TX_EQ(ln, phy) _MMIO_SNPS_LN(ln, phy, 0x168300) +#define SNPS_PHY_TX_EQ_MAIN REG_GENMASK(23, 18) +#define SNPS_PHY_TX_EQ_POST REG_GENMASK(15, 10) +#define SNPS_PHY_TX_EQ_PRE REG_GENMASK(7, 2) + +#endif /* __INTEL_SNPS_PHY_REGS__ */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1a7ffdeb8df1..527e99ff9896 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1865,73 +1865,6 @@ #define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) #define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) -/* - * DG2 SNPS PHY registers (TC1 = PHY_E) - */ -#define _SNPS_PHY_A_BASE 0x168000 -#define _SNPS_PHY_B_BASE 0x169000 -#define _SNPS_PHY(phy) _PHY(phy, \ - _SNPS_PHY_A_BASE, \ - _SNPS_PHY_B_BASE) -#define _SNPS2(phy, reg) (_SNPS_PHY(phy) - \ - _SNPS_PHY_A_BASE + (reg)) -#define _MMIO_SNPS(phy, reg) _MMIO(_SNPS2(phy, reg)) -#define _MMIO_SNPS_LN(ln, phy, reg) _MMIO(_SNPS2(phy, \ - (reg) + (ln) * 0x10)) - -#define SNPS_PHY_MPLLB_CP(phy) _MMIO_SNPS(phy, 0x168000) -#define SNPS_PHY_MPLLB_CP_INT REG_GENMASK(31, 25) -#define SNPS_PHY_MPLLB_CP_INT_GS REG_GENMASK(23, 17) -#define SNPS_PHY_MPLLB_CP_PROP REG_GENMASK(15, 9) -#define SNPS_PHY_MPLLB_CP_PROP_GS REG_GENMASK(7, 1) - -#define SNPS_PHY_MPLLB_DIV(phy) _MMIO_SNPS(phy, 0x168004) -#define SNPS_PHY_MPLLB_FORCE_EN REG_BIT(31) -#define SNPS_PHY_MPLLB_DIV_CLK_EN REG_BIT(30) -#define SNPS_PHY_MPLLB_DIV5_CLK_EN REG_BIT(29) -#define SNPS_PHY_MPLLB_V2I REG_GENMASK(27, 26) -#define SNPS_PHY_MPLLB_FREQ_VCO REG_GENMASK(25, 24) -#define SNPS_PHY_MPLLB_DIV_MULTIPLIER REG_GENMASK(23, 16) -#define SNPS_PHY_MPLLB_PMIX_EN REG_BIT(10) -#define SNPS_PHY_MPLLB_DP2_MODE REG_BIT(9) -#define SNPS_PHY_MPLLB_WORD_DIV2_EN REG_BIT(8) -#define SNPS_PHY_MPLLB_TX_CLK_DIV REG_GENMASK(7, 5) -#define SNPS_PHY_MPLLB_SHIM_DIV32_CLK_SEL REG_BIT(0) - -#define SNPS_PHY_MPLLB_FRACN1(phy) _MMIO_SNPS(phy, 0x168008) -#define SNPS_PHY_MPLLB_FRACN_EN REG_BIT(31) -#define SNPS_PHY_MPLLB_FRACN_CGG_UPDATE_EN REG_BIT(30) -#define SNPS_PHY_MPLLB_FRACN_DEN REG_GENMASK(15, 0) - -#define SNPS_PHY_MPLLB_FRACN2(phy) _MMIO_SNPS(phy, 0x16800C) -#define SNPS_PHY_MPLLB_FRACN_REM REG_GENMASK(31, 16) -#define SNPS_PHY_MPLLB_FRACN_QUOT REG_GENMASK(15, 0) - -#define SNPS_PHY_MPLLB_SSCEN(phy) _MMIO_SNPS(phy, 0x168014) -#define SNPS_PHY_MPLLB_SSC_EN REG_BIT(31) -#define SNPS_PHY_MPLLB_SSC_UP_SPREAD REG_BIT(30) -#define SNPS_PHY_MPLLB_SSC_PEAK REG_GENMASK(29, 10) - -#define SNPS_PHY_MPLLB_SSCSTEP(phy) _MMIO_SNPS(phy, 0x168018) -#define SNPS_PHY_MPLLB_SSC_STEPSIZE REG_GENMASK(31, 11) - -#define SNPS_PHY_MPLLB_DIV2(phy) _MMIO_SNPS(phy, 0x16801C) -#define SNPS_PHY_MPLLB_HDMI_PIXEL_CLK_DIV REG_GENMASK(19, 18) -#define SNPS_PHY_MPLLB_HDMI_DIV REG_GENMASK(17, 15) -#define SNPS_PHY_MPLLB_REF_CLK_DIV REG_GENMASK(14, 12) -#define SNPS_PHY_MPLLB_MULTIPLIER REG_GENMASK(11, 0) - -#define SNPS_PHY_REF_CONTROL(phy) _MMIO_SNPS(phy, 0x168188) -#define SNPS_PHY_REF_CONTROL_REF_RANGE REG_GENMASK(31, 27) - -#define SNPS_PHY_TX_REQ(phy) _MMIO_SNPS(phy, 0x168200) -#define SNPS_PHY_TX_REQ_LN_DIS_PWR_STATE_PSR REG_GENMASK(31, 30) - -#define SNPS_PHY_TX_EQ(ln, phy) _MMIO_SNPS_LN(ln, phy, 
0x168300) -#define SNPS_PHY_TX_EQ_MAIN REG_GENMASK(23, 18) -#define SNPS_PHY_TX_EQ_POST REG_GENMASK(15, 10) -#define SNPS_PHY_TX_EQ_PRE REG_GENMASK(7, 2) - /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. */ -- cgit From d0864ee4f81fd8c782fbb382f80d6c9c531f2967 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:58 -0800 Subject: drm/i915: Move combo PHY registers to their own header These registers are only needed in a couple files and on specific platforms; let's keep them separate from the general register pool. Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-10-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/icl_dsi.c | 1 + drivers/gpu/drm/i915/display/intel_combo_phy.c | 1 + .../gpu/drm/i915/display/intel_combo_phy_regs.h | 162 +++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_ddi.c | 1 + drivers/gpu/drm/i915/display/intel_display_power.c | 1 + drivers/gpu/drm/i915/display/intel_dp.c | 1 + drivers/gpu/drm/i915/i915_reg.h | 154 -------------------- 7 files changed, 167 insertions(+), 154 deletions(-) create mode 100644 drivers/gpu/drm/i915/display/intel_combo_phy_regs.h diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 5781e9fac8b4..95f49535fa6e 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -32,6 +32,7 @@ #include "intel_atomic.h" #include "intel_backlight.h" #include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_ddi.h" diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy.c b/drivers/gpu/drm/i915/display/intel_combo_phy.c index f628e0542933..4dfe77351b8b 100644 --- a/drivers/gpu/drm/i915/display/intel_combo_phy.c +++ b/drivers/gpu/drm/i915/display/intel_combo_phy.c @@ -4,6 +4,7 @@ */ #include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" #include "intel_de.h" #include "intel_display_types.h" diff --git a/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h new file mode 100644 index 000000000000..2ed65193ca19 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_combo_phy_regs.h @@ -0,0 +1,162 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_COMBO_PHY_REGS__ +#define __INTEL_COMBO_PHY_REGS__ + +#include "i915_reg_defs.h" + +#define _ICL_COMBOPHY_A 0x162000 +#define _ICL_COMBOPHY_B 0x6C000 +#define _EHL_COMBOPHY_C 0x160000 +#define _RKL_COMBOPHY_D 0x161000 +#define _ADL_COMBOPHY_E 0x16B000 + +#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \ + _ICL_COMBOPHY_B, \ + _EHL_COMBOPHY_C, \ + _RKL_COMBOPHY_D, \ + _ADL_COMBOPHY_E) + +/* ICL Port CL_DW registers */ +#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ + 4 * (dw)) + +#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy)) +#define CL_POWER_DOWN_ENABLE (1 << 4) +#define SUS_CLOCK_CONFIG (3 << 0) + +#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy)) +#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) +#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 +#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) +#define PWR_UP_ALL_LANES (0x0 << 4) +#define PWR_DOWN_LN_3_2_1 (0xe << 4) +#define PWR_DOWN_LN_3_2 (0xc << 4) +#define PWR_DOWN_LN_3 (0x8 << 4) +#define PWR_DOWN_LN_2_1_0 (0x7 << 4) +#define PWR_DOWN_LN_1_0 (0x3 << 4) +#define 
PWR_DOWN_LN_3_1 (0xa << 4) +#define PWR_DOWN_LN_3_1_0 (0xb << 4) +#define PWR_DOWN_LN_MASK (0xf << 4) +#define PWR_DOWN_LN_SHIFT 4 +#define EDP4K2K_MODE_OVRD_EN (1 << 3) +#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2) + +#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy)) +#define ICL_LANE_ENABLE_AUX (1 << 0) + +/* ICL Port COMP_DW registers */ +#define _ICL_PORT_COMP 0x100 +#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_COMP + 4 * (dw)) + +#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy)) +#define COMP_INIT (1 << 31) + +#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy)) + +#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy)) +#define PROCESS_INFO_DOT_0 (0 << 26) +#define PROCESS_INFO_DOT_1 (1 << 26) +#define PROCESS_INFO_DOT_4 (2 << 26) +#define PROCESS_INFO_MASK (7 << 26) +#define PROCESS_INFO_SHIFT 26 +#define VOLTAGE_INFO_0_85V (0 << 24) +#define VOLTAGE_INFO_0_95V (1 << 24) +#define VOLTAGE_INFO_1_05V (2 << 24) +#define VOLTAGE_INFO_MASK (3 << 24) +#define VOLTAGE_INFO_SHIFT 24 + +#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy)) +#define IREFGEN (1 << 24) + +#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy)) + +#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy)) + +/* ICL Port PCS registers */ +#define _ICL_PORT_PCS_AUX 0x300 +#define _ICL_PORT_PCS_GRP 0x600 +#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100) +#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_PCS_AUX + 4 * (dw)) +#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_PCS_GRP + 4 * (dw)) +#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_PCS_LN(ln) + 4 * (dw)) +#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) +#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) +#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy)) +#define DCC_MODE_SELECT_MASK (0x3 << 20) +#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20) +#define COMMON_KEEPER_EN (1 << 26) +#define LATENCY_OPTIM_MASK (0x3 << 2) +#define LATENCY_OPTIM_VAL(x) ((x) << 2) + +/* ICL Port TX registers */ +#define _ICL_PORT_TX_AUX 0x380 +#define _ICL_PORT_TX_GRP 0x680 +#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100) + +#define _ICL_PORT_TX_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_TX_AUX + 4 * (dw)) +#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_TX_GRP + 4 * (dw)) +#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ + _ICL_PORT_TX_LN(ln) + 4 * (dw)) + +#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy)) +#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy)) +#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy)) +#define SWING_SEL_UPPER(x) (((x) >> 3) << 15) +#define SWING_SEL_UPPER_MASK (1 << 15) +#define SWING_SEL_LOWER(x) (((x) & 0x7) << 11) +#define SWING_SEL_LOWER_MASK (0x7 << 11) +#define FRC_LATENCY_OPTIM_MASK (0x7 << 8) +#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8) +#define RCOMP_SCALAR(x) ((x) << 0) +#define RCOMP_SCALAR_MASK (0xFF << 0) + +#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy)) +#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy)) +#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy)) +#define LOADGEN_SELECT (1 << 31) +#define POST_CURSOR_1(x) ((x) << 12) +#define POST_CURSOR_1_MASK (0x3F << 12) +#define POST_CURSOR_2(x) ((x) << 6) +#define POST_CURSOR_2_MASK (0x3F 
<< 6) +#define CURSOR_COEFF(x) ((x) << 0) +#define CURSOR_COEFF_MASK (0x3F << 0) + +#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy)) +#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy)) +#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy)) +#define TX_TRAINING_EN (1 << 31) +#define TAP2_DISABLE (1 << 30) +#define TAP3_DISABLE (1 << 29) +#define SCALING_MODE_SEL(x) ((x) << 18) +#define SCALING_MODE_SEL_MASK (0x7 << 18) +#define RTERM_SELECT(x) ((x) << 3) +#define RTERM_SELECT_MASK (0x7 << 3) + +#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy)) +#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy)) +#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy)) +#define N_SCALAR(x) ((x) << 24) +#define N_SCALAR_MASK (0x7F << 24) + +#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy)) +#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy)) +#define ICL_PORT_TX_DW8_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(8, ln, phy)) +#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31) +#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29) +#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1) + +#define _ICL_DPHY_CHKN_REG 0x194 +#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG) +#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7) + +#endif /* __INTEL_COMBO_PHY_REGS__ */ diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 9c9d574f0b8c..766a8dbe095d 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -32,6 +32,7 @@ #include "intel_audio.h" #include "intel_backlight.h" #include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_ddi.h" diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 05babdcf5f2e..fba35fb6d2df 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -7,6 +7,7 @@ #include "i915_irq.h" #include "intel_cdclk.h" #include "intel_combo_phy.h" +#include "intel_combo_phy_regs.h" #include "intel_crt.h" #include "intel_de.h" #include "intel_display_power.h" diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index d6d8c9922feb..942a755a0c48 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -46,6 +46,7 @@ #include "intel_atomic.h" #include "intel_audio.h" #include "intel_backlight.h" +#include "intel_combo_phy_regs.h" #include "intel_connector.h" #include "intel_crtc.h" #include "intel_ddi.h" diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 527e99ff9896..1b9d98343f03 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1536,160 +1536,6 @@ #define OCL2_LDOFUSE_PWR_DIS (1 << 6) #define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC) -/* - * ICL Port/COMBO-PHY Registers - */ -#define _ICL_COMBOPHY_A 0x162000 -#define _ICL_COMBOPHY_B 0x6C000 -#define _EHL_COMBOPHY_C 0x160000 -#define _RKL_COMBOPHY_D 0x161000 -#define _ADL_COMBOPHY_E 0x16B000 - -#define _ICL_COMBOPHY(phy) _PICK(phy, _ICL_COMBOPHY_A, \ - _ICL_COMBOPHY_B, \ - _EHL_COMBOPHY_C, \ - _RKL_COMBOPHY_D, \ - _ADL_COMBOPHY_E) - -/* ICL Port CL_DW registers */ 
-#define _ICL_PORT_CL_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ - 4 * (dw)) - -#define ICL_PORT_CL_DW5(phy) _MMIO(_ICL_PORT_CL_DW(5, phy)) -#define CL_POWER_DOWN_ENABLE (1 << 4) -#define SUS_CLOCK_CONFIG (3 << 0) - -#define ICL_PORT_CL_DW10(phy) _MMIO(_ICL_PORT_CL_DW(10, phy)) -#define PG_SEQ_DELAY_OVERRIDE_MASK (3 << 25) -#define PG_SEQ_DELAY_OVERRIDE_SHIFT 25 -#define PG_SEQ_DELAY_OVERRIDE_ENABLE (1 << 24) -#define PWR_UP_ALL_LANES (0x0 << 4) -#define PWR_DOWN_LN_3_2_1 (0xe << 4) -#define PWR_DOWN_LN_3_2 (0xc << 4) -#define PWR_DOWN_LN_3 (0x8 << 4) -#define PWR_DOWN_LN_2_1_0 (0x7 << 4) -#define PWR_DOWN_LN_1_0 (0x3 << 4) -#define PWR_DOWN_LN_3_1 (0xa << 4) -#define PWR_DOWN_LN_3_1_0 (0xb << 4) -#define PWR_DOWN_LN_MASK (0xf << 4) -#define PWR_DOWN_LN_SHIFT 4 -#define EDP4K2K_MODE_OVRD_EN (1 << 3) -#define EDP4K2K_MODE_OVRD_OPTIMIZED (1 << 2) - -#define ICL_PORT_CL_DW12(phy) _MMIO(_ICL_PORT_CL_DW(12, phy)) -#define ICL_LANE_ENABLE_AUX (1 << 0) - -/* ICL Port COMP_DW registers */ -#define _ICL_PORT_COMP 0x100 -#define _ICL_PORT_COMP_DW(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_COMP + 4 * (dw)) - -#define ICL_PORT_COMP_DW0(phy) _MMIO(_ICL_PORT_COMP_DW(0, phy)) -#define COMP_INIT (1 << 31) - -#define ICL_PORT_COMP_DW1(phy) _MMIO(_ICL_PORT_COMP_DW(1, phy)) - -#define ICL_PORT_COMP_DW3(phy) _MMIO(_ICL_PORT_COMP_DW(3, phy)) -#define PROCESS_INFO_DOT_0 (0 << 26) -#define PROCESS_INFO_DOT_1 (1 << 26) -#define PROCESS_INFO_DOT_4 (2 << 26) -#define PROCESS_INFO_MASK (7 << 26) -#define PROCESS_INFO_SHIFT 26 -#define VOLTAGE_INFO_0_85V (0 << 24) -#define VOLTAGE_INFO_0_95V (1 << 24) -#define VOLTAGE_INFO_1_05V (2 << 24) -#define VOLTAGE_INFO_MASK (3 << 24) -#define VOLTAGE_INFO_SHIFT 24 - -#define ICL_PORT_COMP_DW8(phy) _MMIO(_ICL_PORT_COMP_DW(8, phy)) -#define IREFGEN (1 << 24) - -#define ICL_PORT_COMP_DW9(phy) _MMIO(_ICL_PORT_COMP_DW(9, phy)) - -#define ICL_PORT_COMP_DW10(phy) _MMIO(_ICL_PORT_COMP_DW(10, phy)) - -/* ICL Port PCS registers */ -#define _ICL_PORT_PCS_AUX 0x300 -#define _ICL_PORT_PCS_GRP 0x600 -#define _ICL_PORT_PCS_LN(ln) (0x800 + (ln) * 0x100) -#define _ICL_PORT_PCS_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_PCS_AUX + 4 * (dw)) -#define _ICL_PORT_PCS_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_PCS_GRP + 4 * (dw)) -#define _ICL_PORT_PCS_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_PCS_LN(ln) + 4 * (dw)) -#define ICL_PORT_PCS_DW1_AUX(phy) _MMIO(_ICL_PORT_PCS_DW_AUX(1, phy)) -#define ICL_PORT_PCS_DW1_GRP(phy) _MMIO(_ICL_PORT_PCS_DW_GRP(1, phy)) -#define ICL_PORT_PCS_DW1_LN(ln, phy) _MMIO(_ICL_PORT_PCS_DW_LN(1, ln, phy)) -#define DCC_MODE_SELECT_MASK (0x3 << 20) -#define DCC_MODE_SELECT_CONTINUOSLY (0x3 << 20) -#define COMMON_KEEPER_EN (1 << 26) -#define LATENCY_OPTIM_MASK (0x3 << 2) -#define LATENCY_OPTIM_VAL(x) ((x) << 2) - -/* ICL Port TX registers */ -#define _ICL_PORT_TX_AUX 0x380 -#define _ICL_PORT_TX_GRP 0x680 -#define _ICL_PORT_TX_LN(ln) (0x880 + (ln) * 0x100) - -#define _ICL_PORT_TX_DW_AUX(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_TX_AUX + 4 * (dw)) -#define _ICL_PORT_TX_DW_GRP(dw, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_TX_GRP + 4 * (dw)) -#define _ICL_PORT_TX_DW_LN(dw, ln, phy) (_ICL_COMBOPHY(phy) + \ - _ICL_PORT_TX_LN(ln) + 4 * (dw)) - -#define ICL_PORT_TX_DW2_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(2, phy)) -#define ICL_PORT_TX_DW2_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(2, phy)) -#define ICL_PORT_TX_DW2_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(2, ln, phy)) -#define SWING_SEL_UPPER(x) (((x) >> 3) << 15) -#define SWING_SEL_UPPER_MASK (1 << 15) -#define 
SWING_SEL_LOWER(x) (((x) & 0x7) << 11) -#define SWING_SEL_LOWER_MASK (0x7 << 11) -#define FRC_LATENCY_OPTIM_MASK (0x7 << 8) -#define FRC_LATENCY_OPTIM_VAL(x) ((x) << 8) -#define RCOMP_SCALAR(x) ((x) << 0) -#define RCOMP_SCALAR_MASK (0xFF << 0) - -#define ICL_PORT_TX_DW4_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(4, phy)) -#define ICL_PORT_TX_DW4_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(4, phy)) -#define ICL_PORT_TX_DW4_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(4, ln, phy)) -#define LOADGEN_SELECT (1 << 31) -#define POST_CURSOR_1(x) ((x) << 12) -#define POST_CURSOR_1_MASK (0x3F << 12) -#define POST_CURSOR_2(x) ((x) << 6) -#define POST_CURSOR_2_MASK (0x3F << 6) -#define CURSOR_COEFF(x) ((x) << 0) -#define CURSOR_COEFF_MASK (0x3F << 0) - -#define ICL_PORT_TX_DW5_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(5, phy)) -#define ICL_PORT_TX_DW5_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(5, phy)) -#define ICL_PORT_TX_DW5_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(5, ln, phy)) -#define TX_TRAINING_EN (1 << 31) -#define TAP2_DISABLE (1 << 30) -#define TAP3_DISABLE (1 << 29) -#define SCALING_MODE_SEL(x) ((x) << 18) -#define SCALING_MODE_SEL_MASK (0x7 << 18) -#define RTERM_SELECT(x) ((x) << 3) -#define RTERM_SELECT_MASK (0x7 << 3) - -#define ICL_PORT_TX_DW7_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(7, phy)) -#define ICL_PORT_TX_DW7_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(7, phy)) -#define ICL_PORT_TX_DW7_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(7, ln, phy)) -#define N_SCALAR(x) ((x) << 24) -#define N_SCALAR_MASK (0x7F << 24) - -#define ICL_PORT_TX_DW8_AUX(phy) _MMIO(_ICL_PORT_TX_DW_AUX(8, phy)) -#define ICL_PORT_TX_DW8_GRP(phy) _MMIO(_ICL_PORT_TX_DW_GRP(8, phy)) -#define ICL_PORT_TX_DW8_LN(ln, phy) _MMIO(_ICL_PORT_TX_DW_LN(8, ln, phy)) -#define ICL_PORT_TX_DW8_ODCC_CLK_SEL REG_BIT(31) -#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK REG_GENMASK(30, 29) -#define ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_DIV2 REG_FIELD_PREP(ICL_PORT_TX_DW8_ODCC_CLK_DIV_SEL_MASK, 0x1) - -#define _ICL_DPHY_CHKN_REG 0x194 -#define ICL_DPHY_CHKN(port) _MMIO(_ICL_COMBOPHY(port) + _ICL_DPHY_CHKN_REG) -#define ICL_DPHY_CHKN_AFE_OVER_PPI_STRAP REG_BIT(7) - #define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \ _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) -- cgit From 24ce4d6d2ca626a733f70b578c4a298b200a69de Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:15:59 -0800 Subject: drm/i915: Move TC PHY registers to their own header Registers representing the MG/DKL TC PHYs (including the TC DPLLs which exist inside the PHY) are only needed in a couple files and on specific platforms; let's keep them separate from the general register pool. 
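The MG/DKL macros in the header added below parametrize two axes at once: MG_PHY_PORT_LN() interpolates across TC port instances via _PORT() and across lanes via the LN0-to-LN1 stride. A rough standalone model, assuming _PORT() is the linear _PICK_EVEN()-style helper from i915_reg.h and again treating _MMIO() as a plain offset:

/*
 * Userspace sketch of MG_PHY_PORT_LN() from intel_tc_phy_regs.h.
 * _PORT() is assumed to expand to a linear interpolation between the
 * PORT1 and PORT2 anchors; _MMIO() is omitted for illustration.
 */
#include <stdio.h>

#define _PORT(port, a, b)	((a) + (port) * ((b) - (a)))	/* assumed expansion */
#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \
	(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1)))

#define MG_TX_LINK_PARAMS_TX1LN0_PORT1	0x16812C
#define MG_TX_LINK_PARAMS_TX1LN1_PORT1	0x16852C
#define MG_TX_LINK_PARAMS_TX1LN0_PORT2	0x16912C
#define MG_TX1_LINK_PARAMS(ln, tc_port) \
	MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \
		       MG_TX_LINK_PARAMS_TX1LN0_PORT2, \
		       MG_TX_LINK_PARAMS_TX1LN1_PORT1)

int main(void)
{
	/* port stride works out to 0x1000, lane stride to 0x400 */
	printf("TC1/LN0 0x%x, TC1/LN1 0x%x, TC2/LN0 0x%x\n",
	       MG_TX1_LINK_PARAMS(0, 0), MG_TX1_LINK_PARAMS(1, 0),
	       MG_TX1_LINK_PARAMS(0, 1));
	return 0;
}

Only the three PORT1/PORT2 anchor addresses are needed per register; every other (port, lane) instance falls out of the two strides.
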
Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-11-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 1 + drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 1 + drivers/gpu/drm/i915/display/intel_tc.c | 1 + drivers/gpu/drm/i915/display/intel_tc_phy_regs.h | 344 +++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 333 ---------------------- 5 files changed, 347 insertions(+), 333 deletions(-) create mode 100644 drivers/gpu/drm/i915/display/intel_tc_phy_regs.h diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 766a8dbe095d..6ee0f77b7927 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -57,6 +57,7 @@ #include "intel_snps_phy.h" #include "intel_sprite.h" #include "intel_tc.h" +#include "intel_tc_phy_regs.h" #include "intel_vdsc.h" #include "intel_vrr.h" #include "skl_scaler.h" diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index fc8fda77483a..3f7357123a6d 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -28,6 +28,7 @@ #include "intel_dpll_mgr.h" #include "intel_pch_refclk.h" #include "intel_tc.h" +#include "intel_tc_phy_regs.h" /** * DOC: Display PLLs diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 40faa18947c9..4eefe7b0bb26 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -8,6 +8,7 @@ #include "intel_display_types.h" #include "intel_dp_mst.h" #include "intel_tc.h" +#include "intel_tc_phy_regs.h" static const char *tc_port_mode_name(enum tc_port_mode mode) { diff --git a/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h b/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h new file mode 100644 index 000000000000..87b74c3c35a7 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h @@ -0,0 +1,344 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __INTEL_TC_PHY_REGS__ +#define __INTEL_TC_PHY_REGS__ + +#include "i915_reg_defs.h" + +#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \ + _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) + +#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C +#define MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C +#define MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C +#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C +#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C +#define MG_TX1_LINK_PARAMS(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX1LN1_PORT1) + +#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC +#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC +#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC +#define MG_TX2_LINK_PARAMS(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ + MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ + MG_TX_LINK_PARAMS_TX2LN1_PORT1) +#define 
CRI_USE_FS32 (1 << 5) + +#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C +#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C +#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C +#define MG_TX1_PISO_READLOAD(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX1LN1_PORT1) + +#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC +#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC +#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC +#define MG_TX2_PISO_READLOAD(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ + MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ + MG_TX_PISO_READLOAD_TX2LN1_PORT1) +#define CRI_CALCINIT (1 << 1) + +#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 +#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 +#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 +#define MG_TX1_SWINGCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ + MG_TX_SWINGCTRL_TX1LN0_PORT2, \ + MG_TX_SWINGCTRL_TX1LN1_PORT1) + +#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 +#define MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 +#define MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 +#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 +#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 +#define MG_TX2_SWINGCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ + MG_TX_SWINGCTRL_TX2LN0_PORT2, \ + MG_TX_SWINGCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) +#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) + +#define MG_TX_DRVCTRL_TX1LN0_TXPORT1 0x168144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT1 0x168544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT2 0x169144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT2 0x169544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT3 0x16A144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544 +#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144 +#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544 +#define MG_TX1_DRVCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ + MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ + MG_TX_DRVCTRL_TX1LN1_TXPORT1) + +#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 +#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 +#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 +#define MG_TX2_DRVCTRL(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \ + MG_TX_DRVCTRL_TX2LN0_PORT2, \ + MG_TX_DRVCTRL_TX2LN1_PORT1) +#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) 
<< 24) +#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) +#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) +#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16) +#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16) +#define CRI_LOADGEN_SEL(x) ((x) << 12) +#define CRI_LOADGEN_SEL_MASK (0x3 << 12) + +#define MG_CLKHUB_LN0_PORT1 0x16839C +#define MG_CLKHUB_LN1_PORT1 0x16879C +#define MG_CLKHUB_LN0_PORT2 0x16939C +#define MG_CLKHUB_LN1_PORT2 0x16979C +#define MG_CLKHUB_LN0_PORT3 0x16A39C +#define MG_CLKHUB_LN1_PORT3 0x16A79C +#define MG_CLKHUB_LN0_PORT4 0x16B39C +#define MG_CLKHUB_LN1_PORT4 0x16B79C +#define MG_CLKHUB(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \ + MG_CLKHUB_LN0_PORT2, \ + MG_CLKHUB_LN1_PORT1) +#define CFG_LOW_RATE_LKREN_EN (1 << 11) + +#define MG_TX_DCC_TX1LN0_PORT1 0x168110 +#define MG_TX_DCC_TX1LN1_PORT1 0x168510 +#define MG_TX_DCC_TX1LN0_PORT2 0x169110 +#define MG_TX_DCC_TX1LN1_PORT2 0x169510 +#define MG_TX_DCC_TX1LN0_PORT3 0x16A110 +#define MG_TX_DCC_TX1LN1_PORT3 0x16A510 +#define MG_TX_DCC_TX1LN0_PORT4 0x16B110 +#define MG_TX_DCC_TX1LN1_PORT4 0x16B510 +#define MG_TX1_DCC(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \ + MG_TX_DCC_TX1LN0_PORT2, \ + MG_TX_DCC_TX1LN1_PORT1) +#define MG_TX_DCC_TX2LN0_PORT1 0x168090 +#define MG_TX_DCC_TX2LN1_PORT1 0x168490 +#define MG_TX_DCC_TX2LN0_PORT2 0x169090 +#define MG_TX_DCC_TX2LN1_PORT2 0x169490 +#define MG_TX_DCC_TX2LN0_PORT3 0x16A090 +#define MG_TX_DCC_TX2LN1_PORT3 0x16A490 +#define MG_TX_DCC_TX2LN0_PORT4 0x16B090 +#define MG_TX_DCC_TX2LN1_PORT4 0x16B490 +#define MG_TX2_DCC(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \ + MG_TX_DCC_TX2LN0_PORT2, \ + MG_TX_DCC_TX2LN1_PORT1) +#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25) +#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25) +#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24) + +#define MG_DP_MODE_LN0_ACU_PORT1 0x1683A0 +#define MG_DP_MODE_LN1_ACU_PORT1 0x1687A0 +#define MG_DP_MODE_LN0_ACU_PORT2 0x1693A0 +#define MG_DP_MODE_LN1_ACU_PORT2 0x1697A0 +#define MG_DP_MODE_LN0_ACU_PORT3 0x16A3A0 +#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0 +#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0 +#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0 +#define MG_DP_MODE(ln, tc_port) \ + MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \ + MG_DP_MODE_LN0_ACU_PORT2, \ + MG_DP_MODE_LN1_ACU_PORT1) +#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) +#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) + +#define FIA1_BASE 0x163000 +#define FIA2_BASE 0x16E000 +#define FIA3_BASE 0x16F000 +#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE) +#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) + +/* ICL PHY DFLEX registers */ +#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) +#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx))) +#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx))) + +#define _MG_REFCLKIN_CTL_PORT1 0x16892C +#define _MG_REFCLKIN_CTL_PORT2 0x16992C +#define _MG_REFCLKIN_CTL_PORT3 0x16A92C +#define _MG_REFCLKIN_CTL_PORT4 0x16B92C +#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) +#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8) +#define MG_REFCLKIN_CTL(tc_port) _MMIO_PORT((tc_port), \ + _MG_REFCLKIN_CTL_PORT1, \ + _MG_REFCLKIN_CTL_PORT2) + +#define _MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8 +#define 
_MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8 +#define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8 +#define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8 +#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16) +#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16) +#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) +#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8) +#define MG_CLKTOP2_CORECLKCTL1(tc_port) _MMIO_PORT((tc_port), \ + _MG_CLKTOP2_CORECLKCTL1_PORT1, \ + _MG_CLKTOP2_CORECLKCTL1_PORT2) + +#define _MG_CLKTOP2_HSCLKCTL_PORT1 0x1688D4 +#define _MG_CLKTOP2_HSCLKCTL_PORT2 0x1698D4 +#define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4 +#define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4 +#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16) +#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) +#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) +#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2 (0 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3 (1 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5 (2 << 12) +#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7 (3 << 12) +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8 +#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) +#define MG_CLKTOP2_HSCLKCTL(tc_port) _MMIO_PORT((tc_port), \ + _MG_CLKTOP2_HSCLKCTL_PORT1, \ + _MG_CLKTOP2_HSCLKCTL_PORT2) + +#define _MG_PLL_DIV0_PORT1 0x168A00 +#define _MG_PLL_DIV0_PORT2 0x169A00 +#define _MG_PLL_DIV0_PORT3 0x16AA00 +#define _MG_PLL_DIV0_PORT4 0x16BA00 +#define MG_PLL_DIV0_FRACNEN_H (1 << 30) +#define MG_PLL_DIV0_FBDIV_FRAC_MASK (0x3fffff << 8) +#define MG_PLL_DIV0_FBDIV_FRAC_SHIFT 8 +#define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8) +#define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0) +#define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0) +#define MG_PLL_DIV0(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV0_PORT1, \ + _MG_PLL_DIV0_PORT2) + +#define _MG_PLL_DIV1_PORT1 0x168A04 +#define _MG_PLL_DIV1_PORT2 0x169A04 +#define _MG_PLL_DIV1_PORT3 0x16AA04 +#define _MG_PLL_DIV1_PORT4 0x16BA04 +#define MG_PLL_DIV1_IREF_NDIVRATIO(x) ((x) << 16) +#define MG_PLL_DIV1_DITHER_DIV_1 (0 << 12) +#define MG_PLL_DIV1_DITHER_DIV_2 (1 << 12) +#define MG_PLL_DIV1_DITHER_DIV_4 (2 << 12) +#define MG_PLL_DIV1_DITHER_DIV_8 (3 << 12) +#define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4) +#define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0) +#define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0) +#define MG_PLL_DIV1(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV1_PORT1, \ + _MG_PLL_DIV1_PORT2) + +#define _MG_PLL_LF_PORT1 0x168A08 +#define _MG_PLL_LF_PORT2 0x169A08 +#define _MG_PLL_LF_PORT3 0x16AA08 +#define _MG_PLL_LF_PORT4 0x16BA08 +#define MG_PLL_LF_TDCTARGETCNT(x) ((x) << 24) +#define MG_PLL_LF_AFCCNTSEL_256 (0 << 20) +#define MG_PLL_LF_AFCCNTSEL_512 (1 << 20) +#define MG_PLL_LF_GAINCTRL(x) ((x) << 16) +#define MG_PLL_LF_INT_COEFF(x) ((x) << 8) +#define MG_PLL_LF_PROP_COEFF(x) ((x) << 0) +#define MG_PLL_LF(tc_port) _MMIO_PORT((tc_port), _MG_PLL_LF_PORT1, \ + _MG_PLL_LF_PORT2) + +#define _MG_PLL_FRAC_LOCK_PORT1 0x168A0C +#define _MG_PLL_FRAC_LOCK_PORT2 0x169A0C +#define _MG_PLL_FRAC_LOCK_PORT3 0x16AA0C +#define _MG_PLL_FRAC_LOCK_PORT4 0x16BA0C +#define MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 (1 << 18) +#define MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 (1 << 16) +#define MG_PLL_FRAC_LOCK_LOCKTHRESH(x) ((x) << 11) +#define MG_PLL_FRAC_LOCK_DCODITHEREN (1 << 10) +#define MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN (1 << 8) +#define 
MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x) ((x) << 0) +#define MG_PLL_FRAC_LOCK(tc_port) _MMIO_PORT((tc_port), \ + _MG_PLL_FRAC_LOCK_PORT1, \ + _MG_PLL_FRAC_LOCK_PORT2) + +#define _MG_PLL_SSC_PORT1 0x168A10 +#define _MG_PLL_SSC_PORT2 0x169A10 +#define _MG_PLL_SSC_PORT3 0x16AA10 +#define _MG_PLL_SSC_PORT4 0x16BA10 +#define MG_PLL_SSC_EN (1 << 28) +#define MG_PLL_SSC_TYPE(x) ((x) << 26) +#define MG_PLL_SSC_STEPLENGTH(x) ((x) << 16) +#define MG_PLL_SSC_STEPNUM(x) ((x) << 10) +#define MG_PLL_SSC_FLLEN (1 << 9) +#define MG_PLL_SSC_STEPSIZE(x) ((x) << 0) +#define MG_PLL_SSC(tc_port) _MMIO_PORT((tc_port), _MG_PLL_SSC_PORT1, \ + _MG_PLL_SSC_PORT2) + +#define _MG_PLL_BIAS_PORT1 0x168A14 +#define _MG_PLL_BIAS_PORT2 0x169A14 +#define _MG_PLL_BIAS_PORT3 0x16AA14 +#define _MG_PLL_BIAS_PORT4 0x16BA14 +#define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30) +#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30) +#define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24) +#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24) +#define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16) +#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16) +#define MG_PLL_BIAS_BIASCAL_EN (1 << 15) +#define MG_PLL_BIAS_CTRIM(x) ((x) << 8) +#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8) +#define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5) +#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5) +#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0) +#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0) +#define MG_PLL_BIAS(tc_port) _MMIO_PORT((tc_port), _MG_PLL_BIAS_PORT1, \ + _MG_PLL_BIAS_PORT2) + +#define _MG_PLL_TDC_COLDST_BIAS_PORT1 0x168A18 +#define _MG_PLL_TDC_COLDST_BIAS_PORT2 0x169A18 +#define _MG_PLL_TDC_COLDST_BIAS_PORT3 0x16AA18 +#define _MG_PLL_TDC_COLDST_BIAS_PORT4 0x16BA18 +#define MG_PLL_TDC_COLDST_IREFINT_EN (1 << 27) +#define MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(x) ((x) << 17) +#define MG_PLL_TDC_COLDST_COLDSTART (1 << 16) +#define MG_PLL_TDC_TDCOVCCORR_EN (1 << 2) +#define MG_PLL_TDC_TDCSEL(x) ((x) << 0) +#define MG_PLL_TDC_COLDST_BIAS(tc_port) _MMIO_PORT((tc_port), \ + _MG_PLL_TDC_COLDST_BIAS_PORT1, \ + _MG_PLL_TDC_COLDST_BIAS_PORT2) + +#endif /* __INTEL_TC_PHY_REGS__ */ diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 1b9d98343f03..b3a05ed86734 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -1536,181 +1536,6 @@ #define OCL2_LDOFUSE_PWR_DIS (1 << 6) #define BXT_PORT_CL1CM_DW30(phy) _BXT_PHY((phy), _PORT_CL1CM_DW30_BC) -#define MG_PHY_PORT_LN(ln, tc_port, ln0p1, ln0p2, ln1p1) \ - _MMIO(_PORT(tc_port, ln0p1, ln0p2) + (ln) * ((ln1p1) - (ln0p1))) - -#define MG_TX_LINK_PARAMS_TX1LN0_PORT1 0x16812C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C -#define MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C -#define MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C -#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C -#define MG_TX1_LINK_PARAMS(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ - MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ - MG_TX_LINK_PARAMS_TX1LN1_PORT1) - -#define MG_TX_LINK_PARAMS_TX2LN0_PORT1 0x1680AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC -#define MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC -#define MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC -#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC -#define MG_TX2_LINK_PARAMS(ln, 
tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ - MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ - MG_TX_LINK_PARAMS_TX2LN1_PORT1) -#define CRI_USE_FS32 (1 << 5) - -#define MG_TX_PISO_READLOAD_TX1LN0_PORT1 0x16814C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C -#define MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C -#define MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C -#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C -#define MG_TX1_PISO_READLOAD(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ - MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ - MG_TX_PISO_READLOAD_TX1LN1_PORT1) - -#define MG_TX_PISO_READLOAD_TX2LN0_PORT1 0x1680CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC -#define MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC -#define MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC -#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC -#define MG_TX2_PISO_READLOAD(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ - MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ - MG_TX_PISO_READLOAD_TX2LN1_PORT1) -#define CRI_CALCINIT (1 << 1) - -#define MG_TX_SWINGCTRL_TX1LN0_PORT1 0x168148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 -#define MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 -#define MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 -#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 -#define MG_TX1_SWINGCTRL(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ - MG_TX_SWINGCTRL_TX1LN0_PORT2, \ - MG_TX_SWINGCTRL_TX1LN1_PORT1) - -#define MG_TX_SWINGCTRL_TX2LN0_PORT1 0x1680C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 -#define MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 -#define MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 -#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 -#define MG_TX2_SWINGCTRL(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ - MG_TX_SWINGCTRL_TX2LN0_PORT2, \ - MG_TX_SWINGCTRL_TX2LN1_PORT1) -#define CRI_TXDEEMPH_OVERRIDE_17_12(x) ((x) << 0) -#define CRI_TXDEEMPH_OVERRIDE_17_12_MASK (0x3F << 0) - -#define MG_TX_DRVCTRL_TX1LN0_TXPORT1 0x168144 -#define MG_TX_DRVCTRL_TX1LN1_TXPORT1 0x168544 -#define MG_TX_DRVCTRL_TX1LN0_TXPORT2 0x169144 -#define MG_TX_DRVCTRL_TX1LN1_TXPORT2 0x169544 -#define MG_TX_DRVCTRL_TX1LN0_TXPORT3 0x16A144 -#define MG_TX_DRVCTRL_TX1LN1_TXPORT3 0x16A544 -#define MG_TX_DRVCTRL_TX1LN0_TXPORT4 0x16B144 -#define MG_TX_DRVCTRL_TX1LN1_TXPORT4 0x16B544 -#define MG_TX1_DRVCTRL(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX1LN0_TXPORT1, \ - MG_TX_DRVCTRL_TX1LN0_TXPORT2, \ - MG_TX_DRVCTRL_TX1LN1_TXPORT1) - -#define MG_TX_DRVCTRL_TX2LN0_PORT1 0x1680C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 -#define MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 -#define MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 -#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 -#define MG_TX2_DRVCTRL(ln, tc_port) \ - 
MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \ - MG_TX_DRVCTRL_TX2LN0_PORT2, \ - MG_TX_DRVCTRL_TX2LN1_PORT1) -#define CRI_TXDEEMPH_OVERRIDE_11_6(x) ((x) << 24) -#define CRI_TXDEEMPH_OVERRIDE_11_6_MASK (0x3F << 24) -#define CRI_TXDEEMPH_OVERRIDE_EN (1 << 22) -#define CRI_TXDEEMPH_OVERRIDE_5_0(x) ((x) << 16) -#define CRI_TXDEEMPH_OVERRIDE_5_0_MASK (0x3F << 16) -#define CRI_LOADGEN_SEL(x) ((x) << 12) -#define CRI_LOADGEN_SEL_MASK (0x3 << 12) - -#define MG_CLKHUB_LN0_PORT1 0x16839C -#define MG_CLKHUB_LN1_PORT1 0x16879C -#define MG_CLKHUB_LN0_PORT2 0x16939C -#define MG_CLKHUB_LN1_PORT2 0x16979C -#define MG_CLKHUB_LN0_PORT3 0x16A39C -#define MG_CLKHUB_LN1_PORT3 0x16A79C -#define MG_CLKHUB_LN0_PORT4 0x16B39C -#define MG_CLKHUB_LN1_PORT4 0x16B79C -#define MG_CLKHUB(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \ - MG_CLKHUB_LN0_PORT2, \ - MG_CLKHUB_LN1_PORT1) -#define CFG_LOW_RATE_LKREN_EN (1 << 11) - -#define MG_TX_DCC_TX1LN0_PORT1 0x168110 -#define MG_TX_DCC_TX1LN1_PORT1 0x168510 -#define MG_TX_DCC_TX1LN0_PORT2 0x169110 -#define MG_TX_DCC_TX1LN1_PORT2 0x169510 -#define MG_TX_DCC_TX1LN0_PORT3 0x16A110 -#define MG_TX_DCC_TX1LN1_PORT3 0x16A510 -#define MG_TX_DCC_TX1LN0_PORT4 0x16B110 -#define MG_TX_DCC_TX1LN1_PORT4 0x16B510 -#define MG_TX1_DCC(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \ - MG_TX_DCC_TX1LN0_PORT2, \ - MG_TX_DCC_TX1LN1_PORT1) -#define MG_TX_DCC_TX2LN0_PORT1 0x168090 -#define MG_TX_DCC_TX2LN1_PORT1 0x168490 -#define MG_TX_DCC_TX2LN0_PORT2 0x169090 -#define MG_TX_DCC_TX2LN1_PORT2 0x169490 -#define MG_TX_DCC_TX2LN0_PORT3 0x16A090 -#define MG_TX_DCC_TX2LN1_PORT3 0x16A490 -#define MG_TX_DCC_TX2LN0_PORT4 0x16B090 -#define MG_TX_DCC_TX2LN1_PORT4 0x16B490 -#define MG_TX2_DCC(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \ - MG_TX_DCC_TX2LN0_PORT2, \ - MG_TX_DCC_TX2LN1_PORT1) -#define CFG_AMI_CK_DIV_OVERRIDE_VAL(x) ((x) << 25) -#define CFG_AMI_CK_DIV_OVERRIDE_VAL_MASK (0x3 << 25) -#define CFG_AMI_CK_DIV_OVERRIDE_EN (1 << 24) - -#define MG_DP_MODE_LN0_ACU_PORT1 0x1683A0 -#define MG_DP_MODE_LN1_ACU_PORT1 0x1687A0 -#define MG_DP_MODE_LN0_ACU_PORT2 0x1693A0 -#define MG_DP_MODE_LN1_ACU_PORT2 0x1697A0 -#define MG_DP_MODE_LN0_ACU_PORT3 0x16A3A0 -#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0 -#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0 -#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0 -#define MG_DP_MODE(ln, tc_port) \ - MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \ - MG_DP_MODE_LN0_ACU_PORT2, \ - MG_DP_MODE_LN1_ACU_PORT1) -#define MG_DP_MODE_CFG_DP_X2_MODE (1 << 7) -#define MG_DP_MODE_CFG_DP_X1_MODE (1 << 6) - /* The spec defines this only for BXT PHY0, but lets assume that this * would exist for PHY1 too if it had a second channel. 
*/ @@ -1719,21 +1544,6 @@ #define BXT_PORT_CL2CM_DW6(phy) _BXT_PHY((phy), _PORT_CL2CM_DW6_BC) #define DW6_OLDO_DYN_PWR_DOWN_EN (1 << 28) -#define FIA1_BASE 0x163000 -#define FIA2_BASE 0x16E000 -#define FIA3_BASE 0x16F000 -#define _FIA(fia) _PICK((fia), FIA1_BASE, FIA2_BASE, FIA3_BASE) -#define _MMIO_FIA(fia, off) _MMIO(_FIA(fia) + (off)) - -/* ICL PHY DFLEX registers */ -#define PORT_TX_DFLEXDPMLE1(fia) _MMIO_FIA((fia), 0x008C0) -#define DFLEXDPMLE1_DPMLETC_MASK(idx) (0xf << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML0(idx) (1 << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML1_0(idx) (3 << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML3(idx) (8 << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML3_2(idx) (12 << (4 * (idx))) -#define DFLEXDPMLE1_DPMLETC_ML3_0(idx) (15 << (4 * (idx))) - /* BXT PHY Ref registers */ #define _PORT_REF_DW3_A 0x16218C #define _PORT_REF_DW3_BC 0x6C18C @@ -9962,149 +9772,6 @@ enum skl_power_gate { PORTTC1_PLL_ENABLE, \ PORTTC2_PLL_ENABLE) -#define _MG_REFCLKIN_CTL_PORT1 0x16892C -#define _MG_REFCLKIN_CTL_PORT2 0x16992C -#define _MG_REFCLKIN_CTL_PORT3 0x16A92C -#define _MG_REFCLKIN_CTL_PORT4 0x16B92C -#define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) -#define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8) -#define MG_REFCLKIN_CTL(tc_port) _MMIO_PORT((tc_port), \ - _MG_REFCLKIN_CTL_PORT1, \ - _MG_REFCLKIN_CTL_PORT2) - -#define _MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8 -#define _MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8 -#define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8 -#define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8 -#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16) -#define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16) -#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) -#define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO_MASK (0xff << 8) -#define MG_CLKTOP2_CORECLKCTL1(tc_port) _MMIO_PORT((tc_port), \ - _MG_CLKTOP2_CORECLKCTL1_PORT1, \ - _MG_CLKTOP2_CORECLKCTL1_PORT2) - -#define _MG_CLKTOP2_HSCLKCTL_PORT1 0x1688D4 -#define _MG_CLKTOP2_HSCLKCTL_PORT2 0x1698D4 -#define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4 -#define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4 -#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16) -#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) -#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) -#define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL_MASK (0x3 << 14) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_MASK (0x3 << 12) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_2 (0 << 12) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_3 (1 << 12) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_5 (2 << 12) -#define MG_CLKTOP2_HSCLKCTL_HSDIV_RATIO_7 (3 << 12) -#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO(x) ((x) << 8) -#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_SHIFT 8 -#define MG_CLKTOP2_HSCLKCTL_DSDIV_RATIO_MASK (0xf << 8) -#define MG_CLKTOP2_HSCLKCTL(tc_port) _MMIO_PORT((tc_port), \ - _MG_CLKTOP2_HSCLKCTL_PORT1, \ - _MG_CLKTOP2_HSCLKCTL_PORT2) - -#define _MG_PLL_DIV0_PORT1 0x168A00 -#define _MG_PLL_DIV0_PORT2 0x169A00 -#define _MG_PLL_DIV0_PORT3 0x16AA00 -#define _MG_PLL_DIV0_PORT4 0x16BA00 -#define MG_PLL_DIV0_FRACNEN_H (1 << 30) -#define MG_PLL_DIV0_FBDIV_FRAC_MASK (0x3fffff << 8) -#define MG_PLL_DIV0_FBDIV_FRAC_SHIFT 8 -#define MG_PLL_DIV0_FBDIV_FRAC(x) ((x) << 8) -#define MG_PLL_DIV0_FBDIV_INT_MASK (0xff << 0) -#define MG_PLL_DIV0_FBDIV_INT(x) ((x) << 0) -#define MG_PLL_DIV0(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV0_PORT1, \ - _MG_PLL_DIV0_PORT2) - -#define _MG_PLL_DIV1_PORT1 0x168A04 -#define _MG_PLL_DIV1_PORT2 0x169A04 -#define _MG_PLL_DIV1_PORT3 0x16AA04 -#define _MG_PLL_DIV1_PORT4 0x16BA04 
-#define MG_PLL_DIV1_IREF_NDIVRATIO(x) ((x) << 16) -#define MG_PLL_DIV1_DITHER_DIV_1 (0 << 12) -#define MG_PLL_DIV1_DITHER_DIV_2 (1 << 12) -#define MG_PLL_DIV1_DITHER_DIV_4 (2 << 12) -#define MG_PLL_DIV1_DITHER_DIV_8 (3 << 12) -#define MG_PLL_DIV1_NDIVRATIO(x) ((x) << 4) -#define MG_PLL_DIV1_FBPREDIV_MASK (0xf << 0) -#define MG_PLL_DIV1_FBPREDIV(x) ((x) << 0) -#define MG_PLL_DIV1(tc_port) _MMIO_PORT((tc_port), _MG_PLL_DIV1_PORT1, \ - _MG_PLL_DIV1_PORT2) - -#define _MG_PLL_LF_PORT1 0x168A08 -#define _MG_PLL_LF_PORT2 0x169A08 -#define _MG_PLL_LF_PORT3 0x16AA08 -#define _MG_PLL_LF_PORT4 0x16BA08 -#define MG_PLL_LF_TDCTARGETCNT(x) ((x) << 24) -#define MG_PLL_LF_AFCCNTSEL_256 (0 << 20) -#define MG_PLL_LF_AFCCNTSEL_512 (1 << 20) -#define MG_PLL_LF_GAINCTRL(x) ((x) << 16) -#define MG_PLL_LF_INT_COEFF(x) ((x) << 8) -#define MG_PLL_LF_PROP_COEFF(x) ((x) << 0) -#define MG_PLL_LF(tc_port) _MMIO_PORT((tc_port), _MG_PLL_LF_PORT1, \ - _MG_PLL_LF_PORT2) - -#define _MG_PLL_FRAC_LOCK_PORT1 0x168A0C -#define _MG_PLL_FRAC_LOCK_PORT2 0x169A0C -#define _MG_PLL_FRAC_LOCK_PORT3 0x16AA0C -#define _MG_PLL_FRAC_LOCK_PORT4 0x16BA0C -#define MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 (1 << 18) -#define MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 (1 << 16) -#define MG_PLL_FRAC_LOCK_LOCKTHRESH(x) ((x) << 11) -#define MG_PLL_FRAC_LOCK_DCODITHEREN (1 << 10) -#define MG_PLL_FRAC_LOCK_FEEDFWRDCAL_EN (1 << 8) -#define MG_PLL_FRAC_LOCK_FEEDFWRDGAIN(x) ((x) << 0) -#define MG_PLL_FRAC_LOCK(tc_port) _MMIO_PORT((tc_port), \ - _MG_PLL_FRAC_LOCK_PORT1, \ - _MG_PLL_FRAC_LOCK_PORT2) - -#define _MG_PLL_SSC_PORT1 0x168A10 -#define _MG_PLL_SSC_PORT2 0x169A10 -#define _MG_PLL_SSC_PORT3 0x16AA10 -#define _MG_PLL_SSC_PORT4 0x16BA10 -#define MG_PLL_SSC_EN (1 << 28) -#define MG_PLL_SSC_TYPE(x) ((x) << 26) -#define MG_PLL_SSC_STEPLENGTH(x) ((x) << 16) -#define MG_PLL_SSC_STEPNUM(x) ((x) << 10) -#define MG_PLL_SSC_FLLEN (1 << 9) -#define MG_PLL_SSC_STEPSIZE(x) ((x) << 0) -#define MG_PLL_SSC(tc_port) _MMIO_PORT((tc_port), _MG_PLL_SSC_PORT1, \ - _MG_PLL_SSC_PORT2) - -#define _MG_PLL_BIAS_PORT1 0x168A14 -#define _MG_PLL_BIAS_PORT2 0x169A14 -#define _MG_PLL_BIAS_PORT3 0x16AA14 -#define _MG_PLL_BIAS_PORT4 0x16BA14 -#define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30) -#define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30) -#define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24) -#define MG_PLL_BIAS_INIT_DCOAMP_MASK (0x3f << 24) -#define MG_PLL_BIAS_BIAS_BONUS(x) ((x) << 16) -#define MG_PLL_BIAS_BIAS_BONUS_MASK (0xff << 16) -#define MG_PLL_BIAS_BIASCAL_EN (1 << 15) -#define MG_PLL_BIAS_CTRIM(x) ((x) << 8) -#define MG_PLL_BIAS_CTRIM_MASK (0x1f << 8) -#define MG_PLL_BIAS_VREF_RDAC(x) ((x) << 5) -#define MG_PLL_BIAS_VREF_RDAC_MASK (0x7 << 5) -#define MG_PLL_BIAS_IREFTRIM(x) ((x) << 0) -#define MG_PLL_BIAS_IREFTRIM_MASK (0x1f << 0) -#define MG_PLL_BIAS(tc_port) _MMIO_PORT((tc_port), _MG_PLL_BIAS_PORT1, \ - _MG_PLL_BIAS_PORT2) - -#define _MG_PLL_TDC_COLDST_BIAS_PORT1 0x168A18 -#define _MG_PLL_TDC_COLDST_BIAS_PORT2 0x169A18 -#define _MG_PLL_TDC_COLDST_BIAS_PORT3 0x16AA18 -#define _MG_PLL_TDC_COLDST_BIAS_PORT4 0x16BA18 -#define MG_PLL_TDC_COLDST_IREFINT_EN (1 << 27) -#define MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(x) ((x) << 17) -#define MG_PLL_TDC_COLDST_COLDSTART (1 << 16) -#define MG_PLL_TDC_TDCOVCCORR_EN (1 << 2) -#define MG_PLL_TDC_TDCSEL(x) ((x) << 0) -#define MG_PLL_TDC_COLDST_BIAS(tc_port) _MMIO_PORT((tc_port), \ - _MG_PLL_TDC_COLDST_BIAS_PORT1, \ - _MG_PLL_TDC_COLDST_BIAS_PORT2) - #define _ICL_DPLL0_CFGCR0 0x164000 #define _ICL_DPLL1_CFGCR0 0x164080 #define ICL_DPLL_CFGCR0(pll) 
_MMIO_PLL(pll, _ICL_DPLL0_CFGCR0, \ -- cgit From 43571e15c057f69734d0ee3be45fdf9e4adee614 Mon Sep 17 00:00:00 2001 From: Matt Roper Date: Mon, 10 Jan 2022 21:16:00 -0800 Subject: drm/i915: Drop unused _PORT3 and _PORT4 TC phy register offsets All MG/DKL PHY register regions are evenly spaced offset-wise (0x168000, 0x169000, 0x16A000, 0x16B000) so the _MMIO_PORT() macro we use to access their registers only needs the first two offsets. We can drop the _PORT3 and _PORT4 offsets which are never directly referenced. Cc: Jani Nikula Signed-off-by: Matt Roper Reviewed-by: Jani Nikula Link: https://patchwork.freedesktop.org/patch/msgid/20220111051600.3429104-12-matthew.d.roper@intel.com --- drivers/gpu/drm/i915/display/intel_tc_phy_regs.h | 64 ------------------------ 1 file changed, 64 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h b/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h index 87b74c3c35a7..5a545086f959 100644 --- a/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h +++ b/drivers/gpu/drm/i915/display/intel_tc_phy_regs.h @@ -15,10 +15,6 @@ #define MG_TX_LINK_PARAMS_TX1LN1_PORT1 0x16852C #define MG_TX_LINK_PARAMS_TX1LN0_PORT2 0x16912C #define MG_TX_LINK_PARAMS_TX1LN1_PORT2 0x16952C -#define MG_TX_LINK_PARAMS_TX1LN0_PORT3 0x16A12C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT3 0x16A52C -#define MG_TX_LINK_PARAMS_TX1LN0_PORT4 0x16B12C -#define MG_TX_LINK_PARAMS_TX1LN1_PORT4 0x16B52C #define MG_TX1_LINK_PARAMS(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX1LN0_PORT1, \ MG_TX_LINK_PARAMS_TX1LN0_PORT2, \ @@ -28,10 +24,6 @@ #define MG_TX_LINK_PARAMS_TX2LN1_PORT1 0x1684AC #define MG_TX_LINK_PARAMS_TX2LN0_PORT2 0x1690AC #define MG_TX_LINK_PARAMS_TX2LN1_PORT2 0x1694AC -#define MG_TX_LINK_PARAMS_TX2LN0_PORT3 0x16A0AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT3 0x16A4AC -#define MG_TX_LINK_PARAMS_TX2LN0_PORT4 0x16B0AC -#define MG_TX_LINK_PARAMS_TX2LN1_PORT4 0x16B4AC #define MG_TX2_LINK_PARAMS(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_TX_LINK_PARAMS_TX2LN0_PORT1, \ MG_TX_LINK_PARAMS_TX2LN0_PORT2, \ @@ -42,10 +34,6 @@ #define MG_TX_PISO_READLOAD_TX1LN1_PORT1 0x16854C #define MG_TX_PISO_READLOAD_TX1LN0_PORT2 0x16914C #define MG_TX_PISO_READLOAD_TX1LN1_PORT2 0x16954C -#define MG_TX_PISO_READLOAD_TX1LN0_PORT3 0x16A14C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT3 0x16A54C -#define MG_TX_PISO_READLOAD_TX1LN0_PORT4 0x16B14C -#define MG_TX_PISO_READLOAD_TX1LN1_PORT4 0x16B54C #define MG_TX1_PISO_READLOAD(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX1LN0_PORT1, \ MG_TX_PISO_READLOAD_TX1LN0_PORT2, \ @@ -55,10 +43,6 @@ #define MG_TX_PISO_READLOAD_TX2LN1_PORT1 0x1684CC #define MG_TX_PISO_READLOAD_TX2LN0_PORT2 0x1690CC #define MG_TX_PISO_READLOAD_TX2LN1_PORT2 0x1694CC -#define MG_TX_PISO_READLOAD_TX2LN0_PORT3 0x16A0CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT3 0x16A4CC -#define MG_TX_PISO_READLOAD_TX2LN0_PORT4 0x16B0CC -#define MG_TX_PISO_READLOAD_TX2LN1_PORT4 0x16B4CC #define MG_TX2_PISO_READLOAD(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_TX_PISO_READLOAD_TX2LN0_PORT1, \ MG_TX_PISO_READLOAD_TX2LN0_PORT2, \ @@ -69,10 +53,6 @@ #define MG_TX_SWINGCTRL_TX1LN1_PORT1 0x168548 #define MG_TX_SWINGCTRL_TX1LN0_PORT2 0x169148 #define MG_TX_SWINGCTRL_TX1LN1_PORT2 0x169548 -#define MG_TX_SWINGCTRL_TX1LN0_PORT3 0x16A148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT3 0x16A548 -#define MG_TX_SWINGCTRL_TX1LN0_PORT4 0x16B148 -#define MG_TX_SWINGCTRL_TX1LN1_PORT4 0x16B548 #define MG_TX1_SWINGCTRL(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX1LN0_PORT1, \ 
MG_TX_SWINGCTRL_TX1LN0_PORT2, \ @@ -82,10 +62,6 @@ #define MG_TX_SWINGCTRL_TX2LN1_PORT1 0x1684C8 #define MG_TX_SWINGCTRL_TX2LN0_PORT2 0x1690C8 #define MG_TX_SWINGCTRL_TX2LN1_PORT2 0x1694C8 -#define MG_TX_SWINGCTRL_TX2LN0_PORT3 0x16A0C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT3 0x16A4C8 -#define MG_TX_SWINGCTRL_TX2LN0_PORT4 0x16B0C8 -#define MG_TX_SWINGCTRL_TX2LN1_PORT4 0x16B4C8 #define MG_TX2_SWINGCTRL(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_TX_SWINGCTRL_TX2LN0_PORT1, \ MG_TX_SWINGCTRL_TX2LN0_PORT2, \ @@ -110,10 +86,6 @@ #define MG_TX_DRVCTRL_TX2LN1_PORT1 0x1684C4 #define MG_TX_DRVCTRL_TX2LN0_PORT2 0x1690C4 #define MG_TX_DRVCTRL_TX2LN1_PORT2 0x1694C4 -#define MG_TX_DRVCTRL_TX2LN0_PORT3 0x16A0C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT3 0x16A4C4 -#define MG_TX_DRVCTRL_TX2LN0_PORT4 0x16B0C4 -#define MG_TX_DRVCTRL_TX2LN1_PORT4 0x16B4C4 #define MG_TX2_DRVCTRL(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DRVCTRL_TX2LN0_PORT1, \ MG_TX_DRVCTRL_TX2LN0_PORT2, \ @@ -130,10 +102,6 @@ #define MG_CLKHUB_LN1_PORT1 0x16879C #define MG_CLKHUB_LN0_PORT2 0x16939C #define MG_CLKHUB_LN1_PORT2 0x16979C -#define MG_CLKHUB_LN0_PORT3 0x16A39C -#define MG_CLKHUB_LN1_PORT3 0x16A79C -#define MG_CLKHUB_LN0_PORT4 0x16B39C -#define MG_CLKHUB_LN1_PORT4 0x16B79C #define MG_CLKHUB(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_CLKHUB_LN0_PORT1, \ MG_CLKHUB_LN0_PORT2, \ @@ -144,10 +112,6 @@ #define MG_TX_DCC_TX1LN1_PORT1 0x168510 #define MG_TX_DCC_TX1LN0_PORT2 0x169110 #define MG_TX_DCC_TX1LN1_PORT2 0x169510 -#define MG_TX_DCC_TX1LN0_PORT3 0x16A110 -#define MG_TX_DCC_TX1LN1_PORT3 0x16A510 -#define MG_TX_DCC_TX1LN0_PORT4 0x16B110 -#define MG_TX_DCC_TX1LN1_PORT4 0x16B510 #define MG_TX1_DCC(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX1LN0_PORT1, \ MG_TX_DCC_TX1LN0_PORT2, \ @@ -156,10 +120,6 @@ #define MG_TX_DCC_TX2LN1_PORT1 0x168490 #define MG_TX_DCC_TX2LN0_PORT2 0x169090 #define MG_TX_DCC_TX2LN1_PORT2 0x169490 -#define MG_TX_DCC_TX2LN0_PORT3 0x16A090 -#define MG_TX_DCC_TX2LN1_PORT3 0x16A490 -#define MG_TX_DCC_TX2LN0_PORT4 0x16B090 -#define MG_TX_DCC_TX2LN1_PORT4 0x16B490 #define MG_TX2_DCC(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_TX_DCC_TX2LN0_PORT1, \ MG_TX_DCC_TX2LN0_PORT2, \ @@ -172,10 +132,6 @@ #define MG_DP_MODE_LN1_ACU_PORT1 0x1687A0 #define MG_DP_MODE_LN0_ACU_PORT2 0x1693A0 #define MG_DP_MODE_LN1_ACU_PORT2 0x1697A0 -#define MG_DP_MODE_LN0_ACU_PORT3 0x16A3A0 -#define MG_DP_MODE_LN1_ACU_PORT3 0x16A7A0 -#define MG_DP_MODE_LN0_ACU_PORT4 0x16B3A0 -#define MG_DP_MODE_LN1_ACU_PORT4 0x16B7A0 #define MG_DP_MODE(ln, tc_port) \ MG_PHY_PORT_LN(ln, tc_port, MG_DP_MODE_LN0_ACU_PORT1, \ MG_DP_MODE_LN0_ACU_PORT2, \ @@ -200,8 +156,6 @@ #define _MG_REFCLKIN_CTL_PORT1 0x16892C #define _MG_REFCLKIN_CTL_PORT2 0x16992C -#define _MG_REFCLKIN_CTL_PORT3 0x16A92C -#define _MG_REFCLKIN_CTL_PORT4 0x16B92C #define MG_REFCLKIN_CTL_OD_2_MUX(x) ((x) << 8) #define MG_REFCLKIN_CTL_OD_2_MUX_MASK (0x7 << 8) #define MG_REFCLKIN_CTL(tc_port) _MMIO_PORT((tc_port), \ @@ -210,8 +164,6 @@ #define _MG_CLKTOP2_CORECLKCTL1_PORT1 0x1688D8 #define _MG_CLKTOP2_CORECLKCTL1_PORT2 0x1698D8 -#define _MG_CLKTOP2_CORECLKCTL1_PORT3 0x16A8D8 -#define _MG_CLKTOP2_CORECLKCTL1_PORT4 0x16B8D8 #define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO(x) ((x) << 16) #define MG_CLKTOP2_CORECLKCTL1_B_DIVRATIO_MASK (0xff << 16) #define MG_CLKTOP2_CORECLKCTL1_A_DIVRATIO(x) ((x) << 8) @@ -222,8 +174,6 @@ #define _MG_CLKTOP2_HSCLKCTL_PORT1 0x1688D4 #define _MG_CLKTOP2_HSCLKCTL_PORT2 0x1698D4 -#define _MG_CLKTOP2_HSCLKCTL_PORT3 0x16A8D4 -#define _MG_CLKTOP2_HSCLKCTL_PORT4 0x16B8D4 
#define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL(x) ((x) << 16) #define MG_CLKTOP2_HSCLKCTL_CORE_INPUTSEL_MASK (0x1 << 16) #define MG_CLKTOP2_HSCLKCTL_TLINEDRV_CLKSEL(x) ((x) << 14) @@ -242,8 +192,6 @@ #define _MG_PLL_DIV0_PORT1 0x168A00 #define _MG_PLL_DIV0_PORT2 0x169A00 -#define _MG_PLL_DIV0_PORT3 0x16AA00 -#define _MG_PLL_DIV0_PORT4 0x16BA00 #define MG_PLL_DIV0_FRACNEN_H (1 << 30) #define MG_PLL_DIV0_FBDIV_FRAC_MASK (0x3fffff << 8) #define MG_PLL_DIV0_FBDIV_FRAC_SHIFT 8 @@ -255,8 +203,6 @@ #define _MG_PLL_DIV1_PORT1 0x168A04 #define _MG_PLL_DIV1_PORT2 0x169A04 -#define _MG_PLL_DIV1_PORT3 0x16AA04 -#define _MG_PLL_DIV1_PORT4 0x16BA04 #define MG_PLL_DIV1_IREF_NDIVRATIO(x) ((x) << 16) #define MG_PLL_DIV1_DITHER_DIV_1 (0 << 12) #define MG_PLL_DIV1_DITHER_DIV_2 (1 << 12) @@ -270,8 +216,6 @@ #define _MG_PLL_LF_PORT1 0x168A08 #define _MG_PLL_LF_PORT2 0x169A08 -#define _MG_PLL_LF_PORT3 0x16AA08 -#define _MG_PLL_LF_PORT4 0x16BA08 #define MG_PLL_LF_TDCTARGETCNT(x) ((x) << 24) #define MG_PLL_LF_AFCCNTSEL_256 (0 << 20) #define MG_PLL_LF_AFCCNTSEL_512 (1 << 20) @@ -283,8 +227,6 @@ #define _MG_PLL_FRAC_LOCK_PORT1 0x168A0C #define _MG_PLL_FRAC_LOCK_PORT2 0x169A0C -#define _MG_PLL_FRAC_LOCK_PORT3 0x16AA0C -#define _MG_PLL_FRAC_LOCK_PORT4 0x16BA0C #define MG_PLL_FRAC_LOCK_TRUELOCK_CRIT_32 (1 << 18) #define MG_PLL_FRAC_LOCK_EARLYLOCK_CRIT_32 (1 << 16) #define MG_PLL_FRAC_LOCK_LOCKTHRESH(x) ((x) << 11) @@ -297,8 +239,6 @@ #define _MG_PLL_SSC_PORT1 0x168A10 #define _MG_PLL_SSC_PORT2 0x169A10 -#define _MG_PLL_SSC_PORT3 0x16AA10 -#define _MG_PLL_SSC_PORT4 0x16BA10 #define MG_PLL_SSC_EN (1 << 28) #define MG_PLL_SSC_TYPE(x) ((x) << 26) #define MG_PLL_SSC_STEPLENGTH(x) ((x) << 16) @@ -310,8 +250,6 @@ #define _MG_PLL_BIAS_PORT1 0x168A14 #define _MG_PLL_BIAS_PORT2 0x169A14 -#define _MG_PLL_BIAS_PORT3 0x16AA14 -#define _MG_PLL_BIAS_PORT4 0x16BA14 #define MG_PLL_BIAS_BIAS_GB_SEL(x) ((x) << 30) #define MG_PLL_BIAS_BIAS_GB_SEL_MASK (0x3 << 30) #define MG_PLL_BIAS_INIT_DCOAMP(x) ((x) << 24) @@ -330,8 +268,6 @@ #define _MG_PLL_TDC_COLDST_BIAS_PORT1 0x168A18 #define _MG_PLL_TDC_COLDST_BIAS_PORT2 0x169A18 -#define _MG_PLL_TDC_COLDST_BIAS_PORT3 0x16AA18 -#define _MG_PLL_TDC_COLDST_BIAS_PORT4 0x16BA18 #define MG_PLL_TDC_COLDST_IREFINT_EN (1 << 27) #define MG_PLL_TDC_COLDST_REFBIAS_START_PULSE_W(x) ((x) << 17) #define MG_PLL_TDC_COLDST_COLDSTART (1 << 16) -- cgit From 919606f5e7d8cfbdef47ab7e24bf37cf86dd1512 Mon Sep 17 00:00:00 2001 From: Guenter Roeck Date: Sun, 23 May 2021 10:23:04 -0700 Subject: drm/i915/gvt: Use list_entry to access list members Use list_entry() instead of container_of() to access list members. Also drop unnecessary and misleading NULL checks on the result of list_entry(). 
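A toy, standalone illustration of why those NULL checks are misleading (the struct below is a simplified stand-in, not the GVT type): list_entry() is just container_of() applied to the embedded list_head, so for a non-NULL list position it can never evaluate to NULL.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member) container_of(ptr, type, member)

struct list_head { struct list_head *next, *prev; };

struct dmabuf_obj {                  /* stand-in for intel_vgpu_dmabuf_obj */
        int dmabuf_id;
        struct list_head list;
};

int main(void)
{
        struct dmabuf_obj obj = { .dmabuf_id = 42 };
        struct list_head *pos = &obj.list;

        /* Equivalent expressions; list_entry() states the intent directly. */
        struct dmabuf_obj *a = container_of(pos, struct dmabuf_obj, list);
        struct dmabuf_obj *b = list_entry(pos, struct dmabuf_obj, list);

        printf("%d %d\n", a->dmabuf_id, b->dmabuf_id);   /* prints: 42 42 */
        return 0;
}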
Signed-off-by: Guenter Roeck Signed-off-by: Zhenyu Wang Link: http://patchwork.freedesktop.org/patch/msgid/20210523172304.3033229-1-linux@roeck-us.net Reviewed-by: Zhenyu Wang Signed-off-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/dmabuf.c | 18 +++++------------- 1 file changed, 5 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/dmabuf.c b/drivers/gpu/drm/i915/gvt/dmabuf.c index 8e65cd8258b9..ebe1ecd54ef8 100644 --- a/drivers/gpu/drm/i915/gvt/dmabuf.c +++ b/drivers/gpu/drm/i915/gvt/dmabuf.c @@ -148,8 +148,7 @@ static void dmabuf_gem_object_free(struct kref *kref) if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) { list_for_each(pos, &vgpu->dmabuf_obj_list_head) { - dmabuf_obj = container_of(pos, - struct intel_vgpu_dmabuf_obj, list); + dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj == obj) { list_del(pos); intel_gvt_hypervisor_put_vfio_device(vgpu); @@ -357,10 +356,8 @@ pick_dmabuf_by_info(struct intel_vgpu *vgpu, struct intel_vgpu_dmabuf_obj *ret = NULL; list_for_each(pos, &vgpu->dmabuf_obj_list_head) { - dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj, - list); - if ((dmabuf_obj == NULL) || - (dmabuf_obj->info == NULL)) + dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); + if (!dmabuf_obj->info) continue; fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info; @@ -387,11 +384,7 @@ pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id) struct intel_vgpu_dmabuf_obj *ret = NULL; list_for_each(pos, &vgpu->dmabuf_obj_list_head) { - dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj, - list); - if (!dmabuf_obj) - continue; - + dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); if (dmabuf_obj->dmabuf_id == id) { ret = dmabuf_obj; break; @@ -600,8 +593,7 @@ void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu) mutex_lock(&vgpu->dmabuf_lock); list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) { - dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj, - list); + dmabuf_obj = list_entry(pos, struct intel_vgpu_dmabuf_obj, list); dmabuf_obj->vgpu = NULL; idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id); -- cgit From 3e1f4c491559998615cc8ee287c673f0f7e66534 Mon Sep 17 00:00:00 2001 From: Zhenyu Wang Date: Mon, 11 Oct 2021 12:33:29 +0800 Subject: drm/i915/gvt: Fix cmd parser error for Passmark9 This is to add one new register required for windows guest driver update when running Passmark9, otherwise cmd parser would complain and fail guest workload. 
Cc: Terrence Xu Signed-off-by: Zhenyu Wang Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211011043329.3519093-1-zhenyuw@linux.intel.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/handlers.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index cde0a477fb49..805fee4e91ef 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3436,6 +3436,7 @@ static int init_skl_mmio_info(struct intel_gvt *gvt) MMIO_DFH(GAMT_CHKN_BIT_REG, D_KBL | D_CFL, F_CMD_ACCESS, NULL, NULL); MMIO_D(GEN9_CTX_PREEMPT_REG, D_SKL_PLUS & ~D_BXT); + MMIO_DFH(_MMIO(0xe4cc), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL); return 0; } -- cgit From c41aadd26496db9c21deb612445801f3e44ee8b2 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 11:55:19 +0100 Subject: drm/i915/gvt: Constify intel_gvt_gtt_gma_ops These are never modified, so make them const to allow the compiler to put them in read-only memory. Signed-off-by: Rikard Falkeborn Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211204105527.15741-2-rikard.falkeborn@gmail.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 4 ++-- drivers/gpu/drm/i915/gvt/gtt.h | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 53d0cb327539..6efa48727052 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -516,7 +516,7 @@ static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { .set_pfn = gen8_gtt_set_pfn, }; -static struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { +static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { .gma_to_ggtt_pte_index = gma_to_ggtt_pte_index, .gma_to_pte_index = gen8_gma_to_pte_index, .gma_to_pde_index = gen8_gma_to_pde_index, @@ -2097,7 +2097,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; - struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; + const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; unsigned long gpa = INTEL_GVT_INVALID_ADDR; unsigned long gma_index[4]; struct intel_gvt_gtt_entry e; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index 3bf45672ef98..d0d598322404 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -92,7 +92,7 @@ struct intel_gvt_gtt_gma_ops { struct intel_gvt_gtt { struct intel_gvt_gtt_pte_ops *pte_ops; - struct intel_gvt_gtt_gma_ops *gma_ops; + const struct intel_gvt_gtt_gma_ops *gma_ops; int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm); void (*mm_free_page_table)(struct intel_vgpu_mm *mm); struct list_head oos_page_use_list_head; -- cgit From 5512445c9b64a2fd78f37c41796745d72c02e9a3 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 11:55:20 +0100 Subject: drm/i915/gvt: Constify intel_gvt_gtt_pte_ops These are never modified, so make them const to allow the compiler to put them in read-only memory. 
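A minimal standalone sketch of the constification pattern (illustrative names, not the driver's structures): an ops table that is only ever read can be declared const, which allows the compiler to place it in a read-only section, and every user then holds a pointer-to-const, which is what the diff below propagates through the callers.

#include <stdio.h>

struct pte_ops {                         /* stand-in for intel_gvt_gtt_pte_ops */
        unsigned long (*get_pfn)(unsigned long entry);
};

static unsigned long get_pfn(unsigned long entry)
{
        return entry >> 12;              /* illustrative only */
}

static const struct pte_ops gen8_pte_ops = {   /* may live in .rodata */
        .get_pfn = get_pfn,
};

int main(void)
{
        const struct pte_ops *ops = &gen8_pte_ops;   /* callers take const * */

        printf("pfn=%lu\n", ops->get_pfn(0x5000));   /* prints: pfn=5 */
        return 0;
}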
Signed-off-by: Rikard Falkeborn Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211204105527.15741-3-rikard.falkeborn@gmail.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 62 +++++++++++++++++++++--------------------- drivers/gpu/drm/i915/gvt/gtt.h | 2 +- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index 6efa48727052..c8cd6bf28ea8 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -499,7 +499,7 @@ DEFINE_PPGTT_GMA_TO_INDEX(gen8, l3_pdp, (gma >> 30 & 0x3)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, l4_pdp, (gma >> 30 & 0x1ff)); DEFINE_PPGTT_GMA_TO_INDEX(gen8, pml4, (gma >> 39 & 0x1ff)); -static struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { +static const struct intel_gvt_gtt_pte_ops gen8_gtt_pte_ops = { .get_entry = gtt_get_entry64, .set_entry = gtt_set_entry64, .clear_present = gtt_entry_clear_present, @@ -526,7 +526,7 @@ static const struct intel_gvt_gtt_gma_ops gen8_gtt_gma_ops = { }; /* Update entry type per pse and ips bit. */ -static void update_entry_type_for_real(struct intel_gvt_gtt_pte_ops *pte_ops, +static void update_entry_type_for_real(const struct intel_gvt_gtt_pte_ops *pte_ops, struct intel_gvt_gtt_entry *entry, bool ips) { switch (entry->type) { @@ -553,7 +553,7 @@ static void _ppgtt_get_root_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index, bool guest) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_PPGTT); @@ -580,7 +580,7 @@ static void _ppgtt_set_root_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index, bool guest) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; pte_ops->set_entry(guest ? 
mm->ppgtt_mm.guest_pdps : mm->ppgtt_mm.shadow_pdps, @@ -596,7 +596,7 @@ static inline void ppgtt_set_shadow_root_entry(struct intel_vgpu_mm *mm, static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); @@ -608,7 +608,7 @@ static void ggtt_get_guest_entry(struct intel_vgpu_mm *mm, static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); @@ -619,7 +619,7 @@ static void ggtt_set_guest_entry(struct intel_vgpu_mm *mm, static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); @@ -629,7 +629,7 @@ static void ggtt_get_host_entry(struct intel_vgpu_mm *mm, static void ggtt_set_host_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *entry, unsigned long index) { - struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops; unsigned long offset = index; GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT); @@ -655,7 +655,7 @@ static inline int ppgtt_spt_get_entry( bool guest) { struct intel_gvt *gvt = spt->vgpu->gvt; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; int ret; e->type = get_entry_type(type); @@ -684,7 +684,7 @@ static inline int ppgtt_spt_set_entry( bool guest) { struct intel_gvt *gvt = spt->vgpu->gvt; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n")) return -EINVAL; @@ -947,7 +947,7 @@ static int ppgtt_invalidate_spt_by_shadow_entry(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *e) { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; enum intel_gvt_gtt_type cur_pt_type; @@ -984,7 +984,7 @@ static inline void ppgtt_invalidate_pte(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *entry) { struct intel_vgpu *vgpu = spt->vgpu; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; unsigned long pfn; int type; @@ -1072,7 +1072,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt); static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry( struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *we) { - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *spt = NULL; bool ips = false; int ret; @@ -1136,7 +1136,7 @@ err: static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, struct intel_vgpu_ppgtt_spt *s, struct intel_gvt_gtt_entry *ge) { - struct intel_gvt_gtt_pte_ops *ops = 
s->vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = s->vgpu->gvt->gtt.pte_ops; se->type = ge->type; se->val64 = ge->val64; @@ -1159,7 +1159,7 @@ static inline void ppgtt_generate_shadow_entry(struct intel_gvt_gtt_entry *se, static int is_2MB_gtt_possible(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *entry) { - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; unsigned long pfn; if (!HAS_PAGE_SIZES(vgpu->gvt->gt->i915, I915_GTT_PAGE_SIZE_2M)) @@ -1176,7 +1176,7 @@ static int split_2MB_gtt_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *se) { - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *sub_spt; struct intel_gvt_gtt_entry sub_se; unsigned long start_gfn; @@ -1223,7 +1223,7 @@ static int split_64KB_gtt_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *se) { - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = *se; unsigned long start_gfn; dma_addr_t dma_addr; @@ -1254,7 +1254,7 @@ static int ppgtt_populate_shadow_entry(struct intel_vgpu *vgpu, struct intel_vgpu_ppgtt_spt *spt, unsigned long index, struct intel_gvt_gtt_entry *ge) { - struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry se = *ge; unsigned long gfn, page_size = PAGE_SIZE; dma_addr_t dma_addr; @@ -1308,7 +1308,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt) { struct intel_vgpu *vgpu = spt->vgpu; struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; struct intel_gvt_gtt_entry se, ge; unsigned long gfn, i; @@ -1351,7 +1351,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_ppgtt_spt *spt, struct intel_gvt_gtt_entry *se, unsigned long index) { struct intel_vgpu *vgpu = spt->vgpu; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int ret; trace_spt_guest_change(spt->vgpu->id, "remove", spt, @@ -1432,7 +1432,7 @@ static int sync_oos_page(struct intel_vgpu *vgpu, { const struct intel_gvt_device_info *info = &vgpu->gvt->device_info; struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *spt = oos_page->spt; struct intel_gvt_gtt_entry old, new; int index; @@ -1603,7 +1603,7 @@ static int ppgtt_handle_guest_write_page_table( { struct intel_vgpu *vgpu = spt->vgpu; int type = spt->shadow_page.type; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry old_se; int new_present; int i, ret; @@ -1720,7 +1720,7 @@ static int ppgtt_handle_guest_write_page_table_bytes( u64 pa, void *p_data, int bytes) { struct intel_vgpu *vgpu = spt->vgpu; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; const struct intel_gvt_device_info *info = 
&vgpu->gvt->device_info; struct intel_gvt_gtt_entry we, se; unsigned long index; @@ -1785,7 +1785,7 @@ static void invalidate_ppgtt_mm(struct intel_vgpu_mm *mm) struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_gtt *gtt = &gvt->gtt; - struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; struct intel_gvt_gtt_entry se; int index; @@ -1815,7 +1815,7 @@ static int shadow_ppgtt_mm(struct intel_vgpu_mm *mm) struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_gtt *gtt = &gvt->gtt; - struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gtt->pte_ops; struct intel_vgpu_ppgtt_spt *spt; struct intel_gvt_gtt_entry ge, se; int index, ret; @@ -2067,7 +2067,7 @@ static inline int ppgtt_get_next_level_entry(struct intel_vgpu_mm *mm, struct intel_gvt_gtt_entry *e, unsigned long index, bool guest) { struct intel_vgpu *vgpu = mm->vgpu; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; struct intel_vgpu_ppgtt_spt *s; s = intel_vgpu_find_spt_by_mfn(vgpu, ops->get_pfn(e)); @@ -2096,7 +2096,7 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma) { struct intel_vgpu *vgpu = mm->vgpu; struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = gvt->gtt.pte_ops; const struct intel_gvt_gtt_gma_ops *gma_ops = gvt->gtt.gma_ops; unsigned long gpa = INTEL_GVT_INVALID_ADDR; unsigned long gma_index[4]; @@ -2221,7 +2221,7 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu, unsigned int off, static void ggtt_invalidate_pte(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *entry) { - struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; unsigned long pfn; pfn = pte_ops->get_pfn(entry); @@ -2236,7 +2236,7 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off, struct intel_gvt *gvt = vgpu->gvt; const struct intel_gvt_device_info *info = &gvt->device_info; struct intel_vgpu_mm *ggtt_mm = vgpu->gtt.ggtt_mm; - struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops; unsigned long g_gtt_index = off >> info->gtt_entry_size_shift; unsigned long gma, gfn; struct intel_gvt_gtt_entry e = {.val64 = 0, .type = GTT_TYPE_GGTT_PTE}; @@ -2391,7 +2391,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu, { struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_vgpu_gtt *gtt = &vgpu->gtt; - struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops; int page_entry_num = I915_GTT_PAGE_SIZE >> vgpu->gvt->device_info.gtt_entry_size_shift; void *scratch_pt; @@ -2822,7 +2822,7 @@ void intel_vgpu_invalidate_ppgtt(struct intel_vgpu *vgpu) void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu, bool invalidate_old) { struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops = vgpu->gvt->gtt.pte_ops; struct intel_gvt_gtt_entry entry = {.type = GTT_TYPE_GGTT_PTE}; struct intel_gvt_gtt_entry old_entry; u32 index; diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h index d0d598322404..a3b0f59ec8bd 100644 --- 
a/drivers/gpu/drm/i915/gvt/gtt.h +++ b/drivers/gpu/drm/i915/gvt/gtt.h @@ -91,7 +91,7 @@ struct intel_gvt_gtt_gma_ops { }; struct intel_gvt_gtt { - struct intel_gvt_gtt_pte_ops *pte_ops; + const struct intel_gvt_gtt_pte_ops *pte_ops; const struct intel_gvt_gtt_gma_ops *gma_ops; int (*mm_alloc_page_table)(struct intel_vgpu_mm *mm); void (*mm_free_page_table)(struct intel_vgpu_mm *mm); -- cgit From 1b277c892940af1d06c2433f3f3a39d4bd146c89 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 11:55:21 +0100 Subject: drm/i915/gvt: Constify intel_gvt_irq_ops These are never modified, so make them const to allow the compiler to put them in read-only memory. Signed-off-by: Rikard Falkeborn Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211204105527.15741-4-rikard.falkeborn@gmail.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/interrupt.c | 10 +++++----- drivers/gpu/drm/i915/gvt/interrupt.h | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/interrupt.c b/drivers/gpu/drm/i915/gvt/interrupt.c index 614b951d919f..9ccc6b1ecc28 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.c +++ b/drivers/gpu/drm/i915/gvt/interrupt.c @@ -176,7 +176,7 @@ int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_irq_ops *ops = gvt->irq.ops; + const struct intel_gvt_irq_ops *ops = gvt->irq.ops; u32 imr = *(u32 *)p_data; trace_write_ir(vgpu->id, "IMR", reg, imr, vgpu_vreg(vgpu, reg), @@ -206,7 +206,7 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu, unsigned int reg, void *p_data, unsigned int bytes) { struct intel_gvt *gvt = vgpu->gvt; - struct intel_gvt_irq_ops *ops = gvt->irq.ops; + const struct intel_gvt_irq_ops *ops = gvt->irq.ops; u32 ier = *(u32 *)p_data; u32 virtual_ier = vgpu_vreg(vgpu, reg); @@ -246,7 +246,7 @@ int intel_vgpu_reg_ier_handler(struct intel_vgpu *vgpu, { struct intel_gvt *gvt = vgpu->gvt; struct drm_i915_private *i915 = gvt->gt->i915; - struct intel_gvt_irq_ops *ops = gvt->irq.ops; + const struct intel_gvt_irq_ops *ops = gvt->irq.ops; struct intel_gvt_irq_info *info; u32 ier = *(u32 *)p_data; @@ -604,7 +604,7 @@ static void gen8_init_irq( SET_BIT_INFO(irq, 25, PCU_PCODE2DRIVER_MAILBOX, INTEL_GVT_IRQ_INFO_PCU); } -static struct intel_gvt_irq_ops gen8_irq_ops = { +static const struct intel_gvt_irq_ops gen8_irq_ops = { .init_irq = gen8_init_irq, .check_pending_irq = gen8_check_pending_irq, }; @@ -626,7 +626,7 @@ void intel_vgpu_trigger_virtual_event(struct intel_vgpu *vgpu, struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_irq *irq = &gvt->irq; gvt_event_virt_handler_t handler; - struct intel_gvt_irq_ops *ops = gvt->irq.ops; + const struct intel_gvt_irq_ops *ops = gvt->irq.ops; handler = get_event_virt_handler(irq, event); drm_WARN_ON(&i915->drm, !handler); diff --git a/drivers/gpu/drm/i915/gvt/interrupt.h b/drivers/gpu/drm/i915/gvt/interrupt.h index 6c47d3e33161..0989e180ed54 100644 --- a/drivers/gpu/drm/i915/gvt/interrupt.h +++ b/drivers/gpu/drm/i915/gvt/interrupt.h @@ -203,7 +203,7 @@ struct intel_gvt_irq_map { /* structure containing device specific IRQ state */ struct intel_gvt_irq { - struct intel_gvt_irq_ops *ops; + const struct intel_gvt_irq_ops *ops; struct intel_gvt_irq_info *info[INTEL_GVT_IRQ_INFO_MAX]; DECLARE_BITMAP(irq_info_bitmap, INTEL_GVT_IRQ_INFO_MAX); struct intel_gvt_event_info events[INTEL_GVT_EVENT_MAX]; -- cgit From 4642077775a65566c0d25e63bf918fb5e5235163 Mon 
Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 11:55:22 +0100 Subject: drm/i915/gvt: Constify intel_gvt_sched_policy_ops These are never modified, so make them const to allow the compiler to put them in read-only memory. Signed-off-by: Rikard Falkeborn Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211204105527.15741-5-rikard.falkeborn@gmail.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/sched_policy.c | 2 +- drivers/gpu/drm/i915/gvt/scheduler.h | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/sched_policy.c b/drivers/gpu/drm/i915/gvt/sched_policy.c index 036b74fe9298..c077fb4674f0 100644 --- a/drivers/gpu/drm/i915/gvt/sched_policy.c +++ b/drivers/gpu/drm/i915/gvt/sched_policy.c @@ -368,7 +368,7 @@ static void tbs_sched_stop_schedule(struct intel_vgpu *vgpu) vgpu_data->active = false; } -static struct intel_gvt_sched_policy_ops tbs_schedule_ops = { +static const struct intel_gvt_sched_policy_ops tbs_schedule_ops = { .init = tbs_sched_init, .clean = tbs_sched_clean, .init_vgpu = tbs_sched_init_vgpu, diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h index 7c86984a842f..1f391b3da2cc 100644 --- a/drivers/gpu/drm/i915/gvt/scheduler.h +++ b/drivers/gpu/drm/i915/gvt/scheduler.h @@ -56,7 +56,7 @@ struct intel_gvt_workload_scheduler { wait_queue_head_t waitq[I915_NUM_ENGINES]; void *sched_data; - struct intel_gvt_sched_policy_ops *sched_ops; + const struct intel_gvt_sched_policy_ops *sched_ops; }; #define INDIRECT_CTX_ADDR_MASK 0xffffffc0 -- cgit From ca1777797ad84cba3a72b73f74bd80092a7aa220 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 11:55:23 +0100 Subject: drm/i915/gvt: Constify gvt_mmio_block These are never modified, so make them const to allow the compiler to put it in read-only memory. Signed-off-by: Rikard Falkeborn Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211204105527.15741-6-rikard.falkeborn@gmail.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gvt.h | 2 +- drivers/gpu/drm/i915/gvt/handlers.c | 12 ++++++------ 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h index 0c0615602343..0ebffc327528 100644 --- a/drivers/gpu/drm/i915/gvt/gvt.h +++ b/drivers/gpu/drm/i915/gvt/gvt.h @@ -272,7 +272,7 @@ struct intel_gvt_mmio { /* Value of command write of this reg needs to be patched */ #define F_CMD_WRITE_PATCH (1 << 8) - struct gvt_mmio_block *mmio_block; + const struct gvt_mmio_block *mmio_block; unsigned int num_mmio_block; DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS); diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 805fee4e91ef..3cefaf5527e0 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -3628,11 +3628,11 @@ static int init_bxt_mmio_info(struct intel_gvt *gvt) return 0; } -static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, - unsigned int offset) +static const struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt, + unsigned int offset) { unsigned long device = intel_gvt_get_device_type(gvt); - struct gvt_mmio_block *block = gvt->mmio.mmio_block; + const struct gvt_mmio_block *block = gvt->mmio.mmio_block; int num = gvt->mmio.num_mmio_block; int i; @@ -3671,7 +3671,7 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt) * accessible (should have no F_CMD_ACCESS flag). 
* otherwise, need to update cmd_reg_handler in cmd_parser.c */ -static struct gvt_mmio_block mmio_blocks[] = { +static const struct gvt_mmio_block mmio_blocks[] = { {D_SKL_PLUS, _MMIO(DMC_MMIO_START_RANGE), 0x3000, NULL, NULL}, {D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL}, {D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE, @@ -3754,7 +3754,7 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt, int (*handler)(struct intel_gvt *gvt, u32 offset, void *data), void *data) { - struct gvt_mmio_block *block = gvt->mmio.mmio_block; + const struct gvt_mmio_block *block = gvt->mmio.mmio_block; struct intel_gvt_mmio_info *e; int i, j, ret; @@ -3872,7 +3872,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset, struct drm_i915_private *i915 = vgpu->gvt->gt->i915; struct intel_gvt *gvt = vgpu->gvt; struct intel_gvt_mmio_info *mmio_info; - struct gvt_mmio_block *mmio_block; + const struct gvt_mmio_block *mmio_block; gvt_mmio_func func; int ret; -- cgit From 0b782e669298e30853e235b963fdebfdedf45383 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 11:55:24 +0100 Subject: drm/i915/gvt: Constify cmd_interrupt_events It is never modified, so make it const to allow the compiler to put it in read-only memory. Signed-off-by: Rikard Falkeborn Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211204105527.15741-7-rikard.falkeborn@gmail.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/cmd_parser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c index c4118b808268..ce9307546e7f 100644 --- a/drivers/gpu/drm/i915/gvt/cmd_parser.c +++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c @@ -1144,7 +1144,7 @@ struct cmd_interrupt_event { int mi_user_interrupt; }; -static struct cmd_interrupt_event cmd_interrupt_events[] = { +static const struct cmd_interrupt_event cmd_interrupt_events[] = { [RCS0] = { .pipe_control_notify = RCS_PIPE_CONTROL, .mi_flush_dw = INTEL_GVT_EVENT_RESERVED, -- cgit From b17639c7f7fc1fbb23b761c38ba3233cd5d082d9 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 11:55:25 +0100 Subject: drm/i915/gvt: Constify formats These are never modified, so make them const to allow the compiler to put them in read-only memory. While at it, make the description const char* since it is never modified.
Signed-off-by: Rikard Falkeborn Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211204105527.15741-8-rikard.falkeborn@gmail.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/fb_decoder.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 9ec064199364..1aabfa9cda02 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -40,12 +40,12 @@ #define PRIMARY_FORMAT_NUM 16 struct pixel_format { - int drm_format; /* Pixel format in DRM definition */ - int bpp; /* Bits per pixel, 0 indicates invalid */ - char *desc; /* The description */ + int drm_format; /* Pixel format in DRM definition */ + int bpp; /* Bits per pixel, 0 indicates invalid */ + const char *desc; /* The description */ }; -static struct pixel_format bdw_pixel_formats[] = { +static const struct pixel_format bdw_pixel_formats[] = { {DRM_FORMAT_C8, 8, "8-bit Indexed"}, {DRM_FORMAT_RGB565, 16, "16-bit BGRX (5:6:5 MSB-R:G:B)"}, {DRM_FORMAT_XRGB8888, 32, "32-bit BGRX (8:8:8:8 MSB-X:R:G:B)"}, @@ -58,7 +58,7 @@ static struct pixel_format bdw_pixel_formats[] = { {0, 0, NULL}, }; -static struct pixel_format skl_pixel_formats[] = { +static const struct pixel_format skl_pixel_formats[] = { {DRM_FORMAT_YUYV, 16, "16-bit packed YUYV (8:8:8:8 MSB-V:Y2:U:Y1)"}, {DRM_FORMAT_UYVY, 16, "16-bit packed UYVY (8:8:8:8 MSB-Y2:V:Y1:U)"}, {DRM_FORMAT_YVYU, 16, "16-bit packed YVYU (8:8:8:8 MSB-U:Y2:V:Y1)"}, @@ -278,14 +278,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, #define CURSOR_FORMAT_NUM (1 << 6) struct cursor_mode_format { - int drm_format; /* Pixel format in DRM definition */ - u8 bpp; /* Bits per pixel; 0 indicates invalid */ - u32 width; /* In pixel */ - u32 height; /* In lines */ - char *desc; /* The description */ + int drm_format; /* Pixel format in DRM definition */ + u8 bpp; /* Bits per pixel; 0 indicates invalid */ + u32 width; /* In pixel */ + u32 height; /* In lines */ + const char *desc; /* The description */ }; -static struct cursor_mode_format cursor_pixel_formats[] = { +static const struct cursor_mode_format cursor_pixel_formats[] = { {DRM_FORMAT_ARGB8888, 32, 128, 128, "128x128 32bpp ARGB"}, {DRM_FORMAT_ARGB8888, 32, 256, 256, "256x256 32bpp ARGB"}, {DRM_FORMAT_ARGB8888, 32, 64, 64, "64x64 32bpp ARGB"}, @@ -391,7 +391,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, #define SPRITE_FORMAT_NUM (1 << 3) -static struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = { +static const struct pixel_format sprite_pixel_formats[SPRITE_FORMAT_NUM] = { [0x0] = {DRM_FORMAT_YUV422, 16, "YUV 16-bit 4:2:2 packed"}, [0x1] = {DRM_FORMAT_XRGB2101010, 32, "RGB 32-bit 2:10:10:10"}, [0x2] = {DRM_FORMAT_XRGB8888, 32, "RGB 32-bit 8:8:8:8"}, -- cgit From 38bd13a0b151093f5f26c95ee106659008896995 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 11:55:26 +0100 Subject: drm/i915/gvt: Constify gtt_type_table_entry It is never modified, so make it const to allow the compiler to put it in read-only memory. 
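A generic sketch of the idea behind this constification series (illustrative only; the struct and names below are made up and do not appear in these patches): a table of function pointers or lookup data that is declared const can be emitted by the toolchain into the read-only data section, so a stray write faults instead of silently corrupting the table.

    struct example_ops {
            int (*query)(int id);
    };

    static int example_query(int id)
    {
            return id + 1;
    }

    /* Without const this table sits in writable .data; with const the
     * compiler may place it in .rodata, where an accidental runtime
     * write triggers a protection fault rather than corrupting the
     * function pointer.
     */
    static const struct example_ops example_ops_table = {
            .query = example_query,
    };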
Signed-off-by: Rikard Falkeborn Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211204105527.15741-9-rikard.falkeborn@gmail.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/gtt.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c index c8cd6bf28ea8..614156856f16 100644 --- a/drivers/gpu/drm/i915/gvt/gtt.c +++ b/drivers/gpu/drm/i915/gvt/gtt.c @@ -185,7 +185,7 @@ struct gtt_type_table_entry { .pse_entry_type = pse_type, \ } -static struct gtt_type_table_entry gtt_type_table[] = { +static const struct gtt_type_table_entry gtt_type_table[] = { GTT_TYPE_TABLE_ENTRY(GTT_TYPE_PPGTT_ROOT_L4_ENTRY, GTT_TYPE_PPGTT_ROOT_L4_ENTRY, GTT_TYPE_INVALID, -- cgit From 3f8bd465a6f083a4112d82c18f4a85c9052d2132 Mon Sep 17 00:00:00 2001 From: Rikard Falkeborn Date: Sat, 4 Dec 2021 11:55:27 +0100 Subject: drm/i915/gvt: Constify vgpu_types It is never modified, so make it const to allow the compiler to put it in read-only memory. While at it, make name a const char*. Signed-off-by: Rikard Falkeborn Signed-off-by: Zhi Wang Link: http://patchwork.freedesktop.org/patch/msgid/20211204105527.15741-10-rikard.falkeborn@gmail.com Reviewed-by: Zhi Wang --- drivers/gpu/drm/i915/gvt/vgpu.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index fa6b92615799..8dddd0a940a1 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -77,7 +77,7 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu) #define VGPU_WEIGHT(vgpu_num) \ (VGPU_MAX_WEIGHT / (vgpu_num)) -static struct { +static const struct { unsigned int low_mm; unsigned int high_mm; unsigned int fence; @@ -88,7 +88,7 @@ static struct { */ unsigned int weight; enum intel_vgpu_edid edid; - char *name; + const char *name; } vgpu_types[] = { /* Fixed vGPU type table */ { MB_TO_BYTES(64), MB_TO_BYTES(384), 4, VGPU_WEIGHT(8), GVT_EDID_1024_768, "8" }, -- cgit From f7a6021aaf02088870559f82fc13c58cda7fea1a Mon Sep 17 00:00:00 2001 From: Jiasheng Jiang Date: Tue, 11 Jan 2022 10:50:48 +0800 Subject: ASoC: cpcap: Check for NULL pointer after calling of_get_child_by_name If the device does not exist, of_get_child_by_name() will return a NULL pointer. And devm_snd_soc_register_component() does not check it. Also, I have noticed that cpcap_codec_driver has not been used yet. Therefore, it is better to check it in order to avoid a future dereference of the NULL pointer.
Fixes: f6cdf2d3445d ("ASoC: cpcap: new codec") Signed-off-by: Jiasheng Jiang Link: https://lore.kernel.org/r/20220111025048.524134-1-jiasheng@iscas.ac.cn Signed-off-by: Mark Brown --- sound/soc/codecs/cpcap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/codecs/cpcap.c b/sound/soc/codecs/cpcap.c index 598e09024e23..ffdf8b615efa 100644 --- a/sound/soc/codecs/cpcap.c +++ b/sound/soc/codecs/cpcap.c @@ -1667,6 +1667,8 @@ static int cpcap_codec_probe(struct platform_device *pdev) { struct device_node *codec_node = of_get_child_by_name(pdev->dev.parent->of_node, "audio-codec"); + if (!codec_node) + return -ENODEV; pdev->dev.of_node = codec_node; -- cgit From d068eebbd4822b6c14a7ea375dfe53ca5c69c776 Mon Sep 17 00:00:00 2001 From: Michal Koutný Date: Fri, 17 Dec 2021 16:48:54 +0100 Subject: cgroup/cpuset: Make child cpusets restrict parents on v1 hierarchy MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The commit 1f1562fcd04a ("cgroup/cpuset: Don't let child cpusets restrict parent in default hierarchy") intended to relax the check only on the default hierarchy (or v2 mode) but it dropped the check in v1 too. This patch reinstates and separates the legacy-only validations so that they can be considered only in the v1 mode, which should enforce the old constraints for the sake of compatibility. Fixes: 1f1562fcd04a ("cgroup/cpuset: Don't let child cpusets restrict parent in default hierarchy") Suggested-by: Waiman Long Signed-off-by: Michal Koutný Reviewed-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/cgroup/cpuset.c | 52 ++++++++++++++++++++++++++++++++++++++------------ 1 file changed, 40 insertions(+), 12 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index dc653ab26e50..bb3531e7fda7 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -590,6 +590,35 @@ static inline void free_cpuset(struct cpuset *cs) kfree(cs); } +/* + * validate_change_legacy() - Validate conditions specific to legacy (v1) + * behavior. + */ +static int validate_change_legacy(struct cpuset *cur, struct cpuset *trial) +{ + struct cgroup_subsys_state *css; + struct cpuset *c, *par; + int ret; + + WARN_ON_ONCE(!rcu_read_lock_held()); + + /* Each of our child cpusets must be a subset of us */ + ret = -EBUSY; + cpuset_for_each_child(c, css, cur) + if (!is_cpuset_subset(c, trial)) + goto out; + + /* On legacy hierarchy, we must be a subset of our parent cpuset. */ + ret = -EACCES; + par = parent_cs(cur); + if (par && !is_cpuset_subset(trial, par)) + goto out; + + ret = 0; +out: + return ret; +} + /* * validate_change() - Used to validate that any proposed cpuset change * follows the structural rules for cpusets. @@ -614,20 +643,21 @@ static int validate_change(struct cpuset *cur, struct cpuset *trial) { struct cgroup_subsys_state *css; struct cpuset *c, *par; - int ret; - - /* The checks don't apply to root cpuset */ - if (cur == &top_cpuset) - return 0; + int ret = 0; rcu_read_lock(); - par = parent_cs(cur); - /* On legacy hierarchy, we must be a subset of our parent cpuset. 
*/ - ret = -EACCES; - if (!is_in_v2_mode() && !is_cpuset_subset(trial, par)) + if (!is_in_v2_mode()) + ret = validate_change_legacy(cur, trial); + if (ret) + goto out; + + /* Remaining checks don't apply to root cpuset */ + if (cur == &top_cpuset) goto out; + par = parent_cs(cur); + /* * If either I or some sibling (!= me) is exclusive, we can't * overlap @@ -1175,9 +1205,7 @@ enum subparts_cmd { * * Because of the implicit cpu exclusive nature of a partition root, * cpumask changes that violates the cpu exclusivity rule will not be - * permitted when checked by validate_change(). The validate_change() - * function will also prevent any changes to the cpu list if it is not - * a superset of children's cpu lists. + * permitted when checked by validate_change(). */ static int update_parent_subparts_cpumask(struct cpuset *cpuset, int cmd, struct cpumask *newmask, -- cgit From 2616be2eac4b1c361ece55dfd8f942dcecb25de2 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 12 Jan 2022 12:57:03 +0200 Subject: drm/i915/dp: make intel_dp_pack_aux() static again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The last user of intel_dp_pack_aux() outside intel_dp_aux.c got removed in commit ad26451a7902 ("drm/i915/display: Drop PSR support from HSW and BDW"). Make the function static again. Rename the pack/unpack functions to follow the usual naming conventions while at it. Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220112105703.1151391-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp_aux.c | 8 ++++---- drivers/gpu/drm/i915/display/intel_dp_aux.h | 4 ---- 2 files changed, 4 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.c b/drivers/gpu/drm/i915/display/intel_dp_aux.c index 5fbb767fcd63..2bc119374555 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.c @@ -10,7 +10,7 @@ #include "intel_pps.h" #include "intel_tc.h" -u32 intel_dp_pack_aux(const u8 *src, int src_bytes) +static u32 intel_dp_aux_pack(const u8 *src, int src_bytes) { int i; u32 v = 0; @@ -22,7 +22,7 @@ u32 intel_dp_pack_aux(const u8 *src, int src_bytes) return v; } -static void intel_dp_unpack_aux(u32 src, u8 *dst, int dst_bytes) +static void intel_dp_aux_unpack(u32 src, u8 *dst, int dst_bytes) { int i; @@ -267,7 +267,7 @@ intel_dp_aux_xfer(struct intel_dp *intel_dp, for (i = 0; i < send_bytes; i += 4) intel_uncore_write(uncore, ch_data[i >> 2], - intel_dp_pack_aux(send + i, + intel_dp_aux_pack(send + i, send_bytes - i)); /* Send the command and wait for it to complete */ @@ -352,7 +352,7 @@ done: recv_bytes = recv_size; for (i = 0; i < recv_bytes; i += 4) - intel_dp_unpack_aux(intel_uncore_read(uncore, ch_data[i >> 2]), + intel_dp_aux_unpack(intel_uncore_read(uncore, ch_data[i >> 2]), recv + i, recv_bytes - i); ret = recv_bytes; diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux.h b/drivers/gpu/drm/i915/display/intel_dp_aux.h index 4afbe76217b9..738577537bc7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux.h +++ b/drivers/gpu/drm/i915/display/intel_dp_aux.h @@ -6,12 +6,8 @@ #ifndef __INTEL_DP_AUX_H__ #define __INTEL_DP_AUX_H__ -#include - struct intel_dp; -u32 intel_dp_pack_aux(const u8 *src, int src_bytes); - void intel_dp_aux_fini(struct intel_dp *intel_dp); void intel_dp_aux_init(struct intel_dp *intel_dp); -- cgit From 6650ebcbea1314bf91bf161802ecaddbb72651b5 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 12 
Jan 2022 13:17:40 +0200 Subject: drm/i915/pcode: rename sandybridge_pcode_* to snb_pcode_* MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefer acronym-based naming to be in line with the rest of the driver. Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220112111740.1208374-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_bw.c | 13 ++++----- drivers/gpu/drm/i915/display/intel_cdclk.c | 30 ++++++++++----------- drivers/gpu/drm/i915/display/intel_display.c | 6 ++--- drivers/gpu/drm/i915/display/intel_display_power.c | 11 +++----- drivers/gpu/drm/i915/display/intel_hdcp.c | 3 +-- drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c | 8 +++--- drivers/gpu/drm/i915/gt/intel_llc.c | 9 +++---- drivers/gpu/drm/i915/gt/intel_rc6.c | 5 ++-- drivers/gpu/drm/i915/gt/intel_rps.c | 8 +++--- drivers/gpu/drm/i915/gt/selftest_llc.c | 5 ++-- drivers/gpu/drm/i915/gt/selftest_rps.c | 5 ++-- drivers/gpu/drm/i915/intel_dram.c | 6 ++--- drivers/gpu/drm/i915/intel_pcode.c | 31 ++++++++-------------- drivers/gpu/drm/i915/intel_pcode.h | 12 ++++----- drivers/gpu/drm/i915/intel_pm.c | 20 +++++++------- 15 files changed, 70 insertions(+), 102 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index abec394f6869..156b060236c2 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -75,10 +75,9 @@ static int icl_pcode_read_qgv_point_info(struct drm_i915_private *dev_priv, u16 dclk; int ret; - ret = sandybridge_pcode_read(dev_priv, - ICL_PCODE_MEM_SUBSYSYSTEM_INFO | - ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), - &val, &val2); + ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ICL_PCODE_MEM_SS_READ_QGV_POINT_INFO(point), + &val, &val2); if (ret) return ret; @@ -102,10 +101,8 @@ static int adls_pcode_read_psf_gv_point_info(struct drm_i915_private *dev_priv, int ret; int i; - ret = sandybridge_pcode_read(dev_priv, - ICL_PCODE_MEM_SUBSYSYSTEM_INFO | - ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, - &val, NULL); + ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ADL_PCODE_MEM_SS_READ_PSF_GV_INFO, &val, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 1f13398e8ac2..7e20967307df 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -805,8 +805,7 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, "trying to change cdclk frequency with cdclk not enabled\n")) return; - ret = sandybridge_pcode_write(dev_priv, - BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); + ret = snb_pcode_write(dev_priv, BDW_PCODE_DISPLAY_FREQ_CHANGE_REQ, 0x0); if (ret) { drm_err(&dev_priv->drm, "failed to inform pcode about cdclk change\n"); @@ -834,8 +833,8 @@ static void bdw_set_cdclk(struct drm_i915_private *dev_priv, LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) drm_err(&dev_priv->drm, "Switching back to LCPLL failed\n"); - sandybridge_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_config->voltage_level); + snb_pcode_write(dev_priv, HSW_PCODE_DE_WRITE_FREQ_REQ, + cdclk_config->voltage_level); intel_de_write(dev_priv, CDCLK_FREQ, DIV_ROUND_CLOSEST(cdclk, 1000) - 1); @@ -1138,8 +1137,8 @@ static void skl_set_cdclk(struct drm_i915_private *dev_priv, intel_de_posting_read(dev_priv, CDCLK_CTL); /* inform PCU of the change */ - sandybridge_pcode_write(dev_priv, 
SKL_PCODE_CDCLK_CONTROL, - cdclk_config->voltage_level); + snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, + cdclk_config->voltage_level); intel_update_cdclk(dev_priv); } @@ -1717,10 +1716,9 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * BSpec requires us to wait up to 150usec, but that leads to * timeouts; the 2ms used here is based on experiment. */ - ret = sandybridge_pcode_write_timeout(dev_priv, - HSW_PCODE_DE_WRITE_FREQ_REQ, - 0x80000000, 150, 2); - + ret = snb_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + 0x80000000, 150, 2); if (ret) { drm_err(&dev_priv->drm, "Failed to inform PCU about cdclk change (err %d, freq %d)\n", @@ -1781,8 +1779,8 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, intel_crtc_wait_for_next_vblank(intel_crtc_for_pipe(dev_priv, pipe)); if (DISPLAY_VER(dev_priv) >= 11) { - ret = sandybridge_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, - cdclk_config->voltage_level); + ret = snb_pcode_write(dev_priv, SKL_PCODE_CDCLK_CONTROL, + cdclk_config->voltage_level); } else { /* * The timeout isn't specified, the 2ms used here is based on @@ -1790,10 +1788,10 @@ static void bxt_set_cdclk(struct drm_i915_private *dev_priv, * FIXME: Waiting for the request completion could be delayed * until the next PCODE request based on BSpec. */ - ret = sandybridge_pcode_write_timeout(dev_priv, - HSW_PCODE_DE_WRITE_FREQ_REQ, - cdclk_config->voltage_level, - 150, 2); + ret = snb_pcode_write_timeout(dev_priv, + HSW_PCODE_DE_WRITE_FREQ_REQ, + cdclk_config->voltage_level, + 150, 2); } if (ret) { diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e278a662b247..ebad83d9a2aa 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1118,8 +1118,8 @@ void hsw_enable_ips(const struct intel_crtc_state *crtc_state) drm_WARN_ON(dev, !(crtc_state->active_planes & ~BIT(PLANE_CURSOR))); if (IS_BROADWELL(dev_priv)) { - drm_WARN_ON(dev, sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, - IPS_ENABLE | IPS_PCODE_CONTROL)); + drm_WARN_ON(dev, snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, + IPS_ENABLE | IPS_PCODE_CONTROL)); /* Quoting Art Runyan: "its not safe to expect any particular * value in IPS_CTL bit 31 after enabling IPS through the * mailbox." Moreover, the mailbox may return a bogus state, @@ -1149,7 +1149,7 @@ void hsw_disable_ips(const struct intel_crtc_state *crtc_state) if (IS_BROADWELL(dev_priv)) { drm_WARN_ON(dev, - sandybridge_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); + snb_pcode_write(dev_priv, DISPLAY_IPS_CONTROL, 0)); /* * Wait for PCODE to finish disabling IPS. 
The BSpec specified * 42ms timeout value leads to occasional timeouts so use 100ms diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index fba35fb6d2df..ee4617299e64 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -683,9 +683,8 @@ static void icl_tc_cold_exit(struct drm_i915_private *i915) int ret, tries = 0; while (1) { - ret = sandybridge_pcode_write_timeout(i915, - ICL_PCODE_EXIT_TCCOLD, - 0, 250, 1); + ret = snb_pcode_write_timeout(i915, ICL_PCODE_EXIT_TCCOLD, 0, + 250, 1); if (ret != -EAGAIN || ++tries == 3) break; msleep(1); @@ -4053,8 +4052,7 @@ tgl_tc_cold_request(struct drm_i915_private *i915, bool block) * Spec states that we should timeout the request after 200us * but the function below will timeout after 500us */ - ret = sandybridge_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, - &high_val); + ret = snb_pcode_read(i915, TGL_PCODE_TCCOLD, &low_val, &high_val); if (ret == 0) { if (block && (low_val & TGL_PCODE_EXIT_TCCOLD_DATA_L_EXIT_FAILED)) @@ -5469,8 +5467,7 @@ static u32 hsw_read_dcomp(struct drm_i915_private *dev_priv) static void hsw_write_dcomp(struct drm_i915_private *dev_priv, u32 val) { if (IS_HASWELL(dev_priv)) { - if (sandybridge_pcode_write(dev_priv, - GEN6_PCODE_WRITE_D_COMP, val)) + if (snb_pcode_write(dev_priv, GEN6_PCODE_WRITE_D_COMP, val)) drm_dbg_kms(&dev_priv->drm, "Failed to write to D_COMP\n"); } else { diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 4509fe7438e8..e1ecf38db0ef 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -297,8 +297,7 @@ static int intel_hdcp_load_keys(struct drm_i915_private *dev_priv) * Mailbox interface. 
*/ if (DISPLAY_VER(dev_priv) == 9 && !IS_BROXTON(dev_priv)) { - ret = sandybridge_pcode_write(dev_priv, - SKL_PCODE_LOAD_HDCP_KEYS, 1); + ret = snb_pcode_write(dev_priv, SKL_PCODE_LOAD_HDCP_KEYS, 1); if (ret) { drm_err(&dev_priv->drm, "Failed to initiate HDCP key load (%d)\n", diff --git a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c index 404dfa7673c6..6c5c1d0363bf 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_pm_debugfs.c @@ -134,8 +134,7 @@ static int gen6_drpc(struct seq_file *m) } if (GRAPHICS_VER(i915) <= 7) - sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, - &rc6vids, NULL); + snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); seq_printf(m, "RC1e Enabled: %s\n", yesno(rcctl1 & GEN6_RC_CTL_RC1e_ENABLE)); @@ -557,9 +556,8 @@ static int llc_show(struct seq_file *m, void *data) wakeref = intel_runtime_pm_get(gt->uncore->rpm); for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { ia_freq = gpu_freq; - sandybridge_pcode_read(i915, - GEN6_PCODE_READ_MIN_FREQ_TABLE, - &ia_freq, NULL); + snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE, + &ia_freq, NULL); seq_printf(m, "%d\t\t%d\t\t\t\t%d\n", intel_gpu_freq(rps, (gpu_freq * diff --git a/drivers/gpu/drm/i915/gt/intel_llc.c b/drivers/gpu/drm/i915/gt/intel_llc.c index 08d7d5ae263a..63f18830c611 100644 --- a/drivers/gpu/drm/i915/gt/intel_llc.c +++ b/drivers/gpu/drm/i915/gt/intel_llc.c @@ -140,11 +140,10 @@ static void gen6_update_ring_freq(struct intel_llc *llc) unsigned int ia_freq, ring_freq; calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); - sandybridge_pcode_write(i915, - GEN6_PCODE_WRITE_MIN_FREQ_TABLE, - ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | - ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | - gpu_freq); + snb_pcode_write(i915, GEN6_PCODE_WRITE_MIN_FREQ_TABLE, + ia_freq << GEN6_PCODE_FREQ_IA_RATIO_SHIFT | + ring_freq << GEN6_PCODE_FREQ_RING_RATIO_SHIFT | + gpu_freq); } } diff --git a/drivers/gpu/drm/i915/gt/intel_rc6.c b/drivers/gpu/drm/i915/gt/intel_rc6.c index 8be1d005d53b..799578ae3ed8 100644 --- a/drivers/gpu/drm/i915/gt/intel_rc6.c +++ b/drivers/gpu/drm/i915/gt/intel_rc6.c @@ -261,8 +261,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) GEN6_RC_CTL_HW_ENABLE; rc6vids = 0; - ret = sandybridge_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, - &rc6vids, NULL); + ret = snb_pcode_read(i915, GEN6_PCODE_READ_RC6VIDS, &rc6vids, NULL); if (GRAPHICS_VER(i915) == 6 && ret) { drm_dbg(&i915->drm, "Couldn't check for BIOS workaround\n"); } else if (GRAPHICS_VER(i915) == 6 && @@ -272,7 +271,7 @@ static void gen6_rc6_enable(struct intel_rc6 *rc6) GEN6_DECODE_RC6_VID(rc6vids & 0xff), 450); rc6vids &= 0xffff00; rc6vids |= GEN6_ENCODE_RC6_VID(450); - ret = sandybridge_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); + ret = snb_pcode_write(i915, GEN6_PCODE_WRITE_RC6VIDS, rc6vids); if (ret) drm_err(&i915->drm, "Couldn't fix incorrect rc6 voltage\n"); diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 8f5bce298574..d4f4eb2fc2b5 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -1019,9 +1019,8 @@ static void gen6_rps_init(struct intel_rps *rps) IS_GEN9_BC(i915) || GRAPHICS_VER(i915) >= 11) { u32 ddcc_status = 0; - if (sandybridge_pcode_read(i915, - HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, - &ddcc_status, NULL) == 0) + if (snb_pcode_read(i915, HSW_PCODE_DYNAMIC_DUTY_CYCLE_CONTROL, + &ddcc_status, NULL) == 0) 
rps->efficient_freq = clamp_t(u8, (ddcc_status >> 8) & 0xff, @@ -1869,8 +1868,7 @@ void intel_rps_init(struct intel_rps *rps) if (GRAPHICS_VER(i915) == 6 || IS_IVYBRIDGE(i915) || IS_HASWELL(i915)) { u32 params = 0; - sandybridge_pcode_read(i915, GEN6_READ_OC_PARAMS, - ¶ms, NULL); + snb_pcode_read(i915, GEN6_READ_OC_PARAMS, ¶ms, NULL); if (params & BIT(31)) { /* OC supported */ drm_dbg(&i915->drm, "Overclocking supported, max: %dMHz, overclock: %dMHz\n", diff --git a/drivers/gpu/drm/i915/gt/selftest_llc.c b/drivers/gpu/drm/i915/gt/selftest_llc.c index 459b775f163a..2cd184ab32b1 100644 --- a/drivers/gpu/drm/i915/gt/selftest_llc.c +++ b/drivers/gpu/drm/i915/gt/selftest_llc.c @@ -31,9 +31,8 @@ static int gen6_verify_ring_freq(struct intel_llc *llc) calc_ia_freq(llc, gpu_freq, &consts, &ia_freq, &ring_freq); val = gpu_freq; - if (sandybridge_pcode_read(i915, - GEN6_PCODE_READ_MIN_FREQ_TABLE, - &val, NULL)) { + if (snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE, + &val, NULL)) { pr_err("Failed to read freq table[%d], range [%d, %d]\n", gpu_freq, consts.min_gpu_freq, consts.max_gpu_freq); err = -ENXIO; diff --git a/drivers/gpu/drm/i915/gt/selftest_rps.c b/drivers/gpu/drm/i915/gt/selftest_rps.c index bd170ba1cf00..e1e5dd5f7638 100644 --- a/drivers/gpu/drm/i915/gt/selftest_rps.c +++ b/drivers/gpu/drm/i915/gt/selftest_rps.c @@ -519,9 +519,8 @@ static void show_pcu_config(struct intel_rps *rps) for (gpu_freq = min_gpu_freq; gpu_freq <= max_gpu_freq; gpu_freq++) { int ia_freq = gpu_freq; - sandybridge_pcode_read(i915, - GEN6_PCODE_READ_MIN_FREQ_TABLE, - &ia_freq, NULL); + snb_pcode_read(i915, GEN6_PCODE_READ_MIN_FREQ_TABLE, + &ia_freq, NULL); pr_info("%5d %5d %5d\n", gpu_freq * 50, diff --git a/drivers/gpu/drm/i915/intel_dram.c b/drivers/gpu/drm/i915/intel_dram.c index 84bb212bae4b..3e26ccabf7f9 100644 --- a/drivers/gpu/drm/i915/intel_dram.c +++ b/drivers/gpu/drm/i915/intel_dram.c @@ -389,10 +389,8 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv) u32 val = 0; int ret; - ret = sandybridge_pcode_read(dev_priv, - ICL_PCODE_MEM_SUBSYSYSTEM_INFO | - ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, - &val, NULL); + ret = snb_pcode_read(dev_priv, ICL_PCODE_MEM_SUBSYSYSTEM_INFO | + ICL_PCODE_MEM_SS_READ_GLOBAL_INFO, &val, NULL); if (ret) return ret; diff --git a/drivers/gpu/drm/i915/intel_pcode.c b/drivers/gpu/drm/i915/intel_pcode.c index e8c886e4e78d..db4403f63cac 100644 --- a/drivers/gpu/drm/i915/intel_pcode.c +++ b/drivers/gpu/drm/i915/intel_pcode.c @@ -51,11 +51,10 @@ static int gen7_check_mailbox_status(u32 mbox) } } -static int __sandybridge_pcode_rw(struct drm_i915_private *i915, - u32 mbox, u32 *val, u32 *val1, - int fast_timeout_us, - int slow_timeout_ms, - bool is_read) +static int __snb_pcode_rw(struct drm_i915_private *i915, u32 mbox, + u32 *val, u32 *val1, + int fast_timeout_us, int slow_timeout_ms, + bool is_read) { struct intel_uncore *uncore = &i915->uncore; @@ -94,15 +93,12 @@ static int __sandybridge_pcode_rw(struct drm_i915_private *i915, return gen6_check_mailbox_status(mbox); } -int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, - u32 *val, u32 *val1) +int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1) { int err; mutex_lock(&i915->sb_lock); - err = __sandybridge_pcode_rw(i915, mbox, val, val1, - 500, 20, - true); + err = __snb_pcode_rw(i915, mbox, val, val1, 500, 20, true); mutex_unlock(&i915->sb_lock); if (err) { @@ -114,17 +110,14 @@ int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, return err; } -int 
sandybridge_pcode_write_timeout(struct drm_i915_private *i915, - u32 mbox, u32 val, - int fast_timeout_us, - int slow_timeout_ms) +int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val, + int fast_timeout_us, int slow_timeout_ms) { int err; mutex_lock(&i915->sb_lock); - err = __sandybridge_pcode_rw(i915, mbox, &val, NULL, - fast_timeout_us, slow_timeout_ms, - false); + err = __snb_pcode_rw(i915, mbox, &val, NULL, + fast_timeout_us, slow_timeout_ms, false); mutex_unlock(&i915->sb_lock); if (err) { @@ -140,9 +133,7 @@ static bool skl_pcode_try_request(struct drm_i915_private *i915, u32 mbox, u32 request, u32 reply_mask, u32 reply, u32 *status) { - *status = __sandybridge_pcode_rw(i915, mbox, &request, NULL, - 500, 0, - true); + *status = __snb_pcode_rw(i915, mbox, &request, NULL, 500, 0, true); return *status || ((request & reply_mask) == reply); } diff --git a/drivers/gpu/drm/i915/intel_pcode.h b/drivers/gpu/drm/i915/intel_pcode.h index 50806649d4b6..0962a17fac48 100644 --- a/drivers/gpu/drm/i915/intel_pcode.h +++ b/drivers/gpu/drm/i915/intel_pcode.h @@ -10,13 +10,11 @@ struct drm_i915_private; -int sandybridge_pcode_read(struct drm_i915_private *i915, u32 mbox, - u32 *val, u32 *val1); -int sandybridge_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, - u32 val, int fast_timeout_us, - int slow_timeout_ms); -#define sandybridge_pcode_write(i915, mbox, val) \ - sandybridge_pcode_write_timeout(i915, mbox, val, 500, 0) +int snb_pcode_read(struct drm_i915_private *i915, u32 mbox, u32 *val, u32 *val1); +int snb_pcode_write_timeout(struct drm_i915_private *i915, u32 mbox, u32 val, + int fast_timeout_us, int slow_timeout_ms); +#define snb_pcode_write(i915, mbox, val) \ + snb_pcode_write_timeout(i915, mbox, val, 500, 0) int skl_pcode_request(struct drm_i915_private *i915, u32 mbox, u32 request, u32 reply_mask, u32 reply, int timeout_base_ms); diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 76e1da70f4ad..a83b71af551b 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2890,9 +2890,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the first set of memory latencies[0:3] */ val = 0; /* data0 to be programmed to 0 for first set */ - ret = sandybridge_pcode_read(dev_priv, - GEN9_PCODE_READ_MEM_LATENCY, - &val, NULL); + ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, + &val, NULL); if (ret) { drm_err(&dev_priv->drm, @@ -2910,9 +2909,8 @@ static void intel_read_wm_latency(struct drm_i915_private *dev_priv, /* read the second set of memory latencies[4:7] */ val = 1; /* data0 to be programmed to 1 for second set */ - ret = sandybridge_pcode_read(dev_priv, - GEN9_PCODE_READ_MEM_LATENCY, - &val, NULL); + ret = snb_pcode_read(dev_priv, GEN9_PCODE_READ_MEM_LATENCY, + &val, NULL); if (ret) { drm_err(&dev_priv->drm, "SKL Mailbox read error = %d\n", ret); @@ -3702,9 +3700,9 @@ skl_setup_sagv_block_time(struct drm_i915_private *dev_priv) u32 val = 0; int ret; - ret = sandybridge_pcode_read(dev_priv, - GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, - &val, NULL); + ret = snb_pcode_read(dev_priv, + GEN12_PCODE_READ_SAGV_BLOCK_TIME_US, + &val, NULL); if (!ret) { dev_priv->sagv_block_time_us = val; return; @@ -3751,8 +3749,8 @@ intel_enable_sagv(struct drm_i915_private *dev_priv) return 0; drm_dbg_kms(&dev_priv->drm, "Enabling SAGV\n"); - ret = sandybridge_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, - GEN9_SAGV_ENABLE); + ret = snb_pcode_write(dev_priv, GEN9_PCODE_SAGV_CONTROL, + 
GEN9_SAGV_ENABLE); /* We don't need to wait for SAGV when enabling */ -- cgit From 5ec7baef52c367cdbda964aa662f7135c25bab1f Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 13 Jan 2022 08:04:37 -0800 Subject: drm/i915/display/ehl: Update voltage swing table MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit EHL table was recently updated with some minor fixes. BSpec: 21257 Cc: stable@vger.kernel.org Cc: Clint Taylor Signed-off-by: José Roberto de Souza Reviewed-by: Clint Taylor Link: https://patchwork.freedesktop.org/patch/msgid/20220113160437.49059-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c index 09d6ab13536c..0c32210bf503 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c +++ b/drivers/gpu/drm/i915/display/intel_ddi_buf_trans.c @@ -477,14 +477,14 @@ static const struct intel_ddi_buf_trans icl_combo_phy_trans_hdmi = { static const union intel_ddi_buf_trans_entry _ehl_combo_phy_trans_dp[] = { /* NT mV Trans mV db */ { .icl = { 0xA, 0x33, 0x3F, 0x00, 0x00 } }, /* 350 350 0.0 */ - { .icl = { 0xA, 0x47, 0x36, 0x00, 0x09 } }, /* 350 500 3.1 */ - { .icl = { 0xC, 0x64, 0x34, 0x00, 0x0B } }, /* 350 700 6.0 */ - { .icl = { 0x6, 0x7F, 0x30, 0x00, 0x0F } }, /* 350 900 8.2 */ + { .icl = { 0xA, 0x47, 0x38, 0x00, 0x07 } }, /* 350 500 3.1 */ + { .icl = { 0xC, 0x64, 0x33, 0x00, 0x0C } }, /* 350 700 6.0 */ + { .icl = { 0x6, 0x7F, 0x2F, 0x00, 0x10 } }, /* 350 900 8.2 */ { .icl = { 0xA, 0x46, 0x3F, 0x00, 0x00 } }, /* 500 500 0.0 */ - { .icl = { 0xC, 0x64, 0x38, 0x00, 0x07 } }, /* 500 700 2.9 */ + { .icl = { 0xC, 0x64, 0x37, 0x00, 0x08 } }, /* 500 700 2.9 */ { .icl = { 0x6, 0x7F, 0x32, 0x00, 0x0D } }, /* 500 900 5.1 */ { .icl = { 0xC, 0x61, 0x3F, 0x00, 0x00 } }, /* 650 700 0.6 */ - { .icl = { 0x6, 0x7F, 0x38, 0x00, 0x07 } }, /* 600 900 3.5 */ + { .icl = { 0x6, 0x7F, 0x37, 0x00, 0x08 } }, /* 600 900 3.5 */ { .icl = { 0x6, 0x7F, 0x3F, 0x00, 0x00 } }, /* 900 900 0.0 */ }; -- cgit From 5ff59dddacd4738edcbd01847d9df7682348cf86 Mon Sep 17 00:00:00 2001 From: José Roberto de Souza Date: Thu, 13 Jan 2022 09:48:26 -0800 Subject: drm/i915/display/adlp: Implement new step in the TC voltage swing prog sequence MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit TC voltage swing programming sequence was updated with a new step. 
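For context on the register helpers used in the i915_reg.h hunk below, REG_GENMASK(high, low) builds a contiguous bit mask and REG_FIELD_PREP(mask, val) shifts a value into position within that field. A minimal sketch with a made-up field name (not an addition to the patch):

    /* Hypothetical two-bit field occupying bits 6:5 of some register. */
    #define EXAMPLE_FIELD_MASK      REG_GENMASK(6, 5)
    #define EXAMPLE_FIELD(val)      REG_FIELD_PREP(EXAMPLE_FIELD_MASK, (val))

    /* EXAMPLE_FIELD(2) evaluates to 2 << 5 == 0x40; the matching mask is
     * handed to intel_de_rmw() so only those bits get modified.
     */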
BSpec: 54956 Cc: stable@vger.kernel.org Cc: Jani Nikula Cc: Clint Taylor Cc: Imre Deak Signed-off-by: José Roberto de Souza Reviewed-by: Clint Taylor Link: https://patchwork.freedesktop.org/patch/msgid/20220113174826.50272-1-jose.souza@intel.com --- drivers/gpu/drm/i915/display/intel_ddi.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/i915/i915_reg.h | 8 ++++++-- 2 files changed, 28 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 6ee0f77b7927..4e93eac926a5 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -1300,6 +1300,28 @@ static void tgl_dkl_phy_set_signal_levels(struct intel_encoder *encoder, intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), DKL_TX_DP20BITMODE, 0); + + if (IS_ALDERLAKE_P(dev_priv)) { + u32 val; + + if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) { + if (ln == 0) { + val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0); + val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(2); + } else { + val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(3); + val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(3); + } + } else { + val = DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(0); + val |= DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(0); + } + + intel_de_rmw(dev_priv, DKL_TX_DPCNTL2(tc_port), + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK | + DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, + val); + } } } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index b3a05ed86734..4424807c8dec 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -9968,8 +9968,12 @@ enum skl_power_gate { _DKL_PHY2_BASE) + \ _DKL_TX_DPCNTL1) -#define _DKL_TX_DPCNTL2 0x2C8 -#define DKL_TX_DP20BITMODE (1 << 2) +#define _DKL_TX_DPCNTL2 0x2C8 +#define DKL_TX_DP20BITMODE REG_BIT(2) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK REG_GENMASK(4, 3) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX1_MASK, (val)) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK REG_GENMASK(6, 5) +#define DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2(val) REG_FIELD_PREP(DKL_TX_DPCNTL2_CFG_LOADGENSELECT_TX2_MASK, (val)) #define DKL_TX_DPCNTL2(tc_port) _MMIO(_PORT(tc_port, \ _DKL_PHY1_BASE, \ _DKL_PHY2_BASE) + \ -- cgit From a8cf6073d20a8eb19355832006bb2e19a4477c49 Mon Sep 17 00:00:00 2001 From: Juston Li Date: Thu, 6 Jan 2022 12:02:36 -0800 Subject: drm/i915/pxp: Hold RPM wakelock during PXP unbind Similar to commit b8d8436840ca ("drm/i915/gt: Hold RPM wakelock during PXP suspend") but to fix the same warning for unbind during shutdown: ------------[ cut here ]------------ RPM wakelock ref not held during HW access WARNING: CPU: 0 PID: 4139 at drivers/gpu/drm/i915/intel_runtime_pm.h:115 gen12_fwtable_write32+0x1b7/0 Modules linked in: 8021q ccm rfcomm cmac algif_hash algif_skcipher af_alg uinput snd_hda_codec_hdmi vf industrialio iwl7000_mac80211 cros_ec_sensorhub lzo_rle lzo_compress zram iwlwifi cfg80211 joydev CPU: 0 PID: 4139 Comm: halt Tainted: G U W 5.10.84 #13 344e11e079c4a03940d949e537eab645f6 RIP: 0010:gen12_fwtable_write32+0x1b7/0x200 Code: 48 c7 c7 fc b3 b5 89 31 c0 e8 2c f3 ad ff 0f 0b e9 04 ff ff ff c6 05 71 e9 1d 01 01 48 c7 c7 d67 RSP: 0018:ffffa09ec0bb3bb0 EFLAGS: 00010246 RAX: 12dde97bbd260300 RBX: 00000000000320f0 RCX: ffffffff89e60ea0 RDX: 0000000000000000 RSI: 00000000ffffdfff RDI: ffffffff89e60e70 RBP: ffffa09ec0bb3bd8 R08: 0000000000000000 R09: ffffa09ec0bb3950 R10: 00000000ffffdfff R11: ffffffff89e91160 R12: 
0000000000000000 R13: 0000000028121969 R14: ffff9515c32f0990 R15: 0000000040000000 FS: 0000790dcf225740(0000) GS:ffff951737800000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000058b25efae147 CR3: 0000000133ea6001 CR4: 0000000000770ef0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000ffff07f0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: intel_pxp_fini_hw+0x2f/0x39 i915_pxp_tee_component_unbind+0x1c/0x42 component_unbind+0x32/0x48 component_unbind_all+0x80/0x9d take_down_master+0x24/0x36 component_master_del+0x56/0x70 mei_pxp_remove+0x2c/0x68 mei_cl_device_remove+0x35/0x68 device_release_driver_internal+0x100/0x1a1 mei_cl_bus_remove_device+0x21/0x79 mei_cl_bus_remove_devices+0x3b/0x51 mei_stop+0x3b/0xae mei_me_shutdown+0x23/0x58 device_shutdown+0x144/0x1d3 kernel_power_off+0x13/0x4c __se_sys_reboot+0x1d4/0x1e9 do_syscall_64+0x43/0x55 entry_SYSCALL_64_after_hwframe+0x44/0xa9 RIP: 0033:0x790dcf316273 Code: 64 89 01 48 83 c8 ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 44 00 00 89 fa be 69 19 12 28 bf ad8 RSP: 002b:00007ffca0df9198 EFLAGS: 00000202 ORIG_RAX: 00000000000000a9 RAX: ffffffffffffffda RBX: 000000004321fedc RCX: 0000790dcf316273 RDX: 000000004321fedc RSI: 0000000028121969 RDI: 00000000fee1dead RBP: 00007ffca0df9200 R08: 0000000000000007 R09: 0000563ce8cd8970 R10: 0000000000000000 R11: 0000000000000202 R12: 00007ffca0df9308 R13: 0000000000000001 R14: 0000000000000000 R15: 0000000000000003 ---[ end trace 2f501b01b348f114 ]--- ACPI: Preparing to enter system sleep state S5 reboot: Power down Changes since v1: - Rebase to latest drm-tip Fixes: 0cfab4cb3c4e ("drm/i915/pxp: Enable PXP power management") Suggested-by: Lee Shawn C Signed-off-by: Juston Li Reviewed-by: Daniele Ceraolo Spurio Reviewed-by: Rodrigo Vivi Signed-off-by: Rodrigo Vivi Link: https://patchwork.freedesktop.org/patch/msgid/20220106200236.489656-2-juston.li@intel.com --- drivers/gpu/drm/i915/pxp/intel_pxp_tee.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c index 5d169624ad60..f2fc50d7dfd3 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_tee.c @@ -105,9 +105,12 @@ static int i915_pxp_tee_component_bind(struct device *i915_kdev, static void i915_pxp_tee_component_unbind(struct device *i915_kdev, struct device *tee_kdev, void *data) { + struct drm_i915_private *i915 = kdev_to_i915(i915_kdev); struct intel_pxp *pxp = i915_dev_to_pxp(i915_kdev); + intel_wakeref_t wakeref; - intel_pxp_fini_hw(pxp); + with_intel_runtime_pm_if_in_use(&i915->runtime_pm, wakeref) + intel_pxp_fini_hw(pxp); mutex_lock(&pxp->tee_mutex); pxp->pxp_component = NULL; -- cgit From 4ee7e4a6c9b298da44029ed9ec8ed23ae49cc209 Mon Sep 17 00:00:00 2001 From: Christoph Fritz Date: Wed, 12 Jan 2022 19:33:21 +0100 Subject: ovl: fix NULL pointer dereference in copy up warning This patch is fixing a NULL pointer dereference to get a recently introduced warning message working. 
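Background, as I read the printk format documentation (not text from the patch): %pd and %pd2 print the last component(s) of a dentry's name and therefore expect a struct dentry *, while ovl_copy_fileattr() was passing the enclosing struct path *, so the pointer was interpreted as the wrong type. A minimal sketch:

    /* "old" is a struct path *, as in ovl_copy_fileattr(). */
    pr_warn("lower fileattr (%pd2, err=%i)\n", old, err);          /* wrong: path *   */
    pr_warn("lower fileattr (%pd2, err=%i)\n", old->dentry, err);  /* right: dentry * */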
Fixes: 5b0a414d06c3 ("ovl: fix filattr copy-up failure") Signed-off-by: Christoph Fritz Cc: # v5.15 Signed-off-by: Miklos Szeredi --- fs/overlayfs/copy_up.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index b193d08a3dc3..347b06479663 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -145,7 +145,7 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old, if (err == -ENOTTY || err == -EINVAL) return 0; pr_warn("failed to retrieve lower fileattr (%pd2, err=%i)\n", - old, err); + old->dentry, err); return err; } @@ -168,7 +168,7 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old, err = ovl_real_fileattr_get(new, &newfa); if (err) { pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n", - new, err); + new->dentry, err); return err; } -- cgit From 94fd19752b28aa66c98e7991734af91dfc529f8f Mon Sep 17 00:00:00 2001 From: Miklos Szeredi Date: Fri, 14 Jan 2022 16:57:56 +0100 Subject: ovl: don't fail copy up if no fileattr support on upper Christoph Fritz is reporting that failure to copy up fileattr when upper doesn't support fileattr or xattr results in a regression. Return success in these failure cases; this reverts overlayfs to the old behavior. Add a pr_warn_once() in these cases to still let the user know about the copy up failures. Reported-by: Christoph Fritz Fixes: 72db82115d2b ("ovl: copy up sync/noatime fileattr flags") Cc: # v5.15 Signed-off-by: Miklos Szeredi --- fs/overlayfs/copy_up.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c index 347b06479663..e040970408d4 100644 --- a/fs/overlayfs/copy_up.c +++ b/fs/overlayfs/copy_up.c @@ -157,7 +157,9 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old, */ if (oldfa.flags & OVL_PROT_FS_FLAGS_MASK) { err = ovl_set_protattr(inode, new->dentry, &oldfa); - if (err) + if (err == -EPERM) + pr_warn_once("copying fileattr: no xattr on upper\n"); + else if (err) return err; } @@ -167,6 +169,14 @@ static int ovl_copy_fileattr(struct inode *inode, struct path *old, err = ovl_real_fileattr_get(new, &newfa); if (err) { + /* + * Returning an error if upper doesn't support fileattr will + * result in a regression, so revert to the old behavior. + */ + if (err == -ENOTTY || err == -EINVAL) { + pr_warn_once("copying fileattr: no support on upper\n"); + return 0; + } pr_warn("failed to retrieve upper fileattr (%pd2, err=%i)\n", new->dentry, err); return err; -- cgit From 6a8cf6349c7cae12f072889991a2aa0a1ba0bd32 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 12 Jan 2022 16:24:17 -0500 Subject: drm/amdgpu/swsmu: make sienna cichlid function static Unused outside of the file. 
Reported-by: kernel test robot Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 777f717c37ae..d8c57b780bca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -3832,9 +3832,9 @@ static void sienna_cichlid_stb_init(struct smu_context *smu) } -int sienna_cichlid_stb_get_data_direct(struct smu_context *smu, - void *buf, - uint32_t size) +static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu, + void *buf, + uint32_t size) { uint32_t *p = buf; struct amdgpu_device *adev = smu->adev; -- cgit From de05abe6b9d0fe08f65d744f7f75a4cba4df27ad Mon Sep 17 00:00:00 2001 From: Solomon Chiu Date: Thu, 6 Jan 2022 17:11:48 +0800 Subject: drm/amd/display: Enable Freesync Video Mode by default [Why&How] Freesync Video Mode was previously an experimental feature and needed to be enabled by a kernel parameter. We enable it by default by removing the module parameter in amdgpu_dm. v2: squash the patches together Signed-off-by: Solomon Chiu Reviewed-by: Aurabindo Jayamohanan Pillai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 27 ----------------------- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 12 +++++----- 3 files changed, 5 insertions(+), 35 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 4f771f9eb0e3..182d673103ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -197,7 +197,6 @@ extern int amdgpu_emu_mode; extern uint amdgpu_smu_memory_pool_size; extern int amdgpu_smu_pptable_id; extern uint amdgpu_dc_feature_mask; -extern uint amdgpu_freesync_vid_mode; extern uint amdgpu_dc_debug_mask; extern uint amdgpu_dm_abm_level; extern int amdgpu_backlight; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 366e475056bd..10e01928ffad 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -173,7 +173,6 @@ int amdgpu_mes; int amdgpu_noretry = -1; int amdgpu_force_asic_type = -1; int amdgpu_tmz = -1; /* auto */ -uint amdgpu_freesync_vid_mode; int amdgpu_reset_method = -1; /* auto */ int amdgpu_num_kcq = -1; int amdgpu_smartshift_bias; @@ -842,32 +841,6 @@ module_param_named(backlight, amdgpu_backlight, bint, 0444); MODULE_PARM_DESC(tmz, "Enable TMZ feature (-1 = auto (default), 0 = off, 1 = on)"); module_param_named(tmz, amdgpu_tmz, int, 0444); -/** - * DOC: freesync_video (uint) - * Enable the optimization to adjust front porch timing to achieve seamless - * mode change experience when setting a freesync supported mode for which full - * modeset is not needed. - * - * The Display Core will add a set of modes derived from the base FreeSync - * video mode into the corresponding connector's mode list based on commonly - * used refresh rates and VRR range of the connected display, when users enable - * this feature. From the userspace perspective, they can see a seamless mode - * change experience when the change between different refresh rates under the - * same resolution. 
Additionally, userspace applications such as Video playback - * can read this modeset list and change the refresh rate based on the video - * frame rate. Finally, the userspace can also derive an appropriate mode for a - * particular refresh rate based on the FreeSync Mode and add it to the - * connector's mode list. - * - * Note: This is an experimental feature. - * - * The default value: 0 (off). - */ -MODULE_PARM_DESC( - freesync_video, - "Enable freesync modesetting optimization feature (0 = off (default), 1 = on)"); -module_param_named(freesync_video, amdgpu_freesync_vid_mode, uint, 0444); - /** * DOC: reset_method (int) * GPU reset method (-1 = auto (default), 0 = legacy, 1 = mode0, 2 = mode1, 3 = mode2, 4 = baco, 5 = pci) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 7f9773f8dab6..afadec2d1993 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6435,8 +6435,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, */ DRM_DEBUG_DRIVER("No preferred mode found\n"); } else { - recalculate_timing = amdgpu_freesync_vid_mode && - is_freesync_video_mode(&mode, aconnector); + recalculate_timing = is_freesync_video_mode(&mode, aconnector); if (recalculate_timing) { freesync_mode = get_highest_refresh_rate_mode(aconnector, false); saved_mode = mode; @@ -8304,7 +8303,7 @@ static void amdgpu_dm_connector_add_freesync_modes(struct drm_connector *connect struct amdgpu_dm_connector *amdgpu_dm_connector = to_amdgpu_dm_connector(connector); - if (!(amdgpu_freesync_vid_mode && edid)) + if (!edid) return; if (amdgpu_dm_connector->max_vfreq - amdgpu_dm_connector->min_vfreq > 10) @@ -10271,8 +10270,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, * TODO: Refactor this function to allow this check to work * in all conditions. */ - if (amdgpu_freesync_vid_mode && - dm_new_crtc_state->stream && + if (dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) goto skip_modeset; @@ -10307,7 +10305,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, if (!dm_old_crtc_state->stream) goto skip_modeset; - if (amdgpu_freesync_vid_mode && dm_new_crtc_state->stream && + if (dm_new_crtc_state->stream && is_timing_unchanged_for_freesync(new_crtc_state, old_crtc_state)) { new_crtc_state->mode_changed = false; @@ -10319,7 +10317,7 @@ static int dm_update_crtc_state(struct amdgpu_display_manager *dm, set_freesync_fixed_config(dm_new_crtc_state); goto skip_modeset; - } else if (amdgpu_freesync_vid_mode && aconnector && + } else if (aconnector && is_freesync_video_mode(&new_crtc_state->mode, aconnector)) { struct drm_display_mode *high_mode; -- cgit From 8c2d34eb53b96755b33a125c65c3e807dbe430a1 Mon Sep 17 00:00:00 2001 From: Jonathan Gray Date: Thu, 30 Dec 2021 16:00:19 +1100 Subject: drm/radeon: use kernel is_power_of_2 rather than local version MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow the amdgpu change made in 7611750784664db46d0db95631e322aeb263dde7 and replace local radeon function with is_power_of_2(). 
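For reference, the kernel helper being substituted is defined in include/linux/log2.h roughly as follows (quoted from memory); note that, unlike the removed radeon_check_pot_argument(), it also returns false for 0:

    static inline __attribute__((const))
    bool is_power_of_2(unsigned long n)
    {
            return (n != 0 && ((n & (n - 1)) == 0));
    }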
Reviewed-by: Christian König Signed-off-by: Jonathan Gray Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_device.c | 19 +++---------------- 1 file changed, 3 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index 4f0fbf667431..15692cb241fc 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -1085,19 +1085,6 @@ static unsigned int radeon_vga_set_decode(struct pci_dev *pdev, bool state) return VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM; } -/** - * radeon_check_pot_argument - check that argument is a power of two - * - * @arg: value to check - * - * Validates that a certain argument is a power of two (all asics). - * Returns true if argument is valid. - */ -static bool radeon_check_pot_argument(int arg) -{ - return (arg & (arg - 1)) == 0; -} - /** * radeon_gart_size_auto - Determine a sensible default GART size * according to ASIC family. @@ -1126,7 +1113,7 @@ static int radeon_gart_size_auto(enum radeon_family family) static void radeon_check_arguments(struct radeon_device *rdev) { /* vramlimit must be a power of two */ - if (!radeon_check_pot_argument(radeon_vram_limit)) { + if (!is_power_of_2(radeon_vram_limit)) { dev_warn(rdev->dev, "vram limit (%d) must be a power of 2\n", radeon_vram_limit); radeon_vram_limit = 0; @@ -1140,7 +1127,7 @@ static void radeon_check_arguments(struct radeon_device *rdev) dev_warn(rdev->dev, "gart size (%d) too small\n", radeon_gart_size); radeon_gart_size = radeon_gart_size_auto(rdev->family); - } else if (!radeon_check_pot_argument(radeon_gart_size)) { + } else if (!is_power_of_2(radeon_gart_size)) { dev_warn(rdev->dev, "gart size (%d) must be a power of 2\n", radeon_gart_size); radeon_gart_size = radeon_gart_size_auto(rdev->family); @@ -1163,7 +1150,7 @@ static void radeon_check_arguments(struct radeon_device *rdev) break; } - if (!radeon_check_pot_argument(radeon_vm_size)) { + if (!is_power_of_2(radeon_vm_size)) { dev_warn(rdev->dev, "VM size (%d) must be a power of 2\n", radeon_vm_size); radeon_vm_size = 4; -- cgit From bc143d8b8387ff0a22e4ef8e2375e63aa24bc311 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Nov 2021 10:57:20 +0800 Subject: drm/amd/pm: do not expose implementation details to other blocks out of power Those implementation details (whether swsmu is supported, whether some ppt_funcs are supported, accessing internal statistics, ...) should be kept internal. It is not good practice, and is even error prone, to expose implementation details.
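To make the encapsulation concrete, a rough sketch of the wrapper pattern the series moves callers onto (the real helpers are added in amdgpu_dpm.c by this patch and differ in detail; this is illustrative only):

    /* Other IP blocks call an amdgpu_dpm_* wrapper and never touch
     * adev->smu or the ppt_funcs table directly.
     */
    int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, void *umc_ecc)
    {
            struct smu_context *smu = &adev->smu;

            if (!is_support_sw_smu(adev))
                    return -EOPNOTSUPP;

            return smu_get_ecc_info(smu, umc_ecc);
    }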
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/aldebaran.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 25 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 18 +----- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 7 -- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 5 +- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 2 +- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 4 ++ drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 90 ++++++++++++++++++++++++++ drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 25 ++++++- drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 11 +--- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 26 +++++--- 13 files changed, 161 insertions(+), 65 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/aldebaran.c b/drivers/gpu/drm/amd/amdgpu/aldebaran.c index bcfdb63b1d42..a545df4efce1 100644 --- a/drivers/gpu/drm/amd/amdgpu/aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/aldebaran.c @@ -260,7 +260,7 @@ static int aldebaran_mode2_restore_ip(struct amdgpu_device *adev) adev->gfx.rlc.funcs->resume(adev); /* Wait for FW reset event complete */ - r = smu_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0); + r = amdgpu_dpm_wait_for_event(adev, SMU_EVENT_RESET_COMPLETE, 0); if (r) { dev_err(adev->dev, "Failed to get response from firmware after reset\n"); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 25e2e5bf90eb..9aea1cc5b27e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1585,22 +1585,25 @@ static int amdgpu_debugfs_sclk_set(void *data, u64 val) return ret; } - if (is_support_sw_smu(adev)) { - ret = smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, &min_freq, &max_freq); - if (ret || val > max_freq || val < min_freq) - return -EINVAL; - ret = smu_set_soft_freq_range(&adev->smu, SMU_SCLK, (uint32_t)val, (uint32_t)val); - } else { - return 0; + ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min_freq, &max_freq); + if (ret == -EOPNOTSUPP) { + ret = 0; + goto out; } + if (ret || val > max_freq || val < min_freq) { + ret = -EINVAL; + goto out; + } + + ret = amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, (uint32_t)val, (uint32_t)val); + if (ret) + ret = -EINVAL; +out: pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - if (ret) - return -EINVAL; - - return 0; + return ret; } DEFINE_DEBUGFS_ATTRIBUTE(fops_ib_preempt, NULL, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index cf7fad88c138..596bb2fdb8a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2624,7 +2624,7 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev) /* For passthrough configuration on arcturus and aldebaran, enable special handling SBR */ if (amdgpu_passthrough(adev) && ((adev->asic_type == CHIP_ARCTURUS && adev->gmc.xgmi.num_physical_nodes > 1)|| adev->asic_type == CHIP_ALDEBARAN )) - smu_handle_passthrough_sbr(&adev->smu, true); + amdgpu_dpm_handle_passthrough_sbr(adev, true); if (adev->gmc.xgmi.num_physical_nodes > 1) { mutex_lock(&mgpu_info.mutex); @@ -2881,7 +2881,7 @@ static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev) int i, r; if (adev->in_s0ix) - amdgpu_gfx_state_change_set(adev, sGpuChangeState_D3Entry); + amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D3Entry); 
for (i = adev->num_ip_blocks - 1; i >= 0; i--) { if (!adev->ip_blocks[i].status.valid) @@ -4044,7 +4044,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon) return 0; if (adev->in_s0ix) - amdgpu_gfx_state_change_set(adev, sGpuChangeState_D0Entry); + amdgpu_dpm_gfx_state_change(adev, sGpuChangeState_D0Entry); /* post card */ if (amdgpu_device_need_post(adev)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 1916ec84dd71..3d8f82dc8c97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -615,7 +615,7 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) mutex_lock(&adev->gfx.gfx_off_mutex); - r = smu_get_status_gfxoff(adev, value); + r = amdgpu_dpm_get_status_gfxoff(adev, value); mutex_unlock(&adev->gfx.gfx_off_mutex); @@ -852,19 +852,3 @@ int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev) } return amdgpu_num_kcq; } - -/* amdgpu_gfx_state_change_set - Handle gfx power state change set - * @adev: amdgpu_device pointer - * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) - * - */ - -void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state) -{ - mutex_lock(&adev->pm.mutex); - if (adev->powerplay.pp_funcs && - adev->powerplay.pp_funcs->gfx_state_change_set) - ((adev)->powerplay.pp_funcs->gfx_state_change_set( - (adev)->powerplay.pp_handle, state)); - mutex_unlock(&adev->pm.mutex); -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index f851196c83a5..776c886fd94a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -47,12 +47,6 @@ enum amdgpu_gfx_pipe_priority { AMDGPU_GFX_PIPE_PRIO_HIGH = AMDGPU_RING_PRIO_2 }; -/* Argument for PPSMC_MSG_GpuChangeState */ -enum gfx_change_state { - sGpuChangeState_D0Entry = 1, - sGpuChangeState_D3Entry, -}; - #define AMDGPU_GFX_QUEUE_PRIORITY_MINIMUM 0 #define AMDGPU_GFX_QUEUE_PRIORITY_MAXIMUM 15 @@ -410,5 +404,4 @@ int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev, uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg); void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v); int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev); -void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 586a30ad13e2..4a9970423e7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -901,7 +901,7 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d * choosing right query method according to * whether smu support query error information */ - ret = smu_get_ecc_info(&adev->smu, (void *)&(ras->umc_ecc)); + ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc)); if (ret == -EOPNOTSUPP) { if (adev->umc.ras_funcs && adev->umc.ras_funcs->query_ras_error_count) @@ -2141,8 +2141,7 @@ int amdgpu_ras_recovery_init(struct amdgpu_device *adev) if (ret) goto free; - if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num) - adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs); + amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); } #ifdef CONFIG_X86_MCE_AMD diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 
46264a4002f7..25951b2d83b6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -33,7 +33,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, int ret = 0; kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - ret = smu_get_ecc_info(&adev->smu, (void *)&(con->umc_ecc)); + ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc)); if (ret == -EOPNOTSUPP) { if (adev->umc.ras_funcs && adev->umc.ras_funcs->query_ras_error_count) @@ -96,8 +96,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, err_data->err_addr_cnt); amdgpu_ras_save_bad_pages(adev); - if (adev->smu.ppt_funcs && adev->smu.ppt_funcs->send_hbm_bad_pages_num) - adev->smu.ppt_funcs->send_hbm_bad_pages_num(&adev->smu, con->eeprom_control.ras_num_recs); + amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs); } if (reset) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index deae12dc777d..329a4c89f1e6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -222,7 +222,7 @@ void kfd_smi_event_update_thermal_throttling(struct kfd_dev *dev, len = snprintf(fifo_in, sizeof(fifo_in), "%x %llx:%llx\n", KFD_SMI_EVENT_THERMAL_THROTTLE, throttle_bitmask, - atomic64_read(&dev->adev->smu.throttle_int_counter)); + amdgpu_dpm_get_thermal_throttling_counter(dev->adev)); add_event_to_kfifo(dev, KFD_SMI_EVENT_THERMAL_THROTTLE, fifo_in, len); } diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 5c0867ebcfce..2e295facd086 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -26,6 +26,10 @@ extern const struct amdgpu_ip_block_version pp_smu_ip_block; +enum smu_event_type { + SMU_EVENT_RESET_COMPLETE = 0, +}; + struct amd_vce_state { /* vce clocks */ u32 evclk; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 08362d506534..73f3d2912f13 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -1614,3 +1614,93 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio return 0; } + +int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) +{ + return smu_handle_passthrough_sbr(&adev->smu, enable); +} + +int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) +{ + return smu_send_hbm_bad_pages_num(&adev->smu, size); +} + +int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, + enum pp_clock_type type, + uint32_t *min, + uint32_t *max) +{ + if (!is_support_sw_smu(adev)) + return -EOPNOTSUPP; + + switch (type) { + case PP_SCLK: + return smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, min, max); + default: + return -EINVAL; + } +} + +int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, + enum pp_clock_type type, + uint32_t min, + uint32_t max) +{ + if (!is_support_sw_smu(adev)) + return -EOPNOTSUPP; + + switch (type) { + case PP_SCLK: + return smu_set_soft_freq_range(&adev->smu, SMU_SCLK, min, max); + default: + return -EINVAL; + } +} + +int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, + enum smu_event_type event, + uint64_t event_arg) +{ + if (!is_support_sw_smu(adev)) + return -EOPNOTSUPP; + + return smu_wait_for_event(&adev->smu, event, event_arg); +} + +int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) +{ + if 
(!is_support_sw_smu(adev)) + return -EOPNOTSUPP; + + return smu_get_status_gfxoff(&adev->smu, value); +} + +uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) +{ + return atomic64_read(&adev->smu.throttle_int_counter); +} + +/* amdgpu_dpm_gfx_state_change - Handle gfx power state change set + * @adev: amdgpu_device pointer + * @state: gfx power state(1 -sGpuChangeState_D0Entry and 2 -sGpuChangeState_D3Entry) + * + */ +void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, + enum gfx_change_state state) +{ + mutex_lock(&adev->pm.mutex); + if (adev->powerplay.pp_funcs && + adev->powerplay.pp_funcs->gfx_state_change_set) + ((adev)->powerplay.pp_funcs->gfx_state_change_set( + (adev)->powerplay.pp_handle, state)); + mutex_unlock(&adev->pm.mutex); +} + +int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, + void *umc_ecc) +{ + if (!is_support_sw_smu(adev)) + return -EOPNOTSUPP; + + return smu_get_ecc_info(&adev->smu, umc_ecc); +} diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index c464a045000d..b444937c3b68 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -23,6 +23,12 @@ #ifndef __AMDGPU_DPM_H__ #define __AMDGPU_DPM_H__ +/* Argument for PPSMC_MSG_GpuChangeState */ +enum gfx_change_state { + sGpuChangeState_D0Entry = 1, + sGpuChangeState_D3Entry, +}; + enum amdgpu_int_thermal_type { THERMAL_TYPE_NONE, THERMAL_TYPE_EXTERNAL, @@ -582,5 +588,22 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); void amdgpu_pm_print_power_states(struct amdgpu_device *adev); int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version); - +int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable); +int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size); +int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, + enum pp_clock_type type, + uint32_t *min, + uint32_t *max); +int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, + enum pp_clock_type type, + uint32_t min, + uint32_t max); +int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, + uint64_t event_arg); +int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value); +uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev); +void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, + enum gfx_change_state state); +int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, + void *umc_ecc); #endif diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index ba7565bc8104..b90ed0ec9322 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -241,11 +241,6 @@ struct smu_user_dpm_profile { uint32_t clk_dependency; }; -enum smu_event_type { - - SMU_EVENT_RESET_COMPLETE = 0, -}; - #define SMU_TABLE_INIT(tables, table_id, s, a, d) \ do { \ tables[table_id].size = s; \ @@ -1413,15 +1408,15 @@ int smu_set_ac_dc(struct smu_context *smu); int smu_allow_xgmi_power_down(struct smu_context *smu, bool en); -int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value); +int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value); int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable); -int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, +int 
smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg); int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc); int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size); void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev); - +int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size); #endif #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index d93d28c1af95..9d5a5a1f15df 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -100,17 +100,14 @@ static int smu_sys_set_pp_feature_mask(void *handle, return ret; } -int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) +int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) { - int ret = 0; - struct smu_context *smu = &adev->smu; + if (!smu->ppt_funcs->get_gfx_off_status) + return -EINVAL; - if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status) - *value = smu_get_gfx_off_status(smu); - else - ret = -EINVAL; + *value = smu_get_gfx_off_status(smu); - return ret; + return 0; } int smu_set_soft_freq_range(struct smu_context *smu, @@ -3165,11 +3162,10 @@ static const struct amd_pm_funcs swsmu_pm_funcs = { .get_smu_prv_buf_details = smu_get_prv_buffer_details, }; -int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, +int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, uint64_t event_arg) { int ret = -EINVAL; - struct smu_context *smu = &adev->smu; if (smu->ppt_funcs->wait_for_event) { mutex_lock(&smu->mutex); @@ -3283,3 +3279,13 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) #endif } + +int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) +{ + int ret = 0; + + if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num) + ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size); + + return ret; +} -- cgit From 79c65f3fcbb1288b84473d45e6d001820a971d54 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Nov 2021 11:05:22 +0800 Subject: drm/amd/pm: do not expose power implementation details to amdgpu_pm.c amdgpu_pm.c holds all the user sysfs/hwmon interfaces. It's another client of our power APIs. It's not proper to spike into power implementation details there. 
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 461 +++++++++++++++++++++++++- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 519 +++++++++++------------------- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 160 ++++----- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 3 - 4 files changed, 712 insertions(+), 431 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 73f3d2912f13..494cf1459d0f 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -1453,7 +1453,9 @@ static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) if (equal) return; - amdgpu_dpm_set_power_state(adev); + if (adev->powerplay.pp_funcs->set_power_state) + adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle); + amdgpu_dpm_post_set_power_state(adev); adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; @@ -1704,3 +1706,460 @@ int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, return smu_get_ecc_info(&adev->smu, umc_ecc); } + +struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, + uint32_t idx) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_vce_clock_state) + return NULL; + + return pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, + idx); +} + +void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, + enum amd_pm_state_type *state) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_current_power_state) { + *state = adev->pm.dpm.user_state; + return; + } + + *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); + if (*state < POWER_STATE_TYPE_DEFAULT || + *state > POWER_STATE_TYPE_INTERNAL_3DPERF) + *state = adev->pm.dpm.user_state; +} + +void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, + enum amd_pm_state_type state) +{ + adev->pm.dpm.user_state = state; + + if (is_support_sw_smu(adev)) + return; + + if (amdgpu_dpm_dispatch_task(adev, + AMD_PP_TASK_ENABLE_USER_STATE, + &state) == -EOPNOTSUPP) + amdgpu_pm_compute_clocks(adev); +} + +enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + enum amd_dpm_forced_level level; + + if (pp_funcs->get_performance_level) + level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); + else + level = adev->pm.dpm.forced_level; + + return level; +} + +int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, + enum amd_dpm_forced_level level) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (pp_funcs->force_performance_level) { + if (adev->pm.dpm.thermal_active) + return -EINVAL; + + if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, + level)) + return -EINVAL; + + adev->pm.dpm.forced_level = level; + } + + return 0; +} + +int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, + struct pp_states_info *states) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_pp_num_states) + return -EOPNOTSUPP; + + return pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, states); +} + +int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, + enum amd_pp_task task_id, + enum amd_pm_state_type *user_state) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->dispatch_tasks) + return -EOPNOTSUPP; + + return 
pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, task_id, user_state); +} + +int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_pp_table) + return 0; + + return pp_funcs->get_pp_table(adev->powerplay.pp_handle, table); +} + +int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, + uint32_t type, + long *input, + uint32_t size) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_fine_grain_clk_vol) + return 0; + + return pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, + type, + input, + size); +} + +int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, + uint32_t type, + long *input, + uint32_t size) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->odn_edit_dpm_table) + return 0; + + return pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, + type, + input, + size); +} + +int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, + enum pp_clock_type type, + char *buf) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->print_clock_levels) + return 0; + + return pp_funcs->print_clock_levels(adev->powerplay.pp_handle, + type, + buf); +} + +int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, + uint64_t ppfeature_masks) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_ppfeature_status) + return 0; + + return pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, + ppfeature_masks); +} + +int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_ppfeature_status) + return 0; + + return pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, + buf); +} + +int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, + enum pp_clock_type type, + uint32_t mask) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->force_clock_level) + return 0; + + return pp_funcs->force_clock_level(adev->powerplay.pp_handle, + type, + mask); +} + +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_sclk_od) + return 0; + + return pp_funcs->get_sclk_od(adev->powerplay.pp_handle); +} + +int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (is_support_sw_smu(adev)) + return 0; + + if (pp_funcs->set_sclk_od) + pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); + + if (amdgpu_dpm_dispatch_task(adev, + AMD_PP_TASK_READJUST_POWER_STATE, + NULL) == -EOPNOTSUPP) { + adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + + return 0; +} + +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_mclk_od) + return 0; + + return pp_funcs->get_mclk_od(adev->powerplay.pp_handle); +} + +int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (is_support_sw_smu(adev)) + return 0; + + if (pp_funcs->set_mclk_od) + pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); + + if (amdgpu_dpm_dispatch_task(adev, + AMD_PP_TASK_READJUST_POWER_STATE, + NULL) == -EOPNOTSUPP) { 
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; + amdgpu_pm_compute_clocks(adev); + } + + return 0; +} + +int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, + char *buf) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_power_profile_mode) + return -EOPNOTSUPP; + + return pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, + buf); +} + +int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, + long *input, uint32_t size) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_power_profile_mode) + return 0; + + return pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, + input, + size); +} + +int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_gpu_metrics) + return 0; + + return pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, table); +} + +int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, + uint32_t *fan_mode) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_fan_control_mode) + return -EOPNOTSUPP; + + *fan_mode = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle); + + return 0; +} + +int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, + uint32_t speed) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_fan_speed_pwm) + return -EINVAL; + + return pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, speed); +} + +int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, + uint32_t *speed) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_fan_speed_pwm) + return -EINVAL; + + return pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, speed); +} + +int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, + uint32_t *speed) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_fan_speed_rpm) + return -EINVAL; + + return pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, speed); +} + +int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, + uint32_t speed) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_fan_speed_rpm) + return -EINVAL; + + return pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, speed); +} + +int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, + uint32_t mode) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_fan_control_mode) + return -EOPNOTSUPP; + + pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, mode); + + return 0; +} + +int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, + uint32_t *limit, + enum pp_power_limit_level pp_limit_level, + enum pp_power_type power_type) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_power_limit) + return -ENODATA; + + return pp_funcs->get_power_limit(adev->powerplay.pp_handle, + limit, + pp_limit_level, + power_type); +} + +int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, + uint32_t limit) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_power_limit) + return -EINVAL; + + return pp_funcs->set_power_limit(adev->powerplay.pp_handle, limit); +} + +int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) +{ + if (!is_support_sw_smu(adev)) + return false; + + return 
is_support_cclk_dpm(adev); +} + +int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, + struct seq_file *m) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->debugfs_print_current_performance_level) + return -EOPNOTSUPP; + + pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, + m); + + return 0; +} + +int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, + void **addr, + size_t *size) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_smu_prv_buf_details) + return -ENOSYS; + + return pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, + addr, + size); +} + +int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) +{ + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + + if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || + (is_support_sw_smu(adev) && adev->smu.is_apu) || + (!is_support_sw_smu(adev) && hwmgr->od_enabled)) + return true; + + return false; +} + +int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, + const char *buf, + size_t size) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_pp_table) + return -EOPNOTSUPP; + + return pp_funcs->set_pp_table(adev->powerplay.pp_handle, + buf, + size); +} + +int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) +{ + return adev->smu.cpu_core_num; +} + +void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) +{ + if (!is_support_sw_smu(adev)) + return; + + amdgpu_smu_stb_debug_fs_init(adev); +} diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index e2cae97f4ff1..b16bb67b93f1 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -34,7 +34,6 @@ #include #include #include -#include "hwmgr.h" static const struct cg_flag_name clocks[] = { {AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"}, @@ -132,7 +131,6 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; enum amd_pm_state_type pm; int ret; @@ -147,11 +145,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev, return ret; } - if (pp_funcs->get_current_power_state) { - pm = amdgpu_dpm_get_current_power_state(adev); - } else { - pm = adev->pm.dpm.user_state; - } + amdgpu_dpm_get_current_power_state(adev, &pm); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -191,19 +185,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev, return ret; } - if (is_support_sw_smu(adev)) { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.user_state = state; - mutex_unlock(&adev->pm.mutex); - } else if (adev->powerplay.pp_funcs->dispatch_tasks) { - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); - } else { - mutex_lock(&adev->pm.mutex); - adev->pm.dpm.user_state = state; - mutex_unlock(&adev->pm.mutex); + amdgpu_dpm_set_power_state(adev, state); - amdgpu_pm_compute_clocks(adev); - } pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -290,10 +273,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->get_performance_level) - level = amdgpu_dpm_get_performance_level(adev); - else - level = adev->pm.dpm.forced_level; + level = 
amdgpu_dpm_get_performance_level(adev); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -318,7 +298,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; enum amd_dpm_forced_level level; enum amd_dpm_forced_level current_level; int ret = 0; @@ -358,11 +337,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return ret; } - if (pp_funcs->get_performance_level) - current_level = amdgpu_dpm_get_performance_level(adev); - else - current_level = adev->pm.dpm.forced_level; - + current_level = amdgpu_dpm_get_performance_level(adev); if (current_level == level) { pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -390,25 +365,12 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return -EINVAL; } - if (pp_funcs->force_performance_level) { - mutex_lock(&adev->pm.mutex); - if (adev->pm.dpm.thermal_active) { - mutex_unlock(&adev->pm.mutex); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return -EINVAL; - } - ret = amdgpu_dpm_force_performance_level(adev, level); - if (ret) { - mutex_unlock(&adev->pm.mutex); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return -EINVAL; - } else { - adev->pm.dpm.forced_level = level; - } - mutex_unlock(&adev->pm.mutex); + if (amdgpu_dpm_force_performance_level(adev, level)) { + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return -EINVAL; } + pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -421,7 +383,6 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct pp_states_info data; uint32_t i; int buf_len, ret; @@ -437,11 +398,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev, return ret; } - if (pp_funcs->get_pp_num_states) { - amdgpu_dpm_get_pp_num_states(adev, &data); - } else { + if (amdgpu_dpm_get_pp_num_states(adev, &data)) memset(&data, 0, sizeof(data)); - } pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -463,7 +421,6 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, { struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct pp_states_info data = {0}; enum amd_pm_state_type pm = 0; int i = 0, ret = 0; @@ -479,15 +436,16 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev, return ret; } - if (pp_funcs->get_current_power_state - && pp_funcs->get_pp_num_states) { - pm = amdgpu_dpm_get_current_power_state(adev); - amdgpu_dpm_get_pp_num_states(adev, &data); - } + amdgpu_dpm_get_current_power_state(adev, &pm); + + ret = amdgpu_dpm_get_pp_num_states(adev, &data); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); + if (ret) + return ret; + for (i = 0; i < data.nums; i++) { if (pm == data.states[i]) break; @@ -525,6 +483,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); enum amd_pm_state_type state = 0; + struct pp_states_info data; 
unsigned long idx; int ret; @@ -533,41 +492,49 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - if (strlen(buf) == 1) - adev->pp_force_state_enabled = false; - else if (is_support_sw_smu(adev)) - adev->pp_force_state_enabled = false; - else if (adev->powerplay.pp_funcs->dispatch_tasks && - adev->powerplay.pp_funcs->get_pp_num_states) { - struct pp_states_info data; - - ret = kstrtoul(buf, 0, &idx); - if (ret || idx >= ARRAY_SIZE(data.states)) - return -EINVAL; + adev->pp_force_state_enabled = false; - idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); + if (strlen(buf) == 1) + return count; - amdgpu_dpm_get_pp_num_states(adev, &data); - state = data.states[idx]; + ret = kstrtoul(buf, 0, &idx); + if (ret || idx >= ARRAY_SIZE(data.states)) + return -EINVAL; - ret = pm_runtime_get_sync(ddev->dev); - if (ret < 0) { - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } + idx = array_index_nospec(idx, ARRAY_SIZE(data.states)); - /* only set user selected power states */ - if (state != POWER_STATE_TYPE_INTERNAL_BOOT && - state != POWER_STATE_TYPE_DEFAULT) { - amdgpu_dpm_dispatch_task(adev, - AMD_PP_TASK_ENABLE_USER_STATE, &state); - adev->pp_force_state_enabled = true; - } - pm_runtime_mark_last_busy(ddev->dev); + ret = pm_runtime_get_sync(ddev->dev); + if (ret < 0) { pm_runtime_put_autosuspend(ddev->dev); + return ret; + } + + ret = amdgpu_dpm_get_pp_num_states(adev, &data); + if (ret) + goto err_out; + + state = data.states[idx]; + + /* only set user selected power states */ + if (state != POWER_STATE_TYPE_INTERNAL_BOOT && + state != POWER_STATE_TYPE_DEFAULT) { + ret = amdgpu_dpm_dispatch_task(adev, + AMD_PP_TASK_ENABLE_USER_STATE, &state); + if (ret) + goto err_out; + + adev->pp_force_state_enabled = true; } + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return count; + +err_out: + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return ret; } /** @@ -601,17 +568,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->get_pp_table) { - size = amdgpu_dpm_get_pp_table(adev, &table); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - if (size < 0) - return size; - } else { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return 0; - } + size = amdgpu_dpm_get_pp_table(adev, &table); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + if (size <= 0) + return size; if (size >= PAGE_SIZE) size = PAGE_SIZE - 1; @@ -642,15 +605,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev, } ret = amdgpu_dpm_set_pp_table(adev, buf, count); - if (ret) { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return ret; - } pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); + if (ret) + return ret; + return count; } @@ -866,46 +827,32 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) { - ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type, - parameter, - parameter_size); - if (ret) { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return -EINVAL; - } - } + if (amdgpu_dpm_set_fine_grain_clk_vol(adev, + type, + parameter, + parameter_size)) + goto err_out; - if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { - ret = 
amdgpu_dpm_odn_edit_dpm_table(adev, type, - parameter, parameter_size); - if (ret) { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return -EINVAL; - } - } + if (amdgpu_dpm_odn_edit_dpm_table(adev, type, + parameter, parameter_size)) + goto err_out; if (type == PP_OD_COMMIT_DPM_TABLE) { - if (adev->powerplay.pp_funcs->dispatch_tasks) { - amdgpu_dpm_dispatch_task(adev, - AMD_PP_TASK_READJUST_POWER_STATE, - NULL); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return count; - } else { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return -EINVAL; - } + if (amdgpu_dpm_dispatch_task(adev, + AMD_PP_TASK_READJUST_POWER_STATE, + NULL)) + goto err_out; } pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); return count; + +err_out: + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + return -EINVAL; } static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, @@ -928,8 +875,8 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->print_clock_levels) { - size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); + size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); + if (size > 0) { size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size); @@ -985,17 +932,14 @@ static ssize_t amdgpu_set_pp_features(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->set_ppfeature_status) { - ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); - if (ret) { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return -EINVAL; - } - } + ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); + pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); + if (ret) + return -EINVAL; + return count; } @@ -1019,9 +963,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->get_ppfeature_status) - size = amdgpu_dpm_get_ppfeature_status(adev, buf); - else + size = amdgpu_dpm_get_ppfeature_status(adev, buf); + if (size <= 0) size = sysfs_emit(buf, "\n"); pm_runtime_mark_last_busy(ddev->dev); @@ -1080,9 +1023,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->print_clock_levels) - size = amdgpu_dpm_print_clock_levels(adev, type, buf); - else + size = amdgpu_dpm_print_clock_levels(adev, type, buf); + if (size <= 0) size = sysfs_emit(buf, "\n"); pm_runtime_mark_last_busy(ddev->dev); @@ -1151,10 +1093,7 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->force_clock_level) - ret = amdgpu_dpm_force_clock_level(adev, type, mask); - else - ret = 0; + ret = amdgpu_dpm_force_clock_level(adev, type, mask); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -1305,10 +1244,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, return ret; } - if (is_support_sw_smu(adev)) - value = 0; - else if (adev->powerplay.pp_funcs->get_sclk_od) - value = amdgpu_dpm_get_sclk_od(adev); + value = amdgpu_dpm_get_sclk_od(adev); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -1342,19 +1278,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, return ret; } - if 
(is_support_sw_smu(adev)) { - value = 0; - } else { - if (adev->powerplay.pp_funcs->set_sclk_od) - amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); - - if (adev->powerplay.pp_funcs->dispatch_tasks) { - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); - } else { - adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; - amdgpu_pm_compute_clocks(adev); - } - } + amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -1382,10 +1306,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, return ret; } - if (is_support_sw_smu(adev)) - value = 0; - else if (adev->powerplay.pp_funcs->get_mclk_od) - value = amdgpu_dpm_get_mclk_od(adev); + value = amdgpu_dpm_get_mclk_od(adev); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -1419,19 +1340,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, return ret; } - if (is_support_sw_smu(adev)) { - value = 0; - } else { - if (adev->powerplay.pp_funcs->set_mclk_od) - amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); - - if (adev->powerplay.pp_funcs->dispatch_tasks) { - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); - } else { - adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; - amdgpu_pm_compute_clocks(adev); - } - } + amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -1479,9 +1388,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->get_power_profile_mode) - size = amdgpu_dpm_get_power_profile_mode(adev, buf); - else + size = amdgpu_dpm_get_power_profile_mode(adev, buf); + if (size <= 0) size = sysfs_emit(buf, "\n"); pm_runtime_mark_last_busy(ddev->dev); @@ -1545,8 +1453,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->set_power_profile_mode) - ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); + ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); @@ -1812,9 +1719,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev, return ret; } - if (adev->powerplay.pp_funcs->get_gpu_metrics) - size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); - + size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics); if (size <= 0) goto out; @@ -2053,7 +1958,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ { struct device_attribute *dev_attr = &attr->dev_attr; const char *attr_name = dev_attr->attr.name; - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; enum amd_asic_type asic_type = adev->asic_type; if (!(attr->flags & mask)) { @@ -2076,9 +1980,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ *states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) { *states = ATTR_STATE_UNSUPPORTED; - if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || - (is_support_sw_smu(adev) && adev->smu.is_apu) || - (!is_support_sw_smu(adev) && hwmgr->od_enabled)) + if (amdgpu_dpm_is_overdrive_supported(adev)) *states = ATTR_STATE_SUPPORTED; } else if (DEVICE_ATTR_IS(mem_busy_percent)) { if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10) @@ -2106,8 +2008,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_ if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID)) 
*states = ATTR_STATE_UNSUPPORTED; } else if (DEVICE_ATTR_IS(pp_power_profile_mode)) { - if (!adev->powerplay.pp_funcs->get_power_profile_mode || - amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) + if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP) *states = ATTR_STATE_UNSUPPORTED; } @@ -2396,17 +2297,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, return ret; } - if (!adev->powerplay.pp_funcs->get_fan_control_mode) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return -EINVAL; - } - - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); + ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + if (ret) + return -EINVAL; + return sysfs_emit(buf, "%u\n", pwm_mode); } @@ -2434,17 +2332,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, return ret; } - if (!adev->powerplay.pp_funcs->set_fan_control_mode) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return -EINVAL; - } - - amdgpu_dpm_set_fan_control_mode(adev, value); + ret = amdgpu_dpm_set_fan_control_mode(adev, value); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + if (ret) + return -EINVAL; + return count; } @@ -2476,32 +2371,29 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; + err = kstrtou32(buf, 10, &value); + if (err) + return err; + err = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (err < 0) { pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); + err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); + if (err) + goto out; + if (pwm_mode != AMD_FAN_CTRL_MANUAL) { pr_info("manual fan speed control should be enabled first\n"); - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return -EINVAL; + err = -EINVAL; + goto out; } - err = kstrtou32(buf, 10, &value); - if (err) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; - } - - if (adev->powerplay.pp_funcs->set_fan_speed_pwm) - err = amdgpu_dpm_set_fan_speed_pwm(adev, value); - else - err = -EINVAL; + err = amdgpu_dpm_set_fan_speed_pwm(adev, value); +out: pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2530,10 +2422,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, return err; } - if (adev->powerplay.pp_funcs->get_fan_speed_pwm) - err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed); - else - err = -EINVAL; + err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2563,10 +2452,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, return err; } - if (adev->powerplay.pp_funcs->get_fan_speed_rpm) - err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); - else - err = -EINVAL; + err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2660,10 +2546,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, return err; } - if (adev->powerplay.pp_funcs->get_fan_speed_rpm) - err = 
amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); - else - err = -EINVAL; + err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2688,32 +2571,28 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; + err = kstrtou32(buf, 10, &value); + if (err) + return err; + err = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (err < 0) { pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return err; } - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); + err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); + if (err) + goto out; if (pwm_mode != AMD_FAN_CTRL_MANUAL) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return -ENODATA; - } - - err = kstrtou32(buf, 10, &value); - if (err) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return err; + err = -ENODATA; + goto out; } - if (adev->powerplay.pp_funcs->set_fan_speed_rpm) - err = amdgpu_dpm_set_fan_speed_rpm(adev, value); - else - err = -EINVAL; + err = amdgpu_dpm_set_fan_speed_rpm(adev, value); +out: pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -2742,17 +2621,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, return ret; } - if (!adev->powerplay.pp_funcs->get_fan_control_mode) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return -EINVAL; - } - - pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); + ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + if (ret) + return -EINVAL; + return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 
0 : 1); } @@ -2788,16 +2664,14 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, return err; } - if (!adev->powerplay.pp_funcs->set_fan_control_mode) { - pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); - pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); - return -EINVAL; - } - amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); + err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); + if (err) + return -EINVAL; + return count; } @@ -2933,7 +2807,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, enum pp_power_limit_level pp_limit_level) { struct amdgpu_device *adev = dev_get_drvdata(dev); - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; enum pp_power_type power_type = to_sensor_dev_attr(attr)->index; uint32_t limit; ssize_t size; @@ -2944,16 +2817,13 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - if ( !(pp_funcs && pp_funcs->get_power_limit)) - return -ENODATA; - r = pm_runtime_get_sync(adev_to_drm(adev)->dev); if (r < 0) { pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; } - r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, + r = amdgpu_dpm_get_power_limit(adev, &limit, pp_limit_level, power_type); if (!r) @@ -3008,7 +2878,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, size_t count) { struct amdgpu_device *adev = dev_get_drvdata(dev); - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int limit_type = to_sensor_dev_attr(attr)->index; int err; u32 value; @@ -3034,10 +2903,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, return err; } - if (pp_funcs && pp_funcs->set_power_limit) - err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); - else - err = -EINVAL; + err = amdgpu_dpm_set_power_limit(adev, value); pm_runtime_mark_last_busy(adev_to_drm(adev)->dev); pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); @@ -3310,6 +3176,7 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; + uint32_t speed = 0; /* under multi-vf mode, the hwmon attributes are all not supported */ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) @@ -3374,20 +3241,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) return 0; - if (!is_support_sw_smu(adev)) { - /* mask fan attributes if we have no bindings for this asic to expose */ - if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm && - attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ - (!adev->powerplay.pp_funcs->get_fan_control_mode && - attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ - effective_mode &= ~S_IRUGO; + /* mask fan attributes if we have no bindings for this asic to expose */ + if (((amdgpu_dpm_get_fan_speed_pwm(adev, &speed) == -EINVAL) && + attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ + ((amdgpu_dpm_get_fan_control_mode(adev, &speed) == -EOPNOTSUPP) && + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ + effective_mode &= ~S_IRUGO; - if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm && - attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ - 
(!adev->powerplay.pp_funcs->set_fan_control_mode && - attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ - effective_mode &= ~S_IWUSR; - } + if (((amdgpu_dpm_set_fan_speed_pwm(adev, speed) == -EINVAL) && + attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ + ((amdgpu_dpm_set_fan_control_mode(adev, speed) == -EOPNOTSUPP) && + attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ + effective_mode &= ~S_IWUSR; if (((adev->family == AMDGPU_FAMILY_SI) || ((adev->flags & AMD_IS_APU) && @@ -3404,22 +3269,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, (attr == &sensor_dev_attr_power1_average.dev_attr.attr)) return 0; - if (!is_support_sw_smu(adev)) { - /* hide max/min values if we can't both query and manage the fan */ - if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm && - !adev->powerplay.pp_funcs->get_fan_speed_pwm) && - (!adev->powerplay.pp_funcs->set_fan_speed_rpm && - !adev->powerplay.pp_funcs->get_fan_speed_rpm) && - (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) - return 0; + /* hide max/min values if we can't both query and manage the fan */ + if (((amdgpu_dpm_set_fan_speed_pwm(adev, speed) == -EINVAL) && + (amdgpu_dpm_get_fan_speed_pwm(adev, &speed) == -EINVAL) && + (amdgpu_dpm_set_fan_speed_rpm(adev, speed) == -EINVAL) && + (amdgpu_dpm_get_fan_speed_rpm(adev, &speed) == -EINVAL)) && + (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || + attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) + return 0; - if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm && - !adev->powerplay.pp_funcs->get_fan_speed_rpm) && - (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || - attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) - return 0; - } + if ((amdgpu_dpm_set_fan_speed_rpm(adev, speed) == -EINVAL) && + (amdgpu_dpm_get_fan_speed_rpm(adev, &speed) == -EINVAL) && + (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || + attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) + return 0; if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ @@ -3549,14 +3412,15 @@ static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m, uint16_t *p_val; uint32_t size; int i; + uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev); - if (is_support_cclk_dpm(adev)) { - p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t), + if (amdgpu_dpm_is_cclk_dpm_supported(adev)) { + p_val = kcalloc(num_cpu_cores, sizeof(uint16_t), GFP_KERNEL); if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK, (void *)p_val, &size)) { - for (i = 0; i < adev->smu.cpu_core_num; i++) + for (i = 0; i < num_cpu_cores; i++) seq_printf(m, "\t%u MHz (CPU%d)\n", *(p_val + i), i); } @@ -3684,27 +3548,11 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused) return r; } - if (!adev->pm.dpm_enabled) { - seq_printf(m, "dpm not enabled\n"); - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); - return 0; - } - - if (!is_support_sw_smu(adev) && - adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { - mutex_lock(&adev->pm.mutex); - if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) - adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); - else - seq_printf(m, "Debugfs support not implemented for this asic\n"); - mutex_unlock(&adev->pm.mutex); - r = 0; - } else { + if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) 
{ r = amdgpu_debugfs_pm_info_pp(m, adev); + if (r) + goto out; } - if (r) - goto out; amdgpu_device_ip_get_clockgating_state(adev, &flags); @@ -3730,21 +3578,18 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf, size_t size, loff_t *pos) { struct amdgpu_device *adev = file_inode(f)->i_private; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - void *pp_handle = adev->powerplay.pp_handle; size_t smu_prv_buf_size; void *smu_prv_buf; + int ret = 0; if (amdgpu_in_reset(adev)) return -EPERM; if (adev->in_suspend && !adev->in_runpm) return -EPERM; - if (pp_funcs && pp_funcs->get_smu_prv_buf_details) - pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf, - &smu_prv_buf_size); - else - return -ENOSYS; + ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size); + if (ret) + return ret; if (!smu_prv_buf || !smu_prv_buf_size) return -EINVAL; @@ -3777,6 +3622,6 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) &amdgpu_debugfs_pm_prv_buffer_fops, adev->pm.smu_prv_buffer_size); - amdgpu_smu_stb_debug_fs_init(adev); + amdgpu_dpm_stb_debug_fs_init(adev); #endif } diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index b444937c3b68..ef0df75b4040 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -262,9 +262,6 @@ enum amdgpu_pcie_gen { #define amdgpu_dpm_pre_set_power_state(adev) \ ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) -#define amdgpu_dpm_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle)) - #define amdgpu_dpm_post_set_power_state(adev) \ ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) @@ -280,100 +277,13 @@ enum amdgpu_pcie_gen { #define amdgpu_dpm_enable_bapm(adev, e) \ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) -#define amdgpu_dpm_set_fan_control_mode(adev, m) \ - ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m))) - -#define amdgpu_dpm_get_fan_control_mode(adev) \ - ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_set_fan_speed_pwm(adev, s) \ - ((adev)->powerplay.pp_funcs->set_fan_speed_pwm((adev)->powerplay.pp_handle, (s))) - -#define amdgpu_dpm_get_fan_speed_pwm(adev, s) \ - ((adev)->powerplay.pp_funcs->get_fan_speed_pwm((adev)->powerplay.pp_handle, (s))) - -#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \ - ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s)) - -#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \ - ((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s)) - -#define amdgpu_dpm_force_performance_level(adev, l) \ - ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l))) - -#define amdgpu_dpm_get_current_power_state(adev) \ - ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_get_pp_num_states(adev, data) \ - ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data)) - -#define amdgpu_dpm_get_pp_table(adev, table) \ - ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table)) - -#define amdgpu_dpm_set_pp_table(adev, buf, size) \ - ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size)) - -#define amdgpu_dpm_print_clock_levels(adev, type, buf) \ - 
((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf)) - -#define amdgpu_dpm_force_clock_level(adev, type, level) \ - ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level)) - -#define amdgpu_dpm_get_sclk_od(adev) \ - ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_set_sclk_od(adev, value) \ - ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value)) - -#define amdgpu_dpm_get_mclk_od(adev) \ - ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_set_mclk_od(adev, value) \ - ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value)) - -#define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \ - ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state)) - #define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) -#define amdgpu_dpm_get_vce_clock_state(adev, i) \ - ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i))) - -#define amdgpu_dpm_get_performance_level(adev) \ - ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle)) - #define amdgpu_dpm_reset_power_profile_state(adev, request) \ ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ (adev)->powerplay.pp_handle, request)) -#define amdgpu_dpm_get_power_profile_mode(adev, buf) \ - ((adev)->powerplay.pp_funcs->get_power_profile_mode(\ - (adev)->powerplay.pp_handle, buf)) - -#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \ - ((adev)->powerplay.pp_funcs->set_power_profile_mode(\ - (adev)->powerplay.pp_handle, parameter, size)) - -#define amdgpu_dpm_set_fine_grain_clk_vol(adev, type, parameter, size) \ - ((adev)->powerplay.pp_funcs->set_fine_grain_clk_vol(\ - (adev)->powerplay.pp_handle, type, parameter, size)) - -#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \ - ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\ - (adev)->powerplay.pp_handle, type, parameter, size)) - -#define amdgpu_dpm_get_ppfeature_status(adev, buf) \ - ((adev)->powerplay.pp_funcs->get_ppfeature_status(\ - (adev)->powerplay.pp_handle, (buf))) - -#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \ - ((adev)->powerplay.pp_funcs->set_ppfeature_status(\ - (adev)->powerplay.pp_handle, (ppfeatures))) - -#define amdgpu_dpm_get_gpu_metrics(adev, table) \ - ((adev)->powerplay.pp_funcs->get_gpu_metrics((adev)->powerplay.pp_handle, table)) - struct amdgpu_dpm { struct amdgpu_ps *ps; /* number of valid power states */ @@ -606,4 +516,74 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, enum gfx_change_state state); int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, void *umc_ecc); +struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, + uint32_t idx); +void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, enum amd_pm_state_type *state); +void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, + enum amd_pm_state_type state); +enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev); +int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, + enum amd_dpm_forced_level level); +int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, + struct pp_states_info *states); +int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, + enum amd_pp_task 
task_id, + enum amd_pm_state_type *user_state); +int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table); +int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, + uint32_t type, + long *input, + uint32_t size); +int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, + uint32_t type, + long *input, + uint32_t size); +int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, + enum pp_clock_type type, + char *buf); +int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, + uint64_t ppfeature_masks); +int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf); +int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, + enum pp_clock_type type, + uint32_t mask); +int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev); +int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value); +int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev); +int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value); +int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, + char *buf); +int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, + long *input, uint32_t size); +int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table); +int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, + uint32_t *fan_mode); +int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, + uint32_t speed); +int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, + uint32_t *speed); +int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, + uint32_t *speed); +int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, + uint32_t speed); +int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, + uint32_t mode); +int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, + uint32_t *limit, + enum pp_power_limit_level pp_limit_level, + enum pp_power_type power_type); +int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, + uint32_t limit); +int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev); +int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, + struct seq_file *m); +int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, + void **addr, + size_t *size); +int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev); +int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, + const char *buf, + size_t size); +int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev); +void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 9d5a5a1f15df..7191b3928e8e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -470,9 +470,6 @@ bool is_support_cclk_dpm(struct amdgpu_device *adev) { struct smu_context *smu = &adev->smu; - if (!is_support_sw_smu(adev)) - return false; - if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) return false; -- cgit From 13f5dbd6e3d9a4ea0a8d061bb2a2f19bb21cdacc Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Nov 2021 11:09:08 +0800 Subject: drm/amd/pm: do not expose power implementation details to display Display is another client of our power APIs. It's not proper to spike into power implementation details there. 
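The conversion below therefore routes every display-side request through an amdgpu_dpm_*() wrapper: the wrapper checks for the optional pp_funcs callback and returns -EOPNOTSUPP when it is absent, which helpers such as pp_nv_set_display_count then map to PP_SMU_RESULT_UNSUPPORTED. What follows is a minimal, self-contained sketch of that layering only; the structs are pared down to a single callback and the surrounding names are invented stand-ins, not the driver's real prototypes.

/*
 * Toy model of the wrapper layering used in this series: the client never
 * touches the ops table directly, only the dpm-style wrapper.
 */
#include <errno.h>
#include <stdio.h>

struct pp_funcs {
	/* optional backend callback; may be NULL on some ASICs */
	int (*set_active_display_count)(void *handle, unsigned int count);
};

struct device_ctx {
	const struct pp_funcs *funcs;
	void *handle;
};

/* dpm-layer wrapper: hides the callback table from its clients */
static int dpm_set_active_display_count(struct device_ctx *dev, unsigned int count)
{
	if (!dev->funcs->set_active_display_count)
		return -EOPNOTSUPP;

	return dev->funcs->set_active_display_count(dev->handle, count);
}

/* display-layer client: only sees the wrapper and its error codes */
static const char *client_set_display_count(struct device_ctx *dev, unsigned int count)
{
	int ret = dpm_set_active_display_count(dev, count);

	if (ret == -EOPNOTSUPP)
		return "unsupported";
	return ret ? "fail" : "ok";
}

static int backend_impl(void *handle, unsigned int count)
{
	(void)handle;
	printf("display count set to %u\n", count);
	return 0;
}

int main(void)
{
	const struct pp_funcs with_cb = { .set_active_display_count = backend_impl };
	const struct pp_funcs without_cb = { 0 };
	struct device_ctx a = { .funcs = &with_cb, .handle = NULL };
	struct device_ctx b = { .funcs = &without_cb, .handle = NULL };

	printf("%s\n", client_set_display_count(&a, 2));	/* "ok" */
	printf("%s\n", client_set_display_count(&b, 2));	/* "unsupported" */
	return 0;
}

Returning -EOPNOTSUPP from the wrapper, rather than silently succeeding, lets each caller decide whether a missing backend hook is a hard failure or merely "unsupported".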
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 6 +- .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 246 ++++++++------------- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 218 ++++++++++++++++++ drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 38 ++++ 4 files changed, 344 insertions(+), 164 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index afadec2d1993..6692c8f75f23 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -2179,12 +2179,8 @@ static void s3_handle_mst(struct drm_device *dev, bool suspend) static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) { - struct smu_context *smu = &adev->smu; int ret = 0; - if (!is_support_sw_smu(adev)) - return 0; - /* This interface is for dGPU Navi1x.Linux dc-pplib interface depends * on window driver dc implementation. * For Navi1x, clock settings of dcn watermarks are fixed. the settings @@ -2223,7 +2219,7 @@ static int amdgpu_dm_smu_write_watermarks_table(struct amdgpu_device *adev) return 0; } - ret = smu_write_watermarks_table(smu); + ret = amdgpu_dpm_write_watermarks_table(adev); if (ret) { DRM_ERROR("Failed to update WMTABLE!\n"); return ret; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index eba270121698..46550811da00 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -99,10 +99,7 @@ bool dm_pp_apply_display_requirements( adev->pm.pm_display_cfg.displays[i].controller_id = dc_cfg->pipe_idx + 1; } - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_configuration_change) - adev->powerplay.pp_funcs->display_configuration_change( - adev->powerplay.pp_handle, - &adev->pm.pm_display_cfg); + amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg); amdgpu_pm_compute_clocks(adev); } @@ -298,31 +295,25 @@ bool dm_pp_get_clock_levels_by_type( struct dm_pp_clock_levels *dc_clks) { struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; struct amd_pp_clocks pp_clks = { 0 }; struct amd_pp_simple_clock_info validation_clks = { 0 }; uint32_t i; - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_clock_by_type) { - if (adev->powerplay.pp_funcs->get_clock_by_type(pp_handle, - dc_to_pp_clock_type(clk_type), &pp_clks)) { - /* Error in pplib. Provide default values. */ - get_default_clock_levels(clk_type, dc_clks); - return true; - } + if (amdgpu_dpm_get_clock_by_type(adev, + dc_to_pp_clock_type(clk_type), &pp_clks)) { + /* Error in pplib. Provide default values. */ + get_default_clock_levels(clk_type, dc_clks); + return true; } pp_to_dc_clock_levels(&pp_clks, dc_clks, clk_type); - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_display_mode_validation_clocks) { - if (adev->powerplay.pp_funcs->get_display_mode_validation_clocks( - pp_handle, &validation_clks)) { - /* Error in pplib. Provide default values. */ - DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); - validation_clks.engine_max_clock = 72000; - validation_clks.memory_max_clock = 80000; - validation_clks.level = 0; - } + if (amdgpu_dpm_get_display_mode_validation_clks(adev, &validation_clks)) { + /* Error in pplib. Provide default values. 
*/ + DRM_INFO("DM_PPLIB: Warning: using default validation clocks!\n"); + validation_clks.engine_max_clock = 72000; + validation_clks.memory_max_clock = 80000; + validation_clks.level = 0; } DRM_INFO("DM_PPLIB: Validation clocks:\n"); @@ -370,18 +361,14 @@ bool dm_pp_get_clock_levels_by_type_with_latency( struct dm_pp_clock_levels_with_latency *clk_level_info) { struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; struct pp_clock_levels_with_latency pp_clks = { 0 }; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int ret; - if (pp_funcs && pp_funcs->get_clock_by_type_with_latency) { - ret = pp_funcs->get_clock_by_type_with_latency(pp_handle, - dc_to_pp_clock_type(clk_type), - &pp_clks); - if (ret) - return false; - } + ret = amdgpu_dpm_get_clock_by_type_with_latency(adev, + dc_to_pp_clock_type(clk_type), + &pp_clks); + if (ret) + return false; pp_to_dc_clock_levels_with_latency(&pp_clks, clk_level_info, clk_type); @@ -394,18 +381,14 @@ bool dm_pp_get_clock_levels_by_type_with_voltage( struct dm_pp_clock_levels_with_voltage *clk_level_info) { struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; struct pp_clock_levels_with_voltage pp_clk_info = {0}; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int ret; - if (pp_funcs && pp_funcs->get_clock_by_type_with_voltage) { - ret = pp_funcs->get_clock_by_type_with_voltage(pp_handle, - dc_to_pp_clock_type(clk_type), - &pp_clk_info); - if (ret) - return false; - } + ret = amdgpu_dpm_get_clock_by_type_with_voltage(adev, + dc_to_pp_clock_type(clk_type), + &pp_clk_info); + if (ret) + return false; pp_to_dc_clock_levels_with_voltage(&pp_clk_info, clk_level_info, clk_type); @@ -417,19 +400,16 @@ bool dm_pp_notify_wm_clock_changes( struct dm_pp_wm_sets_with_clock_ranges *wm_with_clock_ranges) { struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; /* * Limit this watermark setting for Polaris for now * TODO: expand this to other ASICs */ - if ((adev->asic_type >= CHIP_POLARIS10) && (adev->asic_type <= CHIP_VEGAM) - && pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) { - if (!pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, + if ((adev->asic_type >= CHIP_POLARIS10) && + (adev->asic_type <= CHIP_VEGAM) && + !amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, (void *)wm_with_clock_ranges)) return true; - } return false; } @@ -456,12 +436,10 @@ bool dm_pp_apply_clock_for_voltage_request( if (!pp_clock_request.clock_type) return false; - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->display_clock_voltage_request) - ret = adev->powerplay.pp_funcs->display_clock_voltage_request( - adev->powerplay.pp_handle, - &pp_clock_request); - if (ret) + ret = amdgpu_dpm_display_clock_voltage_request(adev, &pp_clock_request); + if (ret && (ret != -EOPNOTSUPP)) return false; + return true; } @@ -471,15 +449,8 @@ bool dm_pp_get_static_clocks( { struct amdgpu_device *adev = ctx->driver_context; struct amd_pp_clock_info pp_clk_info = {0}; - int ret = 0; - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_current_clocks) - ret = adev->powerplay.pp_funcs->get_current_clocks( - adev->powerplay.pp_handle, - &pp_clk_info); - else - return false; - if (ret) + if (amdgpu_dpm_get_current_clocks(adev, &pp_clk_info)) return false; static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state); @@ 
-494,8 +465,6 @@ static void pp_rv_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct dm_pp_wm_sets_with_clock_ranges_soc15 wm_with_clock_ranges; struct dm_pp_clock_range_for_dmif_wm_set_soc15 *wm_dce_clocks = wm_with_clock_ranges.wm_dmif_clocks_ranges; struct dm_pp_clock_range_for_mcif_wm_set_soc15 *wm_soc_clocks = wm_with_clock_ranges.wm_mcif_clocks_ranges; @@ -536,72 +505,48 @@ static void pp_rv_set_wm_ranges(struct pp_smu *pp, ranges->writer_wm_sets[i].min_drain_clk_mhz * 1000; } - if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) - pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, - &wm_with_clock_ranges); + amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, + &wm_with_clock_ranges); } static void pp_rv_set_pme_wa_enable(struct pp_smu *pp) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (pp_funcs && pp_funcs->notify_smu_enable_pwe) - pp_funcs->notify_smu_enable_pwe(pp_handle); + amdgpu_dpm_notify_smu_enable_pwe(adev); } static void pp_rv_set_active_display_count(struct pp_smu *pp, int count) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (!pp_funcs || !pp_funcs->set_active_display_count) - return; - - pp_funcs->set_active_display_count(pp_handle, count); + amdgpu_dpm_set_active_display_count(adev, count); } static void pp_rv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int clock) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk) - return; - pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, clock); + amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, clock); } static void pp_rv_set_hard_min_dcefclk_by_freq(struct pp_smu *pp, int clock) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (!pp_funcs || !pp_funcs->set_hard_min_dcefclk_by_freq) - return; - - pp_funcs->set_hard_min_dcefclk_by_freq(pp_handle, clock); + amdgpu_dpm_set_hard_min_dcefclk_by_freq(adev, clock); } static void pp_rv_set_hard_min_fclk_by_freq(struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs || !pp_funcs->set_hard_min_fclk_by_freq) - return; - pp_funcs->set_hard_min_fclk_by_freq(pp_handle, mhz); + amdgpu_dpm_set_hard_min_fclk_by_freq(adev, mhz); } static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, @@ -609,11 +554,8 @@ static enum pp_smu_status pp_nv_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (pp_funcs && 
pp_funcs->set_watermarks_for_clocks_ranges) - pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges); + amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges); return PP_SMU_RESULT_OK; } @@ -622,14 +564,13 @@ static enum pp_smu_status pp_nv_set_display_count(struct pp_smu *pp, int count) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; - if (!pp_funcs || !pp_funcs->set_active_display_count) + ret = amdgpu_dpm_set_active_display_count(adev, count); + if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; - - /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ - if (pp_funcs->set_active_display_count(pp_handle, count)) + else if (ret) + /* 0: successful or smu.ppt_funcs->set_display_count = NULL; 1: fail */ return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -640,14 +581,13 @@ pp_nv_set_min_deep_sleep_dcfclk(struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - - if (!pp_funcs || !pp_funcs->set_min_deep_sleep_dcefclk) - return PP_SMU_RESULT_UNSUPPORTED; + int ret = 0; /* 0: successful or smu.ppt_funcs->set_deep_sleep_dcefclk = NULL;1: fail */ - if (pp_funcs->set_min_deep_sleep_dcefclk(pp_handle, mhz)) + ret = amdgpu_dpm_set_min_deep_sleep_dcefclk(adev, mhz); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -658,12 +598,8 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct pp_display_clock_request clock_req; - - if (!pp_funcs || !pp_funcs->display_clock_voltage_request) - return PP_SMU_RESULT_UNSUPPORTED; + int ret = 0; clock_req.clock_type = amd_pp_dcef_clock; clock_req.clock_freq_in_khz = mhz * 1000; @@ -671,7 +607,10 @@ static enum pp_smu_status pp_nv_set_hard_min_dcefclk_by_freq( /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ - if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req)) + ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -682,12 +621,8 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct pp_display_clock_request clock_req; - - if (!pp_funcs || !pp_funcs->display_clock_voltage_request) - return PP_SMU_RESULT_UNSUPPORTED; + int ret = 0; clock_req.clock_type = amd_pp_mem_clock; clock_req.clock_freq_in_khz = mhz * 1000; @@ -695,7 +630,10 @@ pp_nv_set_hard_min_uclk_by_freq(struct pp_smu *pp, int mhz) /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ - if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req)) + ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else 
if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -706,14 +644,10 @@ static enum pp_smu_status pp_nv_set_pstate_handshake_support( { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (pp_funcs && pp_funcs->display_disable_memory_clock_switch) { - if (pp_funcs->display_disable_memory_clock_switch(pp_handle, - !pstate_handshake_supported)) - return PP_SMU_RESULT_FAIL; - } + if (amdgpu_dpm_display_disable_memory_clock_switch(adev, + !pstate_handshake_supported)) + return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; } @@ -723,12 +657,8 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct pp_display_clock_request clock_req; - - if (!pp_funcs || !pp_funcs->display_clock_voltage_request) - return PP_SMU_RESULT_UNSUPPORTED; + int ret = 0; switch (clock_id) { case PP_SMU_NV_DISPCLK: @@ -748,7 +678,10 @@ static enum pp_smu_status pp_nv_set_voltage_by_freq(struct pp_smu *pp, /* 0: successful or smu.ppt_funcs->display_clock_voltage_request = NULL * 1: fail */ - if (pp_funcs->display_clock_voltage_request(pp_handle, &clock_req)) + ret = amdgpu_dpm_display_clock_voltage_request(adev, &clock_req); + if (ret == -EOPNOTSUPP) + return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) return PP_SMU_RESULT_FAIL; return PP_SMU_RESULT_OK; @@ -759,16 +692,16 @@ static enum pp_smu_status pp_nv_get_maximum_sustainable_clocks( { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; - if (!pp_funcs || !pp_funcs->get_max_sustainable_clocks_by_dc) + ret = amdgpu_dpm_get_max_sustainable_clocks_by_dc(adev, + max_clocks); + if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; - if (!pp_funcs->get_max_sustainable_clocks_by_dc(pp_handle, max_clocks)) - return PP_SMU_RESULT_OK; - - return PP_SMU_RESULT_FAIL; + return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, @@ -776,18 +709,17 @@ static enum pp_smu_status pp_nv_get_uclk_dpm_states(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; - if (!pp_funcs || !pp_funcs->get_uclk_dpm_states) + ret = amdgpu_dpm_get_uclk_dpm_states(adev, + clock_values_in_khz, + num_states); + if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; - if (!pp_funcs->get_uclk_dpm_states(pp_handle, - clock_values_in_khz, - num_states)) - return PP_SMU_RESULT_OK; - - return PP_SMU_RESULT_FAIL; + return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_rn_get_dpm_clock_table( @@ -795,16 +727,15 @@ static enum pp_smu_status pp_rn_get_dpm_clock_table( { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; - if (!pp_funcs || !pp_funcs->get_dpm_clock_table) + ret = 
amdgpu_dpm_get_dpm_clock_table(adev, clock_table); + if (ret == -EOPNOTSUPP) return PP_SMU_RESULT_UNSUPPORTED; + else if (ret) + return PP_SMU_RESULT_FAIL; - if (!pp_funcs->get_dpm_clock_table(pp_handle, clock_table)) - return PP_SMU_RESULT_OK; - - return PP_SMU_RESULT_FAIL; + return PP_SMU_RESULT_OK; } static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, @@ -812,11 +743,8 @@ static enum pp_smu_status pp_rn_set_wm_ranges(struct pp_smu *pp, { const struct dc_context *ctx = pp->dm; struct amdgpu_device *adev = ctx->driver_context; - void *pp_handle = adev->powerplay.pp_handle; - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - if (pp_funcs && pp_funcs->set_watermarks_for_clocks_ranges) - pp_funcs->set_watermarks_for_clocks_ranges(pp_handle, ranges); + amdgpu_dpm_set_watermarks_for_clocks_ranges(adev, ranges); return PP_SMU_RESULT_OK; } diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 494cf1459d0f..6adc174bb981 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -1659,6 +1659,14 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, } } +int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) +{ + if (!is_support_sw_smu(adev)) + return 0; + + return smu_write_watermarks_table(&adev->smu); +} + int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, uint64_t event_arg) @@ -2163,3 +2171,213 @@ void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) amdgpu_smu_stb_debug_fs_init(adev); } + +int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, + const struct amd_pp_display_configuration *input) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->display_configuration_change) + return 0; + + return pp_funcs->display_configuration_change(adev->powerplay.pp_handle, + input); +} + +int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_clock_by_type) + return 0; + + return pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, + type, + clocks); +} + +int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, + struct amd_pp_simple_clock_info *clocks) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_display_mode_validation_clocks) + return 0; + + return pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, + clocks); +} + +int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_clock_by_type_with_latency) + return 0; + + return pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, + type, + clocks); +} + +int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_clock_by_type_with_voltage) + return 0; + + return pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, + type, + clocks); +} + +int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, + void *clock_ranges) +{ + const struct amd_pm_funcs *pp_funcs = 
adev->powerplay.pp_funcs; + + if (!pp_funcs->set_watermarks_for_clocks_ranges) + return -EOPNOTSUPP; + + return pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, + clock_ranges); +} + +int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, + struct pp_display_clock_request *clock) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->display_clock_voltage_request) + return -EOPNOTSUPP; + + return pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, + clock); +} + +int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, + struct amd_pp_clock_info *clocks) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_current_clocks) + return -EOPNOTSUPP; + + return pp_funcs->get_current_clocks(adev->powerplay.pp_handle, + clocks); +} + +void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->notify_smu_enable_pwe) + return; + + pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); +} + +int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, + uint32_t count) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_active_display_count) + return -EOPNOTSUPP; + + return pp_funcs->set_active_display_count(adev->powerplay.pp_handle, + count); +} + +int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, + uint32_t clock) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_min_deep_sleep_dcefclk) + return -EOPNOTSUPP; + + return pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, + clock); +} + +void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, + uint32_t clock) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_hard_min_dcefclk_by_freq) + return; + + pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, + clock); +} + +void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, + uint32_t clock) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->set_hard_min_fclk_by_freq) + return; + + pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, + clock); +} + +int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, + bool disable_memory_clock_switch) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->display_disable_memory_clock_switch) + return 0; + + return pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, + disable_memory_clock_switch); +} + +int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, + struct pp_smu_nv_clock_table *max_clocks) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_max_sustainable_clocks_by_dc) + return -EOPNOTSUPP; + + return pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, + max_clocks); +} + +enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, + unsigned int *clock_values_in_khz, + unsigned int *num_states) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_uclk_dpm_states) + return -EOPNOTSUPP; + + return pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, + clock_values_in_khz, + num_states); +} + +int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, + struct 
dpm_clocks *clock_table) +{ + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + + if (!pp_funcs->get_dpm_clock_table) + return -EOPNOTSUPP; + + return pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, + clock_table); +} diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index ef0df75b4040..649041dadf52 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -508,6 +508,7 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, enum pp_clock_type type, uint32_t min, uint32_t max); +int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev); int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, uint64_t event_arg); int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value); @@ -586,4 +587,41 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, size_t size); int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev); void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev); +int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, + const struct amd_pp_display_configuration *input); +int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, + enum amd_pp_clock_type type, + struct amd_pp_clocks *clocks); +int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, + struct amd_pp_simple_clock_info *clocks); +int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks); +int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks); +int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, + void *clock_ranges); +int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, + struct pp_display_clock_request *clock); +int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, + struct amd_pp_clock_info *clocks); +void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev); +int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, + uint32_t count); +int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, + uint32_t clock); +void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, + uint32_t clock); +void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, + uint32_t clock); +int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, + bool disable_memory_clock_switch); +int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, + struct pp_smu_nv_clock_table *max_clocks); +enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, + unsigned int *clock_values_in_khz, + unsigned int *num_states); +int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, + struct dpm_clocks *clock_table); #endif -- cgit From d448157665870bfbf9b6baa5991cd9d6bc98c7e6 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 11 Nov 2021 10:02:20 +0800 Subject: drm/amd/pm: do not expose those APIs used internally only in amdgpu_dpm.c Move them to amdgpu_dpm.c instead. 
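These macros are pure call-forwarders into pp_funcs, and after this change amdgpu_dpm.c is their only user, so defining them at the top of that file keeps the shared header free of the pp_handle/pp_funcs plumbing. A standalone sketch of that file-local macro pattern follows; the ops table and every name in it are invented for illustration and are not the real powerplay interface.

/*
 * File-local convenience macro: it lives in the .c file next to its only
 * users instead of in a widely included header.
 */
#include <stdio.h>

struct backend_ops {
	void (*pre_update)(void *handle);
};

struct dev {
	const struct backend_ops *ops;
	void *handle;
};

#define dev_pre_update(d) \
	((d)->ops->pre_update((d)->handle))

static void backend_pre_update(void *handle)
{
	(void)handle;
	printf("pre-update hook ran\n");
}

int main(void)
{
	const struct backend_ops ops = { .pre_update = backend_pre_update };
	struct dev d = { .ops = &ops, .handle = NULL };

	dev_pre_update(&d);
	return 0;
}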
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 25 +++++++++++++++++++++++-- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 23 ----------------------- 2 files changed, 23 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 6adc174bb981..dce9b02460e6 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -34,6 +34,27 @@ #define WIDTH_4K 3840 +#define amdgpu_dpm_pre_set_power_state(adev) \ + ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_post_set_power_state(adev) \ + ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_display_configuration_changed(adev) \ + ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_print_power_state(adev, ps) \ + ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps))) + +#define amdgpu_dpm_vblank_too_short(adev) \ + ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_enable_bapm(adev, e) \ + ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) + +#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ + ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) + void amdgpu_dpm_print_class_info(u32 class, u32 class2) { const char *s; @@ -120,7 +141,7 @@ void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, pr_cont("\n"); } -void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) +static void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) { struct drm_device *ddev = adev_to_drm(adev); struct drm_crtc *crtc; @@ -168,7 +189,7 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) return vblank_time_us; } -u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) +static u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) { struct drm_device *dev = adev_to_drm(adev); struct drm_crtc *crtc; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 649041dadf52..2897d395ff31 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -259,27 +259,6 @@ enum amdgpu_pcie_gen { AMDGPU_PCIE_GEN_INVALID = 0xffff }; -#define amdgpu_dpm_pre_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_post_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_display_configuration_changed(adev) \ - ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_print_power_state(adev, ps) \ - ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps))) - -#define amdgpu_dpm_vblank_too_short(adev) \ - ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_enable_bapm(adev, e) \ - ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) - -#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ - ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) - #define amdgpu_dpm_reset_power_profile_state(adev, request) \ ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ 
(adev)->powerplay.pp_handle, request)) @@ -420,8 +399,6 @@ void amdgpu_dpm_print_cap_info(u32 caps); void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, struct amdgpu_ps *rps); u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); -u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); -void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev); int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, void *data, uint32_t *size); -- cgit From a79110f2dc0194326d2e8c2cf83db6c784c37e97 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 11 Nov 2021 10:12:29 +0800 Subject: drm/amd/pm: do not expose those APIs used internally only in si_dpm.c Move them to si_dpm.c instead. Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 25 ------- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 25 ------- drivers/gpu/drm/amd/pm/powerplay/si_dpm.c | 106 ++++++++++++++++++++---------- drivers/gpu/drm/amd/pm/powerplay/si_dpm.h | 15 +++-- 4 files changed, 83 insertions(+), 88 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index dce9b02460e6..c97e79862779 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -894,31 +894,6 @@ void amdgpu_add_thermal_controller(struct amdgpu_device *adev) } } -enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev, - u32 sys_mask, - enum amdgpu_pcie_gen asic_gen, - enum amdgpu_pcie_gen default_gen) -{ - switch (asic_gen) { - case AMDGPU_PCIE_GEN1: - return AMDGPU_PCIE_GEN1; - case AMDGPU_PCIE_GEN2: - return AMDGPU_PCIE_GEN2; - case AMDGPU_PCIE_GEN3: - return AMDGPU_PCIE_GEN3; - default: - if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) && - (default_gen == AMDGPU_PCIE_GEN3)) - return AMDGPU_PCIE_GEN3; - else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) && - (default_gen == AMDGPU_PCIE_GEN2)) - return AMDGPU_PCIE_GEN2; - else - return AMDGPU_PCIE_GEN1; - } - return AMDGPU_PCIE_GEN1; -} - struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx) { diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 2897d395ff31..108706ee8760 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -45,19 +45,6 @@ enum amdgpu_int_thermal_type { THERMAL_TYPE_KV, }; -enum amdgpu_dpm_auto_throttle_src { - AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, - AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL -}; - -enum amdgpu_dpm_event_src { - AMDGPU_DPM_EVENT_SRC_ANALOG = 0, - AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1, - AMDGPU_DPM_EVENT_SRC_DIGITAL = 2, - AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, - AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 -}; - struct amdgpu_ps { u32 caps; /* vbios flags */ u32 class; /* vbios flags */ @@ -252,13 +239,6 @@ struct amdgpu_dpm_fan { bool ucode_fan_control; }; -enum amdgpu_pcie_gen { - AMDGPU_PCIE_GEN1 = 0, - AMDGPU_PCIE_GEN2 = 1, - AMDGPU_PCIE_GEN3 = 2, - AMDGPU_PCIE_GEN_INVALID = 0xffff -}; - #define amdgpu_dpm_reset_power_profile_state(adev, request) \ ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ (adev)->powerplay.pp_handle, request)) @@ -411,11 +391,6 @@ void amdgpu_free_extended_power_table(struct amdgpu_device *adev); void amdgpu_add_thermal_controller(struct amdgpu_device *adev); -enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev, - u32 sys_mask, - enum amdgpu_pcie_gen asic_gen, - enum amdgpu_pcie_gen default_gen); - 
struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx); diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c index 81f82aa05ec2..2823376797d3 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c @@ -96,6 +96,19 @@ union pplib_clock_info { struct _ATOM_PPLIB_SI_CLOCK_INFO si; }; +enum si_dpm_auto_throttle_src { + SI_DPM_AUTO_THROTTLE_SRC_THERMAL, + SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL +}; + +enum si_dpm_event_src { + SI_DPM_EVENT_SRC_ANALOG = 0, + SI_DPM_EVENT_SRC_EXTERNAL = 1, + SI_DPM_EVENT_SRC_DIGITAL = 2, + SI_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, + SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 +}; + static const u32 r600_utc[R600_PM_NUMBER_OF_TC] = { R600_UTC_DFLT_00, @@ -3718,25 +3731,25 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) { struct rv7xx_power_info *pi = rv770_get_pi(adev); bool want_thermal_protection; - enum amdgpu_dpm_event_src dpm_event_src; + enum si_dpm_event_src dpm_event_src; switch (sources) { case 0: default: want_thermal_protection = false; break; - case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL): + case (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL): want_thermal_protection = true; - dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL; + dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL; break; - case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL): + case (1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL): want_thermal_protection = true; - dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL; + dpm_event_src = SI_DPM_EVENT_SRC_EXTERNAL; break; - case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | - (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)): + case ((1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | + (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL)): want_thermal_protection = true; - dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; + dpm_event_src = SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; break; } @@ -3750,7 +3763,7 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) } static void si_enable_auto_throttle_source(struct amdgpu_device *adev, - enum amdgpu_dpm_auto_throttle_src source, + enum si_dpm_auto_throttle_src source, bool enable) { struct rv7xx_power_info *pi = rv770_get_pi(adev); @@ -4927,6 +4940,31 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev, return 0; } +static enum si_pcie_gen si_gen_pcie_gen_support(struct amdgpu_device *adev, + u32 sys_mask, + enum si_pcie_gen asic_gen, + enum si_pcie_gen default_gen) +{ + switch (asic_gen) { + case SI_PCIE_GEN1: + return SI_PCIE_GEN1; + case SI_PCIE_GEN2: + return SI_PCIE_GEN2; + case SI_PCIE_GEN3: + return SI_PCIE_GEN3; + default: + if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) && + (default_gen == SI_PCIE_GEN3)) + return SI_PCIE_GEN3; + else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) && + (default_gen == SI_PCIE_GEN2)) + return SI_PCIE_GEN2; + else + return SI_PCIE_GEN1; + } + return SI_PCIE_GEN1; +} + static int si_populate_smc_acpi_state(struct amdgpu_device *adev, SISLANDS_SMC_STATETABLE *table) { @@ -4989,10 +5027,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev, &table->ACPIState.level.std_vddc); } table->ACPIState.level.gen2PCIE = - (u8)amdgpu_get_pcie_gen_support(adev, - si_pi->sys_pcie_mask, - si_pi->boot_pcie_gen, - AMDGPU_PCIE_GEN1); + (u8)si_gen_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + SI_PCIE_GEN1); if (si_pi->vddc_phase_shed_control) si_populate_phase_shedding_value(adev, @@ -5430,7 +5468,7 @@ 
static int si_convert_power_level_to_smc(struct amdgpu_device *adev, bool gmc_pg = false; if (eg_pi->pcie_performance_request && - (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID)) + (si_pi->force_pcie_gen != SI_PCIE_GEN_INVALID)) level->gen2PCIE = (u8)si_pi->force_pcie_gen; else level->gen2PCIE = (u8)pl->pcie_gen; @@ -6147,8 +6185,8 @@ static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable) WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); } -static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) +static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) { struct si_ps *state = si_get_ps(amdgpu_state); int i; @@ -6177,27 +6215,27 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic struct amdgpu_ps *amdgpu_current_state) { struct si_power_info *si_pi = si_get_pi(adev); - enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); - enum amdgpu_pcie_gen current_link_speed; + enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); + enum si_pcie_gen current_link_speed; - if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID) + if (si_pi->force_pcie_gen == SI_PCIE_GEN_INVALID) current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state); else current_link_speed = si_pi->force_pcie_gen; - si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; + si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID; si_pi->pspp_notify_required = false; if (target_link_speed > current_link_speed) { switch (target_link_speed) { #if defined(CONFIG_ACPI) - case AMDGPU_PCIE_GEN3: + case SI_PCIE_GEN3: if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) break; - si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2; - if (current_link_speed == AMDGPU_PCIE_GEN2) + si_pi->force_pcie_gen = SI_PCIE_GEN2; + if (current_link_speed == SI_PCIE_GEN2) break; fallthrough; - case AMDGPU_PCIE_GEN2: + case SI_PCIE_GEN2: if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) break; fallthrough; @@ -6217,13 +6255,13 @@ static void si_notify_link_speed_change_after_state_change(struct amdgpu_device struct amdgpu_ps *amdgpu_current_state) { struct si_power_info *si_pi = si_get_pi(adev); - enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); + enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); u8 request; if (si_pi->pspp_notify_required) { - if (target_link_speed == AMDGPU_PCIE_GEN3) + if (target_link_speed == SI_PCIE_GEN3) request = PCIE_PERF_REQ_PECI_GEN3; - else if (target_link_speed == AMDGPU_PCIE_GEN2) + else if (target_link_speed == SI_PCIE_GEN2) request = PCIE_PERF_REQ_PECI_GEN2; else request = PCIE_PERF_REQ_PECI_GEN1; @@ -6864,7 +6902,7 @@ static int si_dpm_enable(struct amdgpu_device *adev) si_enable_sclk_control(adev, true); si_start_dpm(adev); - si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true); + si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, true); si_thermal_start_thermal_controller(adev); ni_update_current_ps(adev, boot_ps); @@ -6904,7 +6942,7 @@ static void si_dpm_disable(struct amdgpu_device *adev) si_enable_power_containment(adev, boot_ps, false); si_enable_smc_cac(adev, boot_ps, false); si_enable_spread_spectrum(adev, false); - si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false); + 
si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, false); si_stop_dpm(adev); si_reset_to_default(adev); si_dpm_stop_smc(adev); @@ -7148,10 +7186,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev, pl->vddc = le16_to_cpu(clock_info->si.usVDDC); pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); pl->flags = le32_to_cpu(clock_info->si.ulFlags); - pl->pcie_gen = amdgpu_get_pcie_gen_support(adev, - si_pi->sys_pcie_mask, - si_pi->boot_pcie_gen, - clock_info->si.ucPCIEGen); + pl->pcie_gen = si_gen_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + clock_info->si.ucPCIEGen); /* patch up vddc if necessary */ ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, @@ -7318,7 +7356,7 @@ static int si_dpm_init(struct amdgpu_device *adev) si_pi->sys_pcie_mask = adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK; - si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID; + si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID; si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); si_set_max_cu_value(adev); diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h index bc0be6818e21..11cb7874a6bb 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h @@ -595,13 +595,20 @@ struct rv7xx_power_info { RV770_SMC_STATETABLE smc_statetable; }; +enum si_pcie_gen { + SI_PCIE_GEN1 = 0, + SI_PCIE_GEN2 = 1, + SI_PCIE_GEN3 = 2, + SI_PCIE_GEN_INVALID = 0xffff +}; + struct rv7xx_pl { u32 sclk; u32 mclk; u16 vddc; u16 vddci; /* eg+ only */ u32 flags; - enum amdgpu_pcie_gen pcie_gen; /* si+ only */ + enum si_pcie_gen pcie_gen; /* si+ only */ }; struct rv7xx_ps { @@ -967,9 +974,9 @@ struct si_power_info { struct si_ulv_param ulv; u32 max_cu; /* pcie gen */ - enum amdgpu_pcie_gen force_pcie_gen; - enum amdgpu_pcie_gen boot_pcie_gen; - enum amdgpu_pcie_gen acpi_pcie_gen; + enum si_pcie_gen force_pcie_gen; + enum si_pcie_gen boot_pcie_gen; + enum si_pcie_gen acpi_pcie_gen; u32 sys_pcie_mask; /* flags */ bool enable_dte; -- cgit From 28a31774b050261371953401e8072ae15200c91e Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 11 Nov 2021 14:29:00 +0800 Subject: drm/amd/pm: do not expose the API used internally only in kv_dpm.c Move it to kv_dpm.c instead. 
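Per this change the helper has no users outside kv_dpm.c, so it becomes a static kv_is_internal_thermal_sensor() next to its caller. Reduced to a self-contained toy (with generic sensor names rather than the real THERMAL_TYPE_* list), the file-local predicate pattern looks like this:

/*
 * The classification switch stays next to its only caller; nothing
 * outside this file can depend on it.
 */
#include <stdbool.h>
#include <stdio.h>

enum sensor_type { SENSOR_NONE, SENSOR_ON_DIE, SENSOR_EXTERNAL_I2C };

static bool sensor_is_internal(enum sensor_type type)
{
	switch (type) {
	case SENSOR_ON_DIE:
		return true;
	case SENSOR_NONE:
	case SENSOR_EXTERNAL_I2C:
	default:
		return false;
	}
}

int main(void)
{
	/* Only program thermal trip points when the sensor is on-die. */
	if (sensor_is_internal(SENSOR_ON_DIE))
		printf("programming on-die thermal trip points\n");
	return 0;
}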
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 23 ----------------------- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 2 -- drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c | 25 ++++++++++++++++++++++++- 3 files changed, 24 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index c97e79862779..9dfb57cceca5 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -209,29 +209,6 @@ static u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) return vrefresh; } -bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) -{ - switch (sensor) { - case THERMAL_TYPE_RV6XX: - case THERMAL_TYPE_RV770: - case THERMAL_TYPE_EVERGREEN: - case THERMAL_TYPE_SUMO: - case THERMAL_TYPE_NI: - case THERMAL_TYPE_SI: - case THERMAL_TYPE_CI: - case THERMAL_TYPE_KV: - return true; - case THERMAL_TYPE_ADT7473_WITH_INTERNAL: - case THERMAL_TYPE_EMC2103_WITH_INTERNAL: - return false; /* need special handling */ - case THERMAL_TYPE_NONE: - case THERMAL_TYPE_EXTERNAL: - case THERMAL_TYPE_EXTERNAL_GPIO: - default: - return false; - } -} - union power_info { struct _ATOM_POWERPLAY_INFO info; struct _ATOM_POWERPLAY_INFO_V2 info_2; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 108706ee8760..ecababa562af 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -382,8 +382,6 @@ u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, void *data, uint32_t *size); -bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor); - int amdgpu_get_platform_caps(struct amdgpu_device *adev); int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c index bcae42cef374..380a5336c74f 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c @@ -1256,6 +1256,29 @@ static void kv_dpm_enable_bapm(void *handle, bool enable) } } +static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) +{ + switch (sensor) { + case THERMAL_TYPE_RV6XX: + case THERMAL_TYPE_RV770: + case THERMAL_TYPE_EVERGREEN: + case THERMAL_TYPE_SUMO: + case THERMAL_TYPE_NI: + case THERMAL_TYPE_SI: + case THERMAL_TYPE_CI: + case THERMAL_TYPE_KV: + return true; + case THERMAL_TYPE_ADT7473_WITH_INTERNAL: + case THERMAL_TYPE_EMC2103_WITH_INTERNAL: + return false; /* need special handling */ + case THERMAL_TYPE_NONE: + case THERMAL_TYPE_EXTERNAL: + case THERMAL_TYPE_EXTERNAL_GPIO: + default: + return false; + } +} + static int kv_dpm_enable(struct amdgpu_device *adev) { struct kv_power_info *pi = kv_get_pi(adev); @@ -1352,7 +1375,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev) } if (adev->irq.installed && - amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { + kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); if (ret) { DRM_ERROR("kv_set_thermal_temperature_range failed\n"); -- cgit From 84176663e70d93836d30d2a480a4201c7f790b42 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 11 Nov 2021 14:19:33 +0800 Subject: drm/amd/pm: create a new holder for those APIs used only by legacy ASICs(si/kv) Those APIs are used 
only by legacy ASICs(si/kv). They cannot be shared by other ASICs. So, we create a new holder for them. Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2 +- .../drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 2 +- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 1 + drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 1022 +------------------ drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 17 +- drivers/gpu/drm/amd/pm/powerplay/Makefile | 2 +- drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c | 6 +- drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c | 1024 ++++++++++++++++++++ drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h | 37 + drivers/gpu/drm/amd/pm/powerplay/si_dpm.c | 6 +- 13 files changed, 1091 insertions(+), 1034 deletions(-) create mode 100644 drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c create mode 100644 drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index d1570a462a51..5d5205870861 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2532,7 +2532,7 @@ static void dce_v10_0_crtc_dpms(struct drm_crtc *crtc, int mode) break; } /* adjust pm to dpms */ - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } static void dce_v10_0_crtc_prepare(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 18a7b3bd633b..4d812b22c54f 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2608,7 +2608,7 @@ static void dce_v11_0_crtc_dpms(struct drm_crtc *crtc, int mode) break; } /* adjust pm to dpms */ - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } static void dce_v11_0_crtc_prepare(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index c7803dc2b2d5..b90bc2adf778 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2424,7 +2424,7 @@ static void dce_v6_0_crtc_dpms(struct drm_crtc *crtc, int mode) break; } /* adjust pm to dpms */ - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } static void dce_v6_0_crtc_prepare(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 8318ee8339f1..7c1379b02f94 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2433,7 +2433,7 @@ static void dce_v8_0_crtc_dpms(struct drm_crtc *crtc, int mode) break; } /* adjust pm to dpms */ - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } static void dce_v8_0_crtc_prepare(struct drm_crtc *crtc) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c index 46550811da00..75284e2cec74 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c @@ -101,7 +101,7 @@ bool dm_pp_apply_display_requirements( amdgpu_dpm_display_configuration_change(adev, &adev->pm.pm_display_cfg); - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } return true; diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 
2e295facd086..cdf724dcf832 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -404,6 +404,7 @@ struct amd_pm_funcs { int (*get_dpm_clock_table)(void *handle, struct dpm_clocks *clock_table); int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size); + int (*change_power_state)(void *handle); }; struct metrics_table_header { diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 9dfb57cceca5..438c56df9f23 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -34,113 +34,9 @@ #define WIDTH_4K 3840 -#define amdgpu_dpm_pre_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_post_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_display_configuration_changed(adev) \ - ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_print_power_state(adev, ps) \ - ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps))) - -#define amdgpu_dpm_vblank_too_short(adev) \ - ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle)) - #define amdgpu_dpm_enable_bapm(adev, e) \ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) -#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ - ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) - -void amdgpu_dpm_print_class_info(u32 class, u32 class2) -{ - const char *s; - - switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { - case ATOM_PPLIB_CLASSIFICATION_UI_NONE: - default: - s = "none"; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: - s = "battery"; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: - s = "balanced"; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: - s = "performance"; - break; - } - printk("\tui class: %s\n", s); - printk("\tinternal class:"); - if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && - (class2 == 0)) - pr_cont(" none"); - else { - if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) - pr_cont(" boot"); - if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - pr_cont(" thermal"); - if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) - pr_cont(" limited_pwr"); - if (class & ATOM_PPLIB_CLASSIFICATION_REST) - pr_cont(" rest"); - if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) - pr_cont(" forced"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - pr_cont(" 3d_perf"); - if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) - pr_cont(" ovrdrv"); - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - pr_cont(" uvd"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) - pr_cont(" 3d_low"); - if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) - pr_cont(" acpi"); - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - pr_cont(" uvd_hd2"); - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - pr_cont(" uvd_hd"); - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - pr_cont(" uvd_sd"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) - pr_cont(" limited_pwr2"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - pr_cont(" ulv"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - pr_cont(" uvd_mvc"); - } - pr_cont("\n"); -} - -void amdgpu_dpm_print_cap_info(u32 caps) -{ - printk("\tcaps:"); - if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) - pr_cont(" 
single_disp"); - if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) - pr_cont(" video"); - if (caps & ATOM_PPLIB_DISALLOW_ON_DC) - pr_cont(" no_dc"); - pr_cont("\n"); -} - -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - printk("\tstatus:"); - if (rps == adev->pm.dpm.current_ps) - pr_cont(" c"); - if (rps == adev->pm.dpm.requested_ps) - pr_cont(" r"); - if (rps == adev->pm.dpm.boot_ps) - pr_cont(" b"); - pr_cont("\n"); -} - static void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) { struct drm_device *ddev = adev_to_drm(adev); @@ -161,7 +57,6 @@ static void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) } } - u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) { struct drm_device *dev = adev_to_drm(adev); @@ -209,679 +104,6 @@ static u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) return vrefresh; } -union power_info { - struct _ATOM_POWERPLAY_INFO info; - struct _ATOM_POWERPLAY_INFO_V2 info_2; - struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; - struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; - struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; -}; - -union fan_info { - struct _ATOM_PPLIB_FANTABLE fan; - struct _ATOM_PPLIB_FANTABLE2 fan2; - struct _ATOM_PPLIB_FANTABLE3 fan3; -}; - -static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table, - ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) -{ - u32 size = atom_table->ucNumEntries * - sizeof(struct amdgpu_clock_voltage_dependency_entry); - int i; - ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; - - amdgpu_table->entries = kzalloc(size, GFP_KERNEL); - if (!amdgpu_table->entries) - return -ENOMEM; - - entry = &atom_table->entries[0]; - for (i = 0; i < atom_table->ucNumEntries; i++) { - amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | - (entry->ucClockHigh << 16); - amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); - } - amdgpu_table->count = atom_table->ucNumEntries; - - return 0; -} - -int amdgpu_get_platform_caps(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); - - return 0; -} - -/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 - -int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - 
union power_info *power_info; - union fan_info *fan_info; - ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - int ret, i; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - /* fan table */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { - if (power_info->pplib3.usFanTableOffset) { - fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib3.usFanTableOffset)); - adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; - adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); - adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); - adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); - adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); - adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); - adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); - if (fan_info->fan.ucFanTableFormat >= 2) - adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); - else - adev->pm.dpm.fan.t_max = 10900; - adev->pm.dpm.fan.cycle_delay = 100000; - if (fan_info->fan.ucFanTableFormat >= 3) { - adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; - adev->pm.dpm.fan.default_max_fan_pwm = - le16_to_cpu(fan_info->fan3.usFanPWMMax); - adev->pm.dpm.fan.default_fan_output_sensitivity = 4836; - adev->pm.dpm.fan.fan_output_sensitivity = - le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); - } - adev->pm.dpm.fan.ucode_fan_control = true; - } - } - - /* clock dependancy tables, shedding tables */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { - if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - 
return ret; - } - } - if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { - ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = - (ATOM_PPLIB_Clock_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); - if (clk_v->ucNumEntries) { - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = - le16_to_cpu(clk_v->entries[0].usSclkLow) | - (clk_v->entries[0].ucSclkHigh << 16); - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = - le16_to_cpu(clk_v->entries[0].usMclkLow) | - (clk_v->entries[0].ucMclkHigh << 16); - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = - le16_to_cpu(clk_v->entries[0].usVddc); - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = - le16_to_cpu(clk_v->entries[0].usVddci); - } - } - if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { - ATOM_PPLIB_PhaseSheddingLimits_Table *psl = - (ATOM_PPLIB_PhaseSheddingLimits_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); - ATOM_PPLIB_PhaseSheddingLimits_Record *entry; - - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = - kcalloc(psl->ucNumEntries, - sizeof(struct amdgpu_phase_shedding_limits_entry), - GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - - entry = &psl->entries[0]; - for (i = 0; i < psl->ucNumEntries; i++) { - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = - le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = - le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); - } - adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = - psl->ucNumEntries; - } - } - - /* cac data */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { - adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); - adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); - adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; - adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); - if (adev->pm.dpm.tdp_od_limit) - adev->pm.dpm.power_control = true; - else - adev->pm.dpm.power_control = false; - adev->pm.dpm.tdp_adjustment = 0; - adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); - adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); - adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); - if (power_info->pplib5.usCACLeakageTableOffset) { - ATOM_PPLIB_CAC_Leakage_Table *cac_table = - (ATOM_PPLIB_CAC_Leakage_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); - ATOM_PPLIB_CAC_Leakage_Record *entry; - u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); - adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - entry = &cac_table->entries[0]; - for (i = 0; i < cac_table->ucNumEntries; i++) { - if 
(adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = - le16_to_cpu(entry->usVddc1); - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = - le16_to_cpu(entry->usVddc2); - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = - le16_to_cpu(entry->usVddc3); - } else { - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = - le16_to_cpu(entry->usVddc); - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = - le32_to_cpu(entry->ulLeakageValue); - } - entry = (ATOM_PPLIB_CAC_Leakage_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); - } - adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; - } - } - - /* ext tables */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { - ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && - ext_hdr->usVCETableOffset) { - VCEClockInfoArray *array = (VCEClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usVCETableOffset) + 1); - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = - (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + - 1 + array->ucNumEntries * sizeof(VCEClockInfo)); - ATOM_PPLIB_VCE_State_Table *states = - (ATOM_PPLIB_VCE_State_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + - 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + - 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; - ATOM_PPLIB_VCE_State_Record *state_entry; - VCEClockInfo *vce_clk; - u32 size = limits->numEntries * - sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - state_entry = &states->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - vce_clk = (VCEClockInfo *) - ((u8 *)&array->entries[0] + - (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = - le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = - le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); - } - adev->pm.dpm.num_of_vce_states = - states->numEntries > AMD_MAX_VCE_LEVELS ? 
- AMD_MAX_VCE_LEVELS : states->numEntries; - for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { - vce_clk = (VCEClockInfo *) - ((u8 *)&array->entries[0] + - (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); - adev->pm.dpm.vce_states[i].evclk = - le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); - adev->pm.dpm.vce_states[i].ecclk = - le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); - adev->pm.dpm.vce_states[i].clk_idx = - state_entry->ucClockInfoIndex & 0x3f; - adev->pm.dpm.vce_states[i].pstate = - (state_entry->ucClockInfoIndex & 0xc0) >> 6; - state_entry = (ATOM_PPLIB_VCE_State_Record *) - ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && - ext_hdr->usUVDTableOffset) { - UVDClockInfoArray *array = (UVDClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = - (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + - 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; - u32 size = limits->numEntries * - sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - UVDClockInfo *uvd_clk = (UVDClockInfo *) - ((u8 *)&array->entries[0] + - (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = - le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = - le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && - ext_hdr->usSAMUTableOffset) { - ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = - (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); - ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; - u32 size = limits->numEntries * - sizeof(struct amdgpu_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = - le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = 
(ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && - ext_hdr->usPPMTableOffset) { - ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPPMTableOffset)); - adev->pm.dpm.dyn_state.ppm_table = - kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.ppm_table) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; - adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = - le16_to_cpu(ppm->usCpuCoreNumber); - adev->pm.dpm.dyn_state.ppm_table->platform_tdp = - le32_to_cpu(ppm->ulPlatformTDP); - adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = - le32_to_cpu(ppm->ulSmallACPlatformTDP); - adev->pm.dpm.dyn_state.ppm_table->platform_tdc = - le32_to_cpu(ppm->ulPlatformTDC); - adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = - le32_to_cpu(ppm->ulSmallACPlatformTDC); - adev->pm.dpm.dyn_state.ppm_table->apu_tdp = - le32_to_cpu(ppm->ulApuTDP); - adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = - le32_to_cpu(ppm->ulDGpuTDP); - adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = - le32_to_cpu(ppm->ulDGpuUlvPower); - adev->pm.dpm.dyn_state.ppm_table->tj_max = - le32_to_cpu(ppm->ulTjmax); - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && - ext_hdr->usACPTableOffset) { - ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = - (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usACPTableOffset) + 1); - ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; - u32 size = limits->numEntries * - sizeof(struct amdgpu_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = - le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && - ext_hdr->usPowerTuneTableOffset) { - u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); - ATOM_PowerTune_Table *pt; - adev->pm.dpm.dyn_state.cac_tdp_table = - kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.cac_tdp_table) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - if (rev > 0) { - ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); - adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = - ppt->usMaximumPowerDeliveryLimit; - pt = &ppt->power_tune_table; - } else { - ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) - 
(mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); - adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; - pt = &ppt->power_tune_table; - } - adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); - adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = - le16_to_cpu(pt->usConfigurableTDP); - adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); - adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = - le16_to_cpu(pt->usBatteryPowerLimit); - adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = - le16_to_cpu(pt->usSmallPowerLimit); - adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = - le16_to_cpu(pt->usLowCACLeakage); - adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = - le16_to_cpu(pt->usHighCACLeakage); - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) && - ext_hdr->usSclkVddgfxTableOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset)); - ret = amdgpu_parse_clk_voltage_dep_table( - &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk, - dep_table); - if (ret) { - kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); - return ret; - } - } - } - - return 0; -} - -void amdgpu_free_extended_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state; - - kfree(dyn_state->vddc_dependency_on_sclk.entries); - kfree(dyn_state->vddci_dependency_on_mclk.entries); - kfree(dyn_state->vddc_dependency_on_mclk.entries); - kfree(dyn_state->mvdd_dependency_on_mclk.entries); - kfree(dyn_state->cac_leakage_table.entries); - kfree(dyn_state->phase_shedding_limits_table.entries); - kfree(dyn_state->ppm_table); - kfree(dyn_state->cac_tdp_table); - kfree(dyn_state->vce_clock_voltage_dependency_table.entries); - kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); - kfree(dyn_state->samu_clock_voltage_dependency_table.entries); - kfree(dyn_state->acp_clock_voltage_dependency_table.entries); - kfree(dyn_state->vddgfx_dependency_on_sclk.entries); -} - -static const char *pp_lib_thermal_controller_names[] = { - "NONE", - "lm63", - "adm1032", - "adm1030", - "max6649", - "lm64", - "f75375", - "RV6xx", - "RV770", - "adt7473", - "NONE", - "External GPIO", - "Evergreen", - "emc2103", - "Sumo", - "Northern Islands", - "Southern Islands", - "lm96163", - "Sea Islands", - "Kaveri/Kabini", -}; - -void amdgpu_add_thermal_controller(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - ATOM_PPLIB_POWERPLAYTABLE *power_table; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - ATOM_PPLIB_THERMALCONTROLLER *controller; - struct amdgpu_i2c_bus_rec i2c_bus; - u16 data_offset; - u8 frev, crev; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return; - power_table = (ATOM_PPLIB_POWERPLAYTABLE *) - (mode_info->atom_context->bios + data_offset); - controller = &power_table->sThermalController; - - /* add the i2c bus for thermal/fan chip */ - if (controller->ucType > 0) { - if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) - adev->pm.no_fan = true; - adev->pm.fan_pulses_per_revolution = - controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; - if (adev->pm.fan_pulses_per_revolution) { - adev->pm.fan_min_rpm = controller->ucFanMinRPM; - 
adev->pm.fan_max_rpm = controller->ucFanMaxRPM; - } - if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_RV770; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_NI; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_SI; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_CI; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_KV; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { - DRM_INFO("External GPIO thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; - } else if (controller->ucType == - ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { - DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; - } else if (controller->ucType == - ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { - DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; - } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", - pp_lib_thermal_controller_names[controller->ucType], - controller->ucI2cAddress >> 1, - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; - i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine); - adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus); - if (adev->pm.i2c_bus) { - struct i2c_board_info info = { }; - const char *name = pp_lib_thermal_controller_names[controller->ucType]; - info.addr = controller->ucI2cAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); - i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); - } - } else { - DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", - controller->ucType, - controller->ucI2cAddress >> 1, - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - } - } -} - -struct amd_vce_state* -amdgpu_get_vce_clock_state(void *handle, u32 idx) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (idx < adev->pm.dpm.num_of_vce_states) - return &adev->pm.dpm.vce_states[idx]; - - return NULL; -} - int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -1240,215 +462,10 @@ void amdgpu_dpm_thermal_work_handler(struct work_struct *work) adev->pm.dpm.state = dpm_state; mutex_unlock(&adev->pm.mutex); - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } -static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, - enum amd_pm_state_type dpm_state) -{ - int i; - struct amdgpu_ps *ps; - u32 ui_class; - bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? - true : false; - - /* check if the vblank period is too short to adjust the mclk */ - if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { - if (amdgpu_dpm_vblank_too_short(adev)) - single_display = false; - } - - /* certain older asics have a separare 3D performance state, - * so try that first if the user selected performance - */ - if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) - dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; - /* balanced states don't exist at the moment */ - if (dpm_state == POWER_STATE_TYPE_BALANCED) - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - -restart_search: - /* Pick the best power state based on current conditions */ - for (i = 0; i < adev->pm.dpm.num_ps; i++) { - ps = &adev->pm.dpm.ps[i]; - ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; - switch (dpm_state) { - /* user states */ - case POWER_STATE_TYPE_BATTERY: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - case POWER_STATE_TYPE_BALANCED: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - case POWER_STATE_TYPE_PERFORMANCE: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - /* internal states */ - case POWER_STATE_TYPE_INTERNAL_UVD: - if (adev->pm.dpm.uvd_ps) - return adev->pm.dpm.uvd_ps; - else - break; - case POWER_STATE_TYPE_INTERNAL_UVD_SD: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_HD: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_HD2: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - return ps; - break; - 
case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_BOOT: - return adev->pm.dpm.boot_ps; - case POWER_STATE_TYPE_INTERNAL_THERMAL: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_ACPI: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_ULV: - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_3DPERF: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - return ps; - break; - default: - break; - } - } - /* use a fallback state if we didn't match */ - switch (dpm_state) { - case POWER_STATE_TYPE_INTERNAL_UVD_SD: - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; - goto restart_search; - case POWER_STATE_TYPE_INTERNAL_UVD_HD: - case POWER_STATE_TYPE_INTERNAL_UVD_HD2: - case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - if (adev->pm.dpm.uvd_ps) { - return adev->pm.dpm.uvd_ps; - } else { - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - goto restart_search; - } - case POWER_STATE_TYPE_INTERNAL_THERMAL: - dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; - goto restart_search; - case POWER_STATE_TYPE_INTERNAL_ACPI: - dpm_state = POWER_STATE_TYPE_BATTERY; - goto restart_search; - case POWER_STATE_TYPE_BATTERY: - case POWER_STATE_TYPE_BALANCED: - case POWER_STATE_TYPE_INTERNAL_3DPERF: - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - goto restart_search; - default: - break; - } - - return NULL; -} - -static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) -{ - struct amdgpu_ps *ps; - enum amd_pm_state_type dpm_state; - int ret; - bool equal = false; - - /* if dpm init failed */ - if (!adev->pm.dpm_enabled) - return; - - if (adev->pm.dpm.user_state != adev->pm.dpm.state) { - /* add other state override checks here */ - if ((!adev->pm.dpm.thermal_active) && - (!adev->pm.dpm.uvd_active)) - adev->pm.dpm.state = adev->pm.dpm.user_state; - } - dpm_state = adev->pm.dpm.state; - - ps = amdgpu_dpm_pick_power_state(adev, dpm_state); - if (ps) - adev->pm.dpm.requested_ps = ps; - else - return; - - if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { - printk("switching from power state:\n"); - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); - printk("switching to power state:\n"); - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); - } - - /* update whether vce is active */ - ps->vce_active = adev->pm.dpm.vce_active; - if (adev->powerplay.pp_funcs->display_configuration_changed) - amdgpu_dpm_display_configuration_changed(adev); - - ret = amdgpu_dpm_pre_set_power_state(adev); - if (ret) - return; - - if (adev->powerplay.pp_funcs->check_state_equal) { - if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) - equal = false; - } - - if (equal) - return; - - if (adev->powerplay.pp_funcs->set_power_state) - adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle); - - amdgpu_dpm_post_set_power_state(adev); - - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; - - if (adev->powerplay.pp_funcs->force_performance_level) { - if (adev->pm.dpm.thermal_active) { - enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; - /* force low perf level for thermal */ - amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); - /* save the user's level */ - 
adev->pm.dpm.forced_level = level; - } else { - /* otherwise, user selected level */ - amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); - } - } -} - -void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) +void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) { int i = 0; @@ -1464,7 +481,13 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) amdgpu_fence_wait_empty(ring); } - if (adev->powerplay.pp_funcs->dispatch_tasks) { + if ((adev->family == AMDGPU_FAMILY_SI) || + (adev->family == AMDGPU_FAMILY_KV)) { + mutex_lock(&adev->pm.mutex); + amdgpu_dpm_get_active_displays(adev); + adev->powerplay.pp_funcs->change_power_state(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); + } else { if (!amdgpu_device_has_dc_support(adev)) { mutex_lock(&adev->pm.mutex); amdgpu_dpm_get_active_displays(adev); @@ -1483,11 +506,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) mutex_unlock(&adev->pm.mutex); } amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); - } else { - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_get_active_displays(adev); - amdgpu_dpm_change_power_state_locked(adev); - mutex_unlock(&adev->pm.mutex); } } @@ -1505,7 +523,7 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) } mutex_unlock(&adev->pm.mutex); - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } else { ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); if (ret) @@ -1541,7 +559,7 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) } mutex_unlock(&adev->pm.mutex); - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } else { ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); if (ret) @@ -1550,18 +568,6 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) } } -void amdgpu_pm_print_power_states(struct amdgpu_device *adev) -{ - int i; - - if (adev->powerplay.pp_funcs->print_power_state == NULL) - return; - - for (i = 0; i < adev->pm.dpm.num_ps; i++) - amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); - -} - void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) { int ret = 0; @@ -1727,7 +733,7 @@ void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, if (amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state) == -EOPNOTSUPP) - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) @@ -1902,7 +908,7 @@ int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) AMD_PP_TASK_READJUST_POWER_STATE, NULL) == -EOPNOTSUPP) { adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } return 0; @@ -1932,7 +938,7 @@ int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) AMD_PP_TASK_READJUST_POWER_STATE, NULL) == -EOPNOTSUPP) { adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } return 0; diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index ecababa562af..1958e0e488b0 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -374,24 +374,10 @@ enum amdgpu_display_gap AMDGPU_PM_DISPLAY_GAP_IGNORE = 3, }; -void amdgpu_dpm_print_class_info(u32 class, u32 class2); -void amdgpu_dpm_print_cap_info(u32 caps); -void amdgpu_dpm_print_ps_status(struct 
amdgpu_device *adev, - struct amdgpu_ps *rps); u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, void *data, uint32_t *size); -int amdgpu_get_platform_caps(struct amdgpu_device *adev); - -int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); -void amdgpu_free_extended_power_table(struct amdgpu_device *adev); - -void amdgpu_add_thermal_controller(struct amdgpu_device *adev); - -struct amd_vce_state* -amdgpu_get_vce_clock_state(void *handle, u32 idx); - int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate); @@ -442,11 +428,10 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso void amdgpu_dpm_thermal_work_handler(struct work_struct *work); -void amdgpu_pm_compute_clocks(struct amdgpu_device *adev); +void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev); void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable); -void amdgpu_pm_print_power_states(struct amdgpu_device *adev); int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version); int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable); int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size); diff --git a/drivers/gpu/drm/amd/pm/powerplay/Makefile b/drivers/gpu/drm/amd/pm/powerplay/Makefile index 0fb114adc79f..614d8b6a58ad 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/Makefile +++ b/drivers/gpu/drm/amd/pm/powerplay/Makefile @@ -28,7 +28,7 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/powerplay/ include $(AMD_POWERPLAY) -POWER_MGR-y = amd_powerplay.o +POWER_MGR-y = amd_powerplay.o legacy_dpm.o POWER_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c index 380a5336c74f..eed89835231c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c @@ -36,6 +36,7 @@ #include "gca/gfx_7_2_d.h" #include "gca/gfx_7_2_sh_mask.h" +#include "legacy_dpm.h" #define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 #define KV_MINIMUM_ENGINE_CLOCK 800 @@ -3087,7 +3088,7 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); return ret; } @@ -3135,7 +3136,7 @@ static int kv_dpm_resume(void *handle) adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); if (adev->pm.dpm_enabled) - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } return 0; } @@ -3389,6 +3390,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = { .get_vce_clock_state = amdgpu_get_vce_clock_state, .check_state_equal = kv_check_state_equal, .read_sensor = &kv_dpm_read_sensor, + .change_power_state = amdgpu_dpm_change_power_state_locked, }; static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { diff --git a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c new file mode 100644 index 000000000000..67c84b7ad8e7 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c @@ -0,0 +1,1024 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "amdgpu.h" +#include "amdgpu_i2c.h" +#include "amdgpu_atombios.h" +#include "atom.h" +#include "amd_pcie.h" +#include "legacy_dpm.h" + +#define amdgpu_dpm_pre_set_power_state(adev) \ + ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_post_set_power_state(adev) \ + ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_display_configuration_changed(adev) \ + ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_print_power_state(adev, ps) \ + ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps))) + +#define amdgpu_dpm_vblank_too_short(adev) \ + ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ + ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) + +void amdgpu_dpm_print_class_info(u32 class, u32 class2) +{ + const char *s; + + switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { + case ATOM_PPLIB_CLASSIFICATION_UI_NONE: + default: + s = "none"; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: + s = "battery"; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: + s = "balanced"; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: + s = "performance"; + break; + } + printk("\tui class: %s\n", s); + printk("\tinternal class:"); + if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && + (class2 == 0)) + pr_cont(" none"); + else { + if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) + pr_cont(" boot"); + if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) + pr_cont(" thermal"); + if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) + pr_cont(" limited_pwr"); + if (class & ATOM_PPLIB_CLASSIFICATION_REST) + pr_cont(" rest"); + if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) + pr_cont(" forced"); + if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) + pr_cont(" 3d_perf"); + if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) + pr_cont(" ovrdrv"); + if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + pr_cont(" uvd"); + if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) + pr_cont(" 3d_low"); + if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) + pr_cont(" acpi"); + if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) + pr_cont(" uvd_hd2"); + if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) + 
pr_cont(" uvd_hd"); + if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) + pr_cont(" uvd_sd"); + if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) + pr_cont(" limited_pwr2"); + if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) + pr_cont(" ulv"); + if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) + pr_cont(" uvd_mvc"); + } + pr_cont("\n"); +} + +void amdgpu_dpm_print_cap_info(u32 caps) +{ + printk("\tcaps:"); + if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) + pr_cont(" single_disp"); + if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) + pr_cont(" video"); + if (caps & ATOM_PPLIB_DISALLOW_ON_DC) + pr_cont(" no_dc"); + pr_cont("\n"); +} + +void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + printk("\tstatus:"); + if (rps == adev->pm.dpm.current_ps) + pr_cont(" c"); + if (rps == adev->pm.dpm.requested_ps) + pr_cont(" r"); + if (rps == adev->pm.dpm.boot_ps) + pr_cont(" b"); + pr_cont("\n"); +} + +void amdgpu_pm_print_power_states(struct amdgpu_device *adev) +{ + int i; + + if (adev->powerplay.pp_funcs->print_power_state == NULL) + return; + + for (i = 0; i < adev->pm.dpm.num_ps; i++) + amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); + +} + +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; + struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; + struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; +}; + +int amdgpu_get_platform_caps(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); + adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); + adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); + + return 0; +} + +union fan_info { + struct _ATOM_PPLIB_FANTABLE fan; + struct _ATOM_PPLIB_FANTABLE2 fan2; + struct _ATOM_PPLIB_FANTABLE3 fan3; +}; + +static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table, + ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) +{ + u32 size = atom_table->ucNumEntries * + sizeof(struct amdgpu_clock_voltage_dependency_entry); + int i; + ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; + + amdgpu_table->entries = kzalloc(size, GFP_KERNEL); + if (!amdgpu_table->entries) + return -ENOMEM; + + entry = &atom_table->entries[0]; + for (i = 0; i < atom_table->ucNumEntries; i++) { + amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | + (entry->ucClockHigh << 16); + amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); + } + amdgpu_table->count = atom_table->ucNumEntries; + + return 0; +} + +/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 +#define 
SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 + +int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + union power_info *power_info; + union fan_info *fan_info; + ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + int ret, i; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + /* fan table */ + if (le16_to_cpu(power_info->pplib.usTableSize) >= + sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { + if (power_info->pplib3.usFanTableOffset) { + fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib3.usFanTableOffset)); + adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; + adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); + adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); + adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); + adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); + adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); + adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); + if (fan_info->fan.ucFanTableFormat >= 2) + adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); + else + adev->pm.dpm.fan.t_max = 10900; + adev->pm.dpm.fan.cycle_delay = 100000; + if (fan_info->fan.ucFanTableFormat >= 3) { + adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; + adev->pm.dpm.fan.default_max_fan_pwm = + le16_to_cpu(fan_info->fan3.usFanPWMMax); + adev->pm.dpm.fan.default_fan_output_sensitivity = 4836; + adev->pm.dpm.fan.fan_output_sensitivity = + le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); + } + adev->pm.dpm.fan.ucode_fan_control = true; + } + } + + /* clock dependancy tables, shedding tables */ + if (le16_to_cpu(power_info->pplib.usTableSize) >= + sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { + if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); + ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + dep_table); + if (ret) { + amdgpu_free_extended_power_table(adev); + return ret; + } + } + if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); + ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + dep_table); + if (ret) { + amdgpu_free_extended_power_table(adev); + return ret; + } + } + if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); + ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + dep_table); + if (ret) { + amdgpu_free_extended_power_table(adev); + return ret; + } + } + if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { + dep_table = 
(ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); + ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, + dep_table); + if (ret) { + amdgpu_free_extended_power_table(adev); + return ret; + } + } + if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { + ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = + (ATOM_PPLIB_Clock_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); + if (clk_v->ucNumEntries) { + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = + le16_to_cpu(clk_v->entries[0].usSclkLow) | + (clk_v->entries[0].ucSclkHigh << 16); + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = + le16_to_cpu(clk_v->entries[0].usMclkLow) | + (clk_v->entries[0].ucMclkHigh << 16); + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = + le16_to_cpu(clk_v->entries[0].usVddc); + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = + le16_to_cpu(clk_v->entries[0].usVddci); + } + } + if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { + ATOM_PPLIB_PhaseSheddingLimits_Table *psl = + (ATOM_PPLIB_PhaseSheddingLimits_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); + ATOM_PPLIB_PhaseSheddingLimits_Record *entry; + + adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = + kcalloc(psl->ucNumEntries, + sizeof(struct amdgpu_phase_shedding_limits_entry), + GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + + entry = &psl->entries[0]; + for (i = 0; i < psl->ucNumEntries; i++) { + adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = + le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); + adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = + le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); + adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); + } + adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = + psl->ucNumEntries; + } + } + + /* cac data */ + if (le16_to_cpu(power_info->pplib.usTableSize) >= + sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { + adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); + adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); + adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; + adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); + if (adev->pm.dpm.tdp_od_limit) + adev->pm.dpm.power_control = true; + else + adev->pm.dpm.power_control = false; + adev->pm.dpm.tdp_adjustment = 0; + adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); + adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); + adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); + if (power_info->pplib5.usCACLeakageTableOffset) { + ATOM_PPLIB_CAC_Leakage_Table *cac_table = + (ATOM_PPLIB_CAC_Leakage_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); + ATOM_PPLIB_CAC_Leakage_Record *entry; + u32 size = cac_table->ucNumEntries * sizeof(struct 
amdgpu_cac_leakage_table); + adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + entry = &cac_table->entries[0]; + for (i = 0; i < cac_table->ucNumEntries; i++) { + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = + le16_to_cpu(entry->usVddc1); + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = + le16_to_cpu(entry->usVddc2); + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = + le16_to_cpu(entry->usVddc3); + } else { + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = + le16_to_cpu(entry->usVddc); + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = + le32_to_cpu(entry->ulLeakageValue); + } + entry = (ATOM_PPLIB_CAC_Leakage_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); + } + adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; + } + } + + /* ext tables */ + if (le16_to_cpu(power_info->pplib.usTableSize) >= + sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { + ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && + ext_hdr->usVCETableOffset) { + VCEClockInfoArray *array = (VCEClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usVCETableOffset) + 1); + ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = + (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + + 1 + array->ucNumEntries * sizeof(VCEClockInfo)); + ATOM_PPLIB_VCE_State_Table *states = + (ATOM_PPLIB_VCE_State_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + + 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + + 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); + ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; + ATOM_PPLIB_VCE_State_Record *state_entry; + VCEClockInfo *vce_clk; + u32 size = limits->numEntries * + sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = + limits->numEntries; + entry = &limits->entries[0]; + state_entry = &states->entries[0]; + for (i = 0; i < limits->numEntries; i++) { + vce_clk = (VCEClockInfo *) + ((u8 *)&array->entries[0] + + (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = + le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = + le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); + } + adev->pm.dpm.num_of_vce_states = + states->numEntries > AMD_MAX_VCE_LEVELS ? 
+ AMD_MAX_VCE_LEVELS : states->numEntries; + for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { + vce_clk = (VCEClockInfo *) + ((u8 *)&array->entries[0] + + (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); + adev->pm.dpm.vce_states[i].evclk = + le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); + adev->pm.dpm.vce_states[i].ecclk = + le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); + adev->pm.dpm.vce_states[i].clk_idx = + state_entry->ucClockInfoIndex & 0x3f; + adev->pm.dpm.vce_states[i].pstate = + (state_entry->ucClockInfoIndex & 0xc0) >> 6; + state_entry = (ATOM_PPLIB_VCE_State_Record *) + ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); + } + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && + ext_hdr->usUVDTableOffset) { + UVDClockInfoArray *array = (UVDClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); + ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = + (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + + 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); + ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; + u32 size = limits->numEntries * + sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = + limits->numEntries; + entry = &limits->entries[0]; + for (i = 0; i < limits->numEntries; i++) { + UVDClockInfo *uvd_clk = (UVDClockInfo *) + ((u8 *)&array->entries[0] + + (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = + le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = + le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); + } + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && + ext_hdr->usSAMUTableOffset) { + ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = + (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); + ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; + u32 size = limits->numEntries * + sizeof(struct amdgpu_clock_voltage_dependency_entry); + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = + limits->numEntries; + entry = &limits->entries[0]; + for (i = 0; i < limits->numEntries; i++) { + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = + le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(entry->usVoltage); + entry = 
(ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); + } + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && + ext_hdr->usPPMTableOffset) { + ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPPMTableOffset)); + adev->pm.dpm.dyn_state.ppm_table = + kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.ppm_table) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; + adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = + le16_to_cpu(ppm->usCpuCoreNumber); + adev->pm.dpm.dyn_state.ppm_table->platform_tdp = + le32_to_cpu(ppm->ulPlatformTDP); + adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = + le32_to_cpu(ppm->ulSmallACPlatformTDP); + adev->pm.dpm.dyn_state.ppm_table->platform_tdc = + le32_to_cpu(ppm->ulPlatformTDC); + adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = + le32_to_cpu(ppm->ulSmallACPlatformTDC); + adev->pm.dpm.dyn_state.ppm_table->apu_tdp = + le32_to_cpu(ppm->ulApuTDP); + adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = + le32_to_cpu(ppm->ulDGpuTDP); + adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = + le32_to_cpu(ppm->ulDGpuUlvPower); + adev->pm.dpm.dyn_state.ppm_table->tj_max = + le32_to_cpu(ppm->ulTjmax); + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && + ext_hdr->usACPTableOffset) { + ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = + (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usACPTableOffset) + 1); + ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; + u32 size = limits->numEntries * + sizeof(struct amdgpu_clock_voltage_dependency_entry); + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = + limits->numEntries; + entry = &limits->entries[0]; + for (i = 0; i < limits->numEntries; i++) { + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = + le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); + } + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && + ext_hdr->usPowerTuneTableOffset) { + u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + ATOM_PowerTune_Table *pt; + adev->pm.dpm.dyn_state.cac_tdp_table = + kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.cac_tdp_table) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + if (rev > 0) { + ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = + ppt->usMaximumPowerDeliveryLimit; + pt = &ppt->power_tune_table; + } else { + ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) + 
(mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; + pt = &ppt->power_tune_table; + } + adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); + adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = + le16_to_cpu(pt->usConfigurableTDP); + adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); + adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = + le16_to_cpu(pt->usBatteryPowerLimit); + adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = + le16_to_cpu(pt->usSmallPowerLimit); + adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = + le16_to_cpu(pt->usLowCACLeakage); + adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = + le16_to_cpu(pt->usHighCACLeakage); + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) && + ext_hdr->usSclkVddgfxTableOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset)); + ret = amdgpu_parse_clk_voltage_dep_table( + &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk, + dep_table); + if (ret) { + kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); + return ret; + } + } + } + + return 0; +} + +void amdgpu_free_extended_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state; + + kfree(dyn_state->vddc_dependency_on_sclk.entries); + kfree(dyn_state->vddci_dependency_on_mclk.entries); + kfree(dyn_state->vddc_dependency_on_mclk.entries); + kfree(dyn_state->mvdd_dependency_on_mclk.entries); + kfree(dyn_state->cac_leakage_table.entries); + kfree(dyn_state->phase_shedding_limits_table.entries); + kfree(dyn_state->ppm_table); + kfree(dyn_state->cac_tdp_table); + kfree(dyn_state->vce_clock_voltage_dependency_table.entries); + kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); + kfree(dyn_state->samu_clock_voltage_dependency_table.entries); + kfree(dyn_state->acp_clock_voltage_dependency_table.entries); + kfree(dyn_state->vddgfx_dependency_on_sclk.entries); +} + +static const char *pp_lib_thermal_controller_names[] = { + "NONE", + "lm63", + "adm1032", + "adm1030", + "max6649", + "lm64", + "f75375", + "RV6xx", + "RV770", + "adt7473", + "NONE", + "External GPIO", + "Evergreen", + "emc2103", + "Sumo", + "Northern Islands", + "Southern Islands", + "lm96163", + "Sea Islands", + "Kaveri/Kabini", +}; + +void amdgpu_add_thermal_controller(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + ATOM_PPLIB_POWERPLAYTABLE *power_table; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + ATOM_PPLIB_THERMALCONTROLLER *controller; + struct amdgpu_i2c_bus_rec i2c_bus; + u16 data_offset; + u8 frev, crev; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return; + power_table = (ATOM_PPLIB_POWERPLAYTABLE *) + (mode_info->atom_context->bios + data_offset); + controller = &power_table->sThermalController; + + /* add the i2c bus for thermal/fan chip */ + if (controller->ucType > 0) { + if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) + adev->pm.no_fan = true; + adev->pm.fan_pulses_per_revolution = + controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + if (adev->pm.fan_pulses_per_revolution) { + adev->pm.fan_min_rpm = controller->ucFanMinRPM; + 
adev->pm.fan_max_rpm = controller->ucFanMaxRPM; + } + if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_RV770; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_NI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_SI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_CI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_KV; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { + DRM_INFO("External GPIO thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { + DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { + DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; + } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { + DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + pp_lib_thermal_controller_names[controller->ucType], + controller->ucI2cAddress >> 1, + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; + i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine); + adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus); + if (adev->pm.i2c_bus) { + struct i2c_board_info info = { }; + const char *name = pp_lib_thermal_controller_names[controller->ucType]; + info.addr = controller->ucI2cAddress >> 1; + strlcpy(info.type, name, sizeof(info.type)); + i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); + } + } else { + DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", + controller->ucType, + controller->ucI2cAddress >> 1, + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + } + } +} + +struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (idx < adev->pm.dpm.num_of_vce_states) + return &adev->pm.dpm.vce_states[idx]; + + return NULL; +} + +static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, + enum amd_pm_state_type dpm_state) +{ + int i; + struct amdgpu_ps *ps; + u32 ui_class; + bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? + true : false; + + /* check if the vblank period is too short to adjust the mclk */ + if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { + if (amdgpu_dpm_vblank_too_short(adev)) + single_display = false; + } + + /* certain older asics have a separare 3D performance state, + * so try that first if the user selected performance + */ + if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) + dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; + /* balanced states don't exist at the moment */ + if (dpm_state == POWER_STATE_TYPE_BALANCED) + dpm_state = POWER_STATE_TYPE_PERFORMANCE; + +restart_search: + /* Pick the best power state based on current conditions */ + for (i = 0; i < adev->pm.dpm.num_ps; i++) { + ps = &adev->pm.dpm.ps[i]; + ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; + switch (dpm_state) { + /* user states */ + case POWER_STATE_TYPE_BATTERY: + if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { + if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { + if (single_display) + return ps; + } else + return ps; + } + break; + case POWER_STATE_TYPE_BALANCED: + if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { + if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { + if (single_display) + return ps; + } else + return ps; + } + break; + case POWER_STATE_TYPE_PERFORMANCE: + if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { + if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { + if (single_display) + return ps; + } else + return ps; + } + break; + /* internal states */ + case POWER_STATE_TYPE_INTERNAL_UVD: + if (adev->pm.dpm.uvd_ps) + return adev->pm.dpm.uvd_ps; + else + break; + case POWER_STATE_TYPE_INTERNAL_UVD_SD: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_UVD_HD: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_UVD_HD2: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_UVD_MVC: + if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_BOOT: + return adev->pm.dpm.boot_ps; + case POWER_STATE_TYPE_INTERNAL_THERMAL: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_ACPI: + if 
(ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_ULV: + if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_3DPERF: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) + return ps; + break; + default: + break; + } + } + /* use a fallback state if we didn't match */ + switch (dpm_state) { + case POWER_STATE_TYPE_INTERNAL_UVD_SD: + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; + goto restart_search; + case POWER_STATE_TYPE_INTERNAL_UVD_HD: + case POWER_STATE_TYPE_INTERNAL_UVD_HD2: + case POWER_STATE_TYPE_INTERNAL_UVD_MVC: + if (adev->pm.dpm.uvd_ps) { + return adev->pm.dpm.uvd_ps; + } else { + dpm_state = POWER_STATE_TYPE_PERFORMANCE; + goto restart_search; + } + case POWER_STATE_TYPE_INTERNAL_THERMAL: + dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; + goto restart_search; + case POWER_STATE_TYPE_INTERNAL_ACPI: + dpm_state = POWER_STATE_TYPE_BATTERY; + goto restart_search; + case POWER_STATE_TYPE_BATTERY: + case POWER_STATE_TYPE_BALANCED: + case POWER_STATE_TYPE_INTERNAL_3DPERF: + dpm_state = POWER_STATE_TYPE_PERFORMANCE; + goto restart_search; + default: + break; + } + + return NULL; +} + +int amdgpu_dpm_change_power_state_locked(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ps *ps; + enum amd_pm_state_type dpm_state; + int ret; + bool equal = false; + + /* if dpm init failed */ + if (!adev->pm.dpm_enabled) + return 0; + + if (adev->pm.dpm.user_state != adev->pm.dpm.state) { + /* add other state override checks here */ + if ((!adev->pm.dpm.thermal_active) && + (!adev->pm.dpm.uvd_active)) + adev->pm.dpm.state = adev->pm.dpm.user_state; + } + dpm_state = adev->pm.dpm.state; + + ps = amdgpu_dpm_pick_power_state(adev, dpm_state); + if (ps) + adev->pm.dpm.requested_ps = ps; + else + return -EINVAL; + + if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { + printk("switching from power state:\n"); + amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); + printk("switching to power state:\n"); + amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); + } + + /* update whether vce is active */ + ps->vce_active = adev->pm.dpm.vce_active; + if (adev->powerplay.pp_funcs->display_configuration_changed) + amdgpu_dpm_display_configuration_changed(adev); + + ret = amdgpu_dpm_pre_set_power_state(adev); + if (ret) + return ret; + + if (adev->powerplay.pp_funcs->check_state_equal) { + if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) + equal = false; + } + + if (equal) + return 0; + + if (adev->powerplay.pp_funcs->set_power_state) + adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle); + + amdgpu_dpm_post_set_power_state(adev); + + adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; + adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; + + if (adev->powerplay.pp_funcs->force_performance_level) { + if (adev->pm.dpm.thermal_active) { + enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; + /* force low perf level for thermal */ + amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); + /* save the user's level */ + adev->pm.dpm.forced_level = level; + } else { + /* otherwise, user selected level */ + amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); + } + } + + return 0; +} diff --git a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h 
new file mode 100644 index 000000000000..7ac30f1aed20 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h @@ -0,0 +1,37 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __LEGACY_DPM_H__ +#define __LEGACY_DPM_H__ + +void amdgpu_dpm_print_class_info(u32 class, u32 class2); +void amdgpu_dpm_print_cap_info(u32 caps); +void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, + struct amdgpu_ps *rps); +int amdgpu_get_platform_caps(struct amdgpu_device *adev); +int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); +void amdgpu_free_extended_power_table(struct amdgpu_device *adev); +void amdgpu_add_thermal_controller(struct amdgpu_device *adev); +struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx); +int amdgpu_dpm_change_power_state_locked(void *handle); +void amdgpu_pm_print_power_states(struct amdgpu_device *adev); +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c index 2823376797d3..7becb9e9275b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c @@ -37,6 +37,7 @@ #include #include #include +#include #define MC_CG_ARB_FREQ_F0 0x0a #define MC_CG_ARB_FREQ_F1 0x0b @@ -7800,7 +7801,7 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); return ret; } @@ -7848,7 +7849,7 @@ static int si_dpm_resume(void *handle) adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); if (adev->pm.dpm_enabled) - amdgpu_pm_compute_clocks(adev); + amdgpu_dpm_compute_clocks(adev); } return 0; } @@ -8101,6 +8102,7 @@ static const struct amd_pm_funcs si_dpm_funcs = { .check_state_equal = &si_check_state_equal, .get_vce_clock_state = amdgpu_get_vce_clock_state, .read_sensor = &si_dpm_read_sensor, + .change_power_state = amdgpu_dpm_change_power_state_locked, }; static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = { -- cgit From d698a2c4859de2d4d42d2f3c3806d6dce821d663 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 15 Nov 2021 15:24:05 +0800 Subject: drm/amd/pm: move pp_force_state_enabled member to amdgpu_pm structure It labels an internal pm state, and the amdgpu_pm structure is a more proper place for it than the amdgpu_device structure.
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/pm/amdgpu_pm.c | 6 +++--- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 2 ++ 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 182d673103ed..d6dff14e7a2d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -949,7 +949,6 @@ struct amdgpu_device { /* powerplay */ struct amd_powerplay powerplay; - bool pp_force_state_enabled; /* smu */ struct smu_context smu; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index b16bb67b93f1..cd32f4ed373e 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -469,7 +469,7 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - if (adev->pp_force_state_enabled) + if (adev->pm.pp_force_state_enabled) return amdgpu_get_pp_cur_state(dev, attr, buf); else return sysfs_emit(buf, "\n"); @@ -492,7 +492,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, if (adev->in_suspend && !adev->in_runpm) return -EPERM; - adev->pp_force_state_enabled = false; + adev->pm.pp_force_state_enabled = false; if (strlen(buf) == 1) return count; @@ -523,7 +523,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev, if (ret) goto err_out; - adev->pp_force_state_enabled = true; + adev->pm.pp_force_state_enabled = true; } pm_runtime_mark_last_busy(ddev->dev); diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 1958e0e488b0..7afff23305a5 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -343,6 +343,8 @@ struct amdgpu_pm { * 0 = disabled (default), otherwise enable corresponding debug mode */ uint32_t smu_debug_mask; + + bool pp_force_state_enabled; }; #define R600_SSTU_DFLT 0 -- cgit From 6ddbd37f10749830e0a6ddf839ca4313a007d3f5 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 17 Nov 2021 10:46:24 +0800 Subject: drm/amd/pm: optimize the amdgpu_pm_compute_clocks() implementations Drop cross calls and multi-function APIs. Also avoid exposing internal implementation details.
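In short, amdgpu_dpm_compute_clocks() becomes a thin dispatcher and each power backend (powerplay, legacy SI/KV dpm) registers its own pm_compute_clocks hook. A condensed sketch of the resulting call flow, for readability only; the authoritative code is in the diff below:

void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

	if (!adev->pm.dpm_enabled)
		return;

	if (!pp_funcs->pm_compute_clocks)
		return;

	/* dispatch to whichever backend registered .pm_compute_clocks */
	pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
}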
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 2 +- drivers/gpu/drm/amd/pm/Makefile | 2 +- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 202 +++-------------------- drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c | 94 +++++++++++ drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 2 - drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h | 32 ++++ drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 39 ++++- drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c | 6 +- drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c | 60 ++++++- drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h | 3 +- drivers/gpu/drm/amd/pm/powerplay/si_dpm.c | 41 ++++- 11 files changed, 289 insertions(+), 194 deletions(-) create mode 100644 drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c create mode 100644 drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index cdf724dcf832..7919e96e772b 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -404,7 +404,7 @@ struct amd_pm_funcs { int (*get_dpm_clock_table)(void *handle, struct dpm_clocks *clock_table); int (*get_smu_prv_buf_details)(void *handle, void **addr, size_t *size); - int (*change_power_state)(void *handle); + void (*pm_compute_clocks)(void *handle); }; struct metrics_table_header { diff --git a/drivers/gpu/drm/amd/pm/Makefile b/drivers/gpu/drm/amd/pm/Makefile index 8cf6eff1ea93..d35ffde387f1 100644 --- a/drivers/gpu/drm/amd/pm/Makefile +++ b/drivers/gpu/drm/amd/pm/Makefile @@ -40,7 +40,7 @@ AMD_PM = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/,$(PM_LIBS))) include $(AMD_PM) -PM_MGR = amdgpu_dpm.o amdgpu_pm.o +PM_MGR = amdgpu_dpm.o amdgpu_pm.o amdgpu_dpm_internal.o AMD_PM_POWER = $(addprefix $(AMD_PM_PATH)/,$(PM_MGR)) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 438c56df9f23..fd2a7a2edf7d 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -37,73 +37,6 @@ #define amdgpu_dpm_enable_bapm(adev, e) \ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) -static void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) -{ - struct drm_device *ddev = adev_to_drm(adev); - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - - adev->pm.dpm.new_active_crtcs = 0; - adev->pm.dpm.new_active_crtc_count = 0; - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, - &ddev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (amdgpu_crtc->enabled) { - adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); - adev->pm.dpm.new_active_crtc_count++; - } - } - } -} - -u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - u32 vblank_in_pixels; - u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ - - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - vblank_in_pixels = - amdgpu_crtc->hw_mode.crtc_htotal * - (amdgpu_crtc->hw_mode.crtc_vblank_end - - amdgpu_crtc->hw_mode.crtc_vdisplay + - (amdgpu_crtc->v_border * 2)); - - 
vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; - break; - } - } - } - - return vblank_time_us; -} - -static u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) -{ - struct drm_device *dev = adev_to_drm(adev); - struct drm_crtc *crtc; - struct amdgpu_crtc *amdgpu_crtc; - u32 vrefresh = 0; - - if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { - list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { - amdgpu_crtc = to_amdgpu_crtc(crtc); - if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { - vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); - break; - } - } - } - - return vrefresh; -} - int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; @@ -432,115 +365,38 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso return ret; } -void amdgpu_dpm_thermal_work_handler(struct work_struct *work) -{ - struct amdgpu_device *adev = - container_of(work, struct amdgpu_device, - pm.dpm.thermal.work); - /* switch to the thermal state */ - enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; - int temp, size = sizeof(temp); - - if (!adev->pm.dpm_enabled) - return; - - if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, - (void *)&temp, &size)) { - if (temp < adev->pm.dpm.thermal.min_temp) - /* switch back the user state */ - dpm_state = adev->pm.dpm.user_state; - } else { - if (adev->pm.dpm.thermal.high_to_low) - /* switch back the user state */ - dpm_state = adev->pm.dpm.user_state; - } - mutex_lock(&adev->pm.mutex); - if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) - adev->pm.dpm.thermal_active = true; - else - adev->pm.dpm.thermal_active = false; - adev->pm.dpm.state = dpm_state; - mutex_unlock(&adev->pm.mutex); - - amdgpu_dpm_compute_clocks(adev); -} - void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) { - int i = 0; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; if (!adev->pm.dpm_enabled) return; - if (adev->mode_info.num_crtc) - amdgpu_display_bandwidth_update(adev); - - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (ring && ring->sched.ready) - amdgpu_fence_wait_empty(ring); - } + if (!pp_funcs->pm_compute_clocks) + return; - if ((adev->family == AMDGPU_FAMILY_SI) || - (adev->family == AMDGPU_FAMILY_KV)) { - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_get_active_displays(adev); - adev->powerplay.pp_funcs->change_power_state(adev->powerplay.pp_handle); - mutex_unlock(&adev->pm.mutex); - } else { - if (!amdgpu_device_has_dc_support(adev)) { - mutex_lock(&adev->pm.mutex); - amdgpu_dpm_get_active_displays(adev); - adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; - adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); - adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); - /* we have issues with mclk switching with - * refresh rates over 120 hz on the non-DC code. 
- */ - if (adev->pm.pm_display_cfg.vrefresh > 120) - adev->pm.pm_display_cfg.min_vblank_time = 0; - if (adev->powerplay.pp_funcs->display_configuration_change) - adev->powerplay.pp_funcs->display_configuration_change( - adev->powerplay.pp_handle, - &adev->pm.pm_display_cfg); - mutex_unlock(&adev->pm.mutex); - } - amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); - } + pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); } void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) { int ret = 0; - if (adev->family == AMDGPU_FAMILY_SI) { - mutex_lock(&adev->pm.mutex); - if (enable) { - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; - } else { - adev->pm.dpm.uvd_active = false; - } - mutex_unlock(&adev->pm.mutex); + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); + if (ret) + DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", + enable ? "enable" : "disable", ret); - amdgpu_dpm_compute_clocks(adev); - } else { - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); - if (ret) - DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", - enable ? "enable" : "disable", ret); - - /* enable/disable Low Memory PState for UVD (4k videos) */ - if (adev->asic_type == CHIP_STONEY && - adev->uvd.decode_image_width >= WIDTH_4K) { - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - - if (hwmgr && hwmgr->hwmgr_func && - hwmgr->hwmgr_func->update_nbdpm_pstate) - hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, - !enable, - true); - } + /* enable/disable Low Memory PState for UVD (4k videos) */ + if (adev->asic_type == CHIP_STONEY && + adev->uvd.decode_image_width >= WIDTH_4K) { + struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + + if (hwmgr && hwmgr->hwmgr_func && + hwmgr->hwmgr_func->update_nbdpm_pstate) + hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, + !enable, + true); } } @@ -548,24 +404,10 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) { int ret = 0; - if (adev->family == AMDGPU_FAMILY_SI) { - mutex_lock(&adev->pm.mutex); - if (enable) { - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on ring/task */ - adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; - } else { - adev->pm.dpm.vce_active = false; - } - mutex_unlock(&adev->pm.mutex); - - amdgpu_dpm_compute_clocks(adev); - } else { - ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); - if (ret) - DRM_ERROR("Dpm %s vce failed, ret = %d. \n", - enable ? "enable" : "disable", ret); - } + ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); + if (ret) + DRM_ERROR("Dpm %s vce failed, ret = %d. \n", + enable ? "enable" : "disable", ret); } void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c new file mode 100644 index 000000000000..ba5f6413412d --- /dev/null +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c @@ -0,0 +1,94 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include "amdgpu.h" +#include "amdgpu_display.h" +#include "hwmgr.h" +#include "amdgpu_smu.h" + +void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev) +{ + struct drm_device *ddev = adev_to_drm(adev); + struct drm_crtc *crtc; + struct amdgpu_crtc *amdgpu_crtc; + + adev->pm.dpm.new_active_crtcs = 0; + adev->pm.dpm.new_active_crtc_count = 0; + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, + &ddev->mode_config.crtc_list, head) { + amdgpu_crtc = to_amdgpu_crtc(crtc); + if (amdgpu_crtc->enabled) { + adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id); + adev->pm.dpm.new_active_crtc_count++; + } + } + } +} + +u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_crtc *crtc; + struct amdgpu_crtc *amdgpu_crtc; + u32 vblank_in_pixels; + u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */ + + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + amdgpu_crtc = to_amdgpu_crtc(crtc); + if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { + vblank_in_pixels = + amdgpu_crtc->hw_mode.crtc_htotal * + (amdgpu_crtc->hw_mode.crtc_vblank_end - + amdgpu_crtc->hw_mode.crtc_vdisplay + + (amdgpu_crtc->v_border * 2)); + + vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock; + break; + } + } + } + + return vblank_time_us; +} + +u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev) +{ + struct drm_device *dev = adev_to_drm(adev); + struct drm_crtc *crtc; + struct amdgpu_crtc *amdgpu_crtc; + u32 vrefresh = 0; + + if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) { + list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { + amdgpu_crtc = to_amdgpu_crtc(crtc); + if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) { + vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode); + break; + } + } + } + + return vrefresh; +} diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 7afff23305a5..63302b3aebe0 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -428,8 +428,6 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum 
amd_pp_sensors sensor, void *data, uint32_t *size); -void amdgpu_dpm_thermal_work_handler(struct work_struct *work); - void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev); void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h new file mode 100644 index 000000000000..5c2a89f0d5d5 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h @@ -0,0 +1,32 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __AMDGPU_DPM_INTERNAL_H__ +#define __AMDGPU_DPM_INTERNAL_H__ + +void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev); + +u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); + +u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev); + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 3ab67b232cd4..6a06a1f0b79b 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -31,7 +31,8 @@ #include "power_state.h" #include "amdgpu.h" #include "hwmgr.h" - +#include "amdgpu_dpm_internal.h" +#include "amdgpu_display.h" static const struct amd_pm_funcs pp_dpm_funcs; @@ -1683,6 +1684,41 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size) return 0; } +static void pp_pm_compute_clocks(void *handle) +{ + struct pp_hwmgr *hwmgr = handle; + struct amdgpu_device *adev = hwmgr->adev; + int i = 0; + + if (adev->mode_info.num_crtc) + amdgpu_display_bandwidth_update(adev); + + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->sched.ready) + amdgpu_fence_wait_empty(ring); + } + + if (!amdgpu_device_has_dc_support(adev)) { + amdgpu_dpm_get_active_displays(adev); + adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; + adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); + adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); + /* we have issues with mclk switching with + * refresh rates over 120 hz on the non-DC code. 
+ */ + if (adev->pm.pm_display_cfg.vrefresh > 120) + adev->pm.pm_display_cfg.min_vblank_time = 0; + + pp_display_configuration_change(handle, + &adev->pm.pm_display_cfg); + } + + pp_dpm_dispatch_tasks(handle, + AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, + NULL); +} + static const struct amd_pm_funcs pp_dpm_funcs = { .load_firmware = pp_dpm_load_fw, .wait_for_fw_loading_complete = pp_dpm_fw_loading_complete, @@ -1747,4 +1783,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = { .get_gpu_metrics = pp_get_gpu_metrics, .gfx_state_change_set = pp_gfx_state_change_set, .get_smu_prv_buf_details = pp_get_prv_buffer_details, + .pm_compute_clocks = pp_pm_compute_clocks, }; diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c index eed89835231c..72824ef61edd 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c @@ -3088,7 +3088,7 @@ static int kv_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - amdgpu_dpm_compute_clocks(adev); + amdgpu_legacy_dpm_compute_clocks(adev); return ret; } @@ -3136,7 +3136,7 @@ static int kv_dpm_resume(void *handle) adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); if (adev->pm.dpm_enabled) - amdgpu_dpm_compute_clocks(adev); + amdgpu_legacy_dpm_compute_clocks(adev); } return 0; } @@ -3390,7 +3390,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = { .get_vce_clock_state = amdgpu_get_vce_clock_state, .check_state_equal = kv_check_state_equal, .read_sensor = &kv_dpm_read_sensor, - .change_power_state = amdgpu_dpm_change_power_state_locked, + .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, }; static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { diff --git a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c index 67c84b7ad8e7..3c6ee493e410 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c @@ -26,6 +26,8 @@ #include "atom.h" #include "amd_pcie.h" #include "legacy_dpm.h" +#include "amdgpu_dpm_internal.h" +#include "amdgpu_display.h" #define amdgpu_dpm_pre_set_power_state(adev) \ ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) @@ -949,9 +951,8 @@ restart_search: return NULL; } -int amdgpu_dpm_change_power_state_locked(void *handle) +static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) { - struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct amdgpu_ps *ps; enum amd_pm_state_type dpm_state; int ret; @@ -1022,3 +1023,58 @@ int amdgpu_dpm_change_power_state_locked(void *handle) return 0; } + +void amdgpu_legacy_dpm_compute_clocks(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i = 0; + + if (adev->mode_info.num_crtc) + amdgpu_display_bandwidth_update(adev); + + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->sched.ready) + amdgpu_fence_wait_empty(ring); + } + + amdgpu_dpm_get_active_displays(adev); + + amdgpu_dpm_change_power_state_locked(adev); +} + +void amdgpu_dpm_thermal_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, + pm.dpm.thermal.work); + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + /* switch to the thermal state */ + enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; + int temp, size = sizeof(temp); + + if (!adev->pm.dpm_enabled) + return; + + if 
(!pp_funcs->read_sensor(adev->powerplay.pp_handle, + AMDGPU_PP_SENSOR_GPU_TEMP, + (void *)&temp, + &size)) { + if (temp < adev->pm.dpm.thermal.min_temp) + /* switch back the user state */ + dpm_state = adev->pm.dpm.user_state; + } else { + if (adev->pm.dpm.thermal.high_to_low) + /* switch back the user state */ + dpm_state = adev->pm.dpm.user_state; + } + + if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) + adev->pm.dpm.thermal_active = true; + else + adev->pm.dpm.thermal_active = false; + + adev->pm.dpm.state = dpm_state; + + amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle); +} diff --git a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h index 7ac30f1aed20..93bd3973330c 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h +++ b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h @@ -32,6 +32,7 @@ int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); void amdgpu_free_extended_power_table(struct amdgpu_device *adev); void amdgpu_add_thermal_controller(struct amdgpu_device *adev); struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx); -int amdgpu_dpm_change_power_state_locked(void *handle); void amdgpu_pm_print_power_states(struct amdgpu_device *adev); +void amdgpu_legacy_dpm_compute_clocks(void *handle); +void amdgpu_dpm_thermal_work_handler(struct work_struct *work); #endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c index 7becb9e9275b..9f8cc81cb7ca 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c @@ -3891,6 +3891,40 @@ static int si_set_boot_state(struct amdgpu_device *adev) } #endif +static int si_set_powergating_by_smu(void *handle, + uint32_t block_type, + bool gate) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (block_type) { + case AMD_IP_BLOCK_TYPE_UVD: + if (!gate) { + adev->pm.dpm.uvd_active = true; + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; + } else { + adev->pm.dpm.uvd_active = false; + } + + amdgpu_legacy_dpm_compute_clocks(handle); + break; + case AMD_IP_BLOCK_TYPE_VCE: + if (!gate) { + adev->pm.dpm.vce_active = true; + /* XXX select vce level based on ring/task */ + adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; + } else { + adev->pm.dpm.vce_active = false; + } + + amdgpu_legacy_dpm_compute_clocks(handle); + break; + default: + break; + } + return 0; +} + static int si_set_sw_state(struct amdgpu_device *adev) { return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ? 
@@ -7801,7 +7835,7 @@ static int si_dpm_hw_init(void *handle) else adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); - amdgpu_dpm_compute_clocks(adev); + amdgpu_legacy_dpm_compute_clocks(adev); return ret; } @@ -7849,7 +7883,7 @@ static int si_dpm_resume(void *handle) adev->pm.dpm_enabled = true; mutex_unlock(&adev->pm.mutex); if (adev->pm.dpm_enabled) - amdgpu_dpm_compute_clocks(adev); + amdgpu_legacy_dpm_compute_clocks(adev); } return 0; } @@ -8094,6 +8128,7 @@ static const struct amd_pm_funcs si_dpm_funcs = { .print_power_state = &si_dpm_print_power_state, .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, .force_performance_level = &si_dpm_force_performance_level, + .set_powergating_by_smu = &si_set_powergating_by_smu, .vblank_too_short = &si_dpm_vblank_too_short, .set_fan_control_mode = &si_dpm_set_fan_control_mode, .get_fan_control_mode = &si_dpm_get_fan_control_mode, @@ -8102,7 +8137,7 @@ static const struct amd_pm_funcs si_dpm_funcs = { .check_state_equal = &si_check_state_equal, .get_vce_clock_state = amdgpu_get_vce_clock_state, .read_sensor = &si_dpm_read_sensor, - .change_power_state = amdgpu_dpm_change_power_state_locked, + .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, }; static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = { -- cgit From a627967e803e58c5525ac92a4af0d15379189a32 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 17 Nov 2021 11:29:05 +0800 Subject: drm/amd/pm: move those code piece used by Stoney only to smu8_hwmgr.c Instead of putting them in amdgpu_dpm.c. Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 14 -------------- drivers/gpu/drm/amd/pm/inc/hwmgr.h | 3 --- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 10 +++++++++- 3 files changed, 9 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index fd2a7a2edf7d..88909b7c40db 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -32,8 +32,6 @@ #include "hwmgr.h" #include -#define WIDTH_4K 3840 - #define amdgpu_dpm_enable_bapm(adev, e) \ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) @@ -386,18 +384,6 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) if (ret) DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", enable ? 
"enable" : "disable", ret); - - /* enable/disable Low Memory PState for UVD (4k videos) */ - if (adev->asic_type == CHIP_STONEY && - adev->uvd.decode_image_width >= WIDTH_4K) { - struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - - if (hwmgr && hwmgr->hwmgr_func && - hwmgr->hwmgr_func->update_nbdpm_pstate) - hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, - !enable, - true); - } } void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) diff --git a/drivers/gpu/drm/amd/pm/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/inc/hwmgr.h index 8ed01071fe5a..03226baea65e 100644 --- a/drivers/gpu/drm/amd/pm/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/inc/hwmgr.h @@ -331,9 +331,6 @@ struct pp_hwmgr_func { uint32_t mc_addr_low, uint32_t mc_addr_hi, uint32_t size); - int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr, - bool enable, - bool lock); int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf); diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c index 03bf8f069222..b50fd4a4a3d1 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c @@ -1950,9 +1950,12 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate) smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL); } +#define WIDTH_4K 3840 + static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) { struct smu8_hwmgr *data = hwmgr->backend; + struct amdgpu_device *adev = hwmgr->adev; data->uvd_power_gated = bgate; @@ -1976,6 +1979,12 @@ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate) smu8_dpm_update_uvd_dpm(hwmgr, false); } + /* enable/disable Low Memory PState for UVD (4k videos) */ + if (adev->asic_type == CHIP_STONEY && + adev->uvd.decode_image_width >= WIDTH_4K) + smu8_nbdpm_pstate_enable_disable(hwmgr, + bgate, + true); } static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate) @@ -2037,7 +2046,6 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = { .power_state_set = smu8_set_power_state_tasks, .dynamic_state_management_disable = smu8_disable_dpm_tasks, .notify_cac_buffer_info = smu8_notify_cac_buffer_info, - .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable, .get_thermal_temperature_range = smu8_get_thermal_temperature_range, }; -- cgit From 7689dab48259073ea13d64d32365b77860b84e7f Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 11 Nov 2021 12:03:14 +0800 Subject: drm/amd/pm: drop redundant or unused APIs and data structures Drop those unused APIs and data structures. 
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 49 --------------------------------- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 4 +++ 2 files changed, 4 insertions(+), 49 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 63302b3aebe0..ba857ca75392 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -88,19 +88,6 @@ struct amdgpu_dpm_thermal { struct amdgpu_irq_src irq; }; -enum amdgpu_clk_action -{ - AMDGPU_SCLK_UP = 1, - AMDGPU_SCLK_DOWN -}; - -struct amdgpu_blacklist_clocks -{ - u32 sclk; - u32 mclk; - enum amdgpu_clk_action action; -}; - struct amdgpu_clock_and_voltage_limits { u32 sclk; u32 mclk; @@ -239,10 +226,6 @@ struct amdgpu_dpm_fan { bool ucode_fan_control; }; -#define amdgpu_dpm_reset_power_profile_state(adev, request) \ - ((adev)->powerplay.pp_funcs->reset_power_profile_state(\ - (adev)->powerplay.pp_handle, request)) - struct amdgpu_dpm { struct amdgpu_ps *ps; /* number of valid power states */ @@ -347,35 +330,6 @@ struct amdgpu_pm { bool pp_force_state_enabled; }; -#define R600_SSTU_DFLT 0 -#define R600_SST_DFLT 0x00C8 - -/* XXX are these ok? */ -#define R600_TEMP_RANGE_MIN (90 * 1000) -#define R600_TEMP_RANGE_MAX (120 * 1000) - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - -enum amdgpu_td { - AMDGPU_TD_AUTO, - AMDGPU_TD_UP, - AMDGPU_TD_DOWN, -}; - -enum amdgpu_display_watermark { - AMDGPU_DISPLAY_WATERMARK_LOW = 0, - AMDGPU_DISPLAY_WATERMARK_HIGH = 1, -}; - -enum amdgpu_display_gap -{ - AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, - AMDGPU_PM_DISPLAY_GAP_VBLANK = 1, - AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2, - AMDGPU_PM_DISPLAY_GAP_IGNORE = 3, -}; - u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, void *data, uint32_t *size); @@ -425,9 +379,6 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev); -int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, - void *data, uint32_t *size); - void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev); void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable); void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index beea03810bca..67a25da79256 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -26,6 +26,10 @@ #include "amdgpu_smu.h" #if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4) + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + int smu_cmn_send_msg_without_waiting(struct smu_context *smu, uint16_t msg_index, uint32_t param); -- cgit From ebfc253335af81db2e40e6e8ed17cd76edf9080f Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 22 Nov 2021 13:11:06 +0800 Subject: drm/amd/pm: do not expose the smu_context structure used internally in power This can cover the power implementation details. And as what did for powerplay framework, we hook the smu_context to adev->powerplay.pp_handle. 
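A minimal consumer-side sketch of that idea, assuming the usual amdgpu build context (amdgpu.h plus amdgpu_smu.h); the helper name example_smu_is_apu() is hypothetical and not taken from the patch. Outside the swsmu layer the SMU context is now reached only through the opaque powerplay handle, which is the same pattern the amdgpu_dpm.c hunks below apply:

static bool example_smu_is_apu(struct amdgpu_device *adev)
{
	/* pp_handle is a void *; only power-management code that includes
	 * amdgpu_smu.h knows the concrete struct smu_context layout. */
	struct smu_context *smu = adev->powerplay.pp_handle;

	return smu && smu->is_apu;
}

Keeping the handle opaque is what allows amdgpu.h to drop its amdgpu_smu.h include and the embedded smu member from struct amdgpu_device, as shown in the diff that follows.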
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 6 --- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 3 ++ drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 50 +++++++++++++++------- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 2 +- drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 4 -- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 48 +++++++++++++-------- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 9 ++-- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 9 ++-- .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 9 ++-- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 4 +- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 9 ++-- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 8 ++-- 12 files changed, 94 insertions(+), 67 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d6dff14e7a2d..f0132a5cc58d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -99,7 +99,6 @@ #include "amdgpu_gem.h" #include "amdgpu_doorbell.h" #include "amdgpu_amdkfd.h" -#include "amdgpu_smu.h" #include "amdgpu_discovery.h" #include "amdgpu_mes.h" #include "amdgpu_umc.h" @@ -949,11 +948,6 @@ struct amdgpu_device { /* powerplay */ struct amd_powerplay powerplay; - - /* smu */ - struct smu_context smu; - - /* dpm */ struct amdgpu_pm pm; u32 cg_flags; u32 pg_flags; diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index 7919e96e772b..a8eec91c0995 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -25,6 +25,9 @@ #define __KGD_PP_INTERFACE_H__ extern const struct amdgpu_ip_block_version pp_smu_ip_block; +extern const struct amdgpu_ip_block_version smu_v11_0_ip_block; +extern const struct amdgpu_ip_block_version smu_v12_0_ip_block; +extern const struct amdgpu_ip_block_version smu_v13_0_ip_block; enum smu_event_type { SMU_EVENT_RESET_COMPLETE = 0, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 88909b7c40db..2756f52b74c1 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -31,6 +31,7 @@ #include "amdgpu_display.h" #include "hwmgr.h" #include +#include "amdgpu_smu.h" #define amdgpu_dpm_enable_bapm(adev, e) \ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e))) @@ -213,7 +214,7 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) { - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (is_support_sw_smu(adev)) return smu_mode1_reset_is_support(smu); @@ -223,7 +224,7 @@ bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) { - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (is_support_sw_smu(adev)) return smu_mode1_reset(smu); @@ -276,7 +277,7 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) { - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (is_support_sw_smu(adev)) return smu_allow_xgmi_power_down(smu, en); @@ -341,7 +342,7 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) mutex_unlock(&adev->pm.mutex); if (is_support_sw_smu(adev)) - smu_set_ac_dc(&adev->smu); + 
smu_set_ac_dc(adev->powerplay.pp_handle); } } @@ -426,12 +427,14 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) { - return smu_handle_passthrough_sbr(&adev->smu, enable); + return smu_handle_passthrough_sbr(adev->powerplay.pp_handle, enable); } int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) { - return smu_send_hbm_bad_pages_num(&adev->smu, size); + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu_send_hbm_bad_pages_num(smu, size); } int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, @@ -444,7 +447,7 @@ int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, switch (type) { case PP_SCLK: - return smu_get_dpm_freq_range(&adev->smu, SMU_SCLK, min, max); + return smu_get_dpm_freq_range(adev->powerplay.pp_handle, SMU_SCLK, min, max); default: return -EINVAL; } @@ -455,12 +458,14 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, uint32_t min, uint32_t max) { + struct smu_context *smu = adev->powerplay.pp_handle; + if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; switch (type) { case PP_SCLK: - return smu_set_soft_freq_range(&adev->smu, SMU_SCLK, min, max); + return smu_set_soft_freq_range(smu, SMU_SCLK, min, max); default: return -EINVAL; } @@ -468,33 +473,41 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) { + struct smu_context *smu = adev->powerplay.pp_handle; + if (!is_support_sw_smu(adev)) return 0; - return smu_write_watermarks_table(&adev->smu); + return smu_write_watermarks_table(smu); } int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event, uint64_t event_arg) { + struct smu_context *smu = adev->powerplay.pp_handle; + if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - return smu_wait_for_event(&adev->smu, event, event_arg); + return smu_wait_for_event(smu, event, event_arg); } int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) { + struct smu_context *smu = adev->powerplay.pp_handle; + if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - return smu_get_status_gfxoff(&adev->smu, value); + return smu_get_status_gfxoff(smu, value); } uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) { - return atomic64_read(&adev->smu.throttle_int_counter); + struct smu_context *smu = adev->powerplay.pp_handle; + + return atomic64_read(&smu->throttle_int_counter); } /* amdgpu_dpm_gfx_state_change - Handle gfx power state change set @@ -516,10 +529,12 @@ void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev, int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev, void *umc_ecc) { + struct smu_context *smu = adev->powerplay.pp_handle; + if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - return smu_get_ecc_info(&adev->smu, umc_ecc); + return smu_get_ecc_info(smu, umc_ecc); } struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, @@ -943,9 +958,10 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) { struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; + struct smu_context *smu = adev->powerplay.pp_handle; - if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || - (is_support_sw_smu(adev) && adev->smu.is_apu) || + if ((is_support_sw_smu(adev) && smu->od_enabled) || + (is_support_sw_smu(adev) && smu->is_apu) || 
(!is_support_sw_smu(adev) && hwmgr->od_enabled)) return true; @@ -968,7 +984,9 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) { - return adev->smu.cpu_core_num; + struct smu_context *smu = adev->powerplay.pp_handle; + + return smu->cpu_core_num; } void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index cd32f4ed373e..b14b004577e5 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -2869,7 +2869,7 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev, int limit_type = to_sensor_dev_attr(attr)->index; return sysfs_emit(buf, "%s\n", - limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT"); + limit_type == PP_PWR_TYPE_FAST ? "fastPPT" : "slowPPT"); } static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h index b90ed0ec9322..8a689baeaf82 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h @@ -1390,10 +1390,6 @@ int smu_mode1_reset(struct smu_context *smu); extern const struct amd_ip_funcs smu_ip_funcs; -extern const struct amdgpu_ip_block_version smu_v11_0_ip_block; -extern const struct amdgpu_ip_block_version smu_v12_0_ip_block; -extern const struct amdgpu_ip_block_version smu_v13_0_ip_block; - bool is_support_sw_smu(struct amdgpu_device *adev); bool is_support_cclk_dpm(struct amdgpu_device *adev); int smu_write_watermarks_table(struct smu_context *smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 7191b3928e8e..c898ea67354a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -468,7 +468,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev) bool is_support_cclk_dpm(struct amdgpu_device *adev) { - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT)) return false; @@ -572,7 +572,7 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu) static int smu_set_funcs(struct amdgpu_device *adev) { - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) smu->od_enabled = true; @@ -624,7 +624,11 @@ static int smu_set_funcs(struct amdgpu_device *adev) static int smu_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct smu_context *smu; + + smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL); + if (!smu) + return -ENOMEM; smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; @@ -684,7 +688,7 @@ err0_out: static int smu_late_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; int ret = 0; smu_set_fine_grain_gfx_freq_parameters(smu); @@ -730,7 +734,7 @@ static int smu_late_init(void *handle) smu_get_fan_parameters(smu); - smu_handle_task(&adev->smu, + smu_handle_task(smu, smu->smu_dpm.dpm_level, AMD_PP_TASK_COMPLETE_INIT, false); @@ -1020,7 +1024,7 @@ static void smu_interrupt_work_fn(struct work_struct *work) static int smu_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct 
smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; int ret; smu->pool_size = adev->pm.smu_prv_buffer_size; @@ -1095,7 +1099,7 @@ static int smu_sw_init(void *handle) static int smu_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; int ret; ret = smu_smc_table_sw_fini(smu); @@ -1330,7 +1334,7 @@ static int smu_hw_init(void *handle) { int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) { smu->pm_enabled = false; @@ -1346,7 +1350,7 @@ static int smu_hw_init(void *handle) if (smu->is_apu) { smu_dpm_set_vcn_enable(smu, true); smu_dpm_set_jpeg_enable(smu, true); - smu_set_gfx_cgpg(&adev->smu, true); + smu_set_gfx_cgpg(smu, true); } if (!smu->pm_enabled) @@ -1506,7 +1510,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu) static int smu_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) return 0; @@ -1525,6 +1529,14 @@ static int smu_hw_fini(void *handle) return smu_smc_hw_cleanup(smu); } +static void smu_late_fini(void *handle) +{ + struct amdgpu_device *adev = handle; + struct smu_context *smu = adev->powerplay.pp_handle; + + kfree(smu); +} + static int smu_reset(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; @@ -1552,7 +1564,7 @@ static int smu_reset(struct smu_context *smu) static int smu_suspend(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; int ret; if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) @@ -1569,7 +1581,7 @@ static int smu_suspend(void *handle) smu->watermarks_bitmap &= ~(WATERMARKS_LOADED); - smu_set_gfx_cgpg(&adev->smu, false); + smu_set_gfx_cgpg(smu, false); return 0; } @@ -1578,7 +1590,7 @@ static int smu_resume(void *handle) { int ret; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (amdgpu_sriov_vf(adev)&& !amdgpu_sriov_is_pp_one_vf(adev)) return 0; @@ -1600,7 +1612,7 @@ static int smu_resume(void *handle) return ret; } - smu_set_gfx_cgpg(&adev->smu, true); + smu_set_gfx_cgpg(smu, true); smu->disable_uclk_switch = 0; @@ -2132,6 +2144,7 @@ const struct amd_ip_funcs smu_ip_funcs = { .sw_fini = smu_sw_fini, .hw_init = smu_hw_init, .hw_fini = smu_hw_fini, + .late_fini = smu_late_fini, .suspend = smu_suspend, .resume = smu_resume, .is_idle = NULL, @@ -3196,7 +3209,7 @@ int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size) static int smu_stb_debugfs_open(struct inode *inode, struct file *filp) { struct amdgpu_device *adev = filp->f_inode->i_private; - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; unsigned char *buf; int r; @@ -3221,7 +3234,7 @@ static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t loff_t *pos) { struct amdgpu_device *adev = filp->f_inode->i_private; - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (!filp->private_data) @@ 
-3262,7 +3275,7 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) { #if defined(CONFIG_DEBUG_FS) - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; if (!smu->stb_context.stb_buf_size) return; @@ -3274,7 +3287,6 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev) &smu_stb_debugfs_fops, smu->stb_context.stb_buf_size); #endif - } int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 505d2fb94fd9..0a1547c27305 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2072,7 +2072,8 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); - struct smu_table_context *smu_table = &adev->smu.smu_table; + struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = &smu_table->driver_table; SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; int i, j, r, c; @@ -2118,9 +2119,9 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap, } } } - mutex_lock(&adev->smu.mutex); - r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); - mutex_unlock(&adev->smu.mutex); + mutex_lock(&smu->mutex); + r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); + mutex_unlock(&smu->mutex); if (r) goto fail; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 2bb7816b245a..37e11716e919 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2779,7 +2779,8 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); - struct smu_table_context *smu_table = &adev->smu.smu_table; + struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = &smu_table->driver_table; SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; int i, j, r, c; @@ -2825,9 +2826,9 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap, } } } - mutex_lock(&adev->smu.mutex); - r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); - mutex_unlock(&adev->smu.mutex); + mutex_lock(&smu->mutex); + r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); + mutex_unlock(&smu->mutex); if (r) goto fail; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index d8c57b780bca..efe6b2eff6a0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -3459,7 +3459,8 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); - struct smu_table_context *smu_table = &adev->smu.smu_table; + struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = &smu_table->driver_table; SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; int i, j, r, c; @@ -3505,9 +3506,9 @@ static int sienna_cichlid_i2c_xfer(struct 
i2c_adapter *i2c_adap, } } } - mutex_lock(&adev->smu.mutex); - r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); - mutex_unlock(&adev->smu.mutex); + mutex_lock(&smu->mutex); + r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); + mutex_unlock(&smu->mutex); if (r) goto fail; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 4e9e2cf39859..e176e6a74f44 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1372,7 +1372,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev, unsigned tyep, enum amdgpu_interrupt_state state) { - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; uint32_t low, high; uint32_t val = 0; @@ -1441,7 +1441,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; uint32_t client_id = entry->client_id; uint32_t src_id = entry->src_id; /* diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 4885c4ae78b7..a4d46f7069bc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1475,7 +1475,8 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); - struct smu_table_context *smu_table = &adev->smu.smu_table; + struct smu_context *smu = adev->powerplay.pp_handle; + struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = &smu_table->driver_table; SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr; int i, j, r, c; @@ -1521,9 +1522,9 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, } } } - mutex_lock(&adev->smu.mutex); - r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); - mutex_unlock(&adev->smu.mutex); + mutex_lock(&smu->mutex); + r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); + mutex_unlock(&smu->mutex); if (r) goto fail; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index b54790d3483e..f1d7b4900aa1 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1200,7 +1200,7 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev, unsigned tyep, enum amdgpu_interrupt_state state) { - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; uint32_t low, high; uint32_t val = 0; @@ -1275,7 +1275,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry) { - struct smu_context *smu = &adev->smu; + struct smu_context *smu = adev->powerplay.pp_handle; uint32_t client_id = entry->client_id; uint32_t src_id = entry->src_id; /* @@ -1321,11 +1321,11 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev, switch (ctxid) { case 0x3: dev_dbg(adev->dev, "Switched to AC mode!\n"); - smu_v13_0_ack_ac_dc_interrupt(&adev->smu); + smu_v13_0_ack_ac_dc_interrupt(smu); break; case 0x4: dev_dbg(adev->dev, "Switched to DC mode!\n"); - smu_v13_0_ack_ac_dc_interrupt(&adev->smu); + smu_v13_0_ack_ac_dc_interrupt(smu); break; case 0x7: /* -- cgit From 
837d542a09cd533055423dfca7e621a9c1d13c5b Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 16 Nov 2021 13:30:53 +0800 Subject: drm/amd/pm: relocate the power related headers Instead of centralizing all headers in the same folder. Separate them into different folders and place them among those source files those who really need them. Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/Makefile | 11 +- drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h | 130 - drivers/gpu/drm/amd/pm/inc/amd_powerplay.h | 35 - drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h | 1418 ---- drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h | 134 - drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h | 186 - drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h | 412 - drivers/gpu/drm/amd/pm/inc/hardwaremanager.h | 469 -- drivers/gpu/drm/amd/pm/inc/hwmgr.h | 833 -- drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h | 1793 ----- drivers/gpu/drm/amd/pm/inc/power_state.h | 196 - drivers/gpu/drm/amd/pm/inc/pp_debug.h | 62 - drivers/gpu/drm/amd/pm/inc/pp_endian.h | 38 - drivers/gpu/drm/amd/pm/inc/pp_thermal.h | 44 - drivers/gpu/drm/amd/pm/inc/ppinterrupt.h | 46 - drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h | 96 - drivers/gpu/drm/amd/pm/inc/smu10.h | 188 - drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h | 117 - drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h | 893 --- .../gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h | 931 --- .../amd/pm/inc/smu11_driver_if_cyan_skillfish.h | 79 - .../gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h | 1220 --- .../amd/pm/inc/smu11_driver_if_sienna_cichlid.h | 1691 ---- .../gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h | 282 - drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h | 232 - .../gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h | 538 -- .../drm/amd/pm/inc/smu13_driver_if_yellow_carp.h | 222 - drivers/gpu/drm/amd/pm/inc/smu7.h | 189 - drivers/gpu/drm/amd/pm/inc/smu71.h | 510 -- drivers/gpu/drm/amd/pm/inc/smu71_discrete.h | 631 -- drivers/gpu/drm/amd/pm/inc/smu72.h | 687 -- drivers/gpu/drm/amd/pm/inc/smu72_discrete.h | 783 -- drivers/gpu/drm/amd/pm/inc/smu73.h | 720 -- drivers/gpu/drm/amd/pm/inc/smu73_discrete.h | 799 -- drivers/gpu/drm/amd/pm/inc/smu74.h | 833 -- drivers/gpu/drm/amd/pm/inc/smu74_discrete.h | 850 -- drivers/gpu/drm/amd/pm/inc/smu75.h | 760 -- drivers/gpu/drm/amd/pm/inc/smu75_discrete.h | 886 --- drivers/gpu/drm/amd/pm/inc/smu7_common.h | 54 - drivers/gpu/drm/amd/pm/inc/smu7_discrete.h | 515 -- drivers/gpu/drm/amd/pm/inc/smu7_fusion.h | 300 - drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h | 427 - drivers/gpu/drm/amd/pm/inc/smu8.h | 72 - drivers/gpu/drm/amd/pm/inc/smu8_fusion.h | 135 - drivers/gpu/drm/amd/pm/inc/smu9.h | 148 - drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h | 486 -- drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h | 194 - drivers/gpu/drm/amd/pm/inc/smu_types.h | 373 - drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h | 169 - drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h | 101 - drivers/gpu/drm/amd/pm/inc/smu_v11_0.h | 320 - drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h | 143 - drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h | 196 - drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h | 141 - drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h | 167 - drivers/gpu/drm/amd/pm/inc/smu_v11_5_pmfw.h | 123 - drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h | 119 - drivers/gpu/drm/amd/pm/inc/smu_v11_8_pmfw.h | 152 - drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h | 77 - drivers/gpu/drm/amd/pm/inc/smu_v12_0.h | 66 - drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h | 106 - drivers/gpu/drm/amd/pm/inc/smu_v13_0.h | 273 - 
drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h | 141 - drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h | 97 - drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h | 165 - drivers/gpu/drm/amd/pm/inc/smumgr.h | 118 - drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h | 420 - drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h | 144 - drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h | 767 -- drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h | 123 - drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h | 131 - drivers/gpu/drm/amd/pm/legacy-dpm/Makefile | 32 + drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h | 29 + drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 3405 ++++++++ drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h | 229 + drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c | 218 + drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 1080 +++ drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h | 38 + drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h | 200 + drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h | 127 + drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 8153 ++++++++++++++++++++ drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h | 1022 +++ drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c | 273 + drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h | 431 ++ drivers/gpu/drm/amd/pm/powerplay/Makefile | 6 +- drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h | 29 - .../gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h | 35 + drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h | 186 + drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h | 412 + .../gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h | 469 ++ drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 833 ++ .../drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h | 1793 +++++ drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h | 196 + drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h | 62 + drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h | 38 + drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h | 44 + drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h | 46 + drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h | 96 + drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h | 188 + .../gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h | 117 + .../gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h | 893 +++ drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h | 189 + drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h | 510 ++ .../gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h | 631 ++ drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h | 687 ++ .../gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h | 783 ++ drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h | 720 ++ .../gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h | 799 ++ drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h | 833 ++ .../gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h | 850 ++ drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h | 760 ++ .../gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h | 886 +++ drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h | 54 + .../gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h | 515 ++ drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h | 300 + drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h | 427 + drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h | 72 + drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h | 135 + drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h | 148 + .../gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h | 486 ++ .../drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h | 169 + .../drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h | 101 + drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h | 118 + drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h | 420 + .../gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h | 144 + .../amd/pm/powerplay/inc/vega12/smu9_driver_if.h | 767 ++ 
.../gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h | 123 + .../gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h | 131 + drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c | 3405 -------- drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h | 229 - drivers/gpu/drm/amd/pm/powerplay/kv_smc.c | 218 - drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c | 1080 --- drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h | 38 - drivers/gpu/drm/amd/pm/powerplay/ppsmc.h | 200 - drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h | 127 - drivers/gpu/drm/amd/pm/powerplay/si_dpm.c | 8153 -------------------- drivers/gpu/drm/amd/pm/powerplay/si_dpm.h | 1022 --- drivers/gpu/drm/amd/pm/powerplay/si_smc.c | 273 - drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h | 431 -- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1418 ++++ .../drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h | 130 + .../drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h | 134 + .../swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h | 931 +++ .../inc/pmfw_if/smu11_driver_if_cyan_skillfish.h | 79 + .../pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h | 1220 +++ .../inc/pmfw_if/smu11_driver_if_sienna_cichlid.h | 1691 ++++ .../pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h | 282 + .../drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h | 232 + .../swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h | 538 ++ .../inc/pmfw_if/smu13_driver_if_yellow_carp.h | 222 + .../amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h | 143 + .../drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h | 141 + .../drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h | 123 + .../drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h | 119 + .../drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h | 152 + .../drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h | 77 + .../drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h | 106 + .../amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h | 141 + .../amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h | 97 + .../gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h | 194 + drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h | 373 + drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h | 320 + .../gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h | 196 + .../gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h | 167 + drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h | 66 + drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h | 273 + .../gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h | 165 + drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 1 - drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 1 - 169 files changed, 42141 insertions(+), 42112 deletions(-) delete mode 100644 drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/amd_powerplay.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/hardwaremanager.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/hwmgr.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/power_state.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/pp_debug.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/pp_endian.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/pp_thermal.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/ppinterrupt.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu10.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h delete mode 100644 
drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu7.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu71.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu71_discrete.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu72.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu72_discrete.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu73.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu73_discrete.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu74.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu74_discrete.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu75.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu75_discrete.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu7_common.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu7_discrete.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu7_fusion.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu8.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu8_fusion.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu9.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_types.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v11_0.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v11_5_pmfw.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v11_8_pmfw.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v12_0.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v13_0.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/smumgr.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/Makefile create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c create mode 100644 
drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c create mode 100644 drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h create mode 100644 drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h create mode 100644 
drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/kv_smc.c delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/ppsmc.h delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/si_dpm.c delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/si_dpm.h delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/si_smc.c delete mode 100644 drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h create mode 100644 drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h diff --git a/drivers/gpu/drm/amd/pm/Makefile b/drivers/gpu/drm/amd/pm/Makefile index d35ffde387f1..51751db436b0 100644 --- a/drivers/gpu/drm/amd/pm/Makefile +++ b/drivers/gpu/drm/amd/pm/Makefile @@ -21,20 +21,23 @@ # subdir-ccflags-y += \ - -I$(FULL_AMD_PATH)/pm/inc/ \ -I$(FULL_AMD_PATH)/include/asic_reg \ -I$(FULL_AMD_PATH)/include \ + -I$(FULL_AMD_PATH)/pm/inc/ \ -I$(FULL_AMD_PATH)/pm/swsmu \ + -I$(FULL_AMD_PATH)/pm/swsmu/inc \ + -I$(FULL_AMD_PATH)/pm/swsmu/inc/pmfw_if \ -I$(FULL_AMD_PATH)/pm/swsmu/smu11 \ -I$(FULL_AMD_PATH)/pm/swsmu/smu12 \ -I$(FULL_AMD_PATH)/pm/swsmu/smu13 \ - -I$(FULL_AMD_PATH)/pm/powerplay \ + 
-I$(FULL_AMD_PATH)/pm/powerplay/inc \ -I$(FULL_AMD_PATH)/pm/powerplay/smumgr\ - -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr + -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr \ + -I$(FULL_AMD_PATH)/pm/legacy-dpm AMD_PM_PATH = ../pm -PM_LIBS = swsmu powerplay +PM_LIBS = swsmu powerplay legacy-dpm AMD_PM = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/,$(PM_LIBS))) diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h deleted file mode 100644 index ab66a4b9e438..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h +++ /dev/null @@ -1,130 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef ALDEBARAN_PP_SMC_H -#define ALDEBARAN_PP_SMC_H - -#pragma pack(push, 1) - -// SMU Response Codes: -#define PPSMC_Result_OK 0x1 -#define PPSMC_Result_Failed 0xFF -#define PPSMC_Result_UnknownCmd 0xFE -#define PPSMC_Result_CmdRejectedPrereq 0xFD -#define PPSMC_Result_CmdRejectedBusy 0xFC - -// Message Definitions: -#define PPSMC_MSG_TestMessage 0x1 -#define PPSMC_MSG_GetSmuVersion 0x2 -#define PPSMC_MSG_GfxDriverReset 0x3 -#define PPSMC_MSG_GetDriverIfVersion 0x4 -#define PPSMC_MSG_spare1 0x5 -#define PPSMC_MSG_spare2 0x6 -#define PPSMC_MSG_EnableAllSmuFeatures 0x7 -#define PPSMC_MSG_DisableAllSmuFeatures 0x8 -#define PPSMC_MSG_spare3 0x9 -#define PPSMC_MSG_spare4 0xA -#define PPSMC_MSG_spare5 0xB -#define PPSMC_MSG_spare6 0xC -#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xD -#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xE -#define PPSMC_MSG_SetDriverDramAddrHigh 0xF -#define PPSMC_MSG_SetDriverDramAddrLow 0x10 -#define PPSMC_MSG_SetToolsDramAddrHigh 0x11 -#define PPSMC_MSG_SetToolsDramAddrLow 0x12 -#define PPSMC_MSG_TransferTableSmu2Dram 0x13 -#define PPSMC_MSG_TransferTableDram2Smu 0x14 -#define PPSMC_MSG_UseDefaultPPTable 0x15 -#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16 -#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17 -#define PPSMC_MSG_SetSoftMinByFreq 0x18 -#define PPSMC_MSG_SetSoftMaxByFreq 0x19 -#define PPSMC_MSG_SetHardMinByFreq 0x1A -#define PPSMC_MSG_SetHardMaxByFreq 0x1B -#define PPSMC_MSG_GetMinDpmFreq 0x1C -#define PPSMC_MSG_GetMaxDpmFreq 0x1D -#define PPSMC_MSG_GetDpmFreqByIndex 0x1E -#define PPSMC_MSG_SetWorkloadMask 0x1F -#define PPSMC_MSG_GetVoltageByDpm 0x20 -#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x21 -#define PPSMC_MSG_SetPptLimit 0x22 -#define PPSMC_MSG_GetPptLimit 0x23 -#define PPSMC_MSG_PrepareMp1ForUnload 0x24 
-#define PPSMC_MSG_PrepareMp1ForReset 0x25 //retired in 68.07 -#define PPSMC_MSG_SoftReset 0x26 //retired in 68.07 -#define PPSMC_MSG_RunDcBtc 0x27 -#define PPSMC_MSG_DramLogSetDramAddrHigh 0x28 -#define PPSMC_MSG_DramLogSetDramAddrLow 0x29 -#define PPSMC_MSG_DramLogSetDramSize 0x2A -#define PPSMC_MSG_GetDebugData 0x2B -#define PPSMC_MSG_WaflTest 0x2C -#define PPSMC_MSG_spare7 0x2D -#define PPSMC_MSG_SetMemoryChannelEnable 0x2E -#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x2F -#define PPSMC_MSG_DFCstateControl 0x32 -#define PPSMC_MSG_GetGmiPwrDnHyst 0x33 -#define PPSMC_MSG_SetGmiPwrDnHyst 0x34 -#define PPSMC_MSG_GmiPwrDnControl 0x35 -#define PPSMC_MSG_EnterGfxoff 0x36 -#define PPSMC_MSG_ExitGfxoff 0x37 -#define PPSMC_MSG_SetExecuteDMATest 0x38 -#define PPSMC_MSG_EnableDeterminism 0x39 -#define PPSMC_MSG_DisableDeterminism 0x3A -#define PPSMC_MSG_SetUclkDpmMode 0x3B - -//STB to dram log -#define PPSMC_MSG_DumpSTBtoDram 0x3C -#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3D -#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3E -#define PPSMC_MSG_STBtoDramLogSetDramSize 0x3F -#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x40 -#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x41 - -#define PPSMC_MSG_GfxDriverResetRecovery 0x42 -#define PPSMC_MSG_BoardPowerCalibration 0x43 -#define PPSMC_MSG_HeavySBR 0x45 -#define PPSMC_Message_Count 0x46 - - -//PPSMC Reset Types -#define PPSMC_RESET_TYPE_WARM_RESET 0x00 -#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x01 //driver msg argument should be 1 for mode-1 -#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x02 //and 2 for mode-2 -#define PPSMC_RESET_TYPE_PCIE_LINK_RESET 0x03 -#define PPSMC_RESET_TYPE_BIF_LINK_RESET 0x04 -#define PPSMC_RESET_TYPE_PF0_FLR_RESET 0x05 - - -typedef enum { - GFXOFF_ERROR_NO_ERROR, - GFXOFF_ERROR_DISALLOWED, - GFXOFF_ERROR_GFX_BUSY, - GFXOFF_ERROR_GFX_OFF, - GFXOFF_ERROR_GFX_ON, -} GFXOFF_ERROR_e; - -typedef uint32_t PPSMC_Result; -typedef uint32_t PPSMC_Msg; -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h b/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h deleted file mode 100644 index fe3665965416..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef _AMD_POWERPLAY_H_ -#define _AMD_POWERPLAY_H_ - -#include -#include -#include -#include "amd_shared.h" -#include "cgs_common.h" -#include "dm_pp_interface.h" -#include "kgd_pp_interface.h" -#include "amdgpu.h" - -#endif /* _AMD_POWERPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h deleted file mode 100644 index 8a689baeaf82..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h +++ /dev/null @@ -1,1418 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - */ -#ifndef __AMDGPU_SMU_H__ -#define __AMDGPU_SMU_H__ - -#include "amdgpu.h" -#include "kgd_pp_interface.h" -#include "dm_pp_interface.h" -#include "dm_pp_smu.h" -#include "smu_types.h" - -#define SMU_THERMAL_MINIMUM_ALERT_TEMP 0 -#define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255 -#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000 -#define SMU_FW_NAME_LEN 0x24 - -#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0) -#define SMU_CUSTOM_FAN_SPEED_RPM (1 << 1) -#define SMU_CUSTOM_FAN_SPEED_PWM (1 << 2) - -// Power Throttlers -#define SMU_THROTTLER_PPT0_BIT 0 -#define SMU_THROTTLER_PPT1_BIT 1 -#define SMU_THROTTLER_PPT2_BIT 2 -#define SMU_THROTTLER_PPT3_BIT 3 -#define SMU_THROTTLER_SPL_BIT 4 -#define SMU_THROTTLER_FPPT_BIT 5 -#define SMU_THROTTLER_SPPT_BIT 6 -#define SMU_THROTTLER_SPPT_APU_BIT 7 - -// Current Throttlers -#define SMU_THROTTLER_TDC_GFX_BIT 16 -#define SMU_THROTTLER_TDC_SOC_BIT 17 -#define SMU_THROTTLER_TDC_MEM_BIT 18 -#define SMU_THROTTLER_TDC_VDD_BIT 19 -#define SMU_THROTTLER_TDC_CVIP_BIT 20 -#define SMU_THROTTLER_EDC_CPU_BIT 21 -#define SMU_THROTTLER_EDC_GFX_BIT 22 -#define SMU_THROTTLER_APCC_BIT 23 - -// Temperature -#define SMU_THROTTLER_TEMP_GPU_BIT 32 -#define SMU_THROTTLER_TEMP_CORE_BIT 33 -#define SMU_THROTTLER_TEMP_MEM_BIT 34 -#define SMU_THROTTLER_TEMP_EDGE_BIT 35 -#define SMU_THROTTLER_TEMP_HOTSPOT_BIT 36 -#define SMU_THROTTLER_TEMP_SOC_BIT 37 -#define SMU_THROTTLER_TEMP_VR_GFX_BIT 38 -#define SMU_THROTTLER_TEMP_VR_SOC_BIT 39 -#define SMU_THROTTLER_TEMP_VR_MEM0_BIT 40 -#define SMU_THROTTLER_TEMP_VR_MEM1_BIT 41 -#define SMU_THROTTLER_TEMP_LIQUID0_BIT 42 -#define SMU_THROTTLER_TEMP_LIQUID1_BIT 43 -#define SMU_THROTTLER_VRHOT0_BIT 44 -#define SMU_THROTTLER_VRHOT1_BIT 45 -#define SMU_THROTTLER_PROCHOT_CPU_BIT 46 -#define SMU_THROTTLER_PROCHOT_GFX_BIT 47 - -// Other -#define SMU_THROTTLER_PPM_BIT 56 -#define SMU_THROTTLER_FIT_BIT 57 - -struct smu_hw_power_state { - unsigned 
int magic; -}; - -struct smu_power_state; - -enum smu_state_ui_label { - SMU_STATE_UI_LABEL_NONE, - SMU_STATE_UI_LABEL_BATTERY, - SMU_STATE_UI_TABEL_MIDDLE_LOW, - SMU_STATE_UI_LABEL_BALLANCED, - SMU_STATE_UI_LABEL_MIDDLE_HIGHT, - SMU_STATE_UI_LABEL_PERFORMANCE, - SMU_STATE_UI_LABEL_BACO, -}; - -enum smu_state_classification_flag { - SMU_STATE_CLASSIFICATION_FLAG_BOOT = 0x0001, - SMU_STATE_CLASSIFICATION_FLAG_THERMAL = 0x0002, - SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = 0x0004, - SMU_STATE_CLASSIFICATION_FLAG_RESET = 0x0008, - SMU_STATE_CLASSIFICATION_FLAG_FORCED = 0x0010, - SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = 0x0020, - SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = 0x0040, - SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = 0x0080, - SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = 0x0100, - SMU_STATE_CLASSIFICATION_FLAG_UVD = 0x0200, - SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = 0x0400, - SMU_STATE_CLASSIFICATION_FLAG_ACPI = 0x0800, - SMU_STATE_CLASSIFICATION_FLAG_HD2 = 0x1000, - SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = 0x2000, - SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = 0x4000, - SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = 0x8000, - SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = 0x10000, - SMU_STATE_CLASSIFICATION_FLAG_BACO = 0x20000, - SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = 0x40000, - SMU_STATE_CLASSIFICATION_FLAG_ULV = 0x80000, - SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = 0x100000, -}; - -struct smu_state_classification_block { - enum smu_state_ui_label ui_label; - enum smu_state_classification_flag flags; - int bios_index; - bool temporary_state; - bool to_be_deleted; -}; - -struct smu_state_pcie_block { - unsigned int lanes; -}; - -enum smu_refreshrate_source { - SMU_REFRESHRATE_SOURCE_EDID, - SMU_REFRESHRATE_SOURCE_EXPLICIT -}; - -struct smu_state_display_block { - bool disable_frame_modulation; - bool limit_refreshrate; - enum smu_refreshrate_source refreshrate_source; - int explicit_refreshrate; - int edid_refreshrate_index; - bool enable_vari_bright; -}; - -struct smu_state_memory_block { - bool dll_off; - uint8_t m3arb; - uint8_t unused[3]; -}; - -struct smu_state_software_algorithm_block { - bool disable_load_balancing; - bool enable_sleep_for_timestamps; -}; - -struct smu_temperature_range { - int min; - int max; - int edge_emergency_max; - int hotspot_min; - int hotspot_crit_max; - int hotspot_emergency_max; - int mem_min; - int mem_crit_max; - int mem_emergency_max; - int software_shutdown_temp; -}; - -struct smu_state_validation_block { - bool single_display_only; - bool disallow_on_dc; - uint8_t supported_power_levels; -}; - -struct smu_uvd_clocks { - uint32_t vclk; - uint32_t dclk; -}; - -/** -* Structure to hold a SMU Power State. 
-*/ -struct smu_power_state { - uint32_t id; - struct list_head ordered_list; - struct list_head all_states_list; - - struct smu_state_classification_block classification; - struct smu_state_validation_block validation; - struct smu_state_pcie_block pcie; - struct smu_state_display_block display; - struct smu_state_memory_block memory; - struct smu_state_software_algorithm_block software; - struct smu_uvd_clocks uvd_clocks; - struct smu_hw_power_state hardware; -}; - -enum smu_power_src_type -{ - SMU_POWER_SOURCE_AC, - SMU_POWER_SOURCE_DC, - SMU_POWER_SOURCE_COUNT, -}; - -enum smu_ppt_limit_type -{ - SMU_DEFAULT_PPT_LIMIT = 0, - SMU_FAST_PPT_LIMIT, -}; - -enum smu_ppt_limit_level -{ - SMU_PPT_LIMIT_MIN = -1, - SMU_PPT_LIMIT_CURRENT, - SMU_PPT_LIMIT_DEFAULT, - SMU_PPT_LIMIT_MAX, -}; - -enum smu_memory_pool_size -{ - SMU_MEMORY_POOL_SIZE_ZERO = 0, - SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000, - SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000, - SMU_MEMORY_POOL_SIZE_1_GB = 0x40000000, - SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000, -}; - -struct smu_user_dpm_profile { - uint32_t fan_mode; - uint32_t power_limit; - uint32_t fan_speed_pwm; - uint32_t fan_speed_rpm; - uint32_t flags; - uint32_t user_od; - - /* user clock state information */ - uint32_t clk_mask[SMU_CLK_COUNT]; - uint32_t clk_dependency; -}; - -#define SMU_TABLE_INIT(tables, table_id, s, a, d) \ - do { \ - tables[table_id].size = s; \ - tables[table_id].align = a; \ - tables[table_id].domain = d; \ - } while (0) - -struct smu_table { - uint64_t size; - uint32_t align; - uint8_t domain; - uint64_t mc_address; - void *cpu_addr; - struct amdgpu_bo *bo; -}; - -enum smu_perf_level_designation { - PERF_LEVEL_ACTIVITY, - PERF_LEVEL_POWER_CONTAINMENT, -}; - -struct smu_performance_level { - uint32_t core_clock; - uint32_t memory_clock; - uint32_t vddc; - uint32_t vddci; - uint32_t non_local_mem_freq; - uint32_t non_local_mem_width; -}; - -struct smu_clock_info { - uint32_t min_mem_clk; - uint32_t max_mem_clk; - uint32_t min_eng_clk; - uint32_t max_eng_clk; - uint32_t min_bus_bandwidth; - uint32_t max_bus_bandwidth; -}; - -struct smu_bios_boot_up_values -{ - uint32_t revision; - uint32_t gfxclk; - uint32_t uclk; - uint32_t socclk; - uint32_t dcefclk; - uint32_t eclk; - uint32_t vclk; - uint32_t dclk; - uint16_t vddc; - uint16_t vddci; - uint16_t mvddc; - uint16_t vdd_gfx; - uint8_t cooling_id; - uint32_t pp_table_id; - uint32_t format_revision; - uint32_t content_revision; - uint32_t fclk; - uint32_t lclk; - uint32_t firmware_caps; -}; - -enum smu_table_id -{ - SMU_TABLE_PPTABLE = 0, - SMU_TABLE_WATERMARKS, - SMU_TABLE_CUSTOM_DPM, - SMU_TABLE_DPMCLOCKS, - SMU_TABLE_AVFS, - SMU_TABLE_AVFS_PSM_DEBUG, - SMU_TABLE_AVFS_FUSE_OVERRIDE, - SMU_TABLE_PMSTATUSLOG, - SMU_TABLE_SMU_METRICS, - SMU_TABLE_DRIVER_SMU_CONFIG, - SMU_TABLE_ACTIVITY_MONITOR_COEFF, - SMU_TABLE_OVERDRIVE, - SMU_TABLE_I2C_COMMANDS, - SMU_TABLE_PACE, - SMU_TABLE_ECCINFO, - SMU_TABLE_COUNT, -}; - -struct smu_table_context -{ - void *power_play_table; - uint32_t power_play_table_size; - void *hardcode_pptable; - unsigned long metrics_time; - void *metrics_table; - void *clocks_table; - void *watermarks_table; - - void *max_sustainable_clocks; - struct smu_bios_boot_up_values boot_values; - void *driver_pptable; - void *ecc_table; - struct smu_table tables[SMU_TABLE_COUNT]; - /* - * The driver table is just a staging buffer for - * uploading/downloading content from the SMU. 
- * - * And the table_id for SMU_MSG_TransferTableSmu2Dram/ - * SMU_MSG_TransferTableDram2Smu instructs SMU - * which content driver is interested. - */ - struct smu_table driver_table; - struct smu_table memory_pool; - struct smu_table dummy_read_1_table; - uint8_t thermal_controller_type; - - void *overdrive_table; - void *boot_overdrive_table; - void *user_overdrive_table; - - uint32_t gpu_metrics_table_size; - void *gpu_metrics_table; -}; - -struct smu_dpm_context { - uint32_t dpm_context_size; - void *dpm_context; - void *golden_dpm_context; - bool enable_umd_pstate; - enum amd_dpm_forced_level dpm_level; - enum amd_dpm_forced_level saved_dpm_level; - enum amd_dpm_forced_level requested_dpm_level; - struct smu_power_state *dpm_request_power_state; - struct smu_power_state *dpm_current_power_state; - struct mclock_latency_table *mclk_latency_table; -}; - -struct smu_power_gate { - bool uvd_gated; - bool vce_gated; - atomic_t vcn_gated; - atomic_t jpeg_gated; - struct mutex vcn_gate_lock; - struct mutex jpeg_gate_lock; -}; - -struct smu_power_context { - void *power_context; - uint32_t power_context_size; - struct smu_power_gate power_gate; -}; - -#define SMU_FEATURE_MAX (64) -struct smu_feature -{ - uint32_t feature_num; - DECLARE_BITMAP(supported, SMU_FEATURE_MAX); - DECLARE_BITMAP(allowed, SMU_FEATURE_MAX); - DECLARE_BITMAP(enabled, SMU_FEATURE_MAX); - struct mutex mutex; -}; - -struct smu_clocks { - uint32_t engine_clock; - uint32_t memory_clock; - uint32_t bus_bandwidth; - uint32_t engine_clock_in_sr; - uint32_t dcef_clock; - uint32_t dcef_clock_in_sr; -}; - -#define MAX_REGULAR_DPM_NUM 16 -struct mclk_latency_entries { - uint32_t frequency; - uint32_t latency; -}; -struct mclock_latency_table { - uint32_t count; - struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM]; -}; - -enum smu_reset_mode -{ - SMU_RESET_MODE_0, - SMU_RESET_MODE_1, - SMU_RESET_MODE_2, -}; - -enum smu_baco_state -{ - SMU_BACO_STATE_ENTER = 0, - SMU_BACO_STATE_EXIT, -}; - -struct smu_baco_context -{ - struct mutex mutex; - uint32_t state; - bool platform_support; -}; - -struct smu_freq_info { - uint32_t min; - uint32_t max; - uint32_t freq_level; -}; - -struct pstates_clk_freq { - uint32_t min; - uint32_t standard; - uint32_t peak; - struct smu_freq_info custom; - struct smu_freq_info curr; -}; - -struct smu_umd_pstate_table { - struct pstates_clk_freq gfxclk_pstate; - struct pstates_clk_freq socclk_pstate; - struct pstates_clk_freq uclk_pstate; - struct pstates_clk_freq vclk_pstate; - struct pstates_clk_freq dclk_pstate; -}; - -struct cmn2asic_msg_mapping { - int valid_mapping; - int map_to; - int valid_in_vf; -}; - -struct cmn2asic_mapping { - int valid_mapping; - int map_to; -}; - -struct stb_context { - uint32_t stb_buf_size; - bool enabled; - spinlock_t lock; -}; - -#define WORKLOAD_POLICY_MAX 7 - -struct smu_context -{ - struct amdgpu_device *adev; - struct amdgpu_irq_src irq_source; - - const struct pptable_funcs *ppt_funcs; - const struct cmn2asic_msg_mapping *message_map; - const struct cmn2asic_mapping *clock_map; - const struct cmn2asic_mapping *feature_map; - const struct cmn2asic_mapping *table_map; - const struct cmn2asic_mapping *pwr_src_map; - const struct cmn2asic_mapping *workload_map; - struct mutex mutex; - struct mutex sensor_lock; - struct mutex metrics_lock; - struct mutex message_lock; - uint64_t pool_size; - - struct smu_table_context smu_table; - struct smu_dpm_context smu_dpm; - struct smu_power_context smu_power; - struct smu_feature smu_feature; - struct 
amd_pp_display_configuration *display_config; - struct smu_baco_context smu_baco; - struct smu_temperature_range thermal_range; - void *od_settings; - - struct smu_umd_pstate_table pstate_table; - uint32_t pstate_sclk; - uint32_t pstate_mclk; - - bool od_enabled; - uint32_t current_power_limit; - uint32_t default_power_limit; - uint32_t max_power_limit; - - /* soft pptable */ - uint32_t ppt_offset_bytes; - uint32_t ppt_size_bytes; - uint8_t *ppt_start_addr; - - bool support_power_containment; - bool disable_watermark; - -#define WATERMARKS_EXIST (1 << 0) -#define WATERMARKS_LOADED (1 << 1) - uint32_t watermarks_bitmap; - uint32_t hard_min_uclk_req_from_dal; - bool disable_uclk_switch; - - uint32_t workload_mask; - uint32_t workload_prority[WORKLOAD_POLICY_MAX]; - uint32_t workload_setting[WORKLOAD_POLICY_MAX]; - uint32_t power_profile_mode; - uint32_t default_power_profile_mode; - bool pm_enabled; - bool is_apu; - - uint32_t smc_driver_if_version; - uint32_t smc_fw_if_version; - uint32_t smc_fw_version; - - bool uploading_custom_pp_table; - bool dc_controlled_by_gpio; - - struct work_struct throttling_logging_work; - atomic64_t throttle_int_counter; - struct work_struct interrupt_work; - - unsigned fan_max_rpm; - unsigned manual_fan_speed_pwm; - - uint32_t gfx_default_hard_min_freq; - uint32_t gfx_default_soft_max_freq; - uint32_t gfx_actual_hard_min_freq; - uint32_t gfx_actual_soft_max_freq; - - /* APU only */ - uint32_t cpu_default_soft_min_freq; - uint32_t cpu_default_soft_max_freq; - uint32_t cpu_actual_soft_min_freq; - uint32_t cpu_actual_soft_max_freq; - uint32_t cpu_core_id_select; - uint16_t cpu_core_num; - - struct smu_user_dpm_profile user_dpm_profile; - - struct stb_context stb_context; -}; - -struct i2c_adapter; - -/** - * struct pptable_funcs - Callbacks used to interact with the SMU. - */ -struct pptable_funcs { - /** - * @run_btc: Calibrate voltage/frequency curve to fit the system's - * power delivery and voltage margins. Required for adaptive - * voltage frequency scaling (AVFS). - */ - int (*run_btc)(struct smu_context *smu); - - /** - * @get_allowed_feature_mask: Get allowed feature mask. - * &feature_mask: Array to store feature mask. - * &num: Elements in &feature_mask. - */ - int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); - - /** - * @get_current_power_state: Get the current power state. - * - * Return: Current power state on success, negative errno on failure. - */ - enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu); - - /** - * @set_default_dpm_table: Retrieve the default overdrive settings from - * the SMU. - */ - int (*set_default_dpm_table)(struct smu_context *smu); - - int (*set_power_state)(struct smu_context *smu); - - /** - * @populate_umd_state_clk: Populate the UMD power state table with - * defaults. - */ - int (*populate_umd_state_clk)(struct smu_context *smu); - - /** - * @print_clk_levels: Print DPM clock levels for a clock domain - * to buffer. Star current level. - * - * Used for sysfs interfaces. - */ - int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf); - - /** - * @force_clk_levels: Set a range of allowed DPM levels for a clock - * domain. - * &clk_type: Clock domain. - * &mask: Range of allowed DPM levels. - */ - int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask); - - /** - * @od_edit_dpm_table: Edit the custom overdrive DPM table. - * &type: Type of edit. - * &input: Edit parameters. 
- * &size: Size of &input. - */ - int (*od_edit_dpm_table)(struct smu_context *smu, - enum PP_OD_DPM_TABLE_COMMAND type, - long *input, uint32_t size); - - /** - * @restore_user_od_settings: Restore the user customized - * OD settings on S3/S4/Runpm resume. - */ - int (*restore_user_od_settings)(struct smu_context *smu); - - /** - * @get_clock_by_type_with_latency: Get the speed and latency of a clock - * domain. - */ - int (*get_clock_by_type_with_latency)(struct smu_context *smu, - enum smu_clk_type clk_type, - struct - pp_clock_levels_with_latency - *clocks); - /** - * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock - * domain. - */ - int (*get_clock_by_type_with_voltage)(struct smu_context *smu, - enum amd_pp_clock_type type, - struct - pp_clock_levels_with_voltage - *clocks); - - /** - * @get_power_profile_mode: Print all power profile modes to - * buffer. Star current mode. - */ - int (*get_power_profile_mode)(struct smu_context *smu, char *buf); - - /** - * @set_power_profile_mode: Set a power profile mode. Also used to - * create/set custom power profile modes. - * &input: Power profile mode parameters. - * &size: Size of &input. - */ - int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); - - /** - * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power - * management. - */ - int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable); - - /** - * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power - * management. - */ - int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable); - - /** - * @read_sensor: Read data from a sensor. - * &sensor: Sensor to read data from. - * &data: Sensor reading. - * &size: Size of &data. - */ - int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, - void *data, uint32_t *size); - - /** - * @pre_display_config_changed: Prepare GPU for a display configuration - * change. - * - * Disable display tracking and pin memory clock speed to maximum. Used - * in display component synchronization. - */ - int (*pre_display_config_changed)(struct smu_context *smu); - - /** - * @display_config_changed: Notify the SMU of the current display - * configuration. - * - * Allows SMU to properly track blanking periods for memory clock - * adjustment. Used in display component synchronization. - */ - int (*display_config_changed)(struct smu_context *smu); - - int (*apply_clocks_adjust_rules)(struct smu_context *smu); - - /** - * @notify_smc_display_config: Applies display requirements to the - * current power state. - * - * Optimize deep sleep DCEFclk and mclk for the current display - * configuration. Used in display component synchronization. - */ - int (*notify_smc_display_config)(struct smu_context *smu); - - /** - * @is_dpm_running: Check if DPM is running. - * - * Return: True if DPM is running, false otherwise. - */ - bool (*is_dpm_running)(struct smu_context *smu); - - /** - * @get_fan_speed_pwm: Get the current fan speed in PWM. - */ - int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed); - - /** - * @get_fan_speed_rpm: Get the current fan speed in rpm. - */ - int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed); - - /** - * @set_watermarks_table: Configure and upload the watermarks tables to - * the SMU. - */ - int (*set_watermarks_table)(struct smu_context *smu, - struct pp_smu_wm_range_sets *clock_ranges); - - /** - * @get_thermal_temperature_range: Get safe thermal limits in Celcius. 
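The read_sensor() and fan-speed hooks documented above are invoked through a per-ASIC table of function pointers, and a hook an ASIC backend does not implement is normally left NULL. A rough, self-contained userspace sketch of that dispatch pattern follows; every demo_* name and number is invented for illustration and is not taken from the driver.

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for smu_context / pptable_funcs. */
struct demo_smu;

struct demo_funcs {
        /* Optional per-ASIC callback; may be NULL if unsupported. */
        int (*read_sensor)(struct demo_smu *smu, int sensor,
                           void *data, uint32_t *size);
};

struct demo_smu {
        const struct demo_funcs *funcs;
};

/* Fake ASIC backend: report a fixed edge temperature in millicelsius. */
static int fake_read_sensor(struct demo_smu *smu, int sensor,
                            void *data, uint32_t *size)
{
        (void)smu; (void)sensor;
        if (*size < sizeof(uint32_t))
                return -1;
        *(uint32_t *)data = 45000;
        *size = sizeof(uint32_t);
        return 0;
}

/* Common layer: dispatch through the table, mimic -EOPNOTSUPP if unset. */
static int demo_read_sensor(struct demo_smu *smu, int sensor,
                            void *data, uint32_t *size)
{
        if (!smu->funcs || !smu->funcs->read_sensor)
                return -95;
        return smu->funcs->read_sensor(smu, sensor, data, size);
}

int main(void)
{
        static const struct demo_funcs fake_funcs = { .read_sensor = fake_read_sensor };
        struct demo_smu smu = { .funcs = &fake_funcs };
        uint32_t temp, size = sizeof(temp);

        if (!demo_read_sensor(&smu, 0, &temp, &size))
                printf("edge temperature: %u millicelsius\n", temp);
        return 0;
}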
- */ - int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); - - /** - * @get_uclk_dpm_states: Get memory clock DPM levels in kHz. - * &clocks_in_khz: Array of DPM levels. - * &num_states: Elements in &clocks_in_khz. - */ - int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); - - /** - * @set_default_od_settings: Set the overdrive tables to defaults. - */ - int (*set_default_od_settings)(struct smu_context *smu); - - /** - * @set_performance_level: Set a performance level. - */ - int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); - - /** - * @display_disable_memory_clock_switch: Enable/disable dynamic memory - * clock switching. - * - * Disabling this feature forces memory clock speed to maximum. - * Enabling sets the minimum memory clock capable of driving the - * current display configuration. - */ - int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); - - /** - * @dump_pptable: Print the power play table to the system log. - */ - void (*dump_pptable)(struct smu_context *smu); - - /** - * @get_power_limit: Get the device's power limits. - */ - int (*get_power_limit)(struct smu_context *smu, - uint32_t *current_power_limit, - uint32_t *default_power_limit, - uint32_t *max_power_limit); - - /** - * @get_ppt_limit: Get the device's ppt limits. - */ - int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit, - enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level); - - /** - * @set_df_cstate: Set data fabric cstate. - */ - int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state); - - /** - * @allow_xgmi_power_down: Enable/disable external global memory - * interconnect power down. - */ - int (*allow_xgmi_power_down)(struct smu_context *smu, bool en); - - /** - * @update_pcie_parameters: Update and upload the system's PCIe - * capabilites to the SMU. - * &pcie_gen_cap: Maximum allowed PCIe generation. - * &pcie_width_cap: Maximum allowed PCIe width. - */ - int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap); - - /** - * @i2c_init: Initialize i2c. - * - * The i2c bus is used internally by the SMU voltage regulators and - * other devices. The i2c's EEPROM also stores bad page tables on boards - * with ECC. - */ - int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control); - - /** - * @i2c_fini: Tear down i2c. - */ - void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control); - - /** - * @get_unique_id: Get the GPU's unique id. Used for asset tracking. - */ - void (*get_unique_id)(struct smu_context *smu); - - /** - * @get_dpm_clock_table: Get a copy of the DPM clock table. - * - * Used by display component in bandwidth and watermark calculations. - */ - int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table); - - /** - * @init_microcode: Request the SMU's firmware from the kernel. - */ - int (*init_microcode)(struct smu_context *smu); - - /** - * @load_microcode: Load firmware onto the SMU. - */ - int (*load_microcode)(struct smu_context *smu); - - /** - * @fini_microcode: Release the SMU's firmware. - */ - void (*fini_microcode)(struct smu_context *smu); - - /** - * @init_smc_tables: Initialize the SMU tables. - */ - int (*init_smc_tables)(struct smu_context *smu); - - /** - * @fini_smc_tables: Release the SMU tables. 
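get_uclk_dpm_states() above follows a common calling convention: the caller passes an array plus an in/out element count, and the callee writes back at most that many levels. A minimal sketch of that convention with invented clock values (demo_* names are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Pretend DPM table: memory clock levels in kHz (made-up values). */
static const uint32_t demo_uclk_levels_khz[] = { 96000, 411000, 800000, 1000000 };

/*
 * Fill at most *num_states entries of clocks_in_khz and report how many
 * levels were actually written through *num_states.  Returns 0 on success.
 */
static int demo_get_uclk_dpm_states(uint32_t *clocks_in_khz, uint32_t *num_states)
{
        uint32_t i, n = sizeof(demo_uclk_levels_khz) / sizeof(demo_uclk_levels_khz[0]);

        if (!clocks_in_khz || !num_states)
                return -1;
        if (n > *num_states)
                n = *num_states;
        for (i = 0; i < n; i++)
                clocks_in_khz[i] = demo_uclk_levels_khz[i];
        *num_states = n;
        return 0;
}

int main(void)
{
        uint32_t clocks[8], num = 8, i;

        if (!demo_get_uclk_dpm_states(clocks, &num))
                for (i = 0; i < num; i++)
                        printf("uclk level %u: %u kHz\n", i, clocks[i]);
        return 0;
}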
- */ - int (*fini_smc_tables)(struct smu_context *smu); - - /** - * @init_power: Initialize the power gate table context. - */ - int (*init_power)(struct smu_context *smu); - - /** - * @fini_power: Release the power gate table context. - */ - int (*fini_power)(struct smu_context *smu); - - /** - * @check_fw_status: Check the SMU's firmware status. - * - * Return: Zero if check passes, negative errno on failure. - */ - int (*check_fw_status)(struct smu_context *smu); - - /** - * @set_mp1_state: put SMU into a correct state for comming - * resume from runpm or gpu reset. - */ - int (*set_mp1_state)(struct smu_context *smu, - enum pp_mp1_state mp1_state); - - /** - * @setup_pptable: Initialize the power play table and populate it with - * default values. - */ - int (*setup_pptable)(struct smu_context *smu); - - /** - * @get_vbios_bootup_values: Get default boot values from the VBIOS. - */ - int (*get_vbios_bootup_values)(struct smu_context *smu); - - /** - * @check_fw_version: Print driver and SMU interface versions to the - * system log. - * - * Interface mismatch is not a critical failure. - */ - int (*check_fw_version)(struct smu_context *smu); - - /** - * @powergate_sdma: Power up/down system direct memory access. - */ - int (*powergate_sdma)(struct smu_context *smu, bool gate); - - /** - * @set_gfx_cgpg: Enable/disable graphics engine course grain power - * gating. - */ - int (*set_gfx_cgpg)(struct smu_context *smu, bool enable); - - /** - * @write_pptable: Write the power play table to the SMU. - */ - int (*write_pptable)(struct smu_context *smu); - - /** - * @set_driver_table_location: Send the location of the driver table to - * the SMU. - */ - int (*set_driver_table_location)(struct smu_context *smu); - - /** - * @set_tool_table_location: Send the location of the tool table to the - * SMU. - */ - int (*set_tool_table_location)(struct smu_context *smu); - - /** - * @notify_memory_pool_location: Send the location of the memory pool to - * the SMU. - */ - int (*notify_memory_pool_location)(struct smu_context *smu); - - /** - * @system_features_control: Enable/disable all SMU features. - */ - int (*system_features_control)(struct smu_context *smu, bool en); - - /** - * @send_smc_msg_with_param: Send a message with a parameter to the SMU. - * &msg: Type of message. - * ¶m: Message parameter. - * &read_arg: SMU response (optional). - */ - int (*send_smc_msg_with_param)(struct smu_context *smu, - enum smu_message_type msg, uint32_t param, uint32_t *read_arg); - - /** - * @send_smc_msg: Send a message to the SMU. - * &msg: Type of message. - * &read_arg: SMU response (optional). - */ - int (*send_smc_msg)(struct smu_context *smu, - enum smu_message_type msg, - uint32_t *read_arg); - - /** - * @init_display_count: Notify the SMU of the number of display - * components in current display configuration. - */ - int (*init_display_count)(struct smu_context *smu, uint32_t count); - - /** - * @set_allowed_mask: Notify the SMU of the features currently allowed - * by the driver. - */ - int (*set_allowed_mask)(struct smu_context *smu); - - /** - * @get_enabled_mask: Get a mask of features that are currently enabled - * on the SMU. - * &feature_mask: Array representing enabled feature mask. - * &num: Elements in &feature_mask. - */ - int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); - - /** - * @feature_is_enabled: Test if a feature is enabled. - * - * Return: One if enabled, zero if disabled. 
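send_smc_msg() above pairs with send_smc_msg_with_param(); one plausible arrangement, assumed here, is that the parameterless form simply forwards a zero parameter. The sketch below models it that way (demo_* names and the fake mailbox behaviour are illustrative only):

#include <stdint.h>
#include <stdio.h>

struct demo_smu { uint32_t last_response; };

/* Pretend mailbox: derive a fake "firmware reply" from the request. */
static int demo_send_msg_with_param(struct demo_smu *smu, int msg,
                                    uint32_t param, uint32_t *read_arg)
{
        smu->last_response = param + (uint32_t)msg;
        if (read_arg)
                *read_arg = smu->last_response;  /* response is optional */
        return 0;
}

/* Parameterless variant as a thin wrapper, mirroring the pairing above. */
static int demo_send_msg(struct demo_smu *smu, int msg, uint32_t *read_arg)
{
        return demo_send_msg_with_param(smu, msg, 0, read_arg);
}

int main(void)
{
        struct demo_smu smu = { 0 };
        uint32_t version = 0;

        demo_send_msg(&smu, 2 /* pretend "get version" */, &version);
        printf("pretend SMU answered 0x%x\n", version);
        return 0;
}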
- */ - int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask); - - /** - * @disable_all_features_with_exception: Disable all features with - * exception to those in &mask. - */ - int (*disable_all_features_with_exception)(struct smu_context *smu, - bool no_hw_disablement, - enum smu_feature_mask mask); - - /** - * @notify_display_change: Enable fast memory clock switching. - * - * Allows for fine grained memory clock switching but has more stringent - * timing requirements. - */ - int (*notify_display_change)(struct smu_context *smu); - - /** - * @set_power_limit: Set power limit in watts. - */ - int (*set_power_limit)(struct smu_context *smu, - enum smu_ppt_limit_type limit_type, - uint32_t limit); - - /** - * @init_max_sustainable_clocks: Populate max sustainable clock speed - * table with values from the SMU. - */ - int (*init_max_sustainable_clocks)(struct smu_context *smu); - - /** - * @enable_thermal_alert: Enable thermal alert interrupts. - */ - int (*enable_thermal_alert)(struct smu_context *smu); - - /** - * @disable_thermal_alert: Disable thermal alert interrupts. - */ - int (*disable_thermal_alert)(struct smu_context *smu); - - /** - * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep - * clock speed in MHz. - */ - int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk); - - /** - * @display_clock_voltage_request: Set a hard minimum frequency - * for a clock domain. - */ - int (*display_clock_voltage_request)(struct smu_context *smu, struct - pp_display_clock_request - *clock_req); - - /** - * @get_fan_control_mode: Get the current fan control mode. - */ - uint32_t (*get_fan_control_mode)(struct smu_context *smu); - - /** - * @set_fan_control_mode: Set the fan control mode. - */ - int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); - - /** - * @set_fan_speed_pwm: Set a static fan speed in PWM. - */ - int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed); - - /** - * @set_fan_speed_rpm: Set a static fan speed in rpm. - */ - int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); - - /** - * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate. - * &pstate: Pstate to set. D0 if Nonzero, D3 otherwise. - */ - int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate); - - /** - * @gfx_off_control: Enable/disable graphics engine poweroff. - */ - int (*gfx_off_control)(struct smu_context *smu, bool enable); - - - /** - * @get_gfx_off_status: Get graphics engine poweroff status. - * - * Return: - * 0 - GFXOFF(default). - * 1 - Transition out of GFX State. - * 2 - Not in GFXOFF. - * 3 - Transition into GFXOFF. - */ - uint32_t (*get_gfx_off_status)(struct smu_context *smu); - - /** - * @register_irq_handler: Register interupt request handlers. - */ - int (*register_irq_handler)(struct smu_context *smu); - - /** - * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep. - */ - int (*set_azalia_d3_pme)(struct smu_context *smu); - - /** - * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable - * clock speeds table. - * - * Provides a way for the display component (DC) to get the max - * sustainable clocks from the SMU. - */ - int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); - - /** - * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off). - */ - bool (*baco_is_support)(struct smu_context *smu); - - /** - * @baco_get_state: Get the current BACO state. 
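The four get_gfx_off_status() return values documented above map naturally onto an enum plus a switch when a caller wants to report them. A small sketch; the DEMO_* names are hypothetical, while the numeric values mirror the comment above:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the four documented return values of get_gfx_off_status(). */
enum demo_gfxoff_status {
        DEMO_GFXOFF_IN_GFXOFF = 0,      /* 0 - GFXOFF (default) */
        DEMO_GFXOFF_EXITING   = 1,      /* 1 - transition out of GFX state */
        DEMO_GFXOFF_NOT_IN    = 2,      /* 2 - not in GFXOFF */
        DEMO_GFXOFF_ENTERING  = 3,      /* 3 - transition into GFXOFF */
};

static const char *demo_gfxoff_str(uint32_t status)
{
        switch (status) {
        case DEMO_GFXOFF_IN_GFXOFF: return "in GFXOFF";
        case DEMO_GFXOFF_EXITING:   return "leaving GFXOFF";
        case DEMO_GFXOFF_NOT_IN:    return "not in GFXOFF";
        case DEMO_GFXOFF_ENTERING:  return "entering GFXOFF";
        default:                    return "unknown";
        }
}

int main(void)
{
        for (uint32_t s = 0; s <= 4; s++)
                printf("status %u: %s\n", s, demo_gfxoff_str(s));
        return 0;
}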
- * - * Return: Current BACO state. - */ - enum smu_baco_state (*baco_get_state)(struct smu_context *smu); - - /** - * @baco_set_state: Enter/exit BACO. - */ - int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state); - - /** - * @baco_enter: Enter BACO. - */ - int (*baco_enter)(struct smu_context *smu); - - /** - * @baco_exit: Exit Baco. - */ - int (*baco_exit)(struct smu_context *smu); - - /** - * @mode1_reset_is_support: Check if GPU supports mode1 reset. - */ - bool (*mode1_reset_is_support)(struct smu_context *smu); - /** - * @mode2_reset_is_support: Check if GPU supports mode2 reset. - */ - bool (*mode2_reset_is_support)(struct smu_context *smu); - - /** - * @mode1_reset: Perform mode1 reset. - * - * Complete GPU reset. - */ - int (*mode1_reset)(struct smu_context *smu); - - /** - * @mode2_reset: Perform mode2 reset. - * - * Mode2 reset generally does not reset as many IPs as mode1 reset. The - * IPs reset varies by asic. - */ - int (*mode2_reset)(struct smu_context *smu); - - /** - * @get_dpm_ultimate_freq: Get the hard frequency range of a clock - * domain in MHz. - */ - int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); - - /** - * @set_soft_freq_limited_range: Set the soft frequency range of a clock - * domain in MHz. - */ - int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); - - /** - * @set_power_source: Notify the SMU of the current power source. - */ - int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src); - - /** - * @log_thermal_throttling_event: Print a thermal throttling warning to - * the system's log. - */ - void (*log_thermal_throttling_event)(struct smu_context *smu); - - /** - * @get_pp_feature_mask: Print a human readable table of enabled - * features to buffer. - */ - size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf); - - /** - * @set_pp_feature_mask: Request the SMU enable/disable features to - * match those enabled in &new_mask. - */ - int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask); - - /** - * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU. - * - * Return: Size of &table - */ - ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table); - - /** - * @enable_mgpu_fan_boost: Enable multi-GPU fan boost. - */ - int (*enable_mgpu_fan_boost)(struct smu_context *smu); - - /** - * @gfx_ulv_control: Enable/disable ultra low voltage. - */ - int (*gfx_ulv_control)(struct smu_context *smu, bool enablement); - - /** - * @deep_sleep_control: Enable/disable deep sleep. - */ - int (*deep_sleep_control)(struct smu_context *smu, bool enablement); - - /** - * @get_fan_parameters: Get fan parameters. - * - * Get maximum fan speed from the power play table. - */ - int (*get_fan_parameters)(struct smu_context *smu); - - /** - * @post_init: Helper function for asic specific workarounds. - */ - int (*post_init)(struct smu_context *smu); - - /** - * @interrupt_work: Work task scheduled from SMU interrupt handler. - */ - void (*interrupt_work)(struct smu_context *smu); - - /** - * @gpo_control: Enable/disable graphics power optimization if supported. - */ - int (*gpo_control)(struct smu_context *smu, bool enablement); - - /** - * @gfx_state_change_set: Send the current graphics state to the SMU. 
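get_dpm_ultimate_freq() and set_soft_freq_limited_range() above suggest the usual pattern of clamping a requested soft range into the hard range before applying it. A minimal sketch under that assumption, with made-up limits in MHz (demo_* names are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Pretend hard limits for one clock domain, in MHz (made-up numbers). */
#define DEMO_HARD_MIN_MHZ  500u
#define DEMO_HARD_MAX_MHZ 2100u

static int demo_get_dpm_ultimate_freq(uint32_t *min, uint32_t *max)
{
        *min = DEMO_HARD_MIN_MHZ;
        *max = DEMO_HARD_MAX_MHZ;
        return 0;
}

/* Clamp the requested soft range into the hard range before "applying" it. */
static int demo_set_soft_freq_limited_range(uint32_t min, uint32_t max)
{
        uint32_t hard_min, hard_max;

        demo_get_dpm_ultimate_freq(&hard_min, &hard_max);
        if (min > max)
                return -1;
        if (min < hard_min)
                min = hard_min;
        if (max > hard_max)
                max = hard_max;
        printf("soft range applied: %u..%u MHz\n", min, max);
        return 0;
}

int main(void)
{
        return demo_set_soft_freq_limited_range(300, 2500);
}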
- */ - int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state); - - /** - * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock - * parameters to defaults. - */ - int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu); - - /** - * @smu_handle_passthrough_sbr: Send message to SMU about special handling for SBR. - */ - int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable); - - /** - * @wait_for_event: Wait for events from SMU. - */ - int (*wait_for_event)(struct smu_context *smu, - enum smu_event_type event, uint64_t event_arg); - - /** - * @sned_hbm_bad_pages_num: message SMU to update bad page number - * of SMUBUS table. - */ - int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size); - - /** - * @get_ecc_table: message SMU to get ECC INFO table. - */ - ssize_t (*get_ecc_info)(struct smu_context *smu, void *table); - - - /** - * @stb_collect_info: Collects Smart Trace Buffers data. - */ - int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size); -}; - -typedef enum { - METRICS_CURR_GFXCLK, - METRICS_CURR_SOCCLK, - METRICS_CURR_UCLK, - METRICS_CURR_VCLK, - METRICS_CURR_VCLK1, - METRICS_CURR_DCLK, - METRICS_CURR_DCLK1, - METRICS_CURR_FCLK, - METRICS_CURR_DCEFCLK, - METRICS_AVERAGE_CPUCLK, - METRICS_AVERAGE_GFXCLK, - METRICS_AVERAGE_SOCCLK, - METRICS_AVERAGE_FCLK, - METRICS_AVERAGE_UCLK, - METRICS_AVERAGE_VCLK, - METRICS_AVERAGE_DCLK, - METRICS_AVERAGE_GFXACTIVITY, - METRICS_AVERAGE_MEMACTIVITY, - METRICS_AVERAGE_VCNACTIVITY, - METRICS_AVERAGE_SOCKETPOWER, - METRICS_TEMPERATURE_EDGE, - METRICS_TEMPERATURE_HOTSPOT, - METRICS_TEMPERATURE_MEM, - METRICS_TEMPERATURE_VRGFX, - METRICS_TEMPERATURE_VRSOC, - METRICS_TEMPERATURE_VRMEM, - METRICS_THROTTLER_STATUS, - METRICS_CURR_FANSPEED, - METRICS_VOLTAGE_VDDSOC, - METRICS_VOLTAGE_VDDGFX, - METRICS_SS_APU_SHARE, - METRICS_SS_DGPU_SHARE, -} MetricsMember_t; - -enum smu_cmn2asic_mapping_type { - CMN2ASIC_MAPPING_MSG, - CMN2ASIC_MAPPING_CLK, - CMN2ASIC_MAPPING_FEATURE, - CMN2ASIC_MAPPING_TABLE, - CMN2ASIC_MAPPING_PWR, - CMN2ASIC_MAPPING_WORKLOAD, -}; - -#define MSG_MAP(msg, index, valid_in_vf) \ - [SMU_MSG_##msg] = {1, (index), (valid_in_vf)} - -#define CLK_MAP(clk, index) \ - [SMU_##clk] = {1, (index)} - -#define FEA_MAP(fea) \ - [SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT} - -#define FEA_MAP_REVERSE(fea) \ - [SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT} - -#define FEA_MAP_HALF_REVERSE(fea) \ - [SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT} - -#define TAB_MAP(tab) \ - [SMU_TABLE_##tab] = {1, TABLE_##tab} - -#define TAB_MAP_VALID(tab) \ - [SMU_TABLE_##tab] = {1, TABLE_##tab} - -#define TAB_MAP_INVALID(tab) \ - [SMU_TABLE_##tab] = {0, TABLE_##tab} - -#define PWR_MAP(tab) \ - [SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab} - -#define WORKLOAD_MAP(profile, workload) \ - [profile] = {1, (workload)} - -/** - * smu_memcpy_trailing - Copy the end of one structure into the middle of another - * - * @dst: Pointer to destination struct - * @first_dst_member: The member name in @dst where the overwrite begins - * @last_dst_member: The member name in @dst where the overwrite ends after - * @src: Pointer to the source struct - * @first_src_member: The member name in @src where the copy begins - * - */ -#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member, \ - src, first_src_member) \ -({ \ - size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \ - size_t __src_size = sizeof(*(src)) - __src_offset; \ - size_t 
__dst_offset = offsetof(typeof(*(dst)), first_dst_member); \ - size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \ - __dst_offset; \ - BUILD_BUG_ON(__src_size != __dst_size); \ - __builtin_memcpy((u8 *)(dst) + __dst_offset, \ - (u8 *)(src) + __src_offset, \ - __dst_size); \ -}) - -#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4) -int smu_get_power_limit(void *handle, - uint32_t *limit, - enum pp_power_limit_level pp_limit_level, - enum pp_power_type pp_power_type); - -bool smu_mode1_reset_is_support(struct smu_context *smu); -bool smu_mode2_reset_is_support(struct smu_context *smu); -int smu_mode1_reset(struct smu_context *smu); - -extern const struct amd_ip_funcs smu_ip_funcs; - -bool is_support_sw_smu(struct amdgpu_device *adev); -bool is_support_cclk_dpm(struct amdgpu_device *adev); -int smu_write_watermarks_table(struct smu_context *smu); - -int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t *min, uint32_t *max); - -int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, - uint32_t min, uint32_t max); - -int smu_set_ac_dc(struct smu_context *smu); - -int smu_allow_xgmi_power_down(struct smu_context *smu, bool en); - -int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value); - -int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable); - -int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, - uint64_t event_arg); -int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc); -int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size); -void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev); -int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size); -#endif -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h deleted file mode 100644 index 45f5d29bc705..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
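The smu_memcpy_trailing() macro above copies the tail of one structure over a named member range of another, refusing to build if the two spans differ in size. The sketch below approximates it in userspace C (GNU typeof, with _Static_assert standing in for BUILD_BUG_ON); the two metrics structures are invented for illustration only:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* offsetofend() as used by the macro: one past the end of a member. */
#define demo_offsetofend(TYPE, MEMBER) \
        (offsetof(TYPE, MEMBER) + sizeof(((TYPE *)0)->MEMBER))

/* Two hypothetical layouts that share a trailing run of fields. */
struct fw_metrics_v1 { uint32_t header;    uint32_t temp_edge, temp_mem, fan_rpm; };
struct drv_metrics   { uint64_t timestamp; uint32_t temp_edge, temp_mem, fan_rpm; };

/*
 * Copy everything from 'first_src' to the end of *src over the dst range
 * 'first_dst'..'last_dst', insisting at compile time that both spans have
 * the same size, in the spirit of smu_memcpy_trailing().
 */
#define demo_memcpy_trailing(dst, first_dst, last_dst, src, first_src)           \
do {                                                                             \
        _Static_assert(sizeof(*(src)) - offsetof(typeof(*(src)), first_src) ==   \
                       demo_offsetofend(typeof(*(dst)), last_dst) -              \
                       offsetof(typeof(*(dst)), first_dst),                      \
                       "source and destination spans differ in size");           \
        memcpy((uint8_t *)(dst) + offsetof(typeof(*(dst)), first_dst),           \
               (const uint8_t *)(src) + offsetof(typeof(*(src)), first_src),     \
               sizeof(*(src)) - offsetof(typeof(*(src)), first_src));            \
} while (0)

int main(void)
{
        struct fw_metrics_v1 fw = { .header = 1, .temp_edge = 45, .temp_mem = 60, .fan_rpm = 1500 };
        struct drv_metrics drv = { .timestamp = 123456 };

        demo_memcpy_trailing(&drv, temp_edge, fan_rpm, &fw, temp_edge);
        printf("edge=%u mem=%u fan=%u\n", drv.temp_edge, drv.temp_mem, drv.fan_rpm);
        return 0;
}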
- * - */ - -#ifndef ARCTURUS_PP_SMC_H -#define ARCTURUS_PP_SMC_H - -#pragma pack(push, 1) - -// SMU Response Codes: -#define PPSMC_Result_OK 0x1 -#define PPSMC_Result_Failed 0xFF -#define PPSMC_Result_UnknownCmd 0xFE -#define PPSMC_Result_CmdRejectedPrereq 0xFD -#define PPSMC_Result_CmdRejectedBusy 0xFC - -// Message Definitions: -// BASIC -#define PPSMC_MSG_TestMessage 0x1 -#define PPSMC_MSG_GetSmuVersion 0x2 -#define PPSMC_MSG_GetDriverIfVersion 0x3 -#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 -#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 -#define PPSMC_MSG_EnableAllSmuFeatures 0x6 -#define PPSMC_MSG_DisableAllSmuFeatures 0x7 -#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 -#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 -#define PPSMC_MSG_DisableSmuFeaturesLow 0xA -#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB -#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC -#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD -#define PPSMC_MSG_SetDriverDramAddrHigh 0xE -#define PPSMC_MSG_SetDriverDramAddrLow 0xF -#define PPSMC_MSG_SetToolsDramAddrHigh 0x10 -#define PPSMC_MSG_SetToolsDramAddrLow 0x11 -#define PPSMC_MSG_TransferTableSmu2Dram 0x12 -#define PPSMC_MSG_TransferTableDram2Smu 0x13 -#define PPSMC_MSG_UseDefaultPPTable 0x14 -#define PPSMC_MSG_UseBackupPPTable 0x15 -#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16 -#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17 - -//BACO/BAMACO/BOMACO -#define PPSMC_MSG_EnterBaco 0x18 -#define PPSMC_MSG_ExitBaco 0x19 -#define PPSMC_MSG_ArmD3 0x1A - -//DPM -#define PPSMC_MSG_SetSoftMinByFreq 0x1B -#define PPSMC_MSG_SetSoftMaxByFreq 0x1C -#define PPSMC_MSG_SetHardMinByFreq 0x1D -#define PPSMC_MSG_SetHardMaxByFreq 0x1E -#define PPSMC_MSG_GetMinDpmFreq 0x1F -#define PPSMC_MSG_GetMaxDpmFreq 0x20 -#define PPSMC_MSG_GetDpmFreqByIndex 0x21 - -#define PPSMC_MSG_SetWorkloadMask 0x22 -#define PPSMC_MSG_SetDfSwitchType 0x23 -#define PPSMC_MSG_GetVoltageByDpm 0x24 -#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x25 - -#define PPSMC_MSG_SetPptLimit 0x26 -#define PPSMC_MSG_GetPptLimit 0x27 - -//Power Gating -#define PPSMC_MSG_PowerUpVcn0 0x28 -#define PPSMC_MSG_PowerDownVcn0 0x29 -#define PPSMC_MSG_PowerUpVcn1 0x2A -#define PPSMC_MSG_PowerDownVcn1 0x2B - -//Resets and reload -#define PPSMC_MSG_PrepareMp1ForUnload 0x2C -#define PPSMC_MSG_PrepareMp1ForReset 0x2D -#define PPSMC_MSG_PrepareMp1ForShutdown 0x2E -#define PPSMC_MSG_SoftReset 0x2F - -//BTC -#define PPSMC_MSG_RunAfllBtc 0x30 -#define PPSMC_MSG_RunDcBtc 0x31 - -//Debug -#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33 -#define PPSMC_MSG_DramLogSetDramAddrLow 0x34 -#define PPSMC_MSG_DramLogSetDramSize 0x35 -#define PPSMC_MSG_GetDebugData 0x36 - -//WAFL and XGMI -#define PPSMC_MSG_WaflTest 0x37 -#define PPSMC_MSG_SetXgmiMode 0x38 - -//Others -#define PPSMC_MSG_SetMemoryChannelEnable 0x39 - -//OOB -#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A - -#define PPSMC_MSG_DFCstateControl 0x3B -#define PPSMC_MSG_GmiPwrDnControl 0x3D -#define PPSMC_Message_Count 0x3E - -#define PPSMC_MSG_ReadSerialNumTop32 0x40 -#define PPSMC_MSG_ReadSerialNumBottom32 0x41 - -/* paramater for MSG_LightSBR - * 1 -- Enable light secondary bus reset, only do nbio respond without further handling, - * leave driver to handle the real reset - * 0 -- Disable LightSBR, default behavior, SMU will pass the reset to PSP - */ -#define PPSMC_MSG_LightSBR 0x42 - -typedef uint32_t PPSMC_Result; -typedef uint32_t PPSMC_Msg; -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h deleted file mode 100644 
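The PPSMC_MSG_* indices above are ASIC-specific; common code maps its own message enum onto them through MSG_MAP()-style designated-initializer tables (see struct cmn2asic_msg_mapping earlier in this header). A compact sketch of that lookup, reusing a few of the Arcturus index values shown above; the DEMO_* and demo_* names are hypothetical:

#include <stdio.h>

/* A tiny common message namespace (hypothetical subset). */
enum demo_smu_msg {
        DEMO_MSG_TestMessage,
        DEMO_MSG_GetSmuVersion,
        DEMO_MSG_EnterBaco,
        DEMO_MSG_COUNT,
};

struct demo_msg_mapping {
        int valid_mapping;
        int map_to;        /* ASIC-specific PPSMC index */
        int valid_in_vf;   /* allowed when running as a virtual function? */
};

/* Designated-initializer table in the spirit of MSG_MAP(). */
#define DEMO_MSG_MAP(msg, index, vf) [DEMO_MSG_##msg] = { 1, (index), (vf) }

static const struct demo_msg_mapping demo_message_map[DEMO_MSG_COUNT] = {
        DEMO_MSG_MAP(TestMessage,   0x1,  1),
        DEMO_MSG_MAP(GetSmuVersion, 0x2,  1),
        DEMO_MSG_MAP(EnterBaco,     0x18, 0),
};

/* Translate a common message id to the ASIC index, or -1 if unsupported. */
static int demo_to_asic_msg(enum demo_smu_msg msg, int running_as_vf)
{
        if (msg >= DEMO_MSG_COUNT || !demo_message_map[msg].valid_mapping)
                return -1;
        if (running_as_vf && !demo_message_map[msg].valid_in_vf)
                return -1;
        return demo_message_map[msg].map_to;
}

int main(void)
{
        printf("GetSmuVersion -> 0x%x\n", demo_to_asic_msg(DEMO_MSG_GetSmuVersion, 0));
        printf("EnterBaco in VF -> %d\n", demo_to_asic_msg(DEMO_MSG_EnterBaco, 1));
        return 0;
}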
index 9b698780aed8..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h +++ /dev/null @@ -1,186 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef CZ_PP_SMC_H -#define CZ_PP_SMC_H - -#pragma pack(push, 1) - -/* Fan control algorithm:*/ -#define FDO_MODE_HARDWARE 0 -#define FDO_MODE_PIECE_WISE_LINEAR 1 - -enum FAN_CONTROL { - FAN_CONTROL_FUZZY, - FAN_CONTROL_TABLE -}; - -enum DPM_ARRAY { - DPM_ARRAY_HARD_MAX, - DPM_ARRAY_HARD_MIN, - DPM_ARRAY_SOFT_MAX, - DPM_ARRAY_SOFT_MIN -}; - -/* - * Return codes for driver to SMC communication. - * Leave these #define-s, enums might not be exactly 8-bits on the microcontroller. - */ -#define PPSMC_Result_OK ((uint16_t)0x01) -#define PPSMC_Result_NoMore ((uint16_t)0x02) -#define PPSMC_Result_NotNow ((uint16_t)0x03) -#define PPSMC_Result_Failed ((uint16_t)0xFF) -#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) -#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) - -#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) - -/* - * Supported driver messages - */ -#define PPSMC_MSG_Test ((uint16_t) 0x1) -#define PPSMC_MSG_GetFeatureStatus ((uint16_t) 0x2) -#define PPSMC_MSG_EnableAllSmuFeatures ((uint16_t) 0x3) -#define PPSMC_MSG_DisableAllSmuFeatures ((uint16_t) 0x4) -#define PPSMC_MSG_OptimizeBattery ((uint16_t) 0x5) -#define PPSMC_MSG_MaximizePerf ((uint16_t) 0x6) -#define PPSMC_MSG_UVDPowerOFF ((uint16_t) 0x7) -#define PPSMC_MSG_UVDPowerON ((uint16_t) 0x8) -#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x9) -#define PPSMC_MSG_VCEPowerON ((uint16_t) 0xA) -#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0xB) -#define PPSMC_MSG_ACPPowerON ((uint16_t) 0xC) -#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0xD) -#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0xE) -#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0xF) -#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x10) -#define PPSMC_MSG_SetMinDeepSleepSclk ((uint16_t) 0x11) -#define PPSMC_MSG_SetSclkSoftMin ((uint16_t) 0x12) -#define PPSMC_MSG_SetSclkSoftMax ((uint16_t) 0x13) -#define PPSMC_MSG_SetSclkHardMin ((uint16_t) 0x14) -#define PPSMC_MSG_SetSclkHardMax ((uint16_t) 0x15) -#define PPSMC_MSG_SetLclkSoftMin ((uint16_t) 0x16) -#define PPSMC_MSG_SetLclkSoftMax ((uint16_t) 0x17) -#define PPSMC_MSG_SetLclkHardMin ((uint16_t) 0x18) -#define PPSMC_MSG_SetLclkHardMax ((uint16_t) 0x19) -#define PPSMC_MSG_SetUvdSoftMin ((uint16_t) 0x1A) -#define PPSMC_MSG_SetUvdSoftMax ((uint16_t) 0x1B) -#define PPSMC_MSG_SetUvdHardMin ((uint16_t) 0x1C) -#define 
PPSMC_MSG_SetUvdHardMax ((uint16_t) 0x1D) -#define PPSMC_MSG_SetEclkSoftMin ((uint16_t) 0x1E) -#define PPSMC_MSG_SetEclkSoftMax ((uint16_t) 0x1F) -#define PPSMC_MSG_SetEclkHardMin ((uint16_t) 0x20) -#define PPSMC_MSG_SetEclkHardMax ((uint16_t) 0x21) -#define PPSMC_MSG_SetAclkSoftMin ((uint16_t) 0x22) -#define PPSMC_MSG_SetAclkSoftMax ((uint16_t) 0x23) -#define PPSMC_MSG_SetAclkHardMin ((uint16_t) 0x24) -#define PPSMC_MSG_SetAclkHardMax ((uint16_t) 0x25) -#define PPSMC_MSG_SetNclkSoftMin ((uint16_t) 0x26) -#define PPSMC_MSG_SetNclkSoftMax ((uint16_t) 0x27) -#define PPSMC_MSG_SetNclkHardMin ((uint16_t) 0x28) -#define PPSMC_MSG_SetNclkHardMax ((uint16_t) 0x29) -#define PPSMC_MSG_SetPstateSoftMin ((uint16_t) 0x2A) -#define PPSMC_MSG_SetPstateSoftMax ((uint16_t) 0x2B) -#define PPSMC_MSG_SetPstateHardMin ((uint16_t) 0x2C) -#define PPSMC_MSG_SetPstateHardMax ((uint16_t) 0x2D) -#define PPSMC_MSG_DisableLowMemoryPstate ((uint16_t) 0x2E) -#define PPSMC_MSG_EnableLowMemoryPstate ((uint16_t) 0x2F) -#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x30) -#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x31) -#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x32) -#define PPSMC_MSG_DriverDramAddrHi ((uint16_t) 0x33) -#define PPSMC_MSG_DriverDramAddrLo ((uint16_t) 0x34) -#define PPSMC_MSG_CondExecDramAddrHi ((uint16_t) 0x35) -#define PPSMC_MSG_CondExecDramAddrLo ((uint16_t) 0x36) -#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x37) -#define PPSMC_MSG_DriverResetMode ((uint16_t) 0x38) -#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x39) -#define PPSMC_MSG_SetDisplayPhyConfig ((uint16_t) 0x3A) -#define PPSMC_MSG_GetMaxSclkLevel ((uint16_t) 0x3B) -#define PPSMC_MSG_GetMaxLclkLevel ((uint16_t) 0x3C) -#define PPSMC_MSG_GetMaxUvdLevel ((uint16_t) 0x3D) -#define PPSMC_MSG_GetMaxEclkLevel ((uint16_t) 0x3E) -#define PPSMC_MSG_GetMaxAclkLevel ((uint16_t) 0x3F) -#define PPSMC_MSG_GetMaxNclkLevel ((uint16_t) 0x40) -#define PPSMC_MSG_GetMaxPstate ((uint16_t) 0x41) -#define PPSMC_MSG_DramAddrHiVirtual ((uint16_t) 0x42) -#define PPSMC_MSG_DramAddrLoVirtual ((uint16_t) 0x43) -#define PPSMC_MSG_DramAddrHiPhysical ((uint16_t) 0x44) -#define PPSMC_MSG_DramAddrLoPhysical ((uint16_t) 0x45) -#define PPSMC_MSG_DramBufferSize ((uint16_t) 0x46) -#define PPSMC_MSG_SetMmPwrLogDramAddrHi ((uint16_t) 0x47) -#define PPSMC_MSG_SetMmPwrLogDramAddrLo ((uint16_t) 0x48) -#define PPSMC_MSG_SetClkTableAddrHi ((uint16_t) 0x49) -#define PPSMC_MSG_SetClkTableAddrLo ((uint16_t) 0x4A) -#define PPSMC_MSG_GetConservativePowerLimit ((uint16_t) 0x4B) - -#define PPSMC_MSG_InitJobs ((uint16_t) 0x252) -#define PPSMC_MSG_ExecuteJob ((uint16_t) 0x254) - -#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) -#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) - -#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) -#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) - -#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170) -#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171) - -#define PPSMC_MSG_AllowLowSclkInterrupt ((uint16_t) 0x184) -#define PPSMC_MSG_MmPowerMonitorStart ((uint16_t) 0x18F) -#define PPSMC_MSG_MmPowerMonitorStop ((uint16_t) 0x190) -#define PPSMC_MSG_MmPowerMonitorRestart ((uint16_t) 0x191) - -#define PPSMC_MSG_SetClockGateMask ((uint16_t) 0x260) -#define PPSMC_MSG_SetFpsThresholdLo ((uint16_t) 0x264) -#define PPSMC_MSG_SetFpsThresholdHi ((uint16_t) 0x265) -#define PPSMC_MSG_SetLowSclkIntrThreshold ((uint16_t) 0x266) - -#define PPSMC_MSG_ClkTableXferToDram ((uint16_t) 0x267) -#define PPSMC_MSG_ClkTableXferToSmu ((uint16_t) 0x268) -#define 
PPSMC_MSG_GetAverageGraphicsActivity ((uint16_t) 0x269) -#define PPSMC_MSG_GetAverageGioActivity ((uint16_t) 0x26A) -#define PPSMC_MSG_SetLoggerBufferSize ((uint16_t) 0x26B) -#define PPSMC_MSG_SetLoggerAddressHigh ((uint16_t) 0x26C) -#define PPSMC_MSG_SetLoggerAddressLow ((uint16_t) 0x26D) -#define PPSMC_MSG_SetWatermarkFrequency ((uint16_t) 0x26E) -#define PPSMC_MSG_SetDisplaySizePowerParams ((uint16_t) 0x26F) - -/* REMOVE LATER*/ -#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) - -/* Feature Enable Masks*/ -#define NB_DPM_MASK 0x00000800 -#define VDDGFX_MASK 0x00800000 -#define VCE_DPM_MASK 0x00400000 -#define ACP_DPM_MASK 0x00040000 -#define UVD_DPM_MASK 0x00010000 -#define GFX_CU_PG_MASK 0x00004000 -#define SCLK_DPM_MASK 0x00080000 - -#if !defined(SMC_MICROCODE) -#pragma pack(pop) - -#endif - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h deleted file mode 100644 index 7ae494569a60..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h +++ /dev/null @@ -1,412 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - - -#ifndef _FIJI_PP_SMC_H_ -#define _FIJI_PP_SMC_H_ - -#pragma pack(push, 1) - -#define PPSMC_SWSTATE_FLAG_DC 0x01 -#define PPSMC_SWSTATE_FLAG_UVD 0x02 -#define PPSMC_SWSTATE_FLAG_VCE 0x04 - -#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 -#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 -#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff - -#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 -#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 -#define PPSMC_SYSTEMFLAG_GDDR5 0x04 - -#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 - -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 -#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 - -/* Defines for DPM 2.0 */ -#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 -#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 -#define PPSMC_DPM2FLAGS_OCP 0x04 - -/* Defines for display watermark level */ -#define PPSMC_DISPLAY_WATERMARK_LOW 0 -#define PPSMC_DISPLAY_WATERMARK_HIGH 1 - -/* In the HW performance level's state flags: */ -#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 -#define PPSMC_STATEFLAG_POWERBOOST 0x02 -#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 -#define PPSMC_STATEFLAG_POWERSHIFT 0x08 -#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 -#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 -#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 - -/* Fan control algorithm: */ -#define FDO_MODE_HARDWARE 0 -#define FDO_MODE_PIECE_WISE_LINEAR 1 - -enum FAN_CONTROL { - FAN_CONTROL_FUZZY, - FAN_CONTROL_TABLE -}; - -/* Gemini Modes*/ -#define PPSMC_GeminiModeNone 0 /*Single GPU board*/ -#define PPSMC_GeminiModeMaster 1 /*Master GPU on a Gemini board*/ -#define PPSMC_GeminiModeSlave 2 /*Slave GPU on a Gemini board*/ - - -/* Return codes for driver to SMC communication. 
*/ -#define PPSMC_Result_OK ((uint16_t)0x01) -#define PPSMC_Result_NoMore ((uint16_t)0x02) - -#define PPSMC_Result_NotNow ((uint16_t)0x03) - -#define PPSMC_Result_Failed ((uint16_t)0xFF) -#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) -#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) - -#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) - - -#define PPSMC_MSG_Halt ((uint16_t)0x10) -#define PPSMC_MSG_Resume ((uint16_t)0x11) -#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) -#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) -#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) -#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) -#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) -#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) -#define PPSMC_MSG_LevelUp ((uint16_t)0x18) -#define PPSMC_MSG_LevelDown ((uint16_t)0x19) -#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) -#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) - -#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) -#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) -#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) -#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) -#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) - -#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) -#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) -#define PPSMC_MSG_EnableCac ((uint16_t)0x53) -#define PPSMC_MSG_DisableCac ((uint16_t)0x54) -#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) -#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) -#define PPSMC_CACHistoryStart ((uint16_t)0x57) -#define PPSMC_CACHistoryStop ((uint16_t)0x58) -#define PPSMC_TDPClampingActive ((uint16_t)0x59) -#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) -#define PPSMC_StartFanControl ((uint16_t)0x5B) -#define PPSMC_StopFanControl ((uint16_t)0x5C) -#define PPSMC_NoDisplay ((uint16_t)0x5D) -#define PPSMC_HasDisplay ((uint16_t)0x5E) -#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) -#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) -#define PPSMC_MSG_EnableULV ((uint16_t)0x62) -#define PPSMC_MSG_DisableULV ((uint16_t)0x63) -#define PPSMC_MSG_EnterULV ((uint16_t)0x64) -#define PPSMC_MSG_ExitULV ((uint16_t)0x65) -#define PPSMC_PowerShiftActive ((uint16_t)0x6A) -#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) -#define PPSMC_OCPActive ((uint16_t)0x6C) -#define PPSMC_OCPInactive ((uint16_t)0x6D) -#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) -#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) -#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) -#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) -#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) -#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) -#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) -#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) -#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) -#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) -#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) -#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) -#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) -#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) -#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) -#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) - -#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) -#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) -#define PPSMC_FlushDataCache ((uint16_t)0x80) -#define PPSMC_FlushInstrCache ((uint16_t)0x81) - -#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) 
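The PPSMC_isERROR() test above works because the listed failure codes (0xFD-0xFF) all have bit 7 set while the success codes (0x01-0x03) do not. A tiny standalone check of that convention, using DEMO_* copies of the values above for illustration only:

#include <stdint.h>
#include <stdio.h>

/* Result codes as defined in the header above. */
#define DEMO_Result_OK         ((uint16_t)0x01)
#define DEMO_Result_NoMore     ((uint16_t)0x02)
#define DEMO_Result_NotNow     ((uint16_t)0x03)
#define DEMO_Result_Failed     ((uint16_t)0xFF)
#define DEMO_Result_UnknownCmd ((uint16_t)0xFE)
#define DEMO_Result_UnknownVT  ((uint16_t)0xFD)

/* The failure codes above all carry bit 7, so it doubles as the error flag. */
#define DEMO_isERROR(x) ((uint16_t)0x80 & (x))

int main(void)
{
        const uint16_t results[] = {
                DEMO_Result_OK, DEMO_Result_NoMore, DEMO_Result_NotNow,
                DEMO_Result_Failed, DEMO_Result_UnknownCmd, DEMO_Result_UnknownVT,
        };

        for (unsigned i = 0; i < sizeof(results) / sizeof(results[0]); i++)
                printf("result 0x%02x -> %s\n", results[i],
                       DEMO_isERROR(results[i]) ? "error" : "ok");
        return 0;
}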
-#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) - -#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) - -#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) -#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) -#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) -#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) - -#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) - -#define PPSMC_MSG_BREAK ((uint16_t)0xF8) - -/* Trinity Specific Messages*/ -#define PPSMC_MSG_Test ((uint16_t) 0x100) -#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101) -#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102) -#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103) -#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) -#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105) -#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106) -#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107) -#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108) -#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109) -#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a) -#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b) -#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e) -#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f) -#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110) -#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111) -#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112) -#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113) -#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114) -#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117) -#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118) -#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119) -#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a) -#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b) -#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c) -#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d) -#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e) -#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f) -#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120) -#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121) -#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122) -#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123) -#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124) -#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125) -#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126) -#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127) -#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128) - -#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129) -#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A) -#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B) -#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C) -#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) -#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) -#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) -#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) -#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) -#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) -#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) -#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134) -#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) -#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) -#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) -#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) -#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) -#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) -#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b) -#define PPSMC_MSG_SDMAPowerON ((uint16_t) 
0x13c) -#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) -#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e) -#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f) -#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) -#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) -#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142) -#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143) -#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144) -#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) -#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) -#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) -#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) -#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) -#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) -#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b) - -#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c) -#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d) - -#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) -#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) -#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150) -#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151) -#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152) -#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153) -#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) -#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) -#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) -#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157) -#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158) -#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) -#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) -#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) -#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c) -#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) -#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) -#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) -#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160) -#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161) -#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) -#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163) -#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164) -#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165) -#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166) -#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) -#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168) -#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) -#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) -#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b) -#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c) -#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d) -#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e) -#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f) -#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170) -#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171) -#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172) -#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173) -#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174) -#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175) -#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176) -#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177) -#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178) -#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179) -#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a) -#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b) -#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c) 
-#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d) -#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e) -#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f) -#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180) -#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181) -#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182) -#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184) -#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) -#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) -#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) -#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) -#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) -#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) -#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) -#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) -#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D) -#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E) -#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) -#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) -#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) -#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192) -#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193) -#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194) -#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195) -#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207) -#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196) -#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208) -#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197) -#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198) -#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199) -#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) -#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B) -#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) -#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) - -#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) -#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) -#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202) -#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203) -#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204) -#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) -#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206) -#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209) -#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A) - -#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240) -#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241) -#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242) -#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243) -#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244) -#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245) -#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246) - -#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) -#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) -#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) -#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) -#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) -#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255) -#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256) -#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257) -#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258) -#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259) -#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A) -#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B) -#define DMCUSMC_MSG_PSREntry ((uint16_t) 
0x25C) -#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D) -#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260) -#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261) -#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262) -#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263) -#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264) -#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265) -#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266) -#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267) -#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268) -#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269) -#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A) -#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B) -#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C) -#define PPSMC_MSG_GetHbmCode ((uint16_t) 0x26D) -#define PPSMC_MSG_GetVrVddcTemperature ((uint16_t) 0x26E) -#define PPSMC_MSG_GetVrMvddTemperature ((uint16_t) 0x26F) -#define PPSMC_MSG_GetLiquidTemperature ((uint16_t) 0x270) -#define PPSMC_MSG_GetPlxTemperature ((uint16_t) 0x271) -#define PPSMC_MSG_RequestI2CControl ((uint16_t) 0x272) -#define PPSMC_MSG_ReleaseI2CControl ((uint16_t) 0x273) -#define PPSMC_MSG_LedConfig ((uint16_t) 0x274) -#define PPSMC_MSG_SetHbmFanCode ((uint16_t) 0x275) -#define PPSMC_MSG_SetHbmThrottleCode ((uint16_t) 0x276) - -#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400) -#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401) -#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402) -#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) -#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) - -/* AVFS Only - Remove Later */ -#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x666) - -/* If the SMC firmware has an event status soft register this is what the individual bits mean.*/ -#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 -#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 -#define PPSMC_EVENT_STATUS_DC 0x00000004 - -typedef uint16_t PPSMC_Msg; - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h deleted file mode 100644 index 6e0be6027705..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h +++ /dev/null @@ -1,469 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef _HARDWARE_MANAGER_H_ -#define _HARDWARE_MANAGER_H_ - - - -struct pp_hwmgr; -struct pp_hw_power_state; -struct pp_power_state; -enum amd_dpm_forced_level; -struct PP_TemperatureRange; - - -struct phm_fan_speed_info { - uint32_t min_percent; - uint32_t max_percent; - uint32_t min_rpm; - uint32_t max_rpm; - bool supports_percent_read; - bool supports_percent_write; - bool supports_rpm_read; - bool supports_rpm_write; -}; - -/* Automatic Power State Throttling */ -enum PHM_AutoThrottleSource -{ - PHM_AutoThrottleSource_Thermal, - PHM_AutoThrottleSource_External -}; - -typedef enum PHM_AutoThrottleSource PHM_AutoThrottleSource; - -enum phm_platform_caps { - PHM_PlatformCaps_AtomBiosPpV1 = 0, - PHM_PlatformCaps_PowerPlaySupport, - PHM_PlatformCaps_ACOverdriveSupport, - PHM_PlatformCaps_BacklightSupport, - PHM_PlatformCaps_ThermalController, - PHM_PlatformCaps_BiosPowerSourceControl, - PHM_PlatformCaps_DisableVoltageTransition, - PHM_PlatformCaps_DisableEngineTransition, - PHM_PlatformCaps_DisableMemoryTransition, - PHM_PlatformCaps_DynamicPowerManagement, - PHM_PlatformCaps_EnableASPML0s, - PHM_PlatformCaps_EnableASPML1, - PHM_PlatformCaps_OD5inACSupport, - PHM_PlatformCaps_OD5inDCSupport, - PHM_PlatformCaps_SoftStateOD5, - PHM_PlatformCaps_NoOD5Support, - PHM_PlatformCaps_ContinuousHardwarePerformanceRange, - PHM_PlatformCaps_ActivityReporting, - PHM_PlatformCaps_EnableBackbias, - PHM_PlatformCaps_OverdriveDisabledByPowerBudget, - PHM_PlatformCaps_ShowPowerBudgetWarning, - PHM_PlatformCaps_PowerBudgetWaiverAvailable, - PHM_PlatformCaps_GFXClockGatingSupport, - PHM_PlatformCaps_MMClockGatingSupport, - PHM_PlatformCaps_AutomaticDCTransition, - PHM_PlatformCaps_GeminiPrimary, - PHM_PlatformCaps_MemorySpreadSpectrumSupport, - PHM_PlatformCaps_EngineSpreadSpectrumSupport, - PHM_PlatformCaps_StepVddc, - PHM_PlatformCaps_DynamicPCIEGen2Support, - PHM_PlatformCaps_SMC, - PHM_PlatformCaps_FaultyInternalThermalReading, /* Internal thermal controller reports faulty temperature value when DAC2 is active */ - PHM_PlatformCaps_EnableVoltageControl, /* indicates voltage can be controlled */ - PHM_PlatformCaps_EnableSideportControl, /* indicates Sideport can be controlled */ - PHM_PlatformCaps_VideoPlaybackEEUNotification, /* indicates EEU notification of video start/stop is required */ - PHM_PlatformCaps_TurnOffPll_ASPML1, /* PCIE Turn Off PLL in ASPM L1 */ - PHM_PlatformCaps_EnableHTLinkControl, /* indicates HT Link can be controlled by ACPI or CLMC overridden/automated mode. */ - PHM_PlatformCaps_PerformanceStateOnly, /* indicates only performance power state to be used on current system. */ - PHM_PlatformCaps_ExclusiveModeAlwaysHigh, /* In Exclusive (3D) mode always stay in High state. */ - PHM_PlatformCaps_DisableMGClockGating, /* to disable Medium Grain Clock Gating or not */ - PHM_PlatformCaps_DisableMGCGTSSM, /* TO disable Medium Grain Clock Gating Shader Complex control */ - PHM_PlatformCaps_UVDAlwaysHigh, /* In UVD mode always stay in High state */ - PHM_PlatformCaps_DisablePowerGating, /* to disable power gating */ - PHM_PlatformCaps_CustomThermalPolicy, /* indicates only performance power state to be used on current system. */ - PHM_PlatformCaps_StayInBootState, /* Stay in Boot State, do not do clock/voltage or PCIe Lane and Gen switching (RV7xx and up). */ - PHM_PlatformCaps_SMCAllowSeparateSWThermalState, /* SMC use separate SW thermal state, instead of the default SMC thermal policy. */ - PHM_PlatformCaps_MultiUVDStateSupport, /* Powerplay state table supports multi UVD states. 
*/ - PHM_PlatformCaps_EnableSCLKDeepSleepForUVD, /* With HW ECOs, we don't need to disable SCLK Deep Sleep for UVD state. */ - PHM_PlatformCaps_EnableMCUHTLinkControl, /* Enable HT link control by MCU */ - PHM_PlatformCaps_ABM, /* ABM support.*/ - PHM_PlatformCaps_KongThermalPolicy, /* A thermal policy specific for Kong */ - PHM_PlatformCaps_SwitchVDDNB, /* if the users want to switch VDDNB */ - PHM_PlatformCaps_ULPS, /* support ULPS mode either through ACPI state or ULPS state */ - PHM_PlatformCaps_NativeULPS, /* hardware capable of ULPS state (other than through the ACPI state) */ - PHM_PlatformCaps_EnableMVDDControl, /* indicates that memory voltage can be controlled */ - PHM_PlatformCaps_ControlVDDCI, /* Control VDDCI separately from VDDC. */ - PHM_PlatformCaps_DisableDCODT, /* indicates if DC ODT apply or not */ - PHM_PlatformCaps_DynamicACTiming, /* if the SMC dynamically re-programs MC SEQ register values */ - PHM_PlatformCaps_EnableThermalIntByGPIO, /* enable throttle control through GPIO */ - PHM_PlatformCaps_BootStateOnAlert, /* Go to boot state on alerts, e.g. on an AC->DC transition. */ - PHM_PlatformCaps_DontWaitForVBlankOnAlert, /* Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). */ - PHM_PlatformCaps_Force3DClockSupport, /* indicates if the platform supports force 3D clock. */ - PHM_PlatformCaps_MicrocodeFanControl, /* Fan is controlled by the SMC microcode. */ - PHM_PlatformCaps_AdjustUVDPriorityForSP, - PHM_PlatformCaps_DisableLightSleep, /* Light sleep for evergreen family. */ - PHM_PlatformCaps_DisableMCLS, /* MC Light sleep */ - PHM_PlatformCaps_RegulatorHot, /* Enable throttling on 'regulator hot' events. */ - PHM_PlatformCaps_BACO, /* Support Bus Alive Chip Off mode */ - PHM_PlatformCaps_DisableDPM, /* Disable DPM, supported from Llano */ - PHM_PlatformCaps_DynamicM3Arbiter, /* support dynamically change m3 arbitor parameters */ - PHM_PlatformCaps_SclkDeepSleep, /* support sclk deep sleep */ - PHM_PlatformCaps_DynamicPatchPowerState, /* this ASIC supports to patch power state dynamically */ - PHM_PlatformCaps_ThermalAutoThrottling, /* enabling auto thermal throttling, */ - PHM_PlatformCaps_SumoThermalPolicy, /* A thermal policy specific for Sumo */ - PHM_PlatformCaps_PCIEPerformanceRequest, /* support to change RC voltage */ - PHM_PlatformCaps_BLControlledByGPU, /* support varibright */ - PHM_PlatformCaps_PowerContainment, /* support DPM2 power containment (AKA TDP clamping) */ - PHM_PlatformCaps_SQRamping, /* support DPM2 SQ power throttle */ - PHM_PlatformCaps_CAC, /* support Capacitance * Activity power estimation */ - PHM_PlatformCaps_NIChipsets, /* Northern Island and beyond chipsets */ - PHM_PlatformCaps_TrinityChipsets, /* Trinity chipset */ - PHM_PlatformCaps_EvergreenChipsets, /* Evergreen family chipset */ - PHM_PlatformCaps_PowerControl, /* Cayman and beyond chipsets */ - PHM_PlatformCaps_DisableLSClockGating, /* to disable Light Sleep control for HDP memories */ - PHM_PlatformCaps_BoostState, /* this ASIC supports boost state */ - PHM_PlatformCaps_UserMaxClockForMultiDisplays, /* indicates if max memory clock is used for all status when multiple displays are connected */ - PHM_PlatformCaps_RegWriteDelay, /* indicates if back to back reg write delay is required */ - PHM_PlatformCaps_NonABMSupportInPPLib, /* ABM is not supported in PPLIB, (moved from PPLIB to DAL) */ - PHM_PlatformCaps_GFXDynamicMGPowerGating, /* Enable Dynamic MG PowerGating on Trinity */ - PHM_PlatformCaps_DisableSMUUVDHandshake, /* Disable SMU UVD Handshake */ - 
PHM_PlatformCaps_DTE, /* Support Digital Temperature Estimation */ - PHM_PlatformCaps_W5100Specifc_SmuSkipMsgDTE, /* This is for the feature requested by David B., and Tonny W.*/ - PHM_PlatformCaps_UVDPowerGating, /* enable UVD power gating, supported from Llano */ - PHM_PlatformCaps_UVDDynamicPowerGating, /* enable UVD Dynamic power gating, supported from UVD5 */ - PHM_PlatformCaps_VCEPowerGating, /* Enable VCE power gating, supported for TN and later ASICs */ - PHM_PlatformCaps_SamuPowerGating, /* Enable SAMU power gating, supported for KV and later ASICs */ - PHM_PlatformCaps_UVDDPM, /* UVD clock DPM */ - PHM_PlatformCaps_VCEDPM, /* VCE clock DPM */ - PHM_PlatformCaps_SamuDPM, /* SAMU clock DPM */ - PHM_PlatformCaps_AcpDPM, /* ACP clock DPM */ - PHM_PlatformCaps_SclkDeepSleepAboveLow, /* Enable SCLK Deep Sleep on all DPM states */ - PHM_PlatformCaps_DynamicUVDState, /* Dynamic UVD State */ - PHM_PlatformCaps_WantSAMClkWithDummyBackEnd, /* Set SAM Clk With Dummy Back End */ - PHM_PlatformCaps_WantUVDClkWithDummyBackEnd, /* Set UVD Clk With Dummy Back End */ - PHM_PlatformCaps_WantVCEClkWithDummyBackEnd, /* Set VCE Clk With Dummy Back End */ - PHM_PlatformCaps_WantACPClkWithDummyBackEnd, /* Set SAM Clk With Dummy Back End */ - PHM_PlatformCaps_OD6inACSupport, /* indicates that the ASIC/back end supports OD6 */ - PHM_PlatformCaps_OD6inDCSupport, /* indicates that the ASIC/back end supports OD6 in DC */ - PHM_PlatformCaps_EnablePlatformPowerManagement, /* indicates that Platform Power Management feature is supported */ - PHM_PlatformCaps_SurpriseRemoval, /* indicates that surprise removal feature is requested */ - PHM_PlatformCaps_NewCACVoltage, /* indicates new CAC voltage table support */ - PHM_PlatformCaps_DiDtSupport, /* for dI/dT feature */ - PHM_PlatformCaps_DBRamping, /* for dI/dT feature */ - PHM_PlatformCaps_TDRamping, /* for dI/dT feature */ - PHM_PlatformCaps_TCPRamping, /* for dI/dT feature */ - PHM_PlatformCaps_DBRRamping, /* for dI/dT feature */ - PHM_PlatformCaps_DiDtEDCEnable, /* for dI/dT feature */ - PHM_PlatformCaps_GCEDC, /* for dI/dT feature */ - PHM_PlatformCaps_PSM, /* for dI/dT feature */ - PHM_PlatformCaps_EnableSMU7ThermalManagement, /* SMC will manage thermal events */ - PHM_PlatformCaps_FPS, /* FPS support */ - PHM_PlatformCaps_ACP, /* ACP support */ - PHM_PlatformCaps_SclkThrottleLowNotification, /* SCLK Throttle Low Notification */ - PHM_PlatformCaps_XDMAEnabled, /* XDMA engine is enabled */ - PHM_PlatformCaps_UseDummyBackEnd, /* use dummy back end */ - PHM_PlatformCaps_EnableDFSBypass, /* Enable DFS bypass */ - PHM_PlatformCaps_VddNBDirectRequest, - PHM_PlatformCaps_PauseMMSessions, - PHM_PlatformCaps_UnTabledHardwareInterface, /* Tableless/direct call hardware interface for CI and newer ASICs */ - PHM_PlatformCaps_SMU7, /* indicates that vpuRecoveryBegin without SMU shutdown */ - PHM_PlatformCaps_RevertGPIO5Polarity, /* indicates revert GPIO5 plarity table support */ - PHM_PlatformCaps_Thermal2GPIO17, /* indicates thermal2GPIO17 table support */ - PHM_PlatformCaps_ThermalOutGPIO, /* indicates ThermalOutGPIO support, pin number is assigned by VBIOS */ - PHM_PlatformCaps_DisableMclkSwitchingForFrameLock, /* Disable memory clock switch during Framelock */ - PHM_PlatformCaps_ForceMclkHigh, /* Disable memory clock switching by forcing memory clock high */ - PHM_PlatformCaps_VRHotGPIOConfigurable, /* indicates VR_HOT GPIO configurable */ - PHM_PlatformCaps_TempInversion, /* enable Temp Inversion feature */ - PHM_PlatformCaps_IOIC3, - 
PHM_PlatformCaps_ConnectedStandby, - PHM_PlatformCaps_EVV, - PHM_PlatformCaps_EnableLongIdleBACOSupport, - PHM_PlatformCaps_CombinePCCWithThermalSignal, - PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc, - PHM_PlatformCaps_StablePState, - PHM_PlatformCaps_OD6PlusinACSupport, - PHM_PlatformCaps_OD6PlusinDCSupport, - PHM_PlatformCaps_ODThermalLimitUnlock, - PHM_PlatformCaps_ReducePowerLimit, - PHM_PlatformCaps_ODFuzzyFanControlSupport, - PHM_PlatformCaps_GeminiRegulatorFanControlSupport, - PHM_PlatformCaps_ControlVDDGFX, - PHM_PlatformCaps_BBBSupported, - PHM_PlatformCaps_DisableVoltageIsland, - PHM_PlatformCaps_FanSpeedInTableIsRPM, - PHM_PlatformCaps_GFXClockGatingManagedInCAIL, - PHM_PlatformCaps_IcelandULPSSWWorkAround, - PHM_PlatformCaps_FPSEnhancement, - PHM_PlatformCaps_LoadPostProductionFirmware, - PHM_PlatformCaps_VpuRecoveryInProgress, - PHM_PlatformCaps_Falcon_QuickTransition, - PHM_PlatformCaps_AVFS, - PHM_PlatformCaps_ClockStretcher, - PHM_PlatformCaps_TablelessHardwareInterface, - PHM_PlatformCaps_EnableDriverEVV, - PHM_PlatformCaps_SPLLShutdownSupport, - PHM_PlatformCaps_VirtualBatteryState, - PHM_PlatformCaps_IgnoreForceHighClockRequestsInAPUs, - PHM_PlatformCaps_DisableMclkSwitchForVR, - PHM_PlatformCaps_SMU8, - PHM_PlatformCaps_VRHotPolarityHigh, - PHM_PlatformCaps_IPS_UlpsExclusive, - PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme, - PHM_PlatformCaps_GeminiAsymmetricPower, - PHM_PlatformCaps_OCLPowerOptimization, - PHM_PlatformCaps_MaxPCIEBandWidth, - PHM_PlatformCaps_PerfPerWattOptimizationSupport, - PHM_PlatformCaps_UVDClientMCTuning, - PHM_PlatformCaps_ODNinACSupport, - PHM_PlatformCaps_ODNinDCSupport, - PHM_PlatformCaps_OD8inACSupport, - PHM_PlatformCaps_OD8inDCSupport, - PHM_PlatformCaps_UMDPState, - PHM_PlatformCaps_AutoWattmanSupport, - PHM_PlatformCaps_AutoWattmanEnable_CCCState, - PHM_PlatformCaps_FreeSyncActive, - PHM_PlatformCaps_EnableShadowPstate, - PHM_PlatformCaps_customThermalManagement, - PHM_PlatformCaps_staticFanControl, - PHM_PlatformCaps_Virtual_System, - PHM_PlatformCaps_LowestUclkReservedForUlv, - PHM_PlatformCaps_EnableBoostState, - PHM_PlatformCaps_AVFSSupport, - PHM_PlatformCaps_ThermalPolicyDelay, - PHM_PlatformCaps_CustomFanControlSupport, - PHM_PlatformCaps_BAMACO, - PHM_PlatformCaps_Max -}; - -#define PHM_MAX_NUM_CAPS_BITS_PER_FIELD (sizeof(uint32_t)*8) - -/* Number of uint32_t entries used by CAPS table */ -#define PHM_MAX_NUM_CAPS_ULONG_ENTRIES \ - ((PHM_PlatformCaps_Max + ((PHM_MAX_NUM_CAPS_BITS_PER_FIELD) - 1)) / (PHM_MAX_NUM_CAPS_BITS_PER_FIELD)) - -struct pp_hw_descriptor { - uint32_t hw_caps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES]; -}; - -enum PHM_PerformanceLevelDesignation { - PHM_PerformanceLevelDesignation_Activity, - PHM_PerformanceLevelDesignation_PowerContainment -}; - -typedef enum PHM_PerformanceLevelDesignation PHM_PerformanceLevelDesignation; - -struct PHM_PerformanceLevel { - uint32_t coreClock; - uint32_t memory_clock; - uint32_t vddc; - uint32_t vddci; - uint32_t nonLocalMemoryFreq; - uint32_t nonLocalMemoryWidth; -}; - -typedef struct PHM_PerformanceLevel PHM_PerformanceLevel; - -/* Function for setting a platform cap */ -static inline void phm_cap_set(uint32_t *caps, - enum phm_platform_caps c) -{ - caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] |= (1UL << - (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))); -} - -static inline void phm_cap_unset(uint32_t *caps, - enum phm_platform_caps c) -{ - caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] &= ~(1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))); -} - -static inline bool 
phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps c) -{ - return (0 != (caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] & - (1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))))); -} - -#define PP_CAP(c) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c)) - -#define PP_PCIEGenInvalid 0xffff -enum PP_PCIEGen { - PP_PCIEGen1 = 0, /* PCIE 1.0 - Transfer rate of 2.5 GT/s */ - PP_PCIEGen2, /*PCIE 2.0 - Transfer rate of 5.0 GT/s */ - PP_PCIEGen3 /*PCIE 3.0 - Transfer rate of 8.0 GT/s */ -}; - -typedef enum PP_PCIEGen PP_PCIEGen; - -#define PP_Min_PCIEGen PP_PCIEGen1 -#define PP_Max_PCIEGen PP_PCIEGen3 -#define PP_Min_PCIELane 1 -#define PP_Max_PCIELane 16 - -enum phm_clock_Type { - PHM_DispClock = 1, - PHM_SClock, - PHM_MemClock -}; - -#define MAX_NUM_CLOCKS 16 - -struct PP_Clocks { - uint32_t engineClock; - uint32_t memoryClock; - uint32_t BusBandwidth; - uint32_t engineClockInSR; - uint32_t dcefClock; - uint32_t dcefClockInSR; -}; - -struct pp_clock_info { - uint32_t min_mem_clk; - uint32_t max_mem_clk; - uint32_t min_eng_clk; - uint32_t max_eng_clk; - uint32_t min_bus_bandwidth; - uint32_t max_bus_bandwidth; -}; - -struct phm_platform_descriptor { - uint32_t platformCaps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES]; - uint32_t vbiosInterruptId; - struct PP_Clocks overdriveLimit; - struct PP_Clocks clockStep; - uint32_t hardwareActivityPerformanceLevels; - uint32_t minimumClocksReductionPercentage; - uint32_t minOverdriveVDDC; - uint32_t maxOverdriveVDDC; - uint32_t overdriveVDDCStep; - uint32_t hardwarePerformanceLevels; - uint16_t powerBudget; - uint32_t TDPLimit; - uint32_t nearTDPLimit; - uint32_t nearTDPLimitAdjusted; - uint32_t SQRampingThreshold; - uint32_t CACLeakage; - uint16_t TDPODLimit; - uint32_t TDPAdjustment; - bool TDPAdjustmentPolarity; - uint16_t LoadLineSlope; - uint32_t VidMinLimit; - uint32_t VidMaxLimit; - uint32_t VidStep; - uint32_t VidAdjustment; - bool VidAdjustmentPolarity; -}; - -struct phm_clocks { - uint32_t num_of_entries; - uint32_t clock[MAX_NUM_CLOCKS]; -}; - -#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 -#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 -#define DPMTABLE_UPDATE_SCLK 0x00000004 -#define DPMTABLE_UPDATE_MCLK 0x00000008 -#define DPMTABLE_OD_UPDATE_VDDC 0x00000010 -#define DPMTABLE_UPDATE_SOCCLK 0x00000020 - -struct phm_odn_performance_level { - uint32_t clock; - uint32_t vddc; - bool enabled; -}; - -struct phm_odn_clock_levels { - uint32_t size; - uint32_t options; - uint32_t flags; - uint32_t num_of_pl; - /* variable-sized array, specify by num_of_pl. 
*/ - struct phm_odn_performance_level entries[8]; -}; - -extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr); -extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); -extern int phm_setup_asic(struct pp_hwmgr *hwmgr); -extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); -extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr); -extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr); -extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block); -extern int phm_set_power_state(struct pp_hwmgr *hwmgr, - const struct pp_hw_power_state *pcurrent_state, - const struct pp_hw_power_state *pnew_power_state); - -extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, - struct pp_power_state *adjusted_ps, - const struct pp_power_state *current_ps); - -extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr); - -extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level); -extern int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr); -extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr); -extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); -extern int phm_register_irq_handlers(struct pp_hwmgr *hwmgr); -extern int phm_start_thermal_controller(struct pp_hwmgr *hwmgr); -extern int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr); -extern bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr); - -extern int phm_check_states_equal(struct pp_hwmgr *hwmgr, - const struct pp_hw_power_state *pstate1, - const struct pp_hw_power_state *pstate2, - bool *equal); - -extern int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, - const struct amd_pp_display_configuration *display_config); - -extern int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, - struct amd_pp_simple_clock_info *info); - -extern int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr); - -extern int phm_power_down_asic(struct pp_hwmgr *hwmgr); - -extern int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, - PHM_PerformanceLevelDesignation designation, uint32_t index, - PHM_PerformanceLevel *level); - -extern int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, - struct pp_clock_info *pclock_info, - PHM_PerformanceLevelDesignation designation); - -extern int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); - -extern int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); - -extern int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, - enum amd_pp_clock_type type, - struct pp_clock_levels_with_latency *clocks); -extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, - enum amd_pp_clock_type type, - struct pp_clock_levels_with_voltage *clocks); -extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, - void *clock_ranges); -extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr, - struct pp_display_clock_request *clock); - -extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); -extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr); - -extern int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count); - -#endif /* _HARDWARE_MANAGER_H_ */ - diff --git a/drivers/gpu/drm/amd/pm/inc/hwmgr.h 
b/drivers/gpu/drm/amd/pm/inc/hwmgr.h deleted file mode 100644 index 03226baea65e..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/hwmgr.h +++ /dev/null @@ -1,833 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef _HWMGR_H_ -#define _HWMGR_H_ - -#include -#include "amd_powerplay.h" -#include "hardwaremanager.h" -#include "hwmgr_ppt.h" -#include "ppatomctrl.h" -#include "power_state.h" -#include "smu_helper.h" - -struct pp_hwmgr; -struct phm_fan_speed_info; -struct pp_atomctrl_voltage_table; - -#define VOLTAGE_SCALE 4 -#define VOLTAGE_VID_OFFSET_SCALE1 625 -#define VOLTAGE_VID_OFFSET_SCALE2 100 - -enum DISPLAY_GAP { - DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */ - DISPLAY_GAP_VBLANK = 1, /* Wait for vblank. */ - DISPLAY_GAP_WATERMARK = 2, /* Wait for MCHG watermark. (Note that HW may deassert WM in VBI depending on DC_STUTTER_CNTL.) */ - DISPLAY_GAP_IGNORE = 3 /* Do not wait. 
*/ -}; -typedef enum DISPLAY_GAP DISPLAY_GAP; - -enum BACO_STATE { - BACO_STATE_OUT = 0, - BACO_STATE_IN, -}; - -struct vi_dpm_level { - bool enabled; - uint32_t value; - uint32_t param1; -}; - -struct vi_dpm_table { - uint32_t count; - struct vi_dpm_level dpm_level[]; -}; - -#define PCIE_PERF_REQ_REMOVE_REGISTRY 0 -#define PCIE_PERF_REQ_FORCE_LOWPOWER 1 -#define PCIE_PERF_REQ_GEN1 2 -#define PCIE_PERF_REQ_GEN2 3 -#define PCIE_PERF_REQ_GEN3 4 - -enum PHM_BackEnd_Magic { - PHM_Dummy_Magic = 0xAA5555AA, - PHM_RV770_Magic = 0xDCBAABCD, - PHM_Kong_Magic = 0x239478DF, - PHM_NIslands_Magic = 0x736C494E, - PHM_Sumo_Magic = 0x8339FA11, - PHM_SIslands_Magic = 0x369431AC, - PHM_Trinity_Magic = 0x96751873, - PHM_CIslands_Magic = 0x38AC78B0, - PHM_Kv_Magic = 0xDCBBABC0, - PHM_VIslands_Magic = 0x20130307, - PHM_Cz_Magic = 0x67DCBA25, - PHM_Rv_Magic = 0x20161121 -}; - -struct phm_set_power_state_input { - const struct pp_hw_power_state *pcurrent_state; - const struct pp_hw_power_state *pnew_state; -}; - -struct phm_clock_array { - uint32_t count; - uint32_t values[]; -}; - -struct phm_clock_voltage_dependency_record { - uint32_t clk; - uint32_t v; -}; - -struct phm_vceclock_voltage_dependency_record { - uint32_t ecclk; - uint32_t evclk; - uint32_t v; -}; - -struct phm_uvdclock_voltage_dependency_record { - uint32_t vclk; - uint32_t dclk; - uint32_t v; -}; - -struct phm_samuclock_voltage_dependency_record { - uint32_t samclk; - uint32_t v; -}; - -struct phm_acpclock_voltage_dependency_record { - uint32_t acpclk; - uint32_t v; -}; - -struct phm_clock_voltage_dependency_table { - uint32_t count; /* Number of entries. */ - struct phm_clock_voltage_dependency_record entries[]; /* Dynamically allocate count entries. */ -}; - -struct phm_phase_shedding_limits_record { - uint32_t Voltage; - uint32_t Sclk; - uint32_t Mclk; -}; - -struct phm_uvd_clock_voltage_dependency_record { - uint32_t vclk; - uint32_t dclk; - uint32_t v; -}; - -struct phm_uvd_clock_voltage_dependency_table { - uint8_t count; - struct phm_uvd_clock_voltage_dependency_record entries[]; -}; - -struct phm_acp_clock_voltage_dependency_record { - uint32_t acpclk; - uint32_t v; -}; - -struct phm_acp_clock_voltage_dependency_table { - uint32_t count; - struct phm_acp_clock_voltage_dependency_record entries[]; -}; - -struct phm_vce_clock_voltage_dependency_record { - uint32_t ecclk; - uint32_t evclk; - uint32_t v; -}; - -struct phm_phase_shedding_limits_table { - uint32_t count; - struct phm_phase_shedding_limits_record entries[]; -}; - -struct phm_vceclock_voltage_dependency_table { - uint8_t count; /* Number of entries. */ - struct phm_vceclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ -}; - -struct phm_uvdclock_voltage_dependency_table { - uint8_t count; /* Number of entries. */ - struct phm_uvdclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ -}; - -struct phm_samuclock_voltage_dependency_table { - uint8_t count; /* Number of entries. */ - struct phm_samuclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ -}; - -struct phm_acpclock_voltage_dependency_table { - uint32_t count; /* Number of entries. */ - struct phm_acpclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. 
*/ -}; - -struct phm_vce_clock_voltage_dependency_table { - uint8_t count; - struct phm_vce_clock_voltage_dependency_record entries[]; -}; - - -enum SMU_ASIC_RESET_MODE -{ - SMU_ASIC_RESET_MODE_0, - SMU_ASIC_RESET_MODE_1, - SMU_ASIC_RESET_MODE_2, -}; - -struct pp_smumgr_func { - char *name; - int (*smu_init)(struct pp_hwmgr *hwmgr); - int (*smu_fini)(struct pp_hwmgr *hwmgr); - int (*start_smu)(struct pp_hwmgr *hwmgr); - int (*check_fw_load_finish)(struct pp_hwmgr *hwmgr, - uint32_t firmware); - int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr); - int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr, - uint32_t firmware); - uint32_t (*get_argument)(struct pp_hwmgr *hwmgr); - int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg); - int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter); - int (*download_pptable_settings)(struct pp_hwmgr *hwmgr, - void **table); - int (*upload_pptable_settings)(struct pp_hwmgr *hwmgr); - int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type); - int (*process_firmware_header)(struct pp_hwmgr *hwmgr); - int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr); - int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr); - int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr); - int (*init_smc_table)(struct pp_hwmgr *hwmgr); - int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr); - int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr); - int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr); - uint32_t (*get_offsetof)(uint32_t type, uint32_t member); - uint32_t (*get_mac_definition)(uint32_t value); - bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); - bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); - int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting); - int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */ - int (*stop_smc)(struct pp_hwmgr *hwmgr); -}; - -struct pp_hwmgr_func { - int (*backend_init)(struct pp_hwmgr *hw_mgr); - int (*backend_fini)(struct pp_hwmgr *hw_mgr); - int (*asic_setup)(struct pp_hwmgr *hw_mgr); - int (*get_power_state_size)(struct pp_hwmgr *hw_mgr); - - int (*apply_state_adjust_rules)(struct pp_hwmgr *hwmgr, - struct pp_power_state *prequest_ps, - const struct pp_power_state *pcurrent_ps); - - int (*apply_clocks_adjust_rules)(struct pp_hwmgr *hwmgr); - - int (*force_dpm_level)(struct pp_hwmgr *hw_mgr, - enum amd_dpm_forced_level level); - - int (*dynamic_state_management_enable)( - struct pp_hwmgr *hw_mgr); - int (*dynamic_state_management_disable)( - struct pp_hwmgr *hw_mgr); - - int (*patch_boot_state)(struct pp_hwmgr *hwmgr, - struct pp_hw_power_state *hw_ps); - - int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr, - unsigned long, struct pp_power_state *); - int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr); - int (*powerdown_uvd)(struct pp_hwmgr *hwmgr); - void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); - void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); - void (*powergate_acp)(struct pp_hwmgr *hwmgr, bool bgate); - uint32_t (*get_mclk)(struct pp_hwmgr *hwmgr, bool low); - uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); - int (*power_state_set)(struct pp_hwmgr *hwmgr, - const void *state); - int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); - int (*pre_display_config_changed)(struct pp_hwmgr *hwmgr); - int (*display_config_changed)(struct pp_hwmgr *hwmgr); - int (*disable_clock_power_gating)(struct 
pp_hwmgr *hwmgr); - int (*update_clock_gatings)(struct pp_hwmgr *hwmgr, - const uint32_t *msg_id); - int (*set_max_fan_rpm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm); - int (*set_max_fan_pwm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm); - int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr); - int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); - void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode); - uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr); - int (*set_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t speed); - int (*get_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t *speed); - int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t speed); - int (*get_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t *speed); - int (*reset_fan_speed_to_default)(struct pp_hwmgr *hwmgr); - int (*uninitialize_thermal_controller)(struct pp_hwmgr *hwmgr); - int (*register_irq_handlers)(struct pp_hwmgr *hwmgr); - bool (*check_smc_update_required_for_display_configuration)(struct pp_hwmgr *hwmgr); - int (*check_states_equal)(struct pp_hwmgr *hwmgr, - const struct pp_hw_power_state *pstate1, - const struct pp_hw_power_state *pstate2, - bool *equal); - int (*set_cpu_power_state)(struct pp_hwmgr *hwmgr); - int (*store_cc6_data)(struct pp_hwmgr *hwmgr, uint32_t separation_time, - bool cc6_disable, bool pstate_disable, - bool pstate_switch_disable); - int (*get_dal_power_level)(struct pp_hwmgr *hwmgr, - struct amd_pp_simple_clock_info *info); - int (*get_performance_level)(struct pp_hwmgr *, const struct pp_hw_power_state *, - PHM_PerformanceLevelDesignation, uint32_t, PHM_PerformanceLevel *); - int (*get_current_shallow_sleep_clocks)(struct pp_hwmgr *hwmgr, - const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); - int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); - int (*get_clock_by_type_with_latency)(struct pp_hwmgr *hwmgr, - enum amd_pp_clock_type type, - struct pp_clock_levels_with_latency *clocks); - int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr, - enum amd_pp_clock_type type, - struct pp_clock_levels_with_voltage *clocks); - int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges); - int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr, - struct pp_display_clock_request *clock); - int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); - int (*power_off_asic)(struct pp_hwmgr *hwmgr); - int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); - int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); - int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable); - int (*get_sclk_od)(struct pp_hwmgr *hwmgr); - int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); - int (*get_mclk_od)(struct pp_hwmgr *hwmgr); - int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); - int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, void *value, int *size); - int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable); - int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr); - int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count); - int (*set_min_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock); - int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); - int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr, - uint32_t 
virtual_addr_low, - uint32_t virtual_addr_hi, - uint32_t mc_addr_low, - uint32_t mc_addr_hi, - uint32_t size); - int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr, - struct PP_TemperatureRange *range); - int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf); - int (*set_power_profile_mode)(struct pp_hwmgr *hwmgr, long *input, uint32_t size); - int (*odn_edit_dpm_table)(struct pp_hwmgr *hwmgr, - enum PP_OD_DPM_TABLE_COMMAND type, - long *input, uint32_t size); - int (*set_fine_grain_clk_vol)(struct pp_hwmgr *hwmgr, - enum PP_OD_DPM_TABLE_COMMAND type, - long *input, uint32_t size); - int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n); - int (*powergate_mmhub)(struct pp_hwmgr *hwmgr); - int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr); - int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate); - int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr); - int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); - int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap); - int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); - int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); - int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); - int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks); - int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state); - int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode); - int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire); - int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state); - int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate); - int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr, - bool disable); - ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table); - int (*gfx_state_change)(struct pp_hwmgr *hwmgr, uint32_t state); -}; - -struct pp_table_func { - int (*pptable_init)(struct pp_hwmgr *hw_mgr); - int (*pptable_fini)(struct pp_hwmgr *hw_mgr); - int (*pptable_get_number_of_vce_state_table_entries)(struct pp_hwmgr *hw_mgr); - int (*pptable_get_vce_state_table_entry)( - struct pp_hwmgr *hwmgr, - unsigned long i, - struct amd_vce_state *vce_state, - void **clock_info, - unsigned long *flag); -}; - -union phm_cac_leakage_record { - struct { - uint16_t Vddc; /* in CI, we use it for StdVoltageHiSidd */ - uint32_t Leakage; /* in CI, we use it for StdVoltageLoSidd */ - }; - struct { - uint16_t Vddc1; - uint16_t Vddc2; - uint16_t Vddc3; - }; -}; - -struct phm_cac_leakage_table { - uint32_t count; - union phm_cac_leakage_record entries[]; -}; - -struct phm_samu_clock_voltage_dependency_record { - uint32_t samclk; - uint32_t v; -}; - - -struct phm_samu_clock_voltage_dependency_table { - uint8_t count; - struct phm_samu_clock_voltage_dependency_record entries[]; -}; - -struct phm_cac_tdp_table { - uint16_t usTDP; - uint16_t usConfigurableTDP; - uint16_t usTDC; - uint16_t usBatteryPowerLimit; - uint16_t usSmallPowerLimit; - uint16_t usLowCACLeakage; - uint16_t usHighCACLeakage; - uint16_t usMaximumPowerDeliveryLimit; - uint16_t usEDCLimit; - uint16_t usOperatingTempMinLimit; - uint16_t usOperatingTempMaxLimit; - uint16_t usOperatingTempStep; - uint16_t usOperatingTempHyst; - uint16_t 
usDefaultTargetOperatingTemp; - uint16_t usTargetOperatingTemp; - uint16_t usPowerTuneDataSetID; - uint16_t usSoftwareShutdownTemp; - uint16_t usClockStretchAmount; - uint16_t usTemperatureLimitHotspot; - uint16_t usTemperatureLimitLiquid1; - uint16_t usTemperatureLimitLiquid2; - uint16_t usTemperatureLimitVrVddc; - uint16_t usTemperatureLimitVrMvdd; - uint16_t usTemperatureLimitPlx; - uint8_t ucLiquid1_I2C_address; - uint8_t ucLiquid2_I2C_address; - uint8_t ucLiquid_I2C_Line; - uint8_t ucVr_I2C_address; - uint8_t ucVr_I2C_Line; - uint8_t ucPlx_I2C_address; - uint8_t ucPlx_I2C_Line; - uint32_t usBoostPowerLimit; - uint8_t ucCKS_LDO_REFSEL; - uint8_t ucHotSpotOnly; -}; - -struct phm_tdp_table { - uint16_t usTDP; - uint16_t usConfigurableTDP; - uint16_t usTDC; - uint16_t usBatteryPowerLimit; - uint16_t usSmallPowerLimit; - uint16_t usLowCACLeakage; - uint16_t usHighCACLeakage; - uint16_t usMaximumPowerDeliveryLimit; - uint16_t usEDCLimit; - uint16_t usOperatingTempMinLimit; - uint16_t usOperatingTempMaxLimit; - uint16_t usOperatingTempStep; - uint16_t usOperatingTempHyst; - uint16_t usDefaultTargetOperatingTemp; - uint16_t usTargetOperatingTemp; - uint16_t usPowerTuneDataSetID; - uint16_t usSoftwareShutdownTemp; - uint16_t usClockStretchAmount; - uint16_t usTemperatureLimitTedge; - uint16_t usTemperatureLimitHotspot; - uint16_t usTemperatureLimitLiquid1; - uint16_t usTemperatureLimitLiquid2; - uint16_t usTemperatureLimitHBM; - uint16_t usTemperatureLimitVrVddc; - uint16_t usTemperatureLimitVrMvdd; - uint16_t usTemperatureLimitPlx; - uint8_t ucLiquid1_I2C_address; - uint8_t ucLiquid2_I2C_address; - uint8_t ucLiquid_I2C_Line; - uint8_t ucVr_I2C_address; - uint8_t ucVr_I2C_Line; - uint8_t ucPlx_I2C_address; - uint8_t ucPlx_I2C_Line; - uint8_t ucLiquid_I2C_LineSDA; - uint8_t ucVr_I2C_LineSDA; - uint8_t ucPlx_I2C_LineSDA; - uint32_t usBoostPowerLimit; - uint16_t usBoostStartTemperature; - uint16_t usBoostStopTemperature; - uint32_t ulBoostClock; -}; - -struct phm_ppm_table { - uint8_t ppm_design; - uint16_t cpu_core_number; - uint32_t platform_tdp; - uint32_t small_ac_platform_tdp; - uint32_t platform_tdc; - uint32_t small_ac_platform_tdc; - uint32_t apu_tdp; - uint32_t dgpu_tdp; - uint32_t dgpu_ulv_power; - uint32_t tj_max; -}; - -struct phm_vq_budgeting_record { - uint32_t ulCUs; - uint32_t ulSustainableSOCPowerLimitLow; - uint32_t ulSustainableSOCPowerLimitHigh; - uint32_t ulMinSclkLow; - uint32_t ulMinSclkHigh; - uint8_t ucDispConfig; - uint32_t ulDClk; - uint32_t ulEClk; - uint32_t ulSustainableSclk; - uint32_t ulSustainableCUs; -}; - -struct phm_vq_budgeting_table { - uint8_t numEntries; - struct phm_vq_budgeting_record entries[1]; -}; - -struct phm_clock_and_voltage_limits { - uint32_t sclk; - uint32_t mclk; - uint32_t gfxclk; - uint16_t vddc; - uint16_t vddci; - uint16_t vddgfx; - uint16_t vddmem; -}; - -/* Structure to hold PPTable information */ - -struct phm_ppt_v1_information { - struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk; - struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_mclk; - struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_socclk; - struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_dcefclk; - struct phm_clock_array *valid_sclk_values; - struct phm_clock_array *valid_mclk_values; - struct phm_clock_array *valid_socclk_values; - struct phm_clock_array *valid_dcefclk_values; - struct phm_clock_and_voltage_limits max_clock_voltage_on_dc; - struct phm_clock_and_voltage_limits max_clock_voltage_on_ac; - struct 
phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl;
-	struct phm_ppm_table *ppm_parameter_table;
-	struct phm_cac_tdp_table *cac_dtp_table;
-	struct phm_tdp_table *tdp_table;
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_dep_table;
-	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
-	struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table;
-	struct phm_ppt_v1_voltage_lookup_table *vddmem_lookup_table;
-	struct phm_ppt_v1_pcie_table *pcie_table;
-	struct phm_ppt_v1_gpio_table *gpio_table;
-	uint16_t us_ulv_voltage_offset;
-	uint16_t us_ulv_smnclk_did;
-	uint16_t us_ulv_mp1clk_did;
-	uint16_t us_ulv_gfxclk_bypass;
-	uint16_t us_gfxclk_slew_rate;
-	uint16_t us_min_gfxclk_freq_limit;
-};
-
-struct phm_ppt_v2_information {
-	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_mclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_socclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_dcefclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_pixclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_dispclk;
-	struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_phyclk;
-	struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_dep_table;
-
-	struct phm_clock_voltage_dependency_table *vddc_dep_on_dalpwrl;
-
-	struct phm_clock_array *valid_sclk_values;
-	struct phm_clock_array *valid_mclk_values;
-	struct phm_clock_array *valid_socclk_values;
-	struct phm_clock_array *valid_dcefclk_values;
-
-	struct phm_clock_and_voltage_limits max_clock_voltage_on_dc;
-	struct phm_clock_and_voltage_limits max_clock_voltage_on_ac;
-
-	struct phm_ppm_table *ppm_parameter_table;
-	struct phm_cac_tdp_table *cac_dtp_table;
-	struct phm_tdp_table *tdp_table;
-
-	struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
-	struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table;
-	struct phm_ppt_v1_voltage_lookup_table *vddmem_lookup_table;
-	struct phm_ppt_v1_voltage_lookup_table *vddci_lookup_table;
-
-	struct phm_ppt_v1_pcie_table *pcie_table;
-
-	uint16_t us_ulv_voltage_offset;
-	uint16_t us_ulv_smnclk_did;
-	uint16_t us_ulv_mp1clk_did;
-	uint16_t us_ulv_gfxclk_bypass;
-	uint16_t us_gfxclk_slew_rate;
-	uint16_t us_min_gfxclk_freq_limit;
-
-	uint8_t uc_gfx_dpm_voltage_mode;
-	uint8_t uc_soc_dpm_voltage_mode;
-	uint8_t uc_uclk_dpm_voltage_mode;
-	uint8_t uc_uvd_dpm_voltage_mode;
-	uint8_t uc_vce_dpm_voltage_mode;
-	uint8_t uc_mp0_dpm_voltage_mode;
-	uint8_t uc_dcef_dpm_voltage_mode;
-};
-
-struct phm_ppt_v3_information
-{
-	uint8_t uc_thermal_controller_type;
-
-	uint16_t us_small_power_limit1;
-	uint16_t us_small_power_limit2;
-	uint16_t us_boost_power_limit;
-
-	uint16_t us_od_turbo_power_limit;
-	uint16_t us_od_powersave_power_limit;
-	uint16_t us_software_shutdown_temp;
-
-	uint32_t *power_saving_clock_max;
-	uint32_t *power_saving_clock_min;
-
-	uint8_t *od_feature_capabilities;
-	uint32_t *od_settings_max;
-	uint32_t *od_settings_min;
-
-	void *smc_pptable;
-};
-
-struct phm_dynamic_state_info {
-	struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk;
-	struct phm_clock_voltage_dependency_table *vddci_dependency_on_mclk;
-	struct phm_clock_voltage_dependency_table *vddc_dependency_on_mclk;
-	struct phm_clock_voltage_dependency_table *mvdd_dependency_on_mclk;
-	struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl;
-	struct phm_clock_array *valid_sclk_values;
-	struct phm_clock_array *valid_mclk_values;
-	struct phm_clock_and_voltage_limits max_clock_voltage_on_dc;
-	struct phm_clock_and_voltage_limits max_clock_voltage_on_ac;
-	uint32_t mclk_sclk_ratio;
-	uint32_t sclk_mclk_delta;
-	uint32_t vddc_vddci_delta;
-	uint32_t min_vddc_for_pcie_gen2;
-	struct phm_cac_leakage_table *cac_leakage_table;
-	struct phm_phase_shedding_limits_table *vddc_phase_shed_limits_table;
-
-	struct phm_vce_clock_voltage_dependency_table
-		*vce_clock_voltage_dependency_table;
-	struct phm_uvd_clock_voltage_dependency_table
-		*uvd_clock_voltage_dependency_table;
-	struct phm_acp_clock_voltage_dependency_table
-		*acp_clock_voltage_dependency_table;
-	struct phm_samu_clock_voltage_dependency_table
-		*samu_clock_voltage_dependency_table;
-
-	struct phm_ppm_table *ppm_parameter_table;
-	struct phm_cac_tdp_table *cac_dtp_table;
-	struct phm_clock_voltage_dependency_table *vdd_gfx_dependency_on_sclk;
-};
-
-struct pp_fan_info {
-	bool bNoFan;
-	uint8_t ucTachometerPulsesPerRevolution;
-	uint32_t ulMinRPM;
-	uint32_t ulMaxRPM;
-};
-
-struct pp_advance_fan_control_parameters {
-	uint16_t usTMin; /* The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. */
-	uint16_t usTMed; /* The middle temperature where we change slopes. */
-	uint16_t usTHigh; /* The high temperature for setting the second slope. */
-	uint16_t usPWMMin; /* The minimum PWM value in percent (0.01% increments). */
-	uint16_t usPWMMed; /* The PWM value (in percent) at TMed. */
-	uint16_t usPWMHigh; /* The PWM value at THigh. */
-	uint8_t ucTHyst; /* Temperature hysteresis. Integer. */
-	uint32_t ulCycleDelay; /* The time between two invocations of the fan control routine in microseconds. */
-	uint16_t usTMax; /* The max temperature */
-	uint8_t ucFanControlMode;
-	uint16_t usFanPWMMinLimit;
-	uint16_t usFanPWMMaxLimit;
-	uint16_t usFanPWMStep;
-	uint16_t usDefaultMaxFanPWM;
-	uint16_t usFanOutputSensitivity;
-	uint16_t usDefaultFanOutputSensitivity;
-	uint16_t usMaxFanPWM; /* The max Fan PWM value for Fuzzy Fan Control feature */
-	uint16_t usFanRPMMinLimit; /* Minimum limit range in percentage, need to calculate based on minRPM/MaxRpm */
-	uint16_t usFanRPMMaxLimit; /* Maximum limit range in percentage, usually set to 100% by default */
-	uint16_t usFanRPMStep; /* Step increments/decerements, in percent */
-	uint16_t usDefaultMaxFanRPM; /* The max Fan RPM value for Fuzzy Fan Control feature, default from PPTable */
-	uint16_t usMaxFanRPM; /* The max Fan RPM value for Fuzzy Fan Control feature, user defined */
-	uint16_t usFanCurrentLow; /* Low current */
-	uint16_t usFanCurrentHigh; /* High current */
-	uint16_t usFanRPMLow; /* Low RPM */
-	uint16_t usFanRPMHigh; /* High RPM */
-	uint32_t ulMinFanSCLKAcousticLimit; /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */
-	uint8_t ucTargetTemperature; /* Advanced fan controller target temperature. */
-	uint8_t ucMinimumPWMLimit; /* The minimum PWM that the advanced fan controller can set. This should be set to the highest PWM that will run the fan at its lowest RPM. */
-	uint16_t usFanGainEdge; /* The following is added for Fiji */
-	uint16_t usFanGainHotspot;
-	uint16_t usFanGainLiquid;
-	uint16_t usFanGainVrVddc;
-	uint16_t usFanGainVrMvdd;
-	uint16_t usFanGainPlx;
-	uint16_t usFanGainHbm;
-	uint8_t ucEnableZeroRPM;
-	uint8_t ucFanStopTemperature;
-	uint8_t ucFanStartTemperature;
-	uint32_t ulMaxFanSCLKAcousticLimit; /* Maximum Fan Controller SCLK Frequency Acoustic Limit. */
-	uint32_t ulTargetGfxClk;
-	uint16_t usZeroRPMStartTemperature;
-	uint16_t usZeroRPMStopTemperature;
-	uint16_t usMGpuThrottlingRPMLimit;
-};
-
-struct pp_thermal_controller_info {
-	uint8_t ucType;
-	uint8_t ucI2cLine;
-	uint8_t ucI2cAddress;
-	uint8_t use_hw_fan_control;
-	struct pp_fan_info fanInfo;
-	struct pp_advance_fan_control_parameters advanceFanControlParameters;
-};
-
-struct phm_microcode_version_info {
-	uint32_t SMC;
-	uint32_t DMCU;
-	uint32_t MC;
-	uint32_t NB;
-};
-
-enum PP_TABLE_VERSION {
-	PP_TABLE_V0 = 0,
-	PP_TABLE_V1,
-	PP_TABLE_V2,
-	PP_TABLE_MAX
-};
-
-/**
- * The main hardware manager structure.
- */
-#define Workload_Policy_Max 6
-
-struct pp_hwmgr {
-	void *adev;
-	uint32_t chip_family;
-	uint32_t chip_id;
-	uint32_t smu_version;
-	bool not_vf;
-	bool pm_en;
-	bool pp_one_vf;
-	struct mutex smu_lock;
-	struct mutex msg_lock;
-
-	uint32_t pp_table_version;
-	void *device;
-	struct pp_smumgr *smumgr;
-	const void *soft_pp_table;
-	uint32_t soft_pp_table_size;
-	void *hardcode_pp_table;
-	bool need_pp_table_upload;
-
-	struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS];
-	uint32_t num_vce_state_tables;
-
-	enum amd_dpm_forced_level dpm_level;
-	enum amd_dpm_forced_level saved_dpm_level;
-	enum amd_dpm_forced_level request_dpm_level;
-	uint32_t usec_timeout;
-	void *pptable;
-	struct phm_platform_descriptor platform_descriptor;
-	void *backend;
-
-	void *smu_backend;
-	const struct pp_smumgr_func *smumgr_funcs;
-	bool is_kicker;
-
-	enum PP_DAL_POWERLEVEL dal_power_level;
-	struct phm_dynamic_state_info dyn_state;
-	const struct pp_hwmgr_func *hwmgr_func;
-	const struct pp_table_func *pptable_func;
-
-	struct pp_power_state *ps;
-	uint32_t num_ps;
-	struct pp_thermal_controller_info thermal_controller;
-	bool fan_ctrl_is_in_default_mode;
-	uint32_t fan_ctrl_default_mode;
-	bool fan_ctrl_enabled;
-	uint32_t tmin;
-	struct phm_microcode_version_info microcode_version_info;
-	uint32_t ps_size;
-	struct pp_power_state *current_ps;
-	struct pp_power_state *request_ps;
-	struct pp_power_state *boot_ps;
-	struct pp_power_state *uvd_ps;
-	const struct amd_pp_display_configuration *display_config;
-	uint32_t feature_mask;
-	bool avfs_supported;
-	/* UMD Pstate */
-	bool en_umd_pstate;
-	uint32_t power_profile_mode;
-	uint32_t default_power_profile_mode;
-	uint32_t pstate_sclk;
-	uint32_t pstate_mclk;
-	bool od_enabled;
-	uint32_t power_limit;
-	uint32_t default_power_limit;
-	uint32_t workload_mask;
-	uint32_t workload_prority[Workload_Policy_Max];
-	uint32_t workload_setting[Workload_Policy_Max];
-	bool gfxoff_state_changed_by_workload;
-};
-
-int hwmgr_early_init(struct pp_hwmgr *hwmgr);
-int hwmgr_sw_init(struct pp_hwmgr *hwmgr);
-int hwmgr_sw_fini(struct pp_hwmgr *hwmgr);
-int hwmgr_hw_init(struct pp_hwmgr *hwmgr);
-int hwmgr_hw_fini(struct pp_hwmgr *hwmgr);
-int hwmgr_suspend(struct pp_hwmgr *hwmgr);
-int hwmgr_resume(struct pp_hwmgr *hwmgr);
-
-int hwmgr_handle_task(struct pp_hwmgr *hwmgr,
-		enum amd_pp_task task_id,
-		enum amd_pm_state_type *user_state);
-
-
-#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU
-
-int smu7_init_function_pointers(struct pp_hwmgr *hwmgr);
-int smu8_init_function_pointers(struct pp_hwmgr *hwmgr);
-int vega12_hwmgr_init(struct pp_hwmgr *hwmgr);
-int vega20_hwmgr_init(struct pp_hwmgr *hwmgr);
-
-#endif /* _HWMGR_H_ */
diff --git a/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h
deleted file mode 100644
index 6a53b7e74ccd..000000000000
--- a/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h
+++ 
/dev/null @@ -1,1793 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef _POLARIS10_PWRVIRUS_H -#define _POLARIS10_PWRVIRUS_H - - -#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a -#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b -#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c -#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d - -struct PWR_Command_Table { - uint32_t data; - uint32_t reg; -}; - -typedef struct PWR_Command_Table PWR_Command_Table; - -struct PWR_DFY_Section { - uint32_t dfy_cntl; - uint32_t dfy_addr_hi, dfy_addr_lo; - uint32_t dfy_size; - uint32_t dfy_data[]; -}; - -typedef struct PWR_DFY_Section PWR_DFY_Section; - -static const PWR_Command_Table pwr_virus_table_pre[] = { - { 0x00000000, mmRLC_CNTL }, - { 0x00000002, mmRLC_SRM_CNTL }, - { 0x15000000, mmCP_ME_CNTL }, - { 0x50000000, mmCP_MEC_CNTL }, - { 0x80000004, mmCP_DFY_CNTL }, - { 0x0840800a, mmCP_RB0_CNTL }, - { 0xf30fff0f, mmTCC_CTRL }, - { 0x00000002, mmTCC_EXE_DISABLE }, - { 0x000000ff, mmTCP_ADDR_CONFIG }, - { 0x540ff000, mmCP_CPC_IC_BASE_LO }, - { 0x000000b4, mmCP_CPC_IC_BASE_HI }, - { 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR }, - { 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00221408, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00591260, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x005a12fb, 
mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00621387, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, - { 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR }, - { 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00221408, mmCP_HYP_MEC2_UCODE_DATA }, - { 
0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00591260, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00621387, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, 
mmCP_HYP_MEC2_UCODE_DATA }, - { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, - { 0x00000000, 0xFFFFFFFF }, -}; - -static const PWR_DFY_Section pwr_virus_section1 = { - .dfy_cntl = 0x80000004, - .dfy_addr_hi = 0x000000b4, - .dfy_addr_lo = 0x540fe800, - .dfy_data = { - 0x7e000200, 0x7e020201, 0x7e040204, 0x7e060205, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 
0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, - 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0xbf810000, 0x00000000, 0x00000000, 0x00000000, - 0x00000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x54106f00, 0x000400b4, 0x00004000, 0x00804fac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - }, - .dfy_size = 416 -}; - -static const PWR_DFY_Section pwr_virus_section2 = { - .dfy_cntl = 0x80000004, - .dfy_addr_hi = 0x000000b4, - .dfy_addr_lo = 0x540fef00, - .dfy_data = { - 0xc0031502, 0x00001e00, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - }, - .dfy_size = 16 -}; - -static const PWR_DFY_Section pwr_virus_section3 = { - .dfy_cntl = 0x80000004, - .dfy_addr_hi = 0x000000b4, - .dfy_addr_lo = 0x540ff000, - .dfy_data = { - 0xc424000b, 0x80000145, 0x94800001, 0x94c00001, 0x95000001, 0x95400001, 0x95800001, 0xdc810000, - 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xc4080061, 0xd8400013, 0xd8000003, 0xc40c0001, - 0x24ccffff, 0x3cd08000, 0x9500fffd, 0x1cd0ffcf, 0x7d018001, 0xc4140004, 0x050c0019, 0xd8400008, - 0x84c00000, 0x80000023, 0x80000067, 0x8000006a, 0x8000006d, 0x80000079, 0x80000084, 0x8000008f, - 0x80000099, 0x800000a0, 0x800000af, 0xd8400053, 0xc4080007, 0x388c0001, 0x08880002, 0x04100003, - 0x94c00005, 0x98800003, 0x04100004, 0x8000002d, 0x04100005, 0x8c00003f, 0x8c000043, 0x28cc0000, - 0xccc00050, 0x8c000055, 0x28080001, 0xcc000004, 0x7d808001, 0xd8400013, 0xd88130b8, 0xcd400008, - 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000, 0xcc800005, 0xdc080000, 0x80000168, 0xc40c000e, - 0x28cc0008, 0xccc00013, 0x90000000, 0xcd013278, 0xc4113278, 0x95000001, 0x24cc0700, 0xd8400029, - 0xc4113255, 0xcd01324f, 0xc4113254, 0x1d10ffdf, 0xcd013254, 0x10cc0014, 0x1d10c017, 0x7d0d000a, - 0xd8400013, 0xd8400008, 0xcd0130b7, 0x14cc0010, 0x90000000, 0xd9c00036, 0x8000005d, 0xd8400013, - 0xc00c4000, 0xccc130b5, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc40c0021, 0x14d00011, 0x9500fffe, - 
0xdc030000, 0xd800000c, 0xd800000d, 0xc40c005e, 0x94c01b10, 0xd8400013, 0x90000000, 0xc00e0080, - 0xccc130b5, 0x8000013b, 0xc00e0800, 0xccc130b5, 0x8000013b, 0xd8400053, 0x04100006, 0x8c00003f, - 0x8c000043, 0x28cc0000, 0xccc00050, 0x8c000055, 0x280c0008, 0xccc00052, 0xd8000021, 0x28180039, - 0x80000034, 0xd8400053, 0x04100007, 0x8c00003f, 0x8c000043, 0x28cc0001, 0xccc00050, 0x8c000055, - 0x280c0010, 0xccc00052, 0x28180039, 0x80000034, 0xd8400053, 0x04100008, 0x8c00003f, 0x8c000043, - 0x28cc0003, 0xccc00050, 0x8c000055, 0x280c0020, 0xccc00052, 0x28180039, 0x80000034, 0xdc030000, - 0xd8000069, 0x28080001, 0xc428000d, 0x7ca88004, 0xcc800079, 0x04280001, 0xcc00006f, 0x8000013b, - 0x80000034, 0x04100010, 0x8c00003f, 0x8c000043, 0xccc00078, 0x8c000055, 0x28180080, 0x80000034, - 0x04100001, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xcd013278, 0xc4113278, 0x95000001, 0xc00c4000, - 0xc4113254, 0x1d10c017, 0xd8400013, 0xd8400008, 0xccc130b5, 0xcd0130b7, 0x8000013b, 0x95c00001, - 0x96000001, 0x96400001, 0x96800001, 0x96c00001, 0x97000001, 0x97400001, 0x97800001, 0x97c00001, - 0xdc810000, 0xc40c000c, 0xcd4c0380, 0xcdcc0388, 0x55dc0020, 0xcdcc038c, 0xce0c0390, 0x56200020, - 0xce0c0394, 0xce4c0398, 0x56640020, 0xce4c039c, 0xce8c03a0, 0x56a80020, 0xce8c03a4, 0xcecc03a8, - 0x56ec0020, 0xcecc03ac, 0xcf0c03b0, 0x57300020, 0xcf0c03b4, 0xcf4c03b8, 0x57740020, 0xcf4c03bc, - 0xcf8c03c0, 0x57b80020, 0xcf8c03c4, 0xcfcc03c8, 0x57fc0020, 0xcfcc03cc, 0xd9000033, 0xc41c0009, - 0x25dc0010, 0x95c0fffe, 0xd8400013, 0xc41c000c, 0x05dc002f, 0xcdc12009, 0xc41d200a, 0xd8400013, - 0xcc012009, 0xd9000034, 0x25e01c00, 0x12200013, 0x25e40300, 0x12640008, 0x25e800c0, 0x12a80002, - 0x25ec003f, 0x7e25c00a, 0x7eae400a, 0x7de5c00a, 0xddc10000, 0xc02ee000, 0xcec1c200, 0xc40c005f, - 0xccc00037, 0x24d000ff, 0x31100006, 0x9500007b, 0x8c000190, 0xdc1c0000, 0xd8400013, 0xcdc1c200, - 0xc40c000c, 0xc4df0388, 0xc4d7038c, 0x51540020, 0x7d5dc01a, 0xc4e30390, 0xc4d70394, 0x51540020, - 0x7d62001a, 0xc4e70398, 0xc4d7039c, 0x51540020, 0x7d66401a, 0xc4eb03a0, 0xc4d703a4, 0x51540020, - 0x7d6a801a, 0xc4ef03a8, 0xc4d703ac, 0x51540020, 0x7d6ec01a, 0xc4f303b0, 0xc4d703b4, 0x51540020, - 0x7d73001a, 0xc4f703b8, 0xc4d703bc, 0x51540020, 0x7d77401a, 0xc4fb03c0, 0xc4d703c4, 0x51540020, - 0x7d7b801a, 0xc4ff03c8, 0xc4d703cc, 0x51540020, 0x7d7fc01a, 0xdc080000, 0xcc800013, 0xc4d70380, - 0xc4080001, 0x1c88001c, 0xcd400008, 0xc40c0083, 0x94c00010, 0xdc0e0000, 0x94c0000e, 0xc40c0082, - 0x24d00001, 0x9900000b, 0x18cc01e3, 0x3cd00004, 0x95000008, 0xc40c0085, 0x18cc006a, 0x98c00005, - 0xc40c0082, 0x18cc01e3, 0x3cd00004, 0x9900fffa, 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000, - 0xcc800004, 0xdc080000, 0x90000000, 0xc4080001, 0x1c88001c, 0xcd400008, 0xdc180000, 0xdc140000, - 0xdc100000, 0xdc0c0000, 0xcc800004, 0xdc080000, 0x90000000, 0xd8400051, 0xc428000c, 0x04180018, - 0x32640002, 0x9a80001f, 0x9a40001e, 0xcd800013, 0xc4293265, 0x040c0000, 0x1aac0027, 0x2aa80080, - 0xce813265, 0x9ac00017, 0xd80002f1, 0x04080002, 0x08880001, 0xd8080250, 0xd8080258, 0xd8080230, - 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270, 0xd8080278, 0xd8080280, 0xd8080228, - 0xd8000367, 0x9880fff3, 0x04080010, 0x08880001, 0xd80c0309, 0xd80c0319, 0x04cc0001, 0x9880fffc, - 0x7c408001, 0x88000000, 0xc00e0100, 0xd8400013, 0xd8400008, 0xccc130b5, 0x8000016e, 0xc4180032, - 0x29980008, 0xcd800013, 0x95800001, 0x7c40c001, 0x18d0003f, 0x24d4001f, 0x24d80001, 0x155c0001, - 0x05e80180, 0x9900000b, 0x202c003d, 0xcd800010, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x86800000, - 0x80000168, 0x80000aa7, 
0x80000bfc, 0x800012e9, 0xc4200007, 0x0a200001, 0xce000010, 0x80001b70, - 0x7c40c001, 0x8c000190, 0xc410001b, 0xd8000032, 0xd8000031, 0x9900091a, 0x7c408001, 0x88000000, - 0x24d000ff, 0x05280196, 0x18d4fe04, 0x29540008, 0xcd400013, 0x86800000, 0x800001b4, 0x8000032b, - 0x80000350, 0x80000352, 0x8000035f, 0x80000701, 0x8000047c, 0x8000019f, 0x80000800, 0xc419325b, - 0x1d98001f, 0xcd81325b, 0x8c00003f, 0xc4140004, 0xd8400008, 0x04100002, 0x8c000043, 0x28cc0002, - 0xccc00050, 0xc43c0044, 0x27fc0003, 0x9bc00002, 0x97c00006, 0xc00c4000, 0xccc130b5, 0x8c000055, - 0xd8400013, 0xd88130b8, 0xcd400008, 0x90000000, 0xd8400008, 0xcd400013, 0x7d40c001, 0xd8400028, - 0xd8400029, 0xd9400036, 0xc4193256, 0xc41d3254, 0x15540008, 0xcd400009, 0xcd40005b, 0xcd40005e, - 0xcd40005d, 0xd840006d, 0xc421325a, 0xc42d3249, 0x11540015, 0x19a4003c, 0x1998003f, 0x1af0007d, - 0x11dc000b, 0x1264001f, 0x15dc000d, 0x7d65400a, 0x13300018, 0x1a38003f, 0x7dd5c00a, 0x7df1c00a, - 0xcd800045, 0xcdc00100, 0xc411326a, 0xc415326b, 0xc419326c, 0xc41d326d, 0xc425326e, 0xc4293279, - 0xce800077, 0xcd000056, 0xcd400057, 0xcd800058, 0xcdc00059, 0xc4193265, 0x259c8000, 0x99c00004, - 0xce40005a, 0x29988000, 0xcd813265, 0xc4113248, 0x2510000f, 0xcd000073, 0xc418000d, 0xc411326f, - 0x17300019, 0x97000009, 0x25140fff, 0x95400007, 0xd800003a, 0x8c001b6d, 0xc4153279, 0xcd400077, - 0xcd00005f, 0xd8000075, 0x26f00001, 0x15100010, 0x7d190004, 0xcd000035, 0x97000035, 0x1af07fe8, - 0xd8800013, 0xd8400010, 0xd8400008, 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001, - 0x04300010, 0xdf430000, 0x7c434001, 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078, - 0xdf030000, 0xd4412e40, 0xd8400013, 0xcc41c030, 0xcc41c031, 0xc43dc031, 0xccc00013, 0x04343000, - 0xc4113246, 0xc41d3245, 0xcf413267, 0x51100020, 0x7dd1c01a, 0xc4353267, 0x45dc0160, 0xc810001f, - 0x1b4c0057, 0x1b700213, 0x1b740199, 0x7f4f400a, 0x7f73400a, 0x55180020, 0x2198003f, 0xd1c00025, - 0xcf400024, 0xcd000026, 0xcd800026, 0xd8400027, 0x9bc00001, 0x248dfffe, 0xd8800013, 0xccc12e00, - 0x7c434001, 0x7c434001, 0x8c00142b, 0xc43c000e, 0x1af4007d, 0x2bfc0008, 0x33740003, 0x26d80001, - 0xcfc00013, 0x1ae8003e, 0x9680000c, 0xc4253277, 0x26680001, 0x96800009, 0x2a640002, 0xce413277, - 0xd8400013, 0xc4253348, 0xce413348, 0xc4253348, 0x96400001, 0xcfc00013, 0x9b400003, 0x958000d8, - 0x80000315, 0xc4253277, 0x04303000, 0x26680001, 0xcf013267, 0xc4193246, 0xc41d3245, 0xc4313267, - 0x96800041, 0x51980020, 0x1b342010, 0x7d9d801a, 0x1714000c, 0x25540800, 0x1b30c012, 0x459801b0, - 0x7d77400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0xd180001e, 0xd8400021, 0x04240010, 0x199c01e2, - 0x7e5e4002, 0x3e5c0004, 0x3e540002, 0xc428000f, 0x9a80ffff, 0x95c00006, 0xc80c0011, 0xc8140011, - 0x54d00020, 0x55580020, 0x80000282, 0x95400015, 0xc80c0011, 0x0a640002, 0x041c0001, 0x45980008, - 0x54d00020, 0x96400004, 0xc8140011, 0x45980004, 0x041c0000, 0xcf00001c, 0xd180001e, 0xd8400021, - 0xc428000f, 0x9a80ffff, 0x99c00003, 0xc8180011, 0x80000282, 0xc8140011, 0x55580020, 0x80000282, - 0x45980004, 0xc80c0011, 0xcf00001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8100011, - 0xc8140011, 0x55580020, 0xd8400013, 0xccc1334e, 0xcd01334f, 0xcd413350, 0xcd813351, 0xd881334d, - 0xcfc00013, 0xc4193273, 0xc41d3275, 0xc40d3271, 0xc4113270, 0xc4153274, 0x50cc0020, 0x7cd0c01a, - 0x7cdcc011, 0x05900008, 0xcd00006a, 0xcdc0006b, 0xc41d3272, 0x7d594002, 0x54d00020, 0xd8800013, - 0xccc12e23, 0xcd012e24, 0xcdc12e25, 0xcfc00013, 0xc4193246, 0xc41d3245, 0xc4313267, 0x15540002, - 0x51980020, 0x7d9d801a, 0xc81c001f, 0x1b340057, 
0x1b280213, 0x1b300199, 0x45980198, 0x7f37000a, - 0x7f2b000a, 0x55e40020, 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0xcd40000d, - 0xcd40000a, 0xc40d3249, 0x20cc003c, 0xccc13249, 0xc4113274, 0xdd430000, 0xc01e0001, 0x29dc0002, - 0x04280000, 0xd8000036, 0xcc400078, 0xcc400078, 0x2d540002, 0x95400022, 0x078c0000, 0x07d40000, - 0x8c00120d, 0x8c001239, 0x8c001232, 0x04f80000, 0x057c0000, 0xcdc00013, 0xc414000d, 0xc41c0019, - 0x7dd5c005, 0x25dc0001, 0xd840007c, 0xd8400074, 0xd8400069, 0xc40c005e, 0x94c018a6, 0xd4412e22, - 0xd800007c, 0xc40c005e, 0x94c018a2, 0x95c00007, 0xc40c0019, 0x7cd4c005, 0x24cc0001, 0x94c00008, - 0x9680fffc, 0x800002e3, 0xc40c0057, 0x7cd0c002, 0x94c00003, 0x9680fffd, 0x800002e3, 0xd8000069, - 0xcfc00013, 0xcd013273, 0xcd013275, 0xd8000074, 0xc414005e, 0x9540188f, 0xcfc00013, 0xc40d3249, - 0xc013cfff, 0x7cd0c009, 0xccc13249, 0x9680000b, 0xc40c0077, 0x38d00001, 0x99000006, 0x04cc0002, - 0xdcc30000, 0xc40c005e, 0x94c01882, 0xd4400078, 0xd800000d, 0x80000304, 0x7c41c001, 0x7c41c001, - 0xd840002f, 0xc41c0015, 0x95c0ffff, 0xd8400030, 0xc41c0016, 0x95c0ffff, 0xd8000030, 0xc41c0016, - 0x99c0ffff, 0xd800002f, 0xc41c0015, 0x99c0ffff, 0xc81c001f, 0x49980198, 0x55e40020, 0x459801a0, - 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0x04302000, 0xcfc00013, 0xcf013267, - 0xc4313267, 0x96800004, 0x97000001, 0xd8000036, 0x80000329, 0xd8800013, 0xcc812e00, 0x04302000, - 0xcfc00013, 0xcf013267, 0xc4313267, 0x97000001, 0xc4193256, 0xc42d3249, 0x16ec001f, 0xd8000028, - 0xd800002b, 0x1998003e, 0xcec00031, 0xd8000036, 0xd8000010, 0x97800004, 0xd8400010, 0xce00000a, - 0x1a18003e, 0xcd800008, 0x90000000, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000, - 0x7d43c001, 0xcd400013, 0xc4093249, 0x1888003e, 0x94800015, 0xd8400074, 0x8c000671, 0xcd400013, - 0x9a400006, 0xc419324c, 0x259c0001, 0x1598001f, 0x95c0000d, 0x9580000c, 0x99000003, 0xd8400036, - 0x04100001, 0xc40c0021, 0x14d80011, 0x24dc00ff, 0x31e00002, 0x31dc0003, 0x9580fff0, 0x9a000003, - 0x99c00002, 0xd9c00036, 0x94800004, 0xd8000074, 0xc418005e, 0x95801827, 0xcf800008, 0x90000000, - 0xd8800036, 0x90000000, 0xd8c00036, 0xc424000b, 0x32640002, 0x9a400004, 0xc4180014, 0x9580ffff, - 0xd840002f, 0xc40c0021, 0x14dc0011, 0x95c0fffe, 0xccc00037, 0x8c000190, 0x90000000, 0xd8400008, - 0xd800006d, 0xc41d3246, 0xc4193245, 0x51dc0020, 0x7d9d801a, 0xd8400028, 0xd8400029, 0xc420000b, - 0x32200002, 0x9a0000ad, 0x04200032, 0xd9000010, 0xde030000, 0xd8400033, 0x04080000, 0xc43c0009, - 0x27fc0002, 0x97c0fffe, 0xc42c0015, 0x96c0ffff, 0xd800002e, 0xc42d3249, 0x1af4003e, 0x9740004d, - 0xc428000d, 0xc4080060, 0x7ca88005, 0x24880001, 0x7f4b4009, 0x97400046, 0xc4313274, 0xc4100057, - 0x7d33400c, 0x97400009, 0x28240100, 0x7e6a4004, 0xce400079, 0x1eecffdd, 0xcec13249, 0xcf013273, - 0xcf013275, 0x800003c3, 0xc429326f, 0x1aa80030, 0x96800006, 0x28240001, 0xc428000d, 0x06a80008, - 0x7e6a8004, 0xce800035, 0xc41d3272, 0x25cc0001, 0x10cc0004, 0x19e80042, 0x25dc0006, 0x11dc0001, - 0x7e8e800a, 0x7de9c00a, 0xc40d3271, 0xc4293270, 0x50cc0020, 0x7ce8c01a, 0x7cd30011, 0x11e80007, - 0x2aa80000, 0xce80001c, 0xd300001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc4300011, 0x1b30003f, - 0x33300000, 0xc4240059, 0x1660001f, 0x7e320009, 0xc0328000, 0x7e72400a, 0x0430000c, 0x9a000002, - 0x04300008, 0xc02ac000, 0x7d310002, 0x17300002, 0x2aa87600, 0x7cd0c011, 0xcdc00024, 0xd0c00025, - 0xce800026, 0x04280222, 0xce800026, 0x96000002, 0xce400026, 0xd8400027, 0xc4280058, 0x22ec003d, - 0xcec13249, 0xcd013273, 0xce813275, 0xd800007b, 0xc8380018, 0x57b00020, 
0x04343108, 0xc429325d, - 0x040c3000, 0x13740008, 0x2374007e, 0x32a80003, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213, - 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0x94800003, 0xd4400078, 0x800003e7, 0x04200022, 0xde030000, - 0xccc00024, 0xd1800025, 0xcf400026, 0xd4400026, 0xd8400027, 0x04200010, 0xde030000, 0xccc00024, - 0x45980104, 0xd1800025, 0xd4400026, 0xcf800026, 0xcf000026, 0xd8400027, 0x49980104, 0x9a80000a, - 0xc81c001f, 0x45980168, 0x55e00020, 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027, - 0x800003f2, 0x8c000448, 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xc40d3249, - 0x18cc003e, 0xd8400030, 0xc42c0016, 0x96c0ffff, 0xd8000030, 0xc42c0016, 0x9ac0ffff, 0xd800002f, - 0xc42c0015, 0x9ac0ffff, 0xd8400034, 0xc4300025, 0xc4340024, 0xc4380081, 0xcf813279, 0xcf41326e, - 0xcf01326d, 0x94c0000d, 0x254c0700, 0xc424001e, 0x10cc0010, 0x1a641fe8, 0x28cc0726, 0x2a640200, - 0xd8400013, 0xccc1237b, 0x2264003f, 0xcd400013, 0xd8813260, 0xce41325b, 0xc4240033, 0xc4280034, - 0xd9000036, 0xd8000010, 0x8c001427, 0x96400006, 0xde430000, 0xce40000c, 0xc40c005e, 0x94c01755, - 0xd4400078, 0x9680000a, 0xce80000a, 0x06a80002, 0xd8400010, 0xde830000, 0xce80000d, 0xc40c005e, - 0x94c0174c, 0xd4400078, 0xd8000010, 0x8c00142b, 0xc4393265, 0x2bb80040, 0xd8400032, 0xcf813265, - 0xc4200012, 0x9a00ffff, 0xc4100044, 0x19180024, 0xc8100072, 0x551c003f, 0x99c00003, 0x95800010, - 0x8000043d, 0xc00c8000, 0xd840006c, 0x28200000, 0x8000043f, 0xc00c4000, 0x282000f0, 0xcd400013, - 0xd8400008, 0xc4113255, 0xcd01324f, 0xd8400013, 0xd88130b8, 0xccc130b5, 0xce000053, 0x90000000, - 0x195c00e8, 0xc4100004, 0x2555fff0, 0xc0360001, 0x042c0000, 0x29540001, 0xd8400008, 0x04240000, - 0x04280004, 0xc420000b, 0x32200002, 0x9a000009, 0xcd400013, 0xcec1c200, 0xc5e124dc, 0x0aa80001, - 0x7ef6c001, 0x7e624001, 0x96000001, 0x9a80fff9, 0xc02ee000, 0xcd400013, 0x2555fff0, 0xcec1c200, - 0x29540008, 0xc81c001f, 0xcd400013, 0x55e00020, 0xc42d3255, 0xc4353259, 0xd8013260, 0x45980158, - 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027, 0x49980158, 0x45980170, 0xc4200012, - 0x16200010, 0x9a00fffe, 0xccc00024, 0xd1800025, 0xc429324f, 0xce400026, 0xce800026, 0xcec00026, - 0xcf400026, 0xd8400027, 0xcd000008, 0x90000000, 0xc40d325b, 0x7d43c001, 0x195400e8, 0x1154000a, - 0x18dc00e8, 0x05e80488, 0x18d0006c, 0x18f807f0, 0x18e40077, 0x18ec0199, 0x7e6e400a, 0x86800000, - 0x8000048e, 0x80000494, 0x800004de, 0x80000685, 0x80000686, 0x800006ac, 0x1ccc001f, 0xccc1325b, - 0xc411325d, 0x251001ef, 0xcd01325d, 0x90000000, 0xc4293254, 0x1264000a, 0xc4300004, 0x7d79400a, - 0x7e7a400a, 0x52a8001e, 0x15180001, 0x7d69401a, 0x202c007d, 0xcec1325b, 0x95000008, 0x95800028, - 0xc42d3267, 0xc4193246, 0xc41d3245, 0x1aec0028, 0xc40d325c, 0x800004cc, 0xc42d3256, 0xc419324e, - 0x26e8003f, 0x1aec003e, 0x12f4000e, 0xc41d324d, 0xc40d324f, 0x7d75401a, 0x04100002, 0x7d290004, - 0x7f8f4001, 0x7f52800f, 0x51980020, 0x7d9d801a, 0x50e00002, 0x51980008, 0x9a800002, 0x800004d1, - 0x7d0dc002, 0x6665fc00, 0x7e5e401a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000, - 0xce400002, 0x7f534002, 0x6665fc00, 0x7e76401a, 0xd1800002, 0xce400002, 0x800004d7, 0xc42d325a, - 0xc4193258, 0x1aec003e, 0xc41d3257, 0xc4213259, 0x12f4000e, 0x7d75401a, 0x51980020, 0x52200002, - 0x7d9d801a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000, 0xce400002, 0x202c003d, - 0xcf000008, 0xcfc00013, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x90000000, 0xc4193260, 0x259c0007, - 0x15980004, 0x05e804e3, 0x86800000, 0x800004e7, 0x800004f0, 0x80000505, 0x8000016a, 0xc4380004, - 
0xcfc00013, 0xd8400008, 0xc435325d, 0xd801325b, 0x277401ef, 0xcf41325d, 0xcf800008, 0x90000000, - 0xc4380004, 0xd8400008, 0x8c000671, 0x9640fff4, 0x17e00008, 0xc418000d, 0xce000009, 0xd84131db, - 0xcf800008, 0xcd800009, 0xc430001e, 0xcfc00013, 0xc42d325b, 0x1b301ff8, 0x2b300400, 0x2330003f, - 0x26edf000, 0x7ef2c00a, 0xd8413260, 0xcec1325b, 0x90000000, 0x05a80507, 0x86800000, 0x8000050c, - 0x80000528, 0x8000057d, 0x800005c2, 0x800005f3, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013, - 0x9a400012, 0x1bd400e8, 0xc42c004a, 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000c, 0xc4100019, - 0x7d150005, 0x25100001, 0x99000008, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277, - 0xd801326f, 0x80000624, 0x04240012, 0x1be00fe4, 0xce413260, 0xce000066, 0xcf800008, 0x90000000, - 0xd8400068, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013, 0x9a400013, 0x1bd400e8, 0xc42c004a, - 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000d, 0xc4100019, 0x7d150005, 0x25100001, 0x99000009, - 0xd8400067, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277, 0xd801326f, 0x80000624, - 0x1bd400e8, 0xc42c0060, 0x7ed6c005, 0x26ec0001, 0xc4113271, 0xc4153270, 0xc4193272, 0xc41d3273, - 0x04280022, 0x51100020, 0x7d51401a, 0xc4113274, 0xc4213275, 0xc4253276, 0xc4313248, 0xd1400061, - 0x2730000f, 0x13300010, 0x7db1800a, 0xcd800060, 0x96c00002, 0x05dc0008, 0xcdc00062, 0x042c3000, - 0xcd000063, 0xce000064, 0xce400065, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xce813260, - 0x52ec0020, 0x7ef2c01a, 0xc820001f, 0x1b700057, 0x1b680213, 0x1b740199, 0x46ec0188, 0x7f73400a, - 0x7f6b400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, - 0xc418000d, 0x17e00008, 0xce000009, 0xcec13267, 0xc42d3267, 0x26e01000, 0x9a00fffe, 0xd8400013, - 0xd9c131fc, 0xcd800009, 0xcf800008, 0x96c00001, 0x90000000, 0xc4380004, 0xd8400008, 0xc4113277, - 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0x29dc0001, 0x25140001, 0x191807e4, - 0x192007ec, 0x95400004, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x9580000e, 0x09980001, 0x041c0001, - 0x95800005, 0x09980001, 0x51dc0001, 0x69dc0001, 0x9980fffd, 0x7de20014, 0x561c0020, 0xd8400013, - 0xce013344, 0xcdc13345, 0xcfc00013, 0x95400022, 0x042c3000, 0xcec13267, 0xc42d3246, 0xc4313245, - 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe, 0xc419334e, 0xc41d334f, 0xc4213350, - 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213, 0x1b740199, 0x46ec01b0, 0x7f6b400a, - 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026, 0xcdc00026, 0xce000026, 0xce400026, - 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001, 0x04280032, 0xce813260, 0xd8800068, - 0xcf800008, 0x90000000, 0xc4380004, 0xd8400008, 0x2010007d, 0xcd01325b, 0xc411325b, 0x1910003e, - 0x9500fffe, 0x04100040, 0xcd00001b, 0xd8400021, 0xc410000f, 0x9900ffff, 0x04100060, 0xcd00001b, - 0xd8400021, 0xc410000f, 0x9900ffff, 0xcfc00013, 0x2010003d, 0xcd01325b, 0xc4113277, 0x25140001, - 0x191807e4, 0x9540000b, 0x2511fffd, 0xcd013277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, - 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x95800005, 0xd8400013, 0xd8013344, 0xd8013345, - 0xcfc00013, 0xc4180050, 0xc41c0052, 0x04280042, 0xcd813273, 0xcdc13275, 0xce813260, 0xd9000068, - 0xd8400067, 0xcf800008, 0x90000000, 0x07d40000, 0x8c00120d, 0x8c00124f, 0x8c001232, 0x057c0000, - 0x042c3000, 0xc4380004, 0xcfc00013, 0xd8400008, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, - 0x52ec0020, 0x7ef2c01a, 0x1b680057, 0x1b700213, 0x1b740199, 0xc820001f, 0x46ec0190, 0x7f6b400a, - 0x7f73400a, 0x56240020, 
0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, - 0xcfc00013, 0xcec13267, 0xc4153249, 0x2154003d, 0xc41c0019, 0x1bd800e8, 0x7dd9c005, 0x25dc0001, - 0xc42c004a, 0xcd80005e, 0xc420004d, 0xcec0005e, 0x11dc0010, 0x7e1e000a, 0xcd413249, 0xce01326f, - 0x28340001, 0x05980008, 0x7f598004, 0xcd800035, 0x1be800e8, 0xc42c004a, 0xce80005e, 0xd801327a, - 0xd800005f, 0xd8000075, 0xd800007f, 0xc424004c, 0xce41326e, 0xcec0005e, 0x28240100, 0x7e6a4004, - 0xce400079, 0xc435325d, 0x277401ef, 0x04240020, 0xce41325e, 0xd801325b, 0xd8013260, 0xcf41325d, - 0xda000068, 0xcf800008, 0x90000000, 0xc4113277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, - 0x11dc0008, 0x29dc0001, 0x25140001, 0x9540002d, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x042c3000, - 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe, - 0xc419334e, 0xc41d334f, 0xc4213350, 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213, - 0x1b740199, 0x46ec01b0, 0x7f6b400a, 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026, - 0xcdc00026, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001, - 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013, - 0x90000000, 0xc430000b, 0x33300002, 0x04240000, 0x9b000010, 0x1be000e8, 0x042c0000, 0xc0360001, - 0x04280004, 0xd8400013, 0xcec1c200, 0xc63124dc, 0x0aa80001, 0x7ef6c001, 0x7e724001, 0x97000001, - 0x9a80fff9, 0xc02ee000, 0xd8400013, 0xcec1c200, 0x90000000, 0x90000000, 0xc4253260, 0x7fc14001, - 0xc40d3249, 0x18cc003e, 0x98c00005, 0x194c1c03, 0xccc0003b, 0xc40c002d, 0x80000697, 0xc420004a, - 0x194c00e8, 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x98c00003, - 0x8c0007e0, 0x95c00008, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013, 0xcf01325b, - 0x90000000, 0xcd400013, 0xd801325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x25100007, 0x31100005, - 0x9900008e, 0xc40c0007, 0xd9000010, 0x8000075e, 0x202c007d, 0xcec1325b, 0xc4293265, 0xc4353254, - 0x26a9feff, 0xc4380004, 0xd8400008, 0x1374000b, 0xc40c000d, 0xd8000009, 0x1774000d, 0xd8400013, - 0xc41d30b8, 0xcfc00013, 0x95c00008, 0xc411325d, 0xd801325b, 0xccc00009, 0xcf800008, 0x251001ef, - 0xcd01325d, 0x90000000, 0xce813265, 0xcf400100, 0xc00ac006, 0xc00e0000, 0x28880700, 0x28cc0014, - 0x8c0006de, 0x14cc0010, 0x30d4000f, 0x04cc0001, 0x10cc0010, 0x28cc0014, 0x99400009, 0xd8400013, - 0xc41530b8, 0xcfc00013, 0xc4193265, 0x19980028, 0x99400003, 0x99800002, 0x800006c8, 0xcfc00013, - 0xc411325d, 0xd801325b, 0xcf800008, 0x251001ef, 0xcd01325d, 0x90000000, 0x15600008, 0xce000009, - 0xc8380023, 0xc4180081, 0x11a00002, 0x7fa38011, 0xc4100026, 0x05980008, 0x7d1a0002, 0x282c2002, - 0x3e280008, 0xcec00013, 0xc4300027, 0x042c0008, 0xd3800025, 0xcf000024, 0x202400d0, 0x7ca48001, - 0xcc800026, 0xccc00026, 0x28240006, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800004, 0x32280000, - 0x9a800002, 0x9a000000, 0xd8400027, 0x24d8003f, 0xd840003c, 0xcec0003a, 0xd8800013, 0xcd81a2a4, - 0x90000000, 0xc41d325d, 0x25dc0007, 0xc40d3249, 0x18cc003e, 0x94c0000a, 0xc420004a, 0x194c00e8, - 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x80000712, 0x194c1c03, - 0xccc0003b, 0xc40c002d, 0x05e80714, 0x86800000, 0x8000071c, 0x80000720, 0x80000747, 0x8000071d, - 0x800007c4, 0x80000732, 0x80000745, 0x80000744, 0x90000000, 0x98c00006, 0x8000072e, 0x90000000, - 0x98c00003, 0x8c0007e0, 0x95c0000c, 0xcd400013, 0xc4253265, 0x2a64008c, 0xce413265, 0xc430001e, - 0x1b301fe8, 0x2b300400, 0x2330003f, 0xd8013260, 
0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010, - 0x04240000, 0x8000075e, 0x98c0fff1, 0x8c0007e0, 0x95c00002, 0x80000723, 0xcd400013, 0xc41f02f1, - 0x95c00004, 0xd8013247, 0xd801325d, 0x80000743, 0xd8813247, 0xd801325d, 0xc4100004, 0xd8400008, - 0xd8400013, 0xd88130b8, 0xcd000008, 0x90000000, 0x04100001, 0x98c0ffde, 0x8000072e, 0x98c00003, - 0x8c0007e0, 0x95c00012, 0xc4340004, 0xd8400008, 0x15600008, 0xc418000d, 0xce000009, 0xd8400013, - 0xd84131db, 0xcf400008, 0xcd800009, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013, - 0xd8413260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010, 0x04240000, 0xcd400013, 0x041c3000, - 0xcdc13267, 0xc41d3267, 0xc41d3265, 0x25dc8000, 0x95c00007, 0xc41c004a, 0x195800e8, 0xcd80005e, - 0xc418004c, 0xcd81326e, 0xcdc0005e, 0xc41d3265, 0x25dd7fff, 0xcdc13265, 0xc41d3246, 0xc4193245, - 0xc42d3267, 0x51e00020, 0x7e1a001a, 0x46200200, 0x04283247, 0x04300033, 0x1af80057, 0x1af40213, - 0x042c000c, 0x7f7b400a, 0x7f6f400a, 0xcf400024, 0xd2000025, 0xcd800026, 0xcdc00026, 0xc6990000, - 0x329c325d, 0x99c00008, 0x329c3269, 0x99c00006, 0x329c3267, 0x95c00005, 0xc01defff, 0x7d9d8009, - 0x8000078a, 0x25980000, 0x0b300001, 0x06a80001, 0xcd800026, 0x9b00fff2, 0xd8400027, 0xc43c0012, - 0x9bc0ffff, 0xcd400013, 0xd801325b, 0xc431325a, 0xc03e7ff0, 0x7f3f0009, 0xcf01325a, 0xc4313249, - 0x1f30001f, 0xcf013249, 0xc03e4000, 0xcfc13254, 0xcd400013, 0xd8013254, 0xc431325d, 0xd801324f, - 0xd8013255, 0xd8013247, 0xd801325d, 0x1b300028, 0x8c00120d, 0x8c001219, 0x8c001232, 0xc4380004, - 0xd8400008, 0xd8400013, 0x9900000d, 0xd88130b8, 0x9700000b, 0xc43d30b5, 0x1bf0003a, 0x9b000b80, - 0x203c003a, 0xc430000e, 0x27300700, 0x13300014, 0x2b300001, 0xcf0130b7, 0xcfc130b5, 0x46200008, - 0xcf400024, 0xd2000025, 0xd8000026, 0xd8400027, 0x043c2000, 0xcd400013, 0xcfc13267, 0xc43d3267, - 0x9bc00001, 0xccc00010, 0xcf800008, 0x90000000, 0xc4080007, 0xd9000010, 0xc4193260, 0x259c0003, - 0x31dc0003, 0x95c00014, 0x040c3000, 0xd8400008, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213, - 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0xc4193246, 0xc41d3245, 0x51980020, 0x7d9d801a, 0x8c000448, - 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xcc800010, 0xd801325d, 0x90000000, - 0xc418000b, 0x31980002, 0x041c0000, 0x9980001c, 0x19580066, 0x15600008, 0x040c0000, 0xc0120001, - 0x11980003, 0x04240004, 0x7da18001, 0xc4200007, 0xc4340004, 0xd9000010, 0xd8400008, 0xd8400013, - 0xccc1c200, 0xc41d24db, 0x7cd0c001, 0x0a640001, 0x7dd9c005, 0x25dc0001, 0x99c00002, 0x9a40fff8, - 0xc418005e, 0x9580137b, 0xc00ee000, 0xd8400013, 0xccc1c200, 0xce000010, 0xcf400008, 0x90000000, - 0xd840004f, 0xc4113269, 0x19080070, 0x190c00e8, 0x2510003f, 0x2518000f, 0xcd813268, 0x05a80809, - 0x86800000, 0x8000080e, 0x8000080f, 0x80000898, 0x80000946, 0x800009e1, 0x80000a5a, 0x04a80811, - 0x86800000, 0x80000815, 0x80000834, 0x8000085e, 0x8000085e, 0x04341001, 0xcf400013, 0xc4380004, - 0xd8400008, 0xc42d3045, 0xcec1c091, 0x31300021, 0x9700000b, 0xd84002f1, 0xd8400013, 0xc43130b8, - 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a, 0xcf800008, 0x9b000241, 0x8000084a, 0xcf400013, - 0xd8400008, 0xc43130b6, 0x9b000003, 0xc02f0001, 0xcec130b6, 0xc4252087, 0x5668001a, 0x26a80005, - 0x9a80fffd, 0xcf400013, 0xd80130b6, 0x8000084a, 0xc4380004, 0xd8400008, 0x04341001, 0xcf400013, - 0xc431ecaa, 0x27300080, 0x9b000010, 0xc02e0001, 0xcec130b6, 0xcf400013, 0xd80130b6, 0x31300021, - 0x9700000a, 0xd84002f1, 0xd8400013, 0xc43130b8, 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a, - 0xcf800008, 0x9b00021d, 0xdd410000, 0x040c0005, 0xd84802e9, 0x8c001a41, 
0xc43b02f1, 0x9b800006, - 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0xcf800008, 0xcec80278, 0x56f00020, 0xcf080280, - 0x8c001608, 0xdc140000, 0xcd400013, 0xd8813247, 0xd80802e9, 0x8000085e, 0xcd400013, 0x31100011, - 0x950001fa, 0xc02e0001, 0x2aec0008, 0xc01c0020, 0xc0180001, 0xc00c0007, 0x11a40006, 0x7de6000a, - 0x10e40008, 0x7e26000a, 0x7e2e000a, 0xce000013, 0xc4113254, 0x1d10ffdf, 0x2110003e, 0xcd013254, - 0xd801324f, 0xd8013255, 0x1d10ff9e, 0xcd013254, 0xd8013247, 0xd801325d, 0xd801325e, 0xc0245301, - 0xce413249, 0xd801325f, 0xc425326c, 0xc0121fff, 0x29108eff, 0x7e524009, 0xce41326c, 0xc425325a, - 0xc0127ff0, 0x7e524009, 0xce41325a, 0xc425325b, 0xc0131fff, 0x7e524009, 0xce41325b, 0xd801326d, - 0xd801326e, 0xd8013279, 0x94c00003, 0x08cc0001, 0x80000866, 0xc00c0007, 0x95800003, 0x09980001, - 0x80000866, 0xc0100010, 0x7dd2400c, 0x9a400004, 0xc0180003, 0x7dd1c002, 0x80000866, 0x80000a5a, - 0x04a8089a, 0x86800000, 0x8000089e, 0x800008fa, 0x80000945, 0x80000945, 0x31300022, 0x97000007, - 0xc4380004, 0xd8400008, 0xd8400013, 0xc43130b8, 0x27300001, 0xcf800008, 0xcd400013, 0x04183000, - 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f, - 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, - 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000036, 0x45980008, 0xd180001e, - 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002f, 0xc43c0004, 0xd8400008, 0xd8400013, - 0x13b80001, 0xc79d3300, 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e, - 0x964012a4, 0x7c028009, 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x800008d2, - 0xc4180006, 0x9980ffff, 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001, - 0x9980fffd, 0xc02620c0, 0xce41c078, 0xce81c080, 0xcc01c081, 0xcf01c082, 0x57240020, 0xce41c083, - 0xc0260400, 0x7e6e400a, 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x800008d2, 0xc4180006, 0x9980ffff, - 0xcdf93300, 0xce393301, 0xcfc00008, 0xcd400013, 0xc43c0004, 0xd8400008, 0x04182000, 0xcd813267, - 0xcfc00008, 0x80000903, 0x31240022, 0x96400008, 0x04100001, 0xc4380004, 0xd8400008, 0xd8400013, - 0xc43130b8, 0x27300001, 0xcf800008, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x7ec30011, - 0x32f80000, 0x9b800011, 0x043c0020, 0x04280000, 0x67180001, 0x0bfc0001, 0x57300001, 0x95800006, - 0x8c001628, 0x9a400003, 0xd981325d, 0x80000915, 0xd9c1325d, 0x06a80001, 0x9bc0fff6, 0x7f818001, - 0x8c001606, 0x7d838001, 0x94800010, 0xcd400013, 0xc41d3259, 0xc421325a, 0x16240014, 0x12640014, - 0x1a2801f0, 0x12a80010, 0x2620ffff, 0x7e2a000a, 0x7de1c001, 0x7e5e400a, 0x9b800002, 0x2264003f, - 0xce41325a, 0xd8013259, 0xc40c0007, 0xd9000010, 0x8c00075e, 0xc4af0228, 0x043c0000, 0x66d80001, - 0x95800010, 0x04300002, 0x1330000d, 0x13f40014, 0x7f73400a, 0xcf400013, 0x04380040, 0xcf80001b, - 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, - 0x07fc0001, 0x56ec0001, 0x33e80010, 0x9680ffec, 0x80000a5a, 0x80000a5a, 0x04a80948, 0x86800000, - 0x8000094c, 0x8000099b, 0x800009e0, 0x800009e0, 0xc43c0004, 0xd8400008, 0xcd400013, 0x04183000, - 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f, - 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, - 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000033, 0x45980008, 0xd180001e, - 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002c, 0xd8400013, 0x13b80001, 0xc79d3300, - 
0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e, 0x964011fe, 0x7c028009, - 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x80000978, 0xc4180006, 0x9980ffff, - 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001, 0x9980fffd, 0xc0260010, - 0xce41c078, 0xcf01c080, 0x57240020, 0xce41c081, 0xce81c082, 0xcc01c083, 0xc0260800, 0x7e6e400a, - 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x80000978, 0xc4180006, 0x9980ffff, 0xcdf93300, 0xce393301, - 0x04182000, 0xcd813267, 0xcfc00008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020, 0x7dda801a, - 0x7d41c001, 0x7e838011, 0xd84802e9, 0x8c001802, 0x469c0390, 0xc4313267, 0x04183000, 0xcd813267, - 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, - 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011, 0x45dc0004, 0xd1c0001e, - 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4240011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, - 0x9980ffff, 0xc4280011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc42c0011, - 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4300011, 0x45dc0004, 0xd1c0001e, - 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4340011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, - 0x9980ffff, 0xc4380011, 0xcd400013, 0x04182000, 0xcd813267, 0x043c0001, 0x8c0014df, 0x80000a5a, - 0x80000a5a, 0x31280014, 0xce8802ef, 0x9a800062, 0x31280034, 0x9a800060, 0x04a809e8, 0x86800000, - 0x800009ec, 0x80000a45, 0x80000a59, 0x80000a59, 0xcd400013, 0xc4113246, 0xc4193245, 0x51100020, - 0x7d91801a, 0x45980400, 0xc4b30258, 0xc4a70250, 0x53300020, 0x7e72401a, 0xc4313267, 0x1b342010, - 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0x042c0020, - 0x66740001, 0x97400041, 0xcd400013, 0x04383000, 0xcf813267, 0xc4393267, 0x9b800001, 0xd180001e, - 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4300011, 0x1b38007e, 0x33b40003, 0x9b400003, 0x4598001c, - 0x9740002f, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc40c0011, 0x45980004, - 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x45980004, 0xd180001e, 0xd8400021, - 0xc438000f, 0x9b80ffff, 0xc4340011, 0xcf4002eb, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, - 0x9b80ffff, 0xc4340011, 0xcf4002ec, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, - 0xc4340011, 0xcf4002ed, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4340011, - 0xcf4002ee, 0x45980004, 0xcd400013, 0x04382000, 0xcf813267, 0xd84802e9, 0x8c001715, 0xcd400013, - 0x04382000, 0xcf813267, 0x56640001, 0x0aec0001, 0x9ac0ffbc, 0xc4380004, 0xd8400008, 0x04341001, - 0xcf400013, 0x94800005, 0xc431ecaa, 0x27300080, 0x97000002, 0x80000a55, 0xc43130b6, 0x233c0032, - 0xcfc130b6, 0xcf400013, 0xcf0130b6, 0xc49302ef, 0x99000003, 0xcd400013, 0xd8413247, 0xcf800008, - 0x80000a5a, 0x80000a5a, 0xcd400013, 0x04180001, 0x5198001f, 0xcd813268, 0xc4193269, 0x2598000f, - 0x9980fffe, 0xd80002f1, 0xcd400013, 0xd8013268, 0xd800004f, 0x90000000, 0xcd400013, 0x04380001, - 0x53b8001f, 0x7db9801a, 0xcd813268, 0x80000a5e, 0xd8400029, 0xc40c005e, 0x94c01106, 0xd8800013, - 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xd8400029, 0xc40c005e, 0x94c010fd, - 0x7c40c001, 0x50640020, 0x7ce4c01a, 0xd0c00072, 0xc80c0072, 0x58e801fc, 0x12a80009, 0x2aa80000, - 0xd0c0001e, 0xce80001c, 0xd8400021, 0xc424000f, 0x9a40ffff, 0x04240010, 0x18dc01e2, 0x7e5e4002, - 0x3e5c0003, 0x3e540002, 0x95c00006, 0xc8180011, 0xc8100011, 0xc8100011, 0x55140020, 0x80000aa2, - 0x9540000a, 0xc8180011, 
0x44cc0008, 0x55900020, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff, - 0xc4140011, 0x80000aa2, 0x44cc0004, 0xc4180011, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff, - 0xc8100011, 0x55140020, 0xd8800013, 0xcd812e01, 0xcd012e02, 0xcd412e03, 0xcc412e00, 0xc428000e, - 0x2aa80008, 0xce800013, 0xc4253249, 0x2264003f, 0xce413249, 0xce800013, 0xc4253249, 0x96400001, - 0xd800002a, 0xc410001a, 0xc40c0021, 0xc4140028, 0x95000005, 0x1e64001f, 0xce800013, 0xce413249, - 0x80001b70, 0x14d00010, 0xc4180030, 0xc41c0007, 0x99000004, 0x99400009, 0x9980000c, 0x80000ab1, - 0xccc00037, 0x8c000190, 0xc420001c, 0xd8000032, 0x9a0010ac, 0x80000aa7, 0xd880003f, 0x95c00002, - 0xd8c0003f, 0x80001082, 0xd8800040, 0x95c00002, 0xd8c00040, 0x800010de, 0xc010ffff, 0x18d403f7, - 0x7d0cc009, 0xc41b0367, 0x7d958004, 0x7d85800a, 0xdc1e0000, 0x90000000, 0xc424000b, 0x32640002, - 0x7c40c001, 0x18d001fc, 0x05280adc, 0x86800000, 0x80000af1, 0x80000adf, 0x80000ae7, 0x8c000ace, - 0xd8c00013, 0x96400002, 0xd8400013, 0xcd8d2000, 0x99c00010, 0x7c408001, 0x88000000, 0x18d803f7, - 0xc010ffff, 0x7d0cc009, 0x04140000, 0x11940014, 0x29544001, 0x9a400002, 0x29544003, 0xcd400013, - 0x80000af4, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44d2000, 0x7c408001, 0x88000000, 0xc424000b, - 0x32640002, 0x7c40c001, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44dc000, 0x7c408001, 0x88000000, - 0x7c40c001, 0x18d0003c, 0x95000006, 0x8c000ace, 0xd8800013, 0xcd8d2c00, 0x99c00003, 0x80000b0a, - 0xd8800013, 0xd44d2c00, 0x7c408001, 0x88000000, 0x7c40c001, 0x28148004, 0x24d800ff, 0xccc00019, - 0xcd400013, 0xd4593240, 0x7c408001, 0x88000000, 0xd8400029, 0xc40c005e, 0x94c0105e, 0x7c410001, - 0x50540020, 0x7c418001, 0x2198003f, 0x199c0034, 0xc40c0007, 0x95c00028, 0xc428000e, 0x2aa80008, - 0xce800013, 0xc42d324f, 0xc4313255, 0x7ef3400c, 0x9b400021, 0xd800002a, 0x80001b70, 0xc40c0007, - 0x14e80001, 0x9a8000af, 0xd9000010, 0x041c0002, 0x042c01c8, 0x8c000d61, 0xccc00010, 0xd8400029, - 0xc40c005e, 0x94c01043, 0x7c410001, 0x50540020, 0x7c418001, 0x18a01fe8, 0x3620005c, 0x9a00000e, - 0x2464003f, 0xd8400013, 0xc6290ce7, 0x16ac001f, 0x96c00004, 0x26ac003f, 0x7ee6c00d, 0x96c00005, - 0x06200001, 0x2620000f, 0x9a00fff8, 0x8000016a, 0xce000367, 0xc424005e, 0x9640102e, 0xc428000e, - 0x199c0037, 0x19a00035, 0x2aa80008, 0xce800013, 0x95c0005d, 0xd800002a, 0xc42d3256, 0xc431325a, - 0x2330003f, 0x16f8001f, 0x9780000d, 0xc4253248, 0xc035f0ff, 0x7e764009, 0x19b401f8, 0x13740008, - 0x7e76400a, 0xce800013, 0xce413248, 0xcf01325a, 0xce800013, 0xc431325a, 0x97000001, 0x7d15001a, - 0xd1000072, 0xc8100072, 0x55140020, 0x199c0034, 0xd8400010, 0xd8400029, 0x9b800004, 0x1ae4003e, - 0xce400008, 0x80000b7c, 0xc4353254, 0x16a80008, 0x1aec003c, 0x19a4003f, 0x12a80015, 0x12ec001f, - 0x1374000b, 0x7eae800a, 0xc02e4000, 0x1774000d, 0x7eae800a, 0xce400008, 0x7f6b400a, 0x95c00005, - 0xc43d3248, 0x1bfc01e8, 0x13fc0018, 0x7dbd800a, 0x1d98ff15, 0x592c00fc, 0xcd80000a, 0x12e00016, - 0x7da1800a, 0x592c007e, 0x12e00015, 0x7da1800a, 0xd1000001, 0xcd800001, 0x11a0000c, 0x1264001e, - 0x1620000c, 0x7e26000a, 0x7e32000a, 0x12e4001b, 0x7e26000a, 0x5924007e, 0x12640017, 0x7e26000a, - 0x19a4003c, 0x12640018, 0x7e26000a, 0xd800002a, 0xce01325a, 0xcd013257, 0xcd413258, 0xc429325a, - 0xc40c005e, 0x94c00fdb, 0x96800001, 0x95c00003, 0x7c40c001, 0x7c410001, 0x9780f5ca, 0xcf400100, - 0xc40c0007, 0xd9000010, 0x8c00120d, 0x8c001219, 0x8c001232, 0xccc00010, 0x8c001b6d, 0x7c408001, - 0x88000000, 0xc42d324e, 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x52ec0008, - 0x07740003, 0x04240002, 0x269c003f, 0x7e5e4004, 
0x7f67000f, 0x97000003, 0x7f674002, 0x0b740001, - 0x53740002, 0x7ef6c011, 0x1ab42010, 0x1ab8c006, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f7b400a, - 0x7f6b400a, 0xcf40001c, 0xd2c0001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4180011, 0x9a000003, - 0x8c000bec, 0x80000b47, 0xc42c001d, 0xc4313256, 0x1b34060b, 0x1b300077, 0x7f370009, 0x13300017, - 0x04340100, 0x26ec00ff, 0xc03a8004, 0x7ef6c00a, 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16, - 0xc40c0032, 0xc410001d, 0x28cc0008, 0xccc00013, 0xc415325b, 0x7c418001, 0x7c418001, 0x18580037, - 0x251000ff, 0xc421325d, 0x262001ef, 0xce01325d, 0x99800004, 0x7d15400a, 0xcd41325b, 0x80000168, - 0x1d54001f, 0xcd41325b, 0x7c408001, 0x88000000, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004, - 0x7eae800a, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0xcd280200, 0xcd680208, - 0xcda80210, 0x9b00000c, 0x9b400014, 0x9b800017, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004, - 0x7eae800a, 0xc6930200, 0xc6970208, 0xc69b0210, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037, - 0x8c000190, 0xd8000032, 0x90000000, 0xd8000028, 0xd800002b, 0x80000168, 0xd900003f, 0x97c00002, - 0xd940003f, 0x80001082, 0xd9000040, 0x97c00002, 0xd9400040, 0x800010de, 0xc40c0021, 0x14fc0011, - 0x24f800ff, 0x33b80001, 0x97c0fffc, 0x9b800007, 0xccc00037, 0x8c000190, 0xd8000032, 0xd8000028, - 0xd800002b, 0x80001b70, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000, 0x04140000, - 0xc418000e, 0x29980008, 0x7d83c001, 0xcd800013, 0xc4093249, 0x1888003e, 0x94800020, 0xd8400074, - 0x8c000671, 0x9a400009, 0xc418000e, 0x29980008, 0xcd800013, 0xc419324c, 0x259c0001, 0x1598001f, - 0x95c00016, 0x95800015, 0x99000003, 0xd8400036, 0x04100001, 0xc40c0021, 0x14d80011, 0x24e000ff, - 0x321c0002, 0x32200001, 0x9580ffee, 0x99c00014, 0x96000004, 0xccc00037, 0x04140001, 0x80000c30, - 0x9480000a, 0xd8000074, 0xc418005e, 0x95800f29, 0xcf800008, 0x80000c16, 0x94800004, 0xd8000074, - 0xc418005e, 0x95800f23, 0xd9c00036, 0x99400002, 0xccc00037, 0xcf800008, 0x80000c16, 0x94800004, - 0xd8000074, 0xc418005e, 0x95800f1a, 0xccc00037, 0xd8800036, 0x80001b70, 0x041c0003, 0x042c01c8, - 0x8c000d61, 0xc4200007, 0xc40c0077, 0x94c00001, 0x7c418001, 0xc428000e, 0x9600f502, 0x0a200001, - 0x98c0f500, 0x2aa80008, 0xce000010, 0x9a000f05, 0xce800013, 0xc431325a, 0xc42d3256, 0x1f30001f, - 0x16e4001f, 0xcf01325a, 0xc431325a, 0x97000001, 0x9640f4f4, 0xc434000b, 0x33740002, 0x9b40f4f1, - 0xc4353254, 0x16a80008, 0x1aec003c, 0x12a80015, 0x12ec001f, 0x1374000b, 0x7eae800a, 0xc02e4000, - 0x1774000d, 0x7eae800a, 0x7f6b400a, 0xcf400100, 0x12780001, 0x2bb80001, 0xc00ac005, 0xc00e0002, - 0x28cc8000, 0x28884900, 0x28cc0014, 0x80000ff3, 0xc43c0007, 0x7c40c001, 0x17fc0001, 0xd8400013, - 0x9bc00004, 0xd8400029, 0xc424005e, 0x96400ee1, 0xcc41c40a, 0xcc41c40c, 0xcc41c40d, 0x7c414001, - 0x24d0007f, 0x15580010, 0x255400ff, 0xcd01c411, 0xcd81c40f, 0xcd41c40e, 0xcc41c410, 0x7c414001, - 0x7c418001, 0x04200000, 0x18e80033, 0x18ec0034, 0xcc41c414, 0xcc41c415, 0xcd81c413, 0xcd41c412, - 0x18dc0032, 0x7c030011, 0x7c038011, 0x95c00027, 0x96c00002, 0xc431c417, 0xc435c416, 0x96800004, - 0x96c00002, 0xc439c419, 0xc43dc418, 0xc41c000e, 0x29dc0008, 0xcdc00013, 0xcf413261, 0x96c00002, - 0xcf013262, 0x96800004, 0xcfc13263, 0x96c00002, 0xcf813264, 0x18dc0030, 0xc43c0007, 0x95c00017, - 0x17fc0001, 0x9ac00005, 0x7d77000c, 0x9bc00015, 0x9700000a, 0x80000cd6, 0x51b80020, 0x53300020, - 0x7f97801a, 0x7f37001a, 0x7f3b000c, 0x9bc0000d, 0x97800002, 0x80000cd6, 0x9a000018, 0xd8400013, - 0x28200001, 0x80000ca7, 0x18dc0031, 0x95c00003, 0xc435c40b, 0x9740fffd, 
0xd800002a, 0x80001b70, - 0xc4280032, 0x2aa80008, 0xce800013, 0xc40d325b, 0x97000002, 0x800012c2, 0xc438001d, 0x1bb81ff0, - 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0xc428000e, 0xc43c0007, - 0x2aa80008, 0xc438001d, 0xce800013, 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077, - 0x7ff3c00a, 0x80000cf4, 0xc43d325a, 0x1bfc0677, 0x13fc0017, 0x04300100, 0x1bb81fe8, 0x7f73400a, - 0xc032800b, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b, 0x80000c16, 0xc43c0007, 0x7c40c001, - 0x18d42011, 0x17fc0001, 0x18d001e8, 0x24cc007f, 0x7cd4c00a, 0x9bc00004, 0xd8400029, 0xc428005e, - 0x96800e6c, 0x7c414001, 0x50580020, 0x7d59401a, 0xd1400072, 0xc8140072, 0x596001fc, 0x12200009, - 0x7ce0c00a, 0x7c418001, 0x505c0020, 0x7d9d801a, 0x7c41c001, 0x50600020, 0x7de1c01a, 0x7c420001, - 0xccc0001b, 0xd140001d, 0xd180001f, 0xd1c00020, 0xd8400021, 0x95000010, 0x04300000, 0xc428000f, - 0x9a80ffff, 0xc8240010, 0x7e5e800c, 0x9bc00015, 0x9a80000c, 0x9b000024, 0x28300001, 0x122c0004, - 0x06ec0001, 0x0aec0001, 0x9ac0ffff, 0xd8400021, 0x80000d1f, 0xc428000f, 0x9a80ffff, 0xc8240010, - 0x566c0020, 0xc428000e, 0x2aa80008, 0xce800013, 0xce413261, 0xcec13262, 0xd800002a, 0x80001b70, - 0xc4340032, 0x2b740008, 0xcf400013, 0xc40d325b, 0x96800005, 0x566c0020, 0xce413261, 0xcec13262, - 0x800012c2, 0xc438001d, 0x1bb81fe8, 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d, - 0x80001b70, 0xc43c0007, 0xc438001d, 0xc428000e, 0x2aa80008, 0xce800013, 0x13f4000c, 0x9bc00006, - 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x80000d57, 0xc43d325a, 0x1bfc0677, 0x13fc0017, - 0x04300100, 0x1bb81fe8, 0x7f73400a, 0xc0328009, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b, - 0x80000c16, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0xc4253246, 0xc4113245, 0x04143000, 0xcd413267, - 0x52640020, 0x7e51001a, 0xc4153267, 0x7d2d0011, 0x19640057, 0x19580213, 0x19600199, 0x7da6400a, - 0x7e26400a, 0xd1000025, 0xce400024, 0xcdc00026, 0xd8400027, 0x04142000, 0xcfc00013, 0xcd413267, - 0xc4153267, 0x99400001, 0x90000000, 0x7c40c001, 0x18d001e8, 0x18d40030, 0x18d80034, 0x05280d83, - 0x7c420001, 0x7c424001, 0x86800000, 0x80000d8a, 0x8000016a, 0x80000d95, 0x80000db1, 0x8000016a, - 0x80000d95, 0x80000dbc, 0x11540010, 0x7e010001, 0x8c00187c, 0x7d75400a, 0xcd400013, 0xd4610000, - 0x9580f3d8, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0xd8000016, 0x526c0020, 0x18e80058, - 0x7e2ec01a, 0xd2c00072, 0xc82c0072, 0x5ae0073a, 0x7ea2800a, 0x9940000a, 0xce800024, 0xd2c00025, - 0xd4400026, 0xd8400027, 0x9580f3c6, 0xc4380012, 0x9b80ffff, 0x7c408001, 0x88000000, 0xdc3a0000, - 0x0bb80001, 0xce800024, 0xd2c00025, 0xcc400026, 0xd8400027, 0x9b80fffb, 0x9980fff5, 0x7c408001, - 0x88000000, 0xc02a0001, 0x2aa80001, 0x16200002, 0xce800013, 0xce01c405, 0xd441c406, 0x9580f3b1, - 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b, 0x32640002, 0x9a40000b, 0x11540010, - 0x29540002, 0xcd400013, 0xd4610000, 0x9580f3a5, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001, - 0x88000000, 0xd4400078, 0x80000168, 0xd8400029, 0xc40c005e, 0x94c00da7, 0x7c40c001, 0x50500020, - 0x7cd0c01a, 0xd0c00072, 0xc8280072, 0x5aac007e, 0x12d80017, 0x7c41c001, 0x7d9d800a, 0x56a00020, - 0x2620ffff, 0x7da1800a, 0x51980020, 0x7e82400a, 0x7e58c01a, 0x19d4003d, 0x28182002, 0x99400030, - 0x8c00104f, 0xc430000d, 0xc4340035, 0xd800002a, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005, - 0xc011000f, 0xc4240004, 0x11a00002, 0x7c908009, 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, - 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008, 0x20880188, 0x54ec0020, 0x7cb4800a, 0xc4300027, - 
0x04380008, 0xd1400025, 0xcf000024, 0x20240090, 0x7ca48001, 0xcc800026, 0xccc00026, 0xcec00026, - 0xcec00026, 0x28240004, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800005, 0x32280000, 0x9a800002, - 0x9a000000, 0x7c018001, 0xd8400027, 0xd8000016, 0xcf80003a, 0xd901a2a4, 0x80001037, 0xc418000e, - 0x29980008, 0xcd800013, 0xc421326c, 0x1624001f, 0x9a40fffe, 0xd841325f, 0xd8800033, 0xc43c0009, - 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xd8800034, 0xc429325f, - 0x26ac0001, 0x9ac0fffe, 0x26ac0002, 0x96c00003, 0xd800002a, 0x80001b70, 0xc43c0007, 0xc430001e, - 0xd8800033, 0x13f4000c, 0x1b301ff0, 0x2b300300, 0x2330003f, 0x7f37000a, 0x9680000b, 0xc43c0009, - 0x27fc0004, 0x97c0fffe, 0xd8400039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xcf01325b, 0xd8800034, - 0x80000c16, 0xd8800034, 0x8c0001a2, 0x80001b70, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a, - 0x18ac0024, 0x2b304000, 0x7c40c001, 0xcec00008, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, - 0x29980008, 0xcd800013, 0xc4113249, 0x1910003e, 0x99000002, 0xd840003d, 0x7c410001, 0xd4400078, - 0x51100020, 0xcf01326c, 0x7cd0c01a, 0xc421326c, 0x12a80014, 0x2220003f, 0x7e2a000a, 0xcd800013, - 0xce01326c, 0xd8800033, 0xc43c0009, 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022, - 0x9bc0ffff, 0xd8800034, 0x80001190, 0x7c40c001, 0x18dc003d, 0x95c00004, 0x041c0001, 0x042c01c8, - 0x8c000d61, 0x18d40030, 0x18d001e8, 0x18fc0034, 0x24e8000f, 0x06a80e71, 0x7c418001, 0x7c41c001, - 0x86800000, 0x80000edd, 0x80000e91, 0x80000e91, 0x80000ea1, 0x80000eaa, 0x80000e7c, 0x80000e7f, - 0x80000e7f, 0x80000e87, 0x80000e8f, 0x8000016a, 0x51dc0020, 0x7d9e001a, 0x80000ee6, 0xc420000e, - 0x2a200008, 0xce000013, 0xc4213262, 0xc4253261, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc420000e, - 0x2a200008, 0xce000013, 0xc4213264, 0xc4253263, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc820001f, - 0x80000ee6, 0x18e82005, 0x51e00020, 0x2aa80000, 0x7da1801a, 0xd1800072, 0xc8180072, 0x59a001fc, - 0x12200009, 0x7ea2800a, 0xce80001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8200011, - 0x80000ee6, 0x15980002, 0xd8400013, 0xcd81c400, 0xc421c401, 0x95400041, 0xc425c401, 0x52640020, - 0x7e26001a, 0x80000ee6, 0x31ac2580, 0x9ac00011, 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d, - 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009, 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005, - 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004, 0xc4340004, 0xd8400008, 0x80000ede, 0x39ac7c06, - 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002, - 0x80000ebc, 0x39acc335, 0x3db0c336, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9002, 0x3db09001, - 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9012, 0x3db09011, 0x9ac00003, 0x97000002, 0x80000ebc, - 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000ebc, 0xc4340004, 0xd8400013, 0xc5a10000, - 0x95400005, 0x05980001, 0xc5a50000, 0x52640020, 0x7e26001a, 0xcf400008, 0x05280eea, 0x7c418001, - 0x7c41c001, 0x86800000, 0x80000ef1, 0x8000016a, 0x80000efe, 0x80000f11, 0x80000f2e, 0x80000efe, - 0x80000f1f, 0xc4340004, 0xd8400013, 0xce190000, 0x95400005, 0x05980001, 0x56200020, 0xce190000, - 0xcf400008, 0x97c0f26f, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x51ec0020, 0x18e80058, - 0x7daec01a, 0xd2c00072, 0xc82c0072, 0x5af8073a, 0x7eba800a, 0xd2c00025, 0xce800024, 0xce000026, - 0x95400003, 0x56240020, 0xce400026, 0xd8400027, 0x97c0f25c, 0xc4380012, 0x9b80ffff, 0x7c408001, - 0x88000000, 0xc02a0001, 0x2aa80001, 0x15980002, 0xce800013, 0xcd81c405, 0xce01c406, 0x95400003, - 0x56240020, 0xce41c406, 
0x97c0f24e, 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b, - 0x32640002, 0x9a40f247, 0xd8800013, 0xce190000, 0x95400004, 0x05980001, 0x56200020, 0xce190000, - 0x97c0f240, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x31ac2580, 0x9ac00011, - 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d, 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009, - 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005, 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004, - 0xc4340004, 0xd8400008, 0x80000ef2, 0x39ac7c06, 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000f40, - 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002, 0x80000f40, 0x39acc335, 0x3db0c336, 0x9ac00003, - 0x97000002, 0x80000f40, 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9002, - 0x3db09002, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9012, 0x3db09012, 0x9ac00003, 0x97000002, - 0x80000f40, 0x80000ef1, 0xc40c0006, 0x98c0ffff, 0x7c40c001, 0x7c410001, 0x7c414001, 0x7c418001, - 0x7c41c001, 0x7c43c001, 0x95c00001, 0xc434000e, 0x2b740008, 0x2b780001, 0xcf400013, 0xd8c1325e, - 0xcf80001a, 0xd8400013, 0x7c034001, 0x7c038001, 0x18e0007d, 0x32240003, 0x9a400006, 0x32240000, - 0x9a400004, 0xcd01c080, 0xcd41c081, 0x80000f88, 0x51640020, 0x7e52401a, 0xd2400072, 0xc8280072, - 0xce81c080, 0x56ac0020, 0x26f0ffff, 0xcf01c081, 0x1af000fc, 0x1334000a, 0x24e02000, 0x7f63400a, - 0x18e00074, 0x32240003, 0x9a400006, 0x32240000, 0x9a400004, 0xcd81c082, 0xcdc1c083, 0x80000f9d, - 0x51e40020, 0x7e5a401a, 0xd2400072, 0xc8280072, 0xce81c082, 0x56ac0020, 0x26f0ffff, 0xcf01c083, - 0x1af000fc, 0x13380016, 0x18e00039, 0x12200019, 0x7fa3800a, 0x7fb7800a, 0x18e0007d, 0x1220001d, - 0x7fa3800a, 0x18e00074, 0x12200014, 0x7fa3800a, 0xcf81c078, 0xcfc1c084, 0x80000c16, 0x7c40c001, - 0x18dc003d, 0x95c00004, 0x041c0000, 0x042c01c8, 0x8c000d61, 0x18d001e8, 0x31140005, 0x99400003, - 0x31140006, 0x95400002, 0x8c00104f, 0x05280fb7, 0x28140002, 0xcd400013, 0x86800000, 0x80000fbe, - 0x80000fbe, 0x80000fc2, 0x80000fbe, 0x80000fd1, 0x80000ff2, 0x80000ff2, 0x24cc003f, 0xccc1a2a4, - 0x7c408001, 0x88000000, 0x7c414001, 0x18e80039, 0x52a8003b, 0x50580020, 0x24cc003f, 0x7d59401a, - 0xd1400072, 0xc8140072, 0x7d69401a, 0xc41c0017, 0x99c0ffff, 0xd140004b, 0xccc1a2a4, 0x7c408001, - 0x88000000, 0xc414000d, 0x04180001, 0x24cc003f, 0x7d958004, 0xcd800035, 0xccc1a2a4, 0xc43c000e, - 0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x97c00002, 0xd8400074, 0xc4100019, 0x7d150005, - 0x25100001, 0x9500000b, 0x97c0fffc, 0xc4180021, 0x159c0011, 0x259800ff, 0x31a00003, 0x31a40001, - 0x7e25800a, 0x95c0fff5, 0x9580fff4, 0x80000fef, 0xc411326f, 0x1d100010, 0xcd01326f, 0x97c00002, - 0xd8000074, 0x80001b70, 0x04380000, 0xc430000d, 0xc8140023, 0xc4180081, 0x13300005, 0xc011000f, - 0xc4240004, 0x33b40003, 0x97400003, 0xc0340008, 0x80000ffe, 0xc4340035, 0x11a00002, 0x7c908009, - 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x282c2002, - 0x208801a8, 0x3e280008, 0x7cb4800a, 0xcec00013, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024, - 0x20240030, 0x7ca48001, 0xcc800026, 0xccc00026, 0x9b800013, 0xcc400026, 0x7c414001, 0x28340000, - 0xcf400013, 0x507c0020, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013, - 0xcd400026, 0xcfc00026, 0xd4400026, 0x9a80000e, 0x32280000, 0x9a80000b, 0x8000102f, 0xcc000026, - 0xcc000026, 0xcc000026, 0xcc000026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000, - 0x7c018001, 0xcc000026, 0xd8400027, 0x1cccfe08, 0xd8800013, 0xcec0003a, 0xccc1a2a4, 0xc43c000e, - 0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 
0x9bc00007, 0xc428000e, 0x16a80008, 0xce800009, - 0xc42c005e, 0x96c00b33, 0xd840003c, 0xc4200025, 0x7da2400f, 0x7da28002, 0x7e1ac002, 0x0aec0001, - 0x96400002, 0x7d2ac002, 0x3ef40010, 0x9b40f11d, 0x04380030, 0xcf81325e, 0x80000c16, 0xde410000, - 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xddc10000, 0xde010000, 0xc40c000e, 0x7c024001, - 0x28cc0008, 0xccc00013, 0xc8100086, 0x5510003f, 0xc40d3249, 0x18cc003e, 0x98c00003, 0x99000011, - 0x80001075, 0x9900000c, 0xc40c0026, 0xc4100081, 0xc4140025, 0x7d15800f, 0x7d15c002, 0x7d520002, - 0x0a200001, 0x95800002, 0x7cde0002, 0x3e20001a, 0x9a000009, 0x040c0030, 0xccc1325e, 0x80001071, - 0xd9c00036, 0xd8400029, 0xc40c005e, 0x94c00b01, 0x04240001, 0xdc200000, 0xdc1c0000, 0xdc180000, - 0xdc140000, 0xdc100000, 0xdc0c0000, 0x96400004, 0xdc240000, 0xdc0c0000, 0x80000c16, 0xdc240000, - 0x90000000, 0xcc40003f, 0xd8c00010, 0xc4080029, 0xcc80003b, 0xc418000e, 0x18a800e5, 0x1d980008, - 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0x18a400e5, 0x12500009, 0x248c0008, 0x94c00006, - 0x200c006d, 0x7cd0c00a, 0xccc1326c, 0xc421326c, 0x96000001, 0xcd800013, 0x200c0228, 0x7cd0c00a, - 0xccc1326c, 0xc421326c, 0x96000001, 0xc40c002a, 0xc410002b, 0x18881fe8, 0x18d4072c, 0x18cc00d1, - 0x7cd4c00a, 0x3094000d, 0x38d80000, 0x311c0003, 0x99400006, 0x30940007, 0x1620001f, 0x9940001d, - 0x9a000023, 0x800010c4, 0x9580001a, 0x99c00019, 0xccc00041, 0x25140001, 0xc418002c, 0x9940000d, - 0x259c007f, 0x95c00013, 0x19a00030, 0xcdc0001b, 0xd8400021, 0xd8400022, 0xc430000f, 0x17300001, - 0x9b00fffe, 0x9a000012, 0xd8400023, 0x800010cb, 0x199c0fe8, 0xcdc0001b, 0xd8400021, 0xd8400023, - 0xc430000f, 0x17300001, 0x9b00fffe, 0x800010cb, 0xd8c00010, 0xd8000022, 0xd8000023, 0xc430005e, - 0x97000aac, 0x7c408001, 0x88000000, 0xc43c000e, 0xc434002e, 0x2bfc0008, 0x2020002c, 0xcfc00013, - 0xce01326c, 0x17780001, 0x27740001, 0x07a810d8, 0xcf400010, 0xc421326c, 0x96000001, 0x86800000, - 0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0x8000104c, 0xcc400040, 0xd8800010, 0xc4180032, - 0x29980008, 0xcd800013, 0x200c007d, 0xccc1325b, 0xc411325b, 0x95000001, 0x7c408001, 0x88000000, - 0x28240007, 0xde430000, 0xd4400078, 0x80001190, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a, - 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0xc40d3249, 0x18cc003e, - 0x98c00002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x7c40c001, 0x7c410001, 0x7c414001, - 0x192400fd, 0x50580020, 0x7d59401a, 0x7c41c001, 0x06681110, 0x7c420001, 0xcc400078, 0x18ac0024, - 0x19180070, 0x19100078, 0xcec00008, 0x18f40058, 0x5978073a, 0x7f7b400a, 0x97000001, 0x86800000, - 0x80001117, 0x80001118, 0x80001122, 0x8000112d, 0x80001130, 0x80001133, 0x8000016a, 0x8000117b, - 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025, 0xcf400024, 0xcdc00026, - 0xd8400027, 0x8000117b, 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025, - 0xcf400024, 0xcdc00026, 0xce000026, 0xd8400027, 0x8000117b, 0xc81c001f, 0x55e00020, 0x80001122, - 0xc81c0020, 0x55e00020, 0x80001122, 0x8c00116b, 0xd8400013, 0xc02a0200, 0x7e8e8009, 0x22a8003d, - 0x22a80074, 0x2774001c, 0x13740014, 0x7eb6800a, 0x25ecffff, 0x55700020, 0x15f40010, 0x13740002, - 0x275c001f, 0x95c00027, 0x7c018001, 0x7f41c001, 0x15dc0002, 0x39e00008, 0x25dc0007, 0x7dc1c01e, - 0x05dc0001, 0x96000004, 0x05e40008, 0x8c00116e, 0x80001168, 0x7dc2001e, 0x06200001, 0x05e40008, - 0x7e62000e, 0x9a000004, 0x7da58001, 0x8c00116e, 0x80001165, 0x7dc2001e, 0x06200001, 0x7e1a0001, - 0x05cc0008, 0x7e0d000e, 0x95000007, 0x7e02401e, 0x06640001, 0x06640008, 
0x05d80008, 0x8c00116e, - 0x80001168, 0x7dc2401e, 0x06640001, 0x7da58001, 0x8c00116e, 0x05e00008, 0x7da2000c, 0x9600ffe6, - 0x17640002, 0x8c00116e, 0x80001190, 0xc4200006, 0x9a00ffff, 0x90000000, 0x8c00116b, 0xc420000e, - 0x2a200001, 0xce00001a, 0xce81c078, 0xcec1c080, 0xcc01c081, 0xcd41c082, 0xcf01c083, 0x12640002, - 0x22640435, 0xce41c084, 0x90000000, 0x0528117e, 0x312c0003, 0x86800000, 0x80001190, 0x80001185, - 0x80001182, 0x80001182, 0xc4300012, 0x9b00ffff, 0x9ac0000c, 0xc03a0400, 0xc4340004, 0xd8400013, - 0xd8400008, 0xc418000e, 0x15980008, 0x1198001c, 0x7d81c00a, 0xcdc130b7, 0xcf8130b5, 0xcf400008, - 0x04240008, 0xc418000e, 0xc41c0049, 0x19a000e8, 0x29a80008, 0x7de2c00c, 0xce800013, 0xc421325e, - 0x26200010, 0xc415326d, 0x9a000006, 0xc420007d, 0x96000004, 0x96c00003, 0xce40003e, 0x800011a3, - 0x7d654001, 0xcd41326d, 0x7c020001, 0x96000005, 0xc4100026, 0xc4240081, 0xc4140025, 0x800011b6, - 0xc4253279, 0xc415326d, 0xc431326c, 0x2730003f, 0x3b380006, 0x97800004, 0x3f38000b, 0x9b800004, - 0x800011b4, 0x04300006, 0x800011b4, 0x0430000b, 0x04380002, 0x7fb10004, 0x7e57000f, 0x7e578002, - 0x7d67c002, 0x0be40001, 0x97000002, 0x7d3a4002, 0x202c002c, 0xc421325e, 0x04280020, 0xcec1326c, - 0x26200010, 0x3e640010, 0x96000003, 0x96400002, 0xce81325e, 0xc4300028, 0xc434002e, 0x17780001, - 0x27740001, 0x07a811cf, 0x9b00feb8, 0xcf400010, 0xc414005e, 0x954009a7, 0x86800000, 0x80000168, - 0x80000aa7, 0x80000bfc, 0x800012e9, 0x80000168, 0x8c00120d, 0x7c40c001, 0xccc1c07c, 0xcc41c07d, - 0xcc41c08c, 0x7c410001, 0xcc41c079, 0xcd01c07e, 0x7c414001, 0x18f0012f, 0x18f40612, 0x18cc00c1, - 0x7f73400a, 0x7cf7400a, 0x39600004, 0x9a000002, 0xc0140004, 0x11600001, 0x18fc003e, 0x9740001c, - 0xcf400041, 0xc425c07f, 0x97c00003, 0x166c001f, 0x800011ee, 0x1a6c003e, 0x96c00006, 0x04200002, - 0x0a200001, 0x9a00ffff, 0xd8400013, 0x800011e8, 0xc428002c, 0x96800010, 0x26ac007f, 0xcec0001b, - 0xd8400021, 0x1ab00030, 0x1aac0fe8, 0xc434000f, 0x9b40ffff, 0x97000008, 0xcec0001b, 0xd8400021, - 0xc434000f, 0x9b40ffff, 0x80001205, 0x0a200001, 0x9a00ffff, 0xd8400013, 0xc425c07f, 0x166c001f, - 0x11600001, 0x9ac0fffa, 0x8c001232, 0x7c408001, 0x88000000, 0xd8000033, 0xc438000b, 0xc43c0009, - 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078, 0x7ffbc00c, 0x97c0fffd, - 0x90000000, 0xc03a2800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380040, - 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f, - 0x9b80ffff, 0x04380002, 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, - 0x9bc0fffa, 0x90000000, 0xd8400013, 0xd801c07f, 0xd8400013, 0xc43dc07f, 0xcfc00078, 0xd8000034, - 0x90000000, 0xc03ae000, 0xcf81c200, 0xc03a0800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, - 0xcc01c07e, 0x04380040, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380002, 0x0bb80001, - 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000, 0xc03ae000, - 0xcf81c200, 0xc03a4000, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380002, - 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000, - 0xc40c0007, 0x30d00002, 0x99000052, 0xd8400029, 0xc424005e, 0x9640090f, 0x7c410001, 0xc428000e, - 0x1514001f, 0x19180038, 0x2aa80008, 0x99400030, 0x30dc0001, 0xce800013, 0x99c0000a, 0xc42d324e, - 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x1ab0c006, 0x52ec0008, 0x8000127f, - 0xc42d3258, 0xc4313257, 0x52ec0020, 0x7ef2c01a, 0xc4353259, 0xc429325a, 0x1ab0c012, 0x07740001, - 
0x04240002, 0x26a0003f, 0x7e624004, 0x7f67800f, 0x97800002, 0x04340000, 0x53740002, 0x7ef6c011, - 0x1ab42010, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f73400a, 0x7f6b400a, 0xcf40001c, 0xd2c0001e, - 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x1514001f, 0x99400006, 0x9980000a, 0x8c0012e1, - 0xc40c0007, 0x04100000, 0x80001267, 0xd800002a, 0xc424005e, 0x964008d7, 0xd9800036, 0x80000c16, - 0xc42c001d, 0x95c00005, 0xc431325a, 0x1b300677, 0x11dc000c, 0x800012aa, 0xc4313256, 0x1b34060b, - 0x1b300077, 0x7f37000a, 0x13300017, 0x04340100, 0x26ec00ff, 0xc03a8002, 0x7ef6c00a, 0x7edec00a, - 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16, 0xc4140032, 0xc410001d, 0x29540008, 0xcd400013, - 0xc40d325b, 0x1858003f, 0x251000ff, 0x99800007, 0x7d0cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, - 0xcd01325d, 0x80000168, 0x18d0006c, 0x18d407f0, 0x9900000e, 0x04100002, 0xc4193256, 0xc41d324f, - 0x2598003f, 0x7d190004, 0x7d5d4001, 0x7d52000f, 0x9a000003, 0xcd41324f, 0x800012d8, 0x7d514002, - 0xcd41324f, 0x800012d8, 0xc4193259, 0xc41d325a, 0x7d958001, 0x7dd5c002, 0xcd813259, 0xcdc1325a, - 0xc411325d, 0x251001ef, 0xcd01325d, 0x1ccc001e, 0xccc1325b, 0xc40d325b, 0x94c00001, 0x7c408001, - 0x88000000, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0x9b000004, 0x9b40000c, - 0x9b80000f, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037, 0x8c000190, 0xd8000032, 0x90000000, - 0xd8000028, 0xd800002b, 0x80000168, 0xd980003f, 0x97c00002, 0xd9c0003f, 0x80001082, 0xd9800040, - 0x97c00002, 0xd9c00040, 0x800010de, 0xc43c0007, 0x33f80003, 0x97800051, 0xcc80003b, 0x24b00008, - 0xc418000e, 0x1330000a, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, - 0xc4353249, 0x1b74003e, 0x9b400002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x97000001, - 0x7c434001, 0x1b4c00f8, 0x7c410001, 0x7c414001, 0x50700020, 0x04e81324, 0x18ac0024, 0x7c41c001, - 0x50600020, 0xcc400078, 0x30e40004, 0x9a400007, 0x7d71401a, 0x596401fc, 0x12640009, 0x1b74008d, - 0x7e76400a, 0x2a640000, 0xcec00008, 0x86800000, 0x8000016a, 0x8000016a, 0x8000016a, 0x8000016a, - 0x8000132c, 0x8000133b, 0x80001344, 0x8000016a, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42530b5, - 0x1a68003a, 0x9a80fffe, 0x2024003a, 0xc418000e, 0x25980700, 0x11980014, 0x7d19000a, 0xcd0130b7, - 0xce4130b5, 0xcf400008, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f, 0x9a80ffff, - 0xc4240011, 0x7de6800f, 0x9a80ffea, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f, - 0x9a80ffff, 0xc8240011, 0x7de1c01a, 0x7de6800f, 0x9a80ffe0, 0x80001190, 0x8c00104f, 0x28182002, - 0xc430000d, 0xc4340035, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005, 0xc4240004, 0x11a00002, - 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008, - 0x7cb4800a, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024, 0x20240030, 0x7ca48001, 0xcc800026, - 0x7c434001, 0x1b4c00f8, 0xcf400026, 0xcc400026, 0x28340000, 0xcf400013, 0x7c414001, 0x507c0020, - 0x30e40004, 0x9a400005, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013, - 0xcd400026, 0xcfc00026, 0xd4400026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000, - 0x7c018001, 0xd8400027, 0xd8800013, 0x04380028, 0xcec0003a, 0xcf81a2a4, 0x80001037, 0xd8400029, - 0xc40c005e, 0x94c007eb, 0x7c40c001, 0x50500020, 0x7d0d001a, 0xd1000072, 0xc8100072, 0x591c01fc, - 0x11dc0009, 0x45140210, 0x595801fc, 0x11980009, 0x29dc0000, 0xcdc0001c, 0xd140001e, 0xd8400021, - 0xc418000f, 0x9980ffff, 0xc4200011, 0x1624001f, 0x96400069, 0xc40c000e, 0x28cc0008, 0xccc00013, - 0xce013249, 0x1a307fe8, 
0xcf00000a, 0x23304076, 0xd1000001, 0xcf000001, 0xc41d3254, 0xc4253256, - 0x18cc00e8, 0x10cc0015, 0x4514020c, 0xd140001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011, - 0xce013248, 0x1a2001e8, 0x12200014, 0x2a204001, 0xce000013, 0x1a64003c, 0x1264001f, 0x11dc0009, - 0x15dc000b, 0x7dcdc00a, 0x7e5dc00a, 0xcdc00100, 0xd8800013, 0xd8400010, 0xd800002a, 0xd8400008, - 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001, 0x04300010, 0xdf430000, 0x7c434001, - 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078, 0xdf030000, 0xd4412e40, 0xd8400013, - 0xcc41c030, 0xcc41c031, 0x248dfffe, 0xccc12e00, 0xd8800013, 0xcc812e00, 0x7c434001, 0x7c434001, - 0x8c00142b, 0xd8000010, 0xc40c000e, 0x28cc0008, 0xccc00013, 0x45140248, 0xd140001e, 0xd8400021, - 0xc418000f, 0x9980ffff, 0xc8200011, 0xce013257, 0x56200020, 0xce013258, 0x0434000c, 0xdb000024, - 0xd1400025, 0xd8000026, 0xd8000026, 0xd8400027, 0x45540008, 0xd140001e, 0xd8400021, 0xc418000f, - 0x9980ffff, 0xc8200011, 0xce013259, 0x56200020, 0xc0337fff, 0x7f220009, 0xce01325a, 0x55300020, - 0x7d01c001, 0x042c01d0, 0x8c000d61, 0x06ec0004, 0x7f01c001, 0x8c000d61, 0x041c0002, 0x042c01c8, - 0x8c000d61, 0xc4380012, 0x9b80ffff, 0xd800002a, 0x80000aa7, 0xd800002a, 0x7c408001, 0x88000000, - 0xd8400029, 0x7c40c001, 0x50500020, 0x8c001427, 0x7cd0c01a, 0xc4200007, 0xd0c00072, 0xc8240072, - 0xd240001e, 0x7c414001, 0x19682011, 0x5a6c01fc, 0x12ec0009, 0x7eeac00a, 0x2aec0000, 0xcec0001c, - 0xd8400021, 0xc430000f, 0x9b00ffff, 0xc4180011, 0x7c438001, 0x99800007, 0xdf830000, 0xcfa0000c, - 0x8c00142b, 0xd4400078, 0xd800002a, 0x80001b70, 0x8c00142b, 0xd800002a, 0x80001b70, 0xd8000012, - 0xc43c0008, 0x9bc0ffff, 0x90000000, 0xd8400012, 0xc43c0008, 0x97c0ffff, 0x90000000, 0xc4380007, - 0x7c40c001, 0x17b80001, 0x18d40038, 0x7c410001, 0x9b800004, 0xd8400029, 0xc414005e, 0x9540073d, - 0x18c80066, 0x7c414001, 0x30880001, 0x7c418001, 0x94800008, 0x8c00187c, 0xcf400013, 0xc42c0004, - 0xd8400008, 0xcd910000, 0xcec00008, 0x7d410001, 0x043c0000, 0x7c41c001, 0x7c420001, 0x04240001, - 0x06200001, 0x4220000c, 0x0a640001, 0xcc000078, 0x9a40fffe, 0x24e80007, 0x24ec0010, 0xd8400013, - 0x9ac00006, 0xc42c0004, 0xd8400008, 0xc5310000, 0xcec00008, 0x80001465, 0x51540020, 0x7d15001a, - 0xd1000072, 0xc82c0072, 0xd2c0001e, 0x18f02011, 0x5aec01fc, 0x12ec0009, 0x7ef2c00a, 0x2aec0000, - 0xcec0001c, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc4300011, 0x96800012, 0x12a80001, 0x0aa80001, - 0x06a8146a, 0x7f1f0009, 0x86800000, 0x7f1b400f, 0x80001478, 0x7f1b400e, 0x80001478, 0x7f1b400c, - 0x8000147a, 0x7f1b400d, 0x8000147a, 0x7f1b400f, 0x8000147a, 0x7f1b400e, 0x8000147a, 0x7f334002, - 0x97400014, 0x8000147b, 0x9b400012, 0x9b800005, 0x9bc0001f, 0x7e024001, 0x043c0001, 0x8000144a, - 0xc40c0032, 0xc438001d, 0x28cc0008, 0xccc00013, 0xc43d325b, 0x1bb81ff0, 0x7fbfc00a, 0xcfc1325b, - 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0x94800007, 0x8c00187c, 0xcf400013, 0xc42c0004, - 0xd8400008, 0xcd910000, 0xcec00008, 0x9b800003, 0xd800002a, 0x80001b70, 0xc40c0032, 0x28cc0008, - 0xccc00013, 0xc40d325b, 0x800012c2, 0xc40c000e, 0xc43c0007, 0xc438001d, 0x28cc0008, 0xccc00013, - 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x800014a9, 0xc43d325a, - 0x1bfc0677, 0x04300100, 0x1bb81ff0, 0x7f73400a, 0xc0328007, 0x7fb7800a, 0x13fc0017, 0x7ff3c00a, - 0x7ffbc00a, 0xcfc1325b, 0xc03a0002, 0xc4340004, 0xd8400013, 0xd8400008, 0xcf8130b5, 0xcf400008, - 0x80000c16, 0x043c0000, 0xc414000e, 0x29540008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020, - 0x7dd9c01a, 0x45dc0390, 0xc4313267, 0x04183000, 
0xcd813267, 0x1b380057, 0x1b340213, 0x1b300199, - 0x7f7b400a, 0x7f73400a, 0xcf400024, 0xd1c00025, 0xcc800026, 0x7c420001, 0xce000026, 0x7c424001, - 0xce400026, 0x7c428001, 0xce800026, 0x7c42c001, 0xcec00026, 0x7c430001, 0xcf000026, 0x7c434001, - 0xcf400026, 0x7c438001, 0xcf800026, 0xd8400027, 0xcd400013, 0x04182000, 0xcd813267, 0xd840004f, - 0x1a0800fd, 0x109c000a, 0xc4193265, 0x7dd9c00a, 0xcdc13265, 0x2620ffff, 0xce080228, 0x9880000e, - 0xce480250, 0xce880258, 0xd8080230, 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270, - 0xd8080278, 0xd8080280, 0xd800004f, 0x97c0ec75, 0x90000000, 0x040c0000, 0x041c0010, 0x26180001, - 0x09dc0001, 0x16200001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80230, 0xd8080238, 0xd8080240, - 0xd8080248, 0x040c0000, 0xce480250, 0xce880258, 0x52a80020, 0x7e6a401a, 0x041c0020, 0x66580001, - 0x09dc0001, 0x56640001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80260, 0xd8080268, 0xd8080270, - 0xd8080278, 0xd8080280, 0x040c0000, 0xcec80288, 0xcf080290, 0xcec80298, 0xcf0802a0, 0x040c0000, - 0x041c0010, 0xcf4802a8, 0x27580001, 0x09dc0001, 0x17740001, 0x95800002, 0x04cc0001, 0x99c0fffb, - 0xccc802b0, 0xd80802b8, 0x178c000b, 0x27b8003f, 0x7cf8c001, 0xcf8802c0, 0xccc802c8, 0xcf8802d0, - 0xcf8802d8, 0xd800004f, 0x97c00002, 0x90000000, 0x7c408001, 0x88000000, 0xc40c000e, 0x28cc0008, - 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c418001, 0x25b8ffff, 0xc4930240, 0xc48f0238, 0x04cc0001, - 0x24cc000f, 0x7cd2800c, 0x9a80000b, 0xc5230309, 0x2620ffff, 0x7e3a400c, 0x9a400004, 0x05100001, - 0x2510000f, 0x80001539, 0xcd08034b, 0xd4400078, 0x80000168, 0xc48f0230, 0xc4930240, 0x98c00004, - 0xcd880353, 0x8c00163f, 0xc49b0353, 0xc4930238, 0xc48f0228, 0x05100001, 0x2510000f, 0x7cd14005, - 0x25540001, 0x99400004, 0x05100001, 0x2510000f, 0x8000154f, 0xc48f0230, 0x7c41c001, 0xcd080238, - 0xcd08034b, 0x08cc0001, 0x2598ffff, 0x3d200008, 0xccc80230, 0xcd900309, 0xd8100319, 0x04340801, - 0x2198003f, 0xcf400013, 0xcd910ce7, 0xc4190ce6, 0x7d918005, 0x25980001, 0x9580fffd, 0x7d918004, - 0xcd810ce6, 0x9a000003, 0xcdd1054f, 0x8000156e, 0x090c0008, 0xcdcd050e, 0x040c0000, 0x110c0014, - 0x28cc4001, 0xccc00013, 0xcc41230a, 0xcc41230b, 0xcc41230c, 0xcc41230d, 0xcc480329, 0xcc48032a, - 0xcc4802e0, 0xd8000055, 0xc48f02e0, 0x24d8003f, 0x09940001, 0x44100001, 0x9580002c, 0x95400005, - 0x09540001, 0x51100001, 0x69100001, 0x8000157f, 0x24cc003f, 0xc4970290, 0xc49b0288, 0x51540020, - 0x7d59401a, 0xc49b02a0, 0xc49f0298, 0x51980020, 0x7d9d801a, 0x041c0040, 0x04200000, 0x7dcdc002, - 0x7d924019, 0x7d26400c, 0x09dc0001, 0x9a400008, 0x51100001, 0x06200001, 0x99c0fffa, 0xc48f0230, - 0xc4930240, 0x8c00163f, 0x80001579, 0x7d010021, 0x7d914019, 0xc4930238, 0x55580020, 0xcd480298, - 0xcd8802a0, 0x10d40010, 0x12180016, 0xc51f0309, 0x7d95800a, 0x7d62000a, 0x7dd9c00a, 0xd8400013, - 0xcdd00309, 0xce113320, 0xc48f02e0, 0xc49b02b0, 0x18dc01e8, 0x7dd9400e, 0xc48f0230, 0xc4930240, - 0x95c0001d, 0x95400003, 0x8c00163f, 0x800015aa, 0xc48f0238, 0xc4a302b8, 0x12240004, 0x7e5e400a, - 0xc4ab02a8, 0x04100000, 0xce4c0319, 0x7d9d8002, 0x7ea14005, 0x25540001, 0x99400004, 0x06200001, - 0x2620000f, 0x800015bc, 0x09dc0001, 0x04240001, 0x7e624004, 0x06200001, 0x7d25000a, 0x2620000f, - 0x99c0fff4, 0xd8400013, 0xcd0d3330, 0xce0802b8, 0xcd8802b0, 0xc4ab02e0, 0x1aa807f0, 0xc48f02d0, - 0xc49702d8, 0xc49b02c8, 0xc49f02c0, 0x96800028, 0x7d4e000f, 0x9600000b, 0x7d964002, 0x7e6a000f, - 0x96000003, 0x7d694001, 0x800015e9, 0x7cde4002, 0x7e6a000f, 0x96000008, 0x7de94001, 0x800015e9, - 0x7cd64002, 0x7e6a000e, 0x96000003, 0x7d694001, 0x800015e9, 0xc48f0230, 
0xc4930240, 0x8c00163f, - 0x800015cd, 0xc4930238, 0x7d698002, 0xcd4802d8, 0x129c0008, 0xc50f0319, 0x11a0000e, 0x11140001, - 0xc4340004, 0xd8400008, 0xd8400013, 0x7e1e000a, 0x1198000a, 0xcd953300, 0x7e0e000a, 0x12a8000a, - 0xce953301, 0xce100319, 0xcf400008, 0xc4b70280, 0xc4b30278, 0x7f73800a, 0x536c0020, 0x7ef2c01a, - 0x9780eb68, 0x8c001608, 0xd8080278, 0xd8080280, 0x7c408001, 0x88000000, 0x043c0003, 0x80001609, - 0x043c0001, 0x30b40000, 0x9b400011, 0xc4b70258, 0xc4b30250, 0x53780020, 0x7fb3801a, 0x7faf8019, - 0x04300020, 0x04280000, 0x67b40001, 0x0b300001, 0x57b80001, 0x97400002, 0x06a80001, 0x9b00fffb, - 0xc4bb0260, 0x7fab8001, 0xcf880260, 0x04300020, 0x04280000, 0x66f40001, 0x0b300001, 0x56ec0001, - 0x97400005, 0x8c001628, 0xc4353247, 0x7f7f4009, 0x9b40fffe, 0x06a80001, 0x9b00fff7, 0x90000000, - 0x269c0007, 0x11dc0008, 0x29dc0008, 0x26a00018, 0x12200003, 0x7de1c00a, 0x26a00060, 0x06200020, - 0x16200001, 0x7de1c00a, 0xcdc00013, 0x90000000, 0x269c0018, 0x26a00007, 0x26a40060, 0x11dc0006, - 0x12200006, 0x16640001, 0x29dc0008, 0x7de1c00a, 0x7de5c00a, 0xcdc00013, 0x90000000, 0xc4b70228, - 0x05100001, 0x04cc0001, 0x2510000f, 0xccc80230, 0x7f514005, 0x25540001, 0x99400004, 0x05100001, - 0x2510000f, 0x80001644, 0xc4b30248, 0xcd080240, 0x7f130005, 0x27300001, 0x9b000002, 0x8c001688, - 0x8c00120d, 0x8c001219, 0x8c001232, 0x04300001, 0x04340801, 0x7f130004, 0xcf400013, 0xcf01051e, - 0xc42d051f, 0x7ed2c005, 0x26ec0001, 0x96c0fffd, 0xcf01051f, 0xd8000055, 0xc5170309, 0x195c07f0, - 0x196007f6, 0x04340000, 0x95c00008, 0x09dc0001, 0x04340001, 0x95c00005, 0x09dc0001, 0x53740001, - 0x6b740001, 0x80001665, 0xc4a702a0, 0xc4ab0298, 0x52640020, 0x7e6a401a, 0x7f634014, 0x7e76401a, - 0xc4300004, 0xd8400008, 0xd8400013, 0x56680020, 0xd8113320, 0xce480298, 0xce8802a0, 0xc5170319, - 0xc4b702b0, 0x255c000f, 0x7f5f4001, 0xd8113330, 0xcf4802b0, 0x11340001, 0x195c07e8, 0x196007ee, - 0xd8353300, 0x7e1e4001, 0xd8353301, 0xce4802d0, 0xd8100309, 0xd8100319, 0xcf000008, 0x90000000, - 0xc4970258, 0xc48f0250, 0x51540020, 0x7cd4c01a, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a, - 0x04140020, 0x04280000, 0x64d80001, 0x09540001, 0x54cc0001, 0x95800060, 0x8c001628, 0xc4193247, - 0x25980001, 0x9580005c, 0x7dc24001, 0xc41d3248, 0x25dc000f, 0x7dd2000c, 0x96000057, 0xc41d3255, - 0xc435324f, 0x7df5c00c, 0x99c00004, 0xc4193265, 0x25980040, 0x9580fffe, 0xc439325b, 0x1bb0003f, - 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260, 0x1bb000e4, - 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x800016f1, 0xce400013, 0xc033ffff, - 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe, 0xd8c00033, - 0xc4300009, 0x27300008, 0x9700fffe, 0x1a7003e6, 0x27380003, 0x13b80004, 0x27300003, 0x13300003, - 0x7fb38001, 0x1a7000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013, 0x1a700064, - 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, - 0x0b300003, 0x800016df, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005, - 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xce400013, 0xc431325d, - 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xd841325d, 0x2030007b, 0xcf01325b, - 0x800016f2, 0xd841325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9c, 0x8c001608, - 0xd8080278, 0xd8080280, 0x90000000, 0xd840004f, 0xc414000e, 0x29540008, 0xcd400013, 0xc43d3265, - 0x1bc800ea, 0xd80802e9, 0x7c40c001, 0x18fc0064, 0x9bc00042, 0xc4193246, 0xc41d3245, 0x51980020, - 
0x7dd9801a, 0x45980400, 0xc4313267, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x9bc00001, 0x1b380057, - 0x1b340213, 0x1b300199, 0x7f7b400a, 0x7f73400a, 0xcf400024, 0x14f4001d, 0xc4bf02e9, 0x9bc0001c, - 0x7c410001, 0x192807fa, 0xc4bf0258, 0xc4a70250, 0x53fc0020, 0x7e7e401a, 0x042c0000, 0x04300000, - 0x667c0001, 0x56640001, 0x06ec0001, 0x97c0fffd, 0x07300001, 0x0aec0001, 0x7eebc00c, 0x06ec0001, - 0x97c0fff8, 0x0b300001, 0x43300007, 0x53300002, 0x7db30011, 0xd3000025, 0xc03ec005, 0x2bfca200, - 0xcfc00026, 0xccc00026, 0xcd000026, 0x192807fa, 0xc01f007f, 0x7d1d0009, 0x2110007d, 0x8c001628, - 0x203c003f, 0xcfc13256, 0x8c0017f5, 0xcd013254, 0x18fc01e8, 0xcfc13248, 0x8c00185b, 0xd8413247, - 0x0b740001, 0x9b40ffd5, 0xd800004f, 0xc4bf02e9, 0x97c0ea24, 0x90000000, 0x14d4001d, 0xc4930260, - 0x7d52400e, 0xc49f0258, 0xc4a30250, 0x51dc0020, 0x7de1801a, 0x96400017, 0x7d534002, 0xc4af0270, - 0x7dae4005, 0x26640001, 0x32e0001f, 0x9a400006, 0x06ec0001, 0x96000002, 0x042c0000, 0xcec80270, - 0x8000174f, 0x0b740001, 0x8c00178a, 0x05100001, 0x9b40fff3, 0xc4af0280, 0xc4b30278, 0x52ec0020, - 0x7ef2c01a, 0x8c001608, 0xd8080278, 0xd8080280, 0xc4ab0268, 0x7daa4005, 0x26640001, 0x32a0001f, - 0x9a400005, 0x06a80001, 0x96000002, 0x24280000, 0x80001765, 0x7c410001, 0xc01f007f, 0x09540001, - 0x7d1d0009, 0x2110007d, 0x8c001628, 0xd8013256, 0x8c0017f2, 0xcd013254, 0xc4113248, 0x15100004, - 0x11100004, 0xc4b3034b, 0x7f13000a, 0xcf013248, 0xc4930260, 0x8c001855, 0x32a4001f, 0xd8413247, - 0xd800004f, 0x09100001, 0x06a80001, 0x96400002, 0x24280000, 0xcd080260, 0xce880268, 0x9940ffc0, - 0x7c408001, 0x88000000, 0x7ec28001, 0x8c001628, 0x32e0001f, 0xc4253247, 0x26640001, 0x9640005e, - 0xc4293265, 0xc4253255, 0xc431324f, 0x7e72400c, 0x26a80040, 0x9a400002, 0x9680fff7, 0xc429325b, - 0x1aa4003f, 0x96400049, 0x1aa400e8, 0x32680003, 0x9a800046, 0x32640002, 0x9640000a, 0xc4293260, - 0x1aa400e4, 0x32640004, 0x96400040, 0xc425325d, 0x26640010, 0x9a40fffe, 0x800017e2, 0xcdc00013, - 0xc027ffff, 0x2e6400ff, 0xc429325b, 0x7e6a4009, 0xce41325b, 0xc429325b, 0x26a800ff, 0x9a80fffe, - 0xd8c00033, 0xc4240009, 0x26640008, 0x9640fffe, 0x19e403e6, 0x26680003, 0x12a80004, 0x26640003, - 0x12640003, 0x7ea68001, 0x19e400e8, 0x7ea68001, 0x12640001, 0x7ea68001, 0x06a80002, 0xd8400013, - 0x19e40064, 0x32640002, 0x96400009, 0x16a40005, 0x06640003, 0xce412082, 0xcc01203f, 0xd8400013, - 0xcc01203f, 0x0a640003, 0x800017d0, 0x16a40005, 0xce412082, 0xcc01203f, 0xd8400013, 0xcc01203f, - 0x12640005, 0x7ea64002, 0xc4292083, 0x7ea68005, 0x26a80001, 0x9a80ffdf, 0xd8c00034, 0xcdc00013, - 0xc425325d, 0x26640010, 0x9a40fffe, 0xc429325b, 0x26a400ff, 0x9a40ffca, 0xd841325d, 0x2024007b, - 0xce41325b, 0x800017e3, 0xd841325d, 0xc4a70280, 0xc4ab0278, 0x52640020, 0x7e6a401a, 0x04280001, - 0x7eae8014, 0x7e6a401a, 0x56680020, 0xce480278, 0xce880280, 0x06ec0001, 0x96000002, 0x042c0000, - 0xcec80270, 0x90000000, 0x7c438001, 0x7c420001, 0x800017fe, 0xc4bf02e9, 0x9bc00006, 0x7c438001, - 0x7c420001, 0xcf800026, 0xce000026, 0x800017fe, 0xc43b02eb, 0xc42302ec, 0xcf813245, 0xce013246, - 0x52200020, 0x7fa3801a, 0x47b8020c, 0x15e00008, 0x1220000a, 0x2a206032, 0x513c001e, 0x7e3e001a, - 0xc4bf02e9, 0x9bc00005, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x8000180f, 0xcd400013, 0xc4313267, - 0x1b3c0077, 0x1b300199, 0x7ff3000a, 0x1330000a, 0x2b300032, 0x043c3000, 0xcfc13267, 0xc43d3267, - 0xd200000b, 0xc4200007, 0xd3800002, 0xcf000002, 0xd8000040, 0x96000002, 0xd8400040, 0xd8400018, - 0x043c2000, 0xcfc13267, 0xd8000018, 0xd8800010, 0xcdc00013, 0x7dc30001, 0xdc1e0000, 0x04380032, - 0xcf80000e, 0x8c001427, 
0xcc413248, 0xc43d3269, 0x27fc000f, 0x33fc0003, 0x97c00011, 0x043c001f, - 0xdfc30000, 0xd4413249, 0x7c43c001, 0x7c43c001, 0x043c0024, 0x0bfc0021, 0xdfc30000, 0xd441326a, - 0x173c0008, 0x1b300303, 0x7f3f0001, 0x043c0001, 0x7ff3c004, 0xcfc13084, 0x80001842, 0x043c0024, - 0xdfc30000, 0xd4413249, 0x7c43c001, 0x23fc003f, 0xcfc1326d, 0x0bb80026, 0xdf830000, 0xd441326e, - 0x7c438001, 0x7c438001, 0xc4393265, 0x1fb8ffc6, 0xddc30000, 0xcf813265, 0x9a000003, 0xcdc0000c, - 0x80001852, 0xcdc0000d, 0xce000010, 0x8c00142b, 0x90000000, 0x7c41c001, 0x7c420001, 0xcdc13252, - 0xce013253, 0x8c001628, 0x80001878, 0xc49f02e9, 0x99c00018, 0x7c41c001, 0x7c420001, 0xcdc13252, - 0xce013253, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x97c0ffff, - 0xcdc00026, 0xce000026, 0xd8400027, 0xc41c0012, 0x99c0ffff, 0xc43c000e, 0x2bfc0008, 0xcfc00013, - 0x043c2000, 0xcfc13267, 0x8c001628, 0x80001878, 0xc41f02ed, 0xc42302ee, 0xcdc13252, 0xce013253, - 0x04200001, 0x7e2a0004, 0xce013084, 0x90000000, 0x28340001, 0x313c0bcc, 0x9bc00010, 0x393c051f, - 0x9bc00004, 0x3d3c050e, 0x9bc0000c, 0x97c0000c, 0x393c0560, 0x9bc00004, 0x3d3c054f, 0x9bc00007, - 0x97c00007, 0x393c1538, 0x9bc00005, 0x3d3c1537, 0x9bc00002, 0x97c00002, 0x2b740800, 0x90000000, - 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e8007c, 0x7c42c001, - 0x06a8189a, 0x86800000, 0x8000189e, 0x800018c5, 0x800018f2, 0x8000016a, 0x7c414001, 0x18d0007e, - 0x50580020, 0x09200001, 0x7d59401a, 0xd1400072, 0xc8140072, 0x09240002, 0x7c418001, 0x7c41c001, - 0x99000011, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42130b5, 0x1a24002c, 0x9a40fffe, 0x2020002c, - 0xc418000d, 0x1198001c, 0x10cc0004, 0x14cc0004, 0x7cd8c00a, 0xccc130b7, 0xce0130b5, 0xcf400008, - 0x80000168, 0xd1400025, 0x5978073a, 0x2bb80002, 0xcf800024, 0xcd800026, 0xcdc00026, 0xd8400027, - 0x9600e8a8, 0xc4300012, 0x9b00ffff, 0x9640e8a5, 0x800018a9, 0x04140000, 0xc55b0309, 0x3d5c0010, - 0x05540001, 0x2598ffff, 0x09780001, 0x7dad800c, 0x99c0ffd2, 0x9580fff9, 0xc4970258, 0xc4930250, - 0x51540020, 0x7d15001a, 0x04140020, 0x04280000, 0x442c0000, 0x65180001, 0x09540001, 0x55100001, - 0x9580000b, 0x8c001628, 0xc41d3248, 0x04300001, 0x7f2b0014, 0x25dc000f, 0x7df9c00c, 0x95c00004, - 0x7ef2c01a, 0xd8c13260, 0xd901325d, 0x06a80001, 0x9940fff1, 0x04140020, 0x04280000, 0x66d80001, - 0x09540001, 0x56ec0001, 0x95800005, 0x8c001628, 0xc421325d, 0x26240007, 0x9a40fffe, 0x06a80001, - 0x9940fff7, 0x8000189e, 0x04140020, 0x04280000, 0x09540001, 0x8c001628, 0xc41d3254, 0xc023007f, - 0x19e4003e, 0x7de1c009, 0x7dee000c, 0x96400008, 0x96000007, 0xd8c13260, 0xd901325d, 0xc421325d, - 0x261c0007, 0x99c0fffe, 0x8000189e, 0x06a80001, 0x9940fff0, 0x8000189e, 0xc40c000e, 0x28cc0008, - 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e00064, 0x06281911, 0x14f4001d, 0x24cc0003, - 0x86800000, 0x80001915, 0x800019af, 0x80001a2b, 0x8000016a, 0xcc48032b, 0xcc480333, 0xcc48033b, - 0xcc480343, 0x98800011, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267, - 0x04203000, 0xce013267, 0xc4213267, 0x9a000001, 0x1b3c0057, 0x1b200213, 0x1b300199, 0x7e3e000a, - 0x7e32000a, 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278, - 0x52ec0020, 0x7ef2c01a, 0x04180000, 0x04140020, 0x04280000, 0x7f438001, 0x8c001628, 0xc41d3247, - 0x25dc0001, 0x95c00068, 0xc4213254, 0x1a1c003e, 0x95c00065, 0xc01f007f, 0x7e1e0009, 0x97800062, - 0x0bb80001, 0x43bc0008, 0x7fcbc001, 0xc7df032b, 0x7e1fc00c, 0x97c0fffa, 0x043c0101, 0x94c00002, - 0x043c0102, 0xc439325b, 0x1bb0003f, 0x97000049, 
0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, - 0x97000009, 0xc4393260, 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, - 0x80001994, 0x8c001628, 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, - 0x27b800ff, 0x9b80fffe, 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003, - 0x13b80004, 0x27300003, 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001, - 0x07b80002, 0xd8400013, 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, - 0xcc01203f, 0xd8400013, 0xcc01203f, 0x0b300003, 0x80001982, 0x17b00005, 0xcf012082, 0xcc01203f, - 0xd8400013, 0xcc01203f, 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, - 0xd8c00034, 0xcdc00013, 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffcb, - 0xcfc1325d, 0x2030007b, 0xcf01325b, 0x80001995, 0xcfc1325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, - 0x98800009, 0x41bc0007, 0x53fc0002, 0x7e7fc011, 0xd3c00025, 0xd8000026, 0xd8400027, 0xc43c0012, - 0x9bc0ffff, 0x653c0001, 0x7dbd8001, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff8f, 0xc43c000e, - 0x2bfc0008, 0xcfc00013, 0x043c2000, 0xcfc13267, 0xd8080278, 0xd8080280, 0x80000168, 0x7c410001, - 0x04140000, 0xc55b0309, 0x3d5c0010, 0x2598ffff, 0x05540001, 0x7d91800c, 0x95c00003, 0xd4400078, - 0x80000168, 0x9580fff8, 0x09780001, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, - 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x09540001, 0x55100001, - 0x9580005d, 0x8c001628, 0xc4253247, 0x26640001, 0x04200101, 0x96400058, 0x7dc24001, 0xc41d3248, - 0x25dc000f, 0x7df9c00c, 0x95c00053, 0x94c00002, 0x04200102, 0x7e41c001, 0xc425325b, 0x1a70003f, - 0x97000049, 0x1a7000e8, 0x33240003, 0x9a400046, 0x33300002, 0x9700000a, 0xc4253260, 0x1a7000e4, - 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001a21, 0xcdc00013, 0xc033ffff, - 0x2f3000ff, 0xc425325b, 0x7f270009, 0xcf01325b, 0xc425325b, 0x266400ff, 0x9a40fffe, 0xd8c00033, - 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27240003, 0x12640004, 0x27300003, 0x13300003, - 0x7e724001, 0x19f000e8, 0x7e724001, 0x13300001, 0x7e724001, 0x06640002, 0xd8400013, 0x19f00064, - 0x33300002, 0x97000009, 0x16700005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, - 0x0b300003, 0x80001a0f, 0x16700005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005, - 0x7e730002, 0xc4252083, 0x7e724005, 0x26640001, 0x9a40ffdf, 0xd8c00034, 0xcdc00013, 0xc431325d, - 0x27300010, 0x9b00fffe, 0xc425325b, 0x267000ff, 0x9b00ffca, 0xce01325d, 0x2030007b, 0xcf01325b, - 0x80001a22, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9f, 0xd4400078, - 0xd8080278, 0xd8080280, 0x80000168, 0x8c001a31, 0xd4400078, 0xd8080278, 0xd8080280, 0x7c408001, - 0x88000000, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267, 0x04203000, - 0xce013267, 0xc4213267, 0x9a000001, 0x1b180057, 0x1b200213, 0x1b300199, 0x7e1a000a, 0x7e32000a, - 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278, 0x52ec0020, - 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x95800060, 0x8c001628, 0xc4193247, 0x25980001, - 0x04200101, 0x94c00005, 0x30f00005, 0x04200005, 0x9b000002, 0x04200102, 0x95800056, 0xc439325b, - 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260, - 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001aa2, 0xcdc00013, - 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 
0x27b800ff, 0x9b80fffe, - 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003, 0x13b80004, 0x27300003, - 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013, - 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, - 0xcc01203f, 0x0b300003, 0x80001a90, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, - 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xcdc00013, - 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xce01325d, 0x2030007b, - 0xcf00325b, 0x80001aa3, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0xc49b02e9, 0x99800005, - 0xd2400025, 0x4664001c, 0xd8000026, 0xd8400027, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff9c, - 0xc49b02e9, 0x99800008, 0xc430000e, 0x2b300008, 0xcf000013, 0x04302000, 0xcf013267, 0xc4313267, - 0x97000001, 0x90000000, 0x244c00ff, 0xcc4c0200, 0x7c408001, 0x88000000, 0xc44f0200, 0xc410000b, - 0xc414000c, 0x7d158010, 0x059cc000, 0xd8400013, 0xccdd0000, 0x7c408001, 0x88000000, 0xc40c0037, - 0x94c0ffff, 0xcc000049, 0xc40c003a, 0x94c0ffff, 0x7c40c001, 0x24d00001, 0x9500e69a, 0x18d0003b, - 0x18d40021, 0x99400006, 0xd840004a, 0xc40c003c, 0x94c0ffff, 0x14cc0001, 0x94c00028, 0xd8000033, - 0xc438000b, 0xc43c0009, 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078, - 0x7ffbc00c, 0x97c0fffd, 0x99000004, 0xc0120840, 0x282c0040, 0x80001ae8, 0xc0121841, 0x282c001a, - 0xcd01c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04200004, 0xcec0001b, 0xd8400021, - 0x0a200001, 0x9a00ffff, 0xc425c07f, 0x166c001f, 0x04200004, 0x9ac0fffb, 0xc434000f, 0x9b40ffff, - 0xd801c07f, 0xd8400013, 0xc425c07f, 0xce400078, 0xd8000034, 0x9940e66b, 0xd800004a, 0x7c408001, - 0x88000000, 0xc40c0036, 0x24d00001, 0x9900fffe, 0x18cc0021, 0xccc00047, 0xcc000046, 0xc40c0039, - 0x94c0ffff, 0xc40c003d, 0x98c0ffff, 0x7c40c001, 0x24d003ff, 0x18d47fea, 0x18d87ff4, 0xcd00004c, - 0xcd40004e, 0xcd80004d, 0xd8400013, 0xcd41c405, 0xc02a0001, 0x2aa80001, 0xce800013, 0xcd01c406, - 0xcc01c406, 0xcc01c406, 0xc40c0006, 0x98c0ffff, 0xc414000e, 0x29540008, 0x295c0001, 0xcd400013, - 0xd8c1325e, 0xcdc0001a, 0x11980002, 0x4110000c, 0xc0160800, 0x7d15000a, 0xc0164010, 0xd8400013, - 0xcd41c078, 0xcc01c080, 0xcc01c081, 0xcd81c082, 0xcc01c083, 0xcd01c084, 0xc40c0006, 0x98c0ffff, - 0xd8400048, 0xc40c003b, 0x94c0ffff, 0x80000c16, 0xd8400013, 0xd801c40a, 0xd901c40d, 0xd801c410, - 0xd801c40e, 0xd801c40f, 0xc40c0040, 0x04140001, 0x09540001, 0x9940ffff, 0x04140096, 0xd8400013, - 0xccc1c400, 0xc411c401, 0x9500fffa, 0xc424003e, 0x04d00001, 0x11100002, 0xcd01c40c, 0xc0180034, - 0xcd81c411, 0xd841c414, 0x0a540001, 0xcd41c412, 0x2468000f, 0xc419c416, 0x41980003, 0xc41c003f, - 0x7dda0001, 0x12200002, 0x10cc0002, 0xccc1c40c, 0xd901c411, 0xce41c412, 0xd8800013, 0xce292e40, - 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xc43c0007, 0xdc120000, 0x31144000, - 0x95400005, 0xdc030000, 0xd800002a, 0xcc3c000c, 0x80001b70, 0x33f80003, 0xd4400078, 0x9780e601, - 0x188cfff0, 0x04e40002, 0x80001190, 0x7c408001, 0x88000000, 0xc424005e, 0x96400006, 0x90000000, - 0xc424005e, 0x96400003, 0x7c408001, 0x88000000, 0x80001b74, 0x80000168, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 
0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 
0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, - 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - }, - .dfy_size = 7440 -}; - -static const PWR_DFY_Section pwr_virus_section4 = { - .dfy_cntl = 0x80000004, - .dfy_addr_hi = 0x000000b4, - .dfy_addr_lo = 0x54106500, - .dfy_data = { - 0x7e000200, 0x7e020204, 0xc00a0505, 0x00000000, 0xbf8c007f, 0xb8900904, 0xb8911a04, 0xb8920304, - 0xb8930b44, 0x921c0d0c, 0x921c1c13, 0x921d0c12, 0x811c1d1c, 0x811c111c, 0x921cff1c, 0x00000400, - 0x921dff10, 0x00000100, 0x81181d1c, 0x7e040218, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0701000, 0x80050002, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0701000, 0x80050102, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 
0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, - 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - }, - .dfy_size = 240 -}; - -static const PWR_DFY_Section pwr_virus_section5 = { - .dfy_cntl = 0x80000004, - .dfy_addr_hi = 0x000000b4, - .dfy_addr_lo = 0x54106900, - .dfy_data = { - 0x7e080200, 0x7e100204, 0xbefc00ff, 0x00010000, 0x24200087, 0x262200ff, 0x000001f0, 0x20222282, - 0x28182111, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, - 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, - 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, - 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 
0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, - 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, - 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, - 0x1100000c, 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - }, - .dfy_size = 384 -}; - -static const PWR_DFY_Section pwr_virus_section6 = { - .dfy_cntl = 0x80000004, - .dfy_addr_hi = 0x000000b4, - .dfy_addr_lo = 0x54116f00, - .dfy_data = { - 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4540fe8, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000041, 0x0000000c, 0x00000000, 0x07808000, 0xffffffff, - 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555, - 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x54116f00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000, - 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000, - 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb454105e, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x000000c0, 0x00000010, 0x00000000, 0x07808000, 0xffffffff, - 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555, - 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x54117300, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000, - 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x540fee20, 0x000000b4, 0x00000000, - 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541065, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000500, 0x0000001c, 0x00000000, 0x07808000, 0xffffffff, - 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555, - 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x54117700, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000, - 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000, - 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541069, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000444, 0x0000008a, 0x00000000, 0x07808000, 0xffffffff, - 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555, - 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x54117b00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000, - 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000, - 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, - }, - .dfy_size = 1024 -}; - -static const PWR_Command_Table pwr_virus_table_post[] = { - { 0x00000000, mmCP_MEC_CNTL }, - { 0x00000000, mmCP_MEC_CNTL }, - { 0x00000004, mmSRBM_GFX_CNTL }, - { 0x54116f00, mmCP_MQD_BASE_ADDR }, - { 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, - { 0xb4540fef, mmCP_HQD_PQ_BASE }, - { 0x00000000, mmCP_HQD_PQ_BASE_HI }, - { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, - { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, - { 0x00005301, mmCP_HQD_PERSISTENT_STATE }, - { 0x00010000, mmCP_HQD_VMID }, - { 0xc8318509, mmCP_HQD_PQ_CONTROL }, - { 0x00000005, mmSRBM_GFX_CNTL }, - { 0x54117300, mmCP_MQD_BASE_ADDR }, - { 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, - { 0xb4540fef, mmCP_HQD_PQ_BASE }, - { 0x00000000, mmCP_HQD_PQ_BASE_HI }, - { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, - { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, - { 0x00005301, mmCP_HQD_PERSISTENT_STATE }, - { 0x00010000, mmCP_HQD_VMID }, - { 0xc8318509, mmCP_HQD_PQ_CONTROL }, - { 0x00000006, mmSRBM_GFX_CNTL }, - { 0x54117700, mmCP_MQD_BASE_ADDR }, - { 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, - { 0xb4540fef, mmCP_HQD_PQ_BASE }, - { 0x00000000, mmCP_HQD_PQ_BASE_HI }, - { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, - { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, - { 0x00005301, mmCP_HQD_PERSISTENT_STATE }, - { 0x00010000, mmCP_HQD_VMID }, - { 0xc8318509, mmCP_HQD_PQ_CONTROL }, - { 0x00000007, mmSRBM_GFX_CNTL }, - { 0x54117b00, mmCP_MQD_BASE_ADDR }, - { 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, - { 0xb4540fef, mmCP_HQD_PQ_BASE }, - { 0x00000000, mmCP_HQD_PQ_BASE_HI }, - { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, - { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, - { 0x00005301, mmCP_HQD_PERSISTENT_STATE }, - { 0x00010000, mmCP_HQD_VMID }, - { 0xc8318509, mmCP_HQD_PQ_CONTROL }, - { 0x00000004, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000104, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000204, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000304, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000404, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000504, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000604, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, 
mmCP_HQD_ACTIVE }, - { 0x00000704, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000005, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000105, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000205, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000305, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000405, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000505, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000605, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000705, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000006, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000106, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000206, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000306, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000406, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000506, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000606, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000706, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000007, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000107, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000207, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, 
mmCP_HQD_ACTIVE }, - { 0x00000307, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000407, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000507, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000607, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000707, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000008, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000108, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000208, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000308, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000408, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000508, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000608, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000708, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000009, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000109, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000209, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000309, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000409, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000509, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000609, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, 
mmCP_HQD_ACTIVE }, - { 0x00000709, mmSRBM_GFX_CNTL }, - { 0x00000000, mmCP_HQD_ACTIVE }, - { 0x00000000, mmCP_HQD_PQ_RPTR }, - { 0x00000000, mmCP_HQD_PQ_WPTR }, - { 0x00000001, mmCP_HQD_ACTIVE }, - { 0x00000004, mmSRBM_GFX_CNTL }, - { 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 }, - { 0x00000000, mmGRBM_STATUS }, - { 0x00000000, mmGRBM_STATUS }, - { 0x00000000, mmGRBM_STATUS }, - { 0x00000000, 0xFFFFFFFF }, -}; - - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/power_state.h b/drivers/gpu/drm/amd/pm/inc/power_state.h deleted file mode 100644 index a5f2227a3971..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/power_state.h +++ /dev/null @@ -1,196 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef PP_POWERSTATE_H -#define PP_POWERSTATE_H - -struct pp_hw_power_state { - unsigned int magic; -}; - -struct pp_power_state; - - -#define PP_INVALID_POWER_STATE_ID (0) - - -/* - * An item of a list containing Power States. 
- */ - -struct PP_StateLinkedList { - struct pp_power_state *next; - struct pp_power_state *prev; -}; - - -enum PP_StateUILabel { - PP_StateUILabel_None, - PP_StateUILabel_Battery, - PP_StateUILabel_MiddleLow, - PP_StateUILabel_Balanced, - PP_StateUILabel_MiddleHigh, - PP_StateUILabel_Performance, - PP_StateUILabel_BACO -}; - -enum PP_StateClassificationFlag { - PP_StateClassificationFlag_Boot = 0x0001, - PP_StateClassificationFlag_Thermal = 0x0002, - PP_StateClassificationFlag_LimitedPowerSource = 0x0004, - PP_StateClassificationFlag_Rest = 0x0008, - PP_StateClassificationFlag_Forced = 0x0010, - PP_StateClassificationFlag_User3DPerformance = 0x0020, - PP_StateClassificationFlag_User2DPerformance = 0x0040, - PP_StateClassificationFlag_3DPerformance = 0x0080, - PP_StateClassificationFlag_ACOverdriveTemplate = 0x0100, - PP_StateClassificationFlag_Uvd = 0x0200, - PP_StateClassificationFlag_3DPerformanceLow = 0x0400, - PP_StateClassificationFlag_ACPI = 0x0800, - PP_StateClassificationFlag_HD2 = 0x1000, - PP_StateClassificationFlag_UvdHD = 0x2000, - PP_StateClassificationFlag_UvdSD = 0x4000, - PP_StateClassificationFlag_UserDCPerformance = 0x8000, - PP_StateClassificationFlag_DCOverdriveTemplate = 0x10000, - PP_StateClassificationFlag_BACO = 0x20000, - PP_StateClassificationFlag_LimitedPowerSource_2 = 0x40000, - PP_StateClassificationFlag_ULV = 0x80000, - PP_StateClassificationFlag_UvdMVC = 0x100000, -}; - -typedef unsigned int PP_StateClassificationFlags; - -struct PP_StateClassificationBlock { - enum PP_StateUILabel ui_label; - enum PP_StateClassificationFlag flags; - int bios_index; - bool temporary_state; - bool to_be_deleted; -}; - -struct PP_StatePcieBlock { - unsigned int lanes; -}; - -enum PP_RefreshrateSource { - PP_RefreshrateSource_EDID, - PP_RefreshrateSource_Explicit -}; - -struct PP_StateDisplayBlock { - bool disableFrameModulation; - bool limitRefreshrate; - enum PP_RefreshrateSource refreshrateSource; - int explicitRefreshrate; - int edidRefreshrateIndex; - bool enableVariBright; -}; - -struct PP_StateMemroyBlock { - bool dllOff; - uint8_t m3arb; - uint8_t unused[3]; -}; - -struct PP_StateSoftwareAlgorithmBlock { - bool disableLoadBalancing; - bool enableSleepForTimestamps; -}; - -#define PP_TEMPERATURE_UNITS_PER_CENTIGRADES 1000 - -/** - * Type to hold a temperature range. - */ -struct PP_TemperatureRange { - int min; - int max; - int edge_emergency_max; - int hotspot_min; - int hotspot_crit_max; - int hotspot_emergency_max; - int mem_min; - int mem_crit_max; - int mem_emergency_max; -}; - -struct PP_StateValidationBlock { - bool singleDisplayOnly; - bool disallowOnDC; - uint8_t supportedPowerLevels; -}; - -struct PP_UVD_CLOCKS { - uint32_t VCLK; - uint32_t DCLK; -}; - -/** -* Structure to hold a PowerPlay Power State. 
-*/ -struct pp_power_state { - uint32_t id; - struct PP_StateLinkedList orderedList; - struct PP_StateLinkedList allStatesList; - - struct PP_StateClassificationBlock classification; - struct PP_StateValidationBlock validation; - struct PP_StatePcieBlock pcie; - struct PP_StateDisplayBlock display; - struct PP_StateMemroyBlock memory; - struct PP_TemperatureRange temperatures; - struct PP_StateSoftwareAlgorithmBlock software; - struct PP_UVD_CLOCKS uvd_clocks; - struct pp_hw_power_state hardware; -}; - -enum PP_MMProfilingState { - PP_MMProfilingState_NA = 0, - PP_MMProfilingState_Started, - PP_MMProfilingState_Stopped -}; - -struct pp_clock_engine_request { - unsigned long client_type; - unsigned long ctx_id; - uint64_t context_handle; - unsigned long sclk; - unsigned long sclk_hard_min; - unsigned long mclk; - unsigned long iclk; - unsigned long evclk; - unsigned long ecclk; - unsigned long ecclk_hard_min; - unsigned long vclk; - unsigned long dclk; - unsigned long sclk_over_drive; - unsigned long mclk_over_drive; - unsigned long sclk_threshold; - unsigned long flag; - unsigned long vclk_ceiling; - unsigned long dclk_ceiling; - unsigned long num_cus; - unsigned long pm_flag; - enum PP_MMProfilingState mm_profiling_state; -}; - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/pp_debug.h b/drivers/gpu/drm/amd/pm/inc/pp_debug.h deleted file mode 100644 index cea65093b6ad..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/pp_debug.h +++ /dev/null @@ -1,62 +0,0 @@ - -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef PP_DEBUG_H -#define PP_DEBUG_H - -#ifdef pr_fmt -#undef pr_fmt -#endif - -#define pr_fmt(fmt) "amdgpu: [powerplay] " fmt - -#include -#include -#include - -#define PP_ASSERT_WITH_CODE(cond, msg, code) \ - do { \ - if (!(cond)) { \ - pr_warn_ratelimited("%s\n", msg); \ - code; \ - } \ - } while (0) - -#define PP_ASSERT(cond, msg) \ - do { \ - if (!(cond)) { \ - pr_warn_ratelimited("%s\n", msg); \ - } \ - } while (0) - -#define PP_DBG_LOG(fmt, ...) 
\ - do { \ - pr_debug(fmt, ##__VA_ARGS__); \ - } while (0) - - -#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n) \ - (type *)((char *)&(ptr)->member + (sizeof(type) * (n))) - -#endif /* PP_DEBUG_H */ - diff --git a/drivers/gpu/drm/amd/pm/inc/pp_endian.h b/drivers/gpu/drm/amd/pm/inc/pp_endian.h deleted file mode 100644 index f49d1963fe85..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/pp_endian.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _PP_ENDIAN_H_ -#define _PP_ENDIAN_H_ - -#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X) -#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X) - -#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X) -#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X) - -#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X)) -#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X)) - -#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X)) - -#endif /* _PP_ENDIAN_H_ */ diff --git a/drivers/gpu/drm/amd/pm/inc/pp_thermal.h b/drivers/gpu/drm/amd/pm/inc/pp_thermal.h deleted file mode 100644 index f7c41185097e..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/pp_thermal.h +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef PP_THERMAL_H -#define PP_THERMAL_H - -#include "power_state.h" - -static const struct PP_TemperatureRange __maybe_unused SMU7ThermalWithDelayPolicy[] = -{ - {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, - { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, -}; - -static const struct PP_TemperatureRange __maybe_unused SMU7ThermalPolicy[] = -{ - {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, - { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, -}; - -#define CTF_OFFSET_EDGE 5 -#define CTF_OFFSET_HOTSPOT 5 -#define CTF_OFFSET_HBM 5 - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h b/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h deleted file mode 100644 index c067e0925b6b..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h +++ /dev/null @@ -1,46 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef _PP_INTERRUPT_H_ -#define _PP_INTERRUPT_H_ - -enum amd_thermal_irq { - AMD_THERMAL_IRQ_LOW_TO_HIGH = 0, - AMD_THERMAL_IRQ_HIGH_TO_LOW, - - AMD_THERMAL_IRQ_LAST -}; - -/* The type of the interrupt callback functions in PowerPlay */ -typedef int (*irq_handler_func_t)(void *private_data, - unsigned src_id, const uint32_t *iv_entry); - -/* Event Manager action chain list information */ -struct pp_interrupt_registration_info { - irq_handler_func_t call_back; /* Pointer to callback function */ - void *context; /* Pointer to callback function context */ - uint32_t src_id; /* Registered interrupt id */ - const uint32_t *iv_entry; -}; - -#endif /* _PP_INTERRUPT_H_ */ diff --git a/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h deleted file mode 100644 index 171f12b82716..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h +++ /dev/null @@ -1,96 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef RAVEN_PP_SMC_H -#define RAVEN_PP_SMC_H - -#pragma pack(push, 1) - -#define PPSMC_Result_OK 0x1 -#define PPSMC_Result_Failed 0xFF -#define PPSMC_Result_UnknownCmd 0xFE -#define PPSMC_Result_CmdRejectedPrereq 0xFD -#define PPSMC_Result_CmdRejectedBusy 0xFC - -#define PPSMC_MSG_TestMessage 0x1 -#define PPSMC_MSG_GetSmuVersion 0x2 -#define PPSMC_MSG_GetDriverIfVersion 0x3 -#define PPSMC_MSG_PowerUpGfx 0x6 -#define PPSMC_MSG_EnableGfxOff 0x7 -#define PPSMC_MSG_DisableGfxOff 0x8 -#define PPSMC_MSG_PowerDownIspByTile 0x9 -#define PPSMC_MSG_PowerUpIspByTile 0xA -#define PPSMC_MSG_PowerDownVcn 0xB -#define PPSMC_MSG_PowerUpVcn 0xC -#define PPSMC_MSG_PowerDownSdma 0xD -#define PPSMC_MSG_PowerUpSdma 0xE -#define PPSMC_MSG_SetHardMinIspclkByFreq 0xF -#define PPSMC_MSG_SetHardMinVcn 0x10 -#define PPSMC_MSG_SetMinDisplayClock 0x11 -#define PPSMC_MSG_SetHardMinFclkByFreq 0x12 -#define PPSMC_MSG_SetAllowFclkSwitch 0x13 -#define PPSMC_MSG_SetMinVideoGfxclkFreq 0x14 -#define PPSMC_MSG_ActiveProcessNotify 0x15 -#define PPSMC_MSG_SetCustomPolicy 0x16 -#define PPSMC_MSG_SetVideoFps 0x17 -#define PPSMC_MSG_SetDisplayCount 0x18 -#define PPSMC_MSG_QueryPowerLimit 0x19 -#define PPSMC_MSG_SetDriverDramAddrHigh 0x1A -#define PPSMC_MSG_SetDriverDramAddrLow 0x1B -#define PPSMC_MSG_TransferTableSmu2Dram 0x1C -#define PPSMC_MSG_TransferTableDram2Smu 0x1D -#define PPSMC_MSG_DeviceDriverReset 0x1E -#define PPSMC_MSG_SetGfxclkOverdriveByFreqVid 0x1F -#define PPSMC_MSG_SetHardMinDcefclkByFreq 0x20 -#define PPSMC_MSG_SetHardMinSocclkByFreq 0x21 -#define PPSMC_MSG_SetMinVddcrSocVoltage 0x22 -#define PPSMC_MSG_SetMinVideoFclkFreq 0x23 -#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x24 -#define PPSMC_MSG_ForcePowerDownGfx 0x25 -#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26 -#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27 -#define PPSMC_MSG_SetSoftMinVcn 0x28 -#define PPSMC_MSG_GetGfxclkFrequency 0x2A -#define PPSMC_MSG_GetFclkFrequency 0x2B -#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C -#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D -#define PPSMC_MSG_SoftReset 0x2E -#define PPSMC_MSG_SetGfxCGPG 0x2F -#define PPSMC_MSG_SetSoftMaxGfxClk 0x30 -#define PPSMC_MSG_SetHardMinGfxClk 0x31 -#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32 -#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33 -#define PPSMC_MSG_SetSoftMaxVcn 0x34 -#define PPSMC_MSG_PowerGateMmHub 0x35 -#define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36 -#define PPSMC_MSG_GpuChangeState 0x37 
-#define PPSMC_MSG_GetGfxBusy 0x3D -#define PPSMC_Message_Count 0x42 - -typedef uint16_t PPSMC_Result; -typedef int PPSMC_Msg; - - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu10.h b/drivers/gpu/drm/amd/pm/inc/smu10.h deleted file mode 100644 index 9e837a5014c5..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu10.h +++ /dev/null @@ -1,188 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef SMU10_H -#define SMU10_H - -#pragma pack(push, 1) - -#define ENABLE_DEBUG_FEATURES - -/* Feature Control Defines */ -#define FEATURE_CCLK_CONTROLLER_BIT 0 -#define FEATURE_FAN_CONTROLLER_BIT 1 -#define FEATURE_DATA_CALCULATION_BIT 2 -#define FEATURE_PPT_BIT 3 -#define FEATURE_TDC_BIT 4 -#define FEATURE_THERMAL_BIT 5 -#define FEATURE_FIT_BIT 6 -#define FEATURE_EDC_BIT 7 -#define FEATURE_PLL_POWER_DOWN_BIT 8 -#define FEATURE_ULV_BIT 9 -#define FEATURE_VDDOFF_BIT 10 -#define FEATURE_VCN_DPM_BIT 11 -#define FEATURE_ACP_DPM_BIT 12 -#define FEATURE_ISP_DPM_BIT 13 -#define FEATURE_FCLK_DPM_BIT 14 -#define FEATURE_SOCCLK_DPM_BIT 15 -#define FEATURE_MP0CLK_DPM_BIT 16 -#define FEATURE_LCLK_DPM_BIT 17 -#define FEATURE_SHUBCLK_DPM_BIT 18 -#define FEATURE_DCEFCLK_DPM_BIT 19 -#define FEATURE_GFX_DPM_BIT 20 -#define FEATURE_DS_GFXCLK_BIT 21 -#define FEATURE_DS_SOCCLK_BIT 22 -#define FEATURE_DS_LCLK_BIT 23 -#define FEATURE_DS_DCEFCLK_BIT 24 -#define FEATURE_DS_SHUBCLK_BIT 25 -#define FEATURE_RM_BIT 26 -#define FEATURE_S0i2_BIT 27 -#define FEATURE_WHISPER_MODE_BIT 28 -#define FEATURE_DS_FCLK_BIT 29 -#define FEATURE_DS_SMNCLK_BIT 30 -#define FEATURE_DS_MP1CLK_BIT 31 -#define FEATURE_DS_MP0CLK_BIT 32 -#define FEATURE_MGCG_BIT 33 -#define FEATURE_DS_FUSE_SRAM_BIT 34 -#define FEATURE_GFX_CKS 35 -#define FEATURE_PSI0_BIT 36 -#define FEATURE_PROCHOT_BIT 37 -#define FEATURE_CPUOFF_BIT 38 -#define FEATURE_STAPM_BIT 39 -#define FEATURE_CORE_CSTATES_BIT 40 -#define FEATURE_SPARE_41_BIT 41 -#define FEATURE_SPARE_42_BIT 42 -#define FEATURE_SPARE_43_BIT 43 -#define FEATURE_SPARE_44_BIT 44 -#define FEATURE_SPARE_45_BIT 45 -#define FEATURE_SPARE_46_BIT 46 -#define FEATURE_SPARE_47_BIT 47 -#define FEATURE_SPARE_48_BIT 48 -#define FEATURE_SPARE_49_BIT 49 -#define FEATURE_SPARE_50_BIT 50 -#define FEATURE_SPARE_51_BIT 51 -#define FEATURE_SPARE_52_BIT 52 -#define FEATURE_SPARE_53_BIT 53 -#define FEATURE_SPARE_54_BIT 54 -#define FEATURE_SPARE_55_BIT 55 -#define FEATURE_SPARE_56_BIT 56 -#define 
FEATURE_SPARE_57_BIT 57 -#define FEATURE_SPARE_58_BIT 58 -#define FEATURE_SPARE_59_BIT 59 -#define FEATURE_SPARE_60_BIT 60 -#define FEATURE_SPARE_61_BIT 61 -#define FEATURE_SPARE_62_BIT 62 -#define FEATURE_SPARE_63_BIT 63 - -#define NUM_FEATURES 64 - -#define FEATURE_CCLK_CONTROLLER_MASK (1 << FEATURE_CCLK_CONTROLLER_BIT) -#define FEATURE_FAN_CONTROLLER_MASK (1 << FEATURE_FAN_CONTROLLER_BIT) -#define FEATURE_DATA_CALCULATION_MASK (1 << FEATURE_DATA_CALCULATION_BIT) -#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT) -#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT) -#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT) -#define FEATURE_FIT_MASK (1 << FEATURE_FIT_BIT) -#define FEATURE_EDC_MASK (1 << FEATURE_EDC_BIT) -#define FEATURE_PLL_POWER_DOWN_MASK (1 << FEATURE_PLL_POWER_DOWN_BIT) -#define FEATURE_ULV_MASK (1 << FEATURE_ULV_BIT) -#define FEATURE_VDDOFF_MASK (1 << FEATURE_VDDOFF_BIT) -#define FEATURE_VCN_DPM_MASK (1 << FEATURE_VCN_DPM_BIT) -#define FEATURE_ACP_DPM_MASK (1 << FEATURE_ACP_DPM_BIT) -#define FEATURE_ISP_DPM_MASK (1 << FEATURE_ISP_DPM_BIT) -#define FEATURE_FCLK_DPM_MASK (1 << FEATURE_FCLK_DPM_BIT) -#define FEATURE_SOCCLK_DPM_MASK (1 << FEATURE_SOCCLK_DPM_BIT) -#define FEATURE_MP0CLK_DPM_MASK (1 << FEATURE_MP0CLK_DPM_BIT) -#define FEATURE_LCLK_DPM_MASK (1 << FEATURE_LCLK_DPM_BIT) -#define FEATURE_SHUBCLK_DPM_MASK (1 << FEATURE_SHUBCLK_DPM_BIT) -#define FEATURE_DCEFCLK_DPM_MASK (1 << FEATURE_DCEFCLK_DPM_BIT) -#define FEATURE_GFX_DPM_MASK (1 << FEATURE_GFX_DPM_BIT) -#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT) -#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT) -#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT) -#define FEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT) -#define FEATURE_DS_SHUBCLK_MASK (1 << FEATURE_DS_SHUBCLK_BIT) -#define FEATURE_RM_MASK (1 << FEATURE_RM_BIT) -#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT) -#define FEATURE_DS_SMNCLK_MASK (1 << FEATURE_DS_SMNCLK_BIT) -#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT) -#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT) -#define FEATURE_MGCG_MASK (1 << FEATURE_MGCG_BIT) -#define FEATURE_DS_FUSE_SRAM_MASK (1 << FEATURE_DS_FUSE_SRAM_BIT) -#define FEATURE_PSI0_MASK (1 << FEATURE_PSI0_BIT) -#define FEATURE_STAPM_MASK (1 << FEATURE_STAPM_BIT) -#define FEATURE_PROCHOT_MASK (1 << FEATURE_PROCHOT_BIT) -#define FEATURE_CPUOFF_MASK (1 << FEATURE_CPUOFF_BIT) -#define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT) - -/* Workload bits */ -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 -#define WORKLOAD_PPLIB_VIDEO_BIT 2 -#define WORKLOAD_PPLIB_VR_BIT 3 -#define WORKLOAD_PPLIB_COMPUTE_BIT 4 -#define WORKLOAD_PPLIB_CUSTOM_BIT 5 -#define WORKLOAD_PPLIB_COUNT 6 - -typedef struct { - /* MP1_EXT_SCRATCH0 */ - uint32_t CurrLevel_ACP : 4; - uint32_t CurrLevel_ISP : 4; - uint32_t CurrLevel_VCN : 4; - uint32_t CurrLevel_LCLK : 4; - uint32_t CurrLevel_MP0CLK : 4; - uint32_t CurrLevel_FCLK : 4; - uint32_t CurrLevel_SOCCLK : 4; - uint32_t CurrLevel_DCEFCLK : 4; - /* MP1_EXT_SCRATCH1 */ - uint32_t TargLevel_ACP : 4; - uint32_t TargLevel_ISP : 4; - uint32_t TargLevel_VCN : 4; - uint32_t TargLevel_LCLK : 4; - uint32_t TargLevel_MP0CLK : 4; - uint32_t TargLevel_FCLK : 4; - uint32_t TargLevel_SOCCLK : 4; - uint32_t TargLevel_DCEFCLK : 4; - /* MP1_EXT_SCRATCH2 */ - uint32_t CurrLevel_SHUBCLK : 4; - uint32_t TargLevel_SHUBCLK : 4; - uint32_t InUlv : 1; - uint32_t InS0i2 : 1; - uint32_t InWhisperMode : 1; - uint32_t Reserved : 21; - /* MP1_EXT_SCRATCH3-4 */ - uint32_t 
Reserved2[2]; - /* MP1_EXT_SCRATCH5 */ - uint32_t FeatureStatus[NUM_FEATURES / 32]; -} FwStatus_t; - -#define TABLE_BIOS_IF 0 /* Called by BIOS */ -#define TABLE_WATERMARKS 1 /* Called by Driver */ -#define TABLE_CUSTOM_DPM 2 /* Called by Driver */ -#define TABLE_PMSTATUSLOG 3 /* Called by Tools for Agm logging */ -#define TABLE_DPMCLOCKS 4 /* Called by Driver */ -#define TABLE_MOMENTARY_PM 5 /* Called by Tools */ -#define TABLE_COUNT 6 - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h deleted file mode 100644 index c498158771cc..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef SMU10_DRIVER_IF_H -#define SMU10_DRIVER_IF_H - -#define SMU10_DRIVER_IF_VERSION 0x6 - -#define NUM_DSPCLK_LEVELS 8 - -typedef struct { - int32_t value; - uint32_t numFractionalBits; -} FloatInIntFormat_t; - -typedef enum { - DSPCLK_DCEFCLK = 0, - DSPCLK_DISPCLK, - DSPCLK_PIXCLK, - DSPCLK_PHYCLK, - DSPCLK_COUNT, -} DSPCLK_e; - -typedef struct { - uint16_t Freq; - uint16_t Vid; -} DisplayClockTable_t; - - -typedef struct { - uint16_t MinClock; /* This is either DCFCLK or SOCCLK (in MHz) */ - uint16_t MaxClock; /* This is either DCFCLK or SOCCLK (in MHz) */ - uint16_t MinMclk; - uint16_t MaxMclk; - - uint8_t WmSetting; - uint8_t WmType; - uint8_t Padding[2]; -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 - -typedef enum { - WM_SOCCLK = 0, - WM_DCFCLK, - WM_COUNT, -} WM_CLOCK_e; - -typedef struct { - WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; - uint32_t MmHubPadding[7]; -} Watermarks_t; - -typedef enum { - CUSTOM_DPM_SETTING_GFXCLK, - CUSTOM_DPM_SETTING_CCLK, - CUSTOM_DPM_SETTING_FCLK_CCX, - CUSTOM_DPM_SETTING_FCLK_GFX, - CUSTOM_DPM_SETTING_FCLK_STALLS, - CUSTOM_DPM_SETTING_LCLK, - CUSTOM_DPM_SETTING_COUNT, -} CUSTOM_DPM_SETTING_e; - -typedef struct { - uint8_t ActiveHystLimit; - uint8_t IdleHystLimit; - uint8_t FPS; - uint8_t MinActiveFreqType; - FloatInIntFormat_t MinActiveFreq; - FloatInIntFormat_t PD_Data_limit; - FloatInIntFormat_t PD_Data_time_constant; - FloatInIntFormat_t PD_Data_error_coeff; - FloatInIntFormat_t PD_Data_error_rate_coeff; -} DpmActivityMonitorCoeffExt_t; - -typedef struct { - DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT]; -} CustomDpmSettings_t; - -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_DCEFCLK_DPM_LEVELS 4 -#define NUM_FCLK_DPM_LEVELS 4 -#define NUM_MEMCLK_DPM_LEVELS 4 - -typedef struct { - uint32_t Freq; /* In MHz */ - uint32_t Vol; /* Millivolts with 2 fractional bits */ -} DpmClock_t; - -typedef struct { - DpmClock_t DcefClocks[NUM_DCEFCLK_DPM_LEVELS]; - DpmClock_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; - DpmClock_t FClocks[NUM_FCLK_DPM_LEVELS]; - DpmClock_t MemClocks[NUM_MEMCLK_DPM_LEVELS]; -} DpmClocks_t; - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h deleted file mode 100644 index fdc6b7a57bc9..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h +++ /dev/null @@ -1,893 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef SMU11_DRIVER_IF_H -#define SMU11_DRIVER_IF_H - -// *** IMPORTANT *** -// SMU TEAM: Always increment the interface version if -// any structure is changed in this file -// Be aware of that the version should be updated in -// smu_v11_0.h, rename is also needed. -// #define SMU11_DRIVER_IF_VERSION 0x13 - -#define PPTABLE_V20_SMU_VERSION 3 - -#define NUM_GFXCLK_DPM_LEVELS 16 -#define NUM_VCLK_DPM_LEVELS 8 -#define NUM_DCLK_DPM_LEVELS 8 -#define NUM_ECLK_DPM_LEVELS 8 -#define NUM_MP0CLK_DPM_LEVELS 2 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_UCLK_DPM_LEVELS 4 -#define NUM_FCLK_DPM_LEVELS 8 -#define NUM_DCEFCLK_DPM_LEVELS 8 -#define NUM_DISPCLK_DPM_LEVELS 8 -#define NUM_PIXCLK_DPM_LEVELS 8 -#define NUM_PHYCLK_DPM_LEVELS 8 -#define NUM_LINK_LEVELS 2 -#define NUM_XGMI_LEVELS 2 - -#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) -#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) -#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) -#define MAX_ECLK_DPM_LEVEL (NUM_ECLK_DPM_LEVELS - 1) -#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) -#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) -#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) -#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1) -#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) -#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) -#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) -#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) -#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) -#define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1) - -#define PPSMC_GeminiModeNone 0 -#define PPSMC_GeminiModeMaster 1 -#define PPSMC_GeminiModeSlave 2 - - -#define FEATURE_DPM_PREFETCHER_BIT 0 -#define FEATURE_DPM_GFXCLK_BIT 1 -#define FEATURE_DPM_UCLK_BIT 2 -#define FEATURE_DPM_SOCCLK_BIT 3 -#define FEATURE_DPM_UVD_BIT 4 -#define FEATURE_DPM_VCE_BIT 5 -#define FEATURE_ULV_BIT 6 -#define FEATURE_DPM_MP0CLK_BIT 7 -#define FEATURE_DPM_LINK_BIT 8 -#define FEATURE_DPM_DCEFCLK_BIT 9 -#define FEATURE_DS_GFXCLK_BIT 10 -#define FEATURE_DS_SOCCLK_BIT 11 -#define FEATURE_DS_LCLK_BIT 12 -#define FEATURE_PPT_BIT 13 -#define FEATURE_TDC_BIT 14 -#define FEATURE_THERMAL_BIT 15 -#define FEATURE_GFX_PER_CU_CG_BIT 16 -#define FEATURE_RM_BIT 17 -#define FEATURE_DS_DCEFCLK_BIT 18 -#define FEATURE_ACDC_BIT 19 -#define FEATURE_VR0HOT_BIT 20 -#define FEATURE_VR1HOT_BIT 21 -#define FEATURE_FW_CTF_BIT 22 -#define FEATURE_LED_DISPLAY_BIT 23 -#define FEATURE_FAN_CONTROL_BIT 24 -#define FEATURE_GFX_EDC_BIT 25 -#define FEATURE_GFXOFF_BIT 26 -#define FEATURE_CG_BIT 27 -#define FEATURE_DPM_FCLK_BIT 28 -#define FEATURE_DS_FCLK_BIT 29 -#define FEATURE_DS_MP1CLK_BIT 30 -#define FEATURE_DS_MP0CLK_BIT 31 -#define FEATURE_XGMI_BIT 32 -#define FEATURE_ECC_BIT 33 -#define FEATURE_SPARE_34_BIT 34 -#define FEATURE_SPARE_35_BIT 35 -#define FEATURE_SPARE_36_BIT 36 -#define FEATURE_SPARE_37_BIT 37 -#define FEATURE_SPARE_38_BIT 38 -#define FEATURE_SPARE_39_BIT 39 -#define FEATURE_SPARE_40_BIT 40 -#define FEATURE_SPARE_41_BIT 41 -#define FEATURE_SPARE_42_BIT 42 -#define FEATURE_SPARE_43_BIT 43 -#define FEATURE_SPARE_44_BIT 44 -#define FEATURE_SPARE_45_BIT 45 -#define FEATURE_SPARE_46_BIT 46 -#define FEATURE_SPARE_47_BIT 47 -#define FEATURE_SPARE_48_BIT 48 -#define FEATURE_SPARE_49_BIT 49 -#define FEATURE_SPARE_50_BIT 50 -#define FEATURE_SPARE_51_BIT 51 -#define FEATURE_SPARE_52_BIT 52 -#define FEATURE_SPARE_53_BIT 53 -#define FEATURE_SPARE_54_BIT 54 -#define FEATURE_SPARE_55_BIT 55 -#define FEATURE_SPARE_56_BIT 56 -#define 
FEATURE_SPARE_57_BIT 57 -#define FEATURE_SPARE_58_BIT 58 -#define FEATURE_SPARE_59_BIT 59 -#define FEATURE_SPARE_60_BIT 60 -#define FEATURE_SPARE_61_BIT 61 -#define FEATURE_SPARE_62_BIT 62 -#define FEATURE_SPARE_63_BIT 63 - -#define NUM_FEATURES 64 - -#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT ) -#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT ) -#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT ) -#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT ) -#define FEATURE_DPM_UVD_MASK (1 << FEATURE_DPM_UVD_BIT ) -#define FEATURE_DPM_VCE_MASK (1 << FEATURE_DPM_VCE_BIT ) -#define FEATURE_ULV_MASK (1 << FEATURE_ULV_BIT ) -#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT ) -#define FEATURE_DPM_LINK_MASK (1 << FEATURE_DPM_LINK_BIT ) -#define FEATURE_DPM_DCEFCLK_MASK (1 << FEATURE_DPM_DCEFCLK_BIT ) -#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT ) -#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) -#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) -#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) -#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) -#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) -#define FEATURE_GFX_PER_CU_CG_MASK (1 << FEATURE_GFX_PER_CU_CG_BIT ) -#define FEATURE_RM_MASK (1 << FEATURE_RM_BIT ) -#define FEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT ) -#define FEATURE_ACDC_MASK (1 << FEATURE_ACDC_BIT ) -#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) -#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) -#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) -#define FEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT ) -#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) -#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT ) -#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT ) -#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT ) -#define FEATURE_DPM_FCLK_MASK (1 << FEATURE_DPM_FCLK_BIT ) -#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) -#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) -#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) -#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT ) -#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT ) - -#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 -#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_SOCCLK 0x00000004 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_UCLK 0x00000008 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000010 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_UCLK 0x00000020 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000040 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_UCLK 0x00000080 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_SOCCLK 0x00000100 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_UCLK 0x00000200 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_SOCCLK 0x00000400 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_UCLK 0x00000800 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00001000 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00002000 -#define DPM_OVERRIDE_ENABLE_GFXOFF_GFXCLK_SWITCH 0x00004000 -#define DPM_OVERRIDE_ENABLE_GFXOFF_SOCCLK_SWITCH 0x00008000 -#define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000 -#define DPM_OVERRIDE_ENABLE_GFXOFF_FCLK_SWITCH 0x00020000 - -#define I2C_CONTROLLER_ENABLED 1 -#define I2C_CONTROLLER_DISABLED 0 - -#define VR_MAPPING_VR_SELECT_MASK 0x01 -#define VR_MAPPING_VR_SELECT_SHIFT 0x00 - -#define VR_MAPPING_PLANE_SELECT_MASK 0x02 -#define 
VR_MAPPING_PLANE_SELECT_SHIFT 0x01 - - -#define PSI_SEL_VR0_PLANE0_PSI0 0x01 -#define PSI_SEL_VR0_PLANE0_PSI1 0x02 -#define PSI_SEL_VR0_PLANE1_PSI0 0x04 -#define PSI_SEL_VR0_PLANE1_PSI1 0x08 -#define PSI_SEL_VR1_PLANE0_PSI0 0x10 -#define PSI_SEL_VR1_PLANE0_PSI1 0x20 -#define PSI_SEL_VR1_PLANE1_PSI0 0x40 -#define PSI_SEL_VR1_PLANE1_PSI1 0x80 - - -#define THROTTLER_STATUS_PADDING_BIT 0 -#define THROTTLER_STATUS_TEMP_EDGE_BIT 1 -#define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2 -#define THROTTLER_STATUS_TEMP_HBM_BIT 3 -#define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4 -#define THROTTLER_STATUS_TEMP_VR_SOC_BIT 5 -#define THROTTLER_STATUS_TEMP_VR_MEM0_BIT 6 -#define THROTTLER_STATUS_TEMP_VR_MEM1_BIT 7 -#define THROTTLER_STATUS_TEMP_LIQUID_BIT 8 -#define THROTTLER_STATUS_TEMP_PLX_BIT 9 -#define THROTTLER_STATUS_TEMP_SKIN_BIT 10 -#define THROTTLER_STATUS_TDC_GFX_BIT 11 -#define THROTTLER_STATUS_TDC_SOC_BIT 12 -#define THROTTLER_STATUS_PPT_BIT 13 -#define THROTTLER_STATUS_FIT_BIT 14 -#define THROTTLER_STATUS_PPM_BIT 15 - - -#define TABLE_TRANSFER_OK 0x0 -#define TABLE_TRANSFER_FAILED 0xFF - - -#define WORKLOAD_DEFAULT_BIT 0 -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 -#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 -#define WORKLOAD_PPLIB_VIDEO_BIT 3 -#define WORKLOAD_PPLIB_VR_BIT 4 -#define WORKLOAD_PPLIB_COMPUTE_BIT 5 -#define WORKLOAD_PPLIB_CUSTOM_BIT 6 -#define WORKLOAD_PPLIB_COUNT 7 - - -#define XGMI_STATE_D0 1 -#define XGMI_STATE_D3 0 - -typedef enum { - I2C_CONTROLLER_PORT_0 = 0, - I2C_CONTROLLER_PORT_1 = 1, -} I2cControllerPort_e; - -typedef enum { - I2C_CONTROLLER_NAME_VR_GFX = 0, - I2C_CONTROLLER_NAME_VR_SOC, - I2C_CONTROLLER_NAME_VR_VDDCI, - I2C_CONTROLLER_NAME_VR_HBM, - I2C_CONTROLLER_NAME_LIQUID_0, - I2C_CONTROLLER_NAME_LIQUID_1, - I2C_CONTROLLER_NAME_PLX, - I2C_CONTROLLER_NAME_COUNT, -} I2cControllerName_e; - -typedef enum { - I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, - I2C_CONTROLLER_THROTTLER_VR_GFX, - I2C_CONTROLLER_THROTTLER_VR_SOC, - I2C_CONTROLLER_THROTTLER_VR_VDDCI, - I2C_CONTROLLER_THROTTLER_VR_HBM, - I2C_CONTROLLER_THROTTLER_LIQUID_0, - I2C_CONTROLLER_THROTTLER_LIQUID_1, - I2C_CONTROLLER_THROTTLER_PLX, -} I2cControllerThrottler_e; - -typedef enum { - I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, - I2C_CONTROLLER_PROTOCOL_VR_IR35217, - I2C_CONTROLLER_PROTOCOL_TMP_TMP102A, - I2C_CONTROLLER_PROTOCOL_SPARE_0, - I2C_CONTROLLER_PROTOCOL_SPARE_1, - I2C_CONTROLLER_PROTOCOL_SPARE_2, -} I2cControllerProtocol_e; - -typedef enum { - I2C_CONTROLLER_SPEED_SLOW = 0, - I2C_CONTROLLER_SPEED_FAST = 1, -} I2cControllerSpeed_e; - -typedef struct { - uint32_t Enabled; - uint32_t SlaveAddress; - uint32_t ControllerPort; - uint32_t ControllerName; - - uint32_t ThermalThrottler; - uint32_t I2cProtocol; - uint32_t I2cSpeed; -} I2cControllerConfig_t; - -typedef struct { - uint32_t a; - uint32_t b; - uint32_t c; -} QuadraticInt_t; - -typedef struct { - uint32_t m; - uint32_t b; -} LinearInt_t; - -typedef struct { - uint32_t a; - uint32_t b; - uint32_t c; -} DroopInt_t; - -typedef enum { - PPCLK_GFXCLK, - PPCLK_VCLK, - PPCLK_DCLK, - PPCLK_ECLK, - PPCLK_SOCCLK, - PPCLK_UCLK, - PPCLK_DCEFCLK, - PPCLK_DISPCLK, - PPCLK_PIXCLK, - PPCLK_PHYCLK, - PPCLK_FCLK, - PPCLK_COUNT, -} PPCLK_e; - -typedef enum { - POWER_SOURCE_AC, - POWER_SOURCE_DC, - POWER_SOURCE_COUNT, -} POWER_SOURCE_e; - -typedef enum { - VOLTAGE_MODE_AVFS = 0, - VOLTAGE_MODE_AVFS_SS, - VOLTAGE_MODE_SS, - VOLTAGE_MODE_COUNT, -} VOLTAGE_MODE_e; - - -typedef enum { - AVFS_VOLTAGE_GFX = 0, - AVFS_VOLTAGE_SOC, - AVFS_VOLTAGE_COUNT, -} AVFS_VOLTAGE_TYPE_e; - - -typedef struct { - 
uint8_t VoltageMode; - uint8_t SnapToDiscrete; - uint8_t NumDiscreteLevels; - uint8_t padding; - LinearInt_t ConversionToAvfsClk; - QuadraticInt_t SsCurve; -} DpmDescriptor_t; - -typedef struct { - uint32_t Version; - - - uint32_t FeaturesToRun[2]; - - - uint16_t SocketPowerLimitAc0; - uint16_t SocketPowerLimitAc0Tau; - uint16_t SocketPowerLimitAc1; - uint16_t SocketPowerLimitAc1Tau; - uint16_t SocketPowerLimitAc2; - uint16_t SocketPowerLimitAc2Tau; - uint16_t SocketPowerLimitAc3; - uint16_t SocketPowerLimitAc3Tau; - uint16_t SocketPowerLimitDc; - uint16_t SocketPowerLimitDcTau; - uint16_t TdcLimitSoc; - uint16_t TdcLimitSocTau; - uint16_t TdcLimitGfx; - uint16_t TdcLimitGfxTau; - - uint16_t TedgeLimit; - uint16_t ThotspotLimit; - uint16_t ThbmLimit; - uint16_t Tvr_gfxLimit; - uint16_t Tvr_memLimit; - uint16_t Tliquid1Limit; - uint16_t Tliquid2Limit; - uint16_t TplxLimit; - uint32_t FitLimit; - - uint16_t PpmPowerLimit; - uint16_t PpmTemperatureThreshold; - - uint8_t MemoryOnPackage; - uint8_t padding8_limits; - uint16_t Tvr_SocLimit; - - uint16_t UlvVoltageOffsetSoc; - uint16_t UlvVoltageOffsetGfx; - - uint8_t UlvSmnclkDid; - uint8_t UlvMp1clkDid; - uint8_t UlvGfxclkBypass; - uint8_t Padding234; - - - uint16_t MinVoltageGfx; - uint16_t MinVoltageSoc; - uint16_t MaxVoltageGfx; - uint16_t MaxVoltageSoc; - - uint16_t LoadLineResistanceGfx; - uint16_t LoadLineResistanceSoc; - - DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; - - uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; - uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; - uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; - uint16_t FreqTableEclk [NUM_ECLK_DPM_LEVELS ]; - uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; - uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; - uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; - uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; - uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; - uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; - uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; - - uint16_t DcModeMaxFreq [PPCLK_COUNT ]; - uint16_t Padding8_Clks; - - uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; - uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; - - - uint16_t GfxclkFidle; - uint16_t GfxclkSlewRate; - uint16_t CksEnableFreq; - uint16_t Padding789; - QuadraticInt_t CksVoltageOffset; - uint8_t Padding567[4]; - uint16_t GfxclkDsMaxFreq; - uint8_t GfxclkSource; - uint8_t Padding456; - - uint8_t LowestUclkReservedForUlv; - uint8_t Padding8_Uclk[3]; - - - uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; - uint8_t PcieLaneCount[NUM_LINK_LEVELS]; - uint16_t LclkFreq[NUM_LINK_LEVELS]; - - - uint16_t EnableTdpm; - uint16_t TdpmHighHystTemperature; - uint16_t TdpmLowHystTemperature; - uint16_t GfxclkFreqHighTempLimit; - - - uint16_t FanStopTemp; - uint16_t FanStartTemp; - - uint16_t FanGainEdge; - uint16_t FanGainHotspot; - uint16_t FanGainLiquid; - uint16_t FanGainVrGfx; - uint16_t FanGainVrSoc; - uint16_t FanGainPlx; - uint16_t FanGainHbm; - uint16_t FanPwmMin; - uint16_t FanAcousticLimitRpm; - uint16_t FanThrottlingRpm; - uint16_t FanMaximumRpm; - uint16_t FanTargetTemperature; - uint16_t FanTargetGfxclk; - uint8_t FanZeroRpmEnable; - uint8_t FanTachEdgePerRev; - - - - int16_t FuzzyFan_ErrorSetDelta; - int16_t FuzzyFan_ErrorRateSetDelta; - int16_t FuzzyFan_PwmSetDelta; - uint16_t FuzzyFan_Reserved; - - - uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; - uint8_t Padding8_Avfs[2]; - - QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; - DroopInt_t dBtcGbGfxCksOn; - DroopInt_t dBtcGbGfxCksOff; - DroopInt_t dBtcGbGfxAfll; - 
DroopInt_t dBtcGbSoc; - LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; - - QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; - - uint16_t DcTol[AVFS_VOLTAGE_COUNT]; - - uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; - uint8_t Padding8_GfxBtc[2]; - - int16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; - uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; - - - uint8_t XgmiLinkSpeed [NUM_XGMI_LEVELS]; - uint8_t XgmiLinkWidth [NUM_XGMI_LEVELS]; - uint16_t XgmiFclkFreq [NUM_XGMI_LEVELS]; - uint16_t XgmiUclkFreq [NUM_XGMI_LEVELS]; - uint16_t XgmiSocclkFreq [NUM_XGMI_LEVELS]; - uint16_t XgmiSocVoltage [NUM_XGMI_LEVELS]; - - uint32_t DebugOverrides; - QuadraticInt_t ReservedEquation0; - QuadraticInt_t ReservedEquation1; - QuadraticInt_t ReservedEquation2; - QuadraticInt_t ReservedEquation3; - - uint16_t MinVoltageUlvGfx; - uint16_t MinVoltageUlvSoc; - - uint16_t MGpuFanBoostLimitRpm; - uint16_t padding16_Fan; - - uint16_t FanGainVrMem0; - uint16_t FanGainVrMem1; - - uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; - - uint32_t Reserved[11]; - - uint32_t Padding32[3]; - - uint16_t MaxVoltageStepGfx; - uint16_t MaxVoltageStepSoc; - - uint8_t VddGfxVrMapping; - uint8_t VddSocVrMapping; - uint8_t VddMem0VrMapping; - uint8_t VddMem1VrMapping; - - uint8_t GfxUlvPhaseSheddingMask; - uint8_t SocUlvPhaseSheddingMask; - uint8_t ExternalSensorPresent; - uint8_t Padding8_V; - - - uint16_t GfxMaxCurrent; - int8_t GfxOffset; - uint8_t Padding_TelemetryGfx; - - uint16_t SocMaxCurrent; - int8_t SocOffset; - uint8_t Padding_TelemetrySoc; - - uint16_t Mem0MaxCurrent; - int8_t Mem0Offset; - uint8_t Padding_TelemetryMem0; - - uint16_t Mem1MaxCurrent; - int8_t Mem1Offset; - uint8_t Padding_TelemetryMem1; - - - uint8_t AcDcGpio; - uint8_t AcDcPolarity; - uint8_t VR0HotGpio; - uint8_t VR0HotPolarity; - - uint8_t VR1HotGpio; - uint8_t VR1HotPolarity; - uint8_t Padding1; - uint8_t Padding2; - - - - uint8_t LedPin0; - uint8_t LedPin1; - uint8_t LedPin2; - uint8_t padding8_4; - - - uint8_t PllGfxclkSpreadEnabled; - uint8_t PllGfxclkSpreadPercent; - uint16_t PllGfxclkSpreadFreq; - - uint8_t UclkSpreadEnabled; - uint8_t UclkSpreadPercent; - uint16_t UclkSpreadFreq; - - uint8_t FclkSpreadEnabled; - uint8_t FclkSpreadPercent; - uint16_t FclkSpreadFreq; - - uint8_t FllGfxclkSpreadEnabled; - uint8_t FllGfxclkSpreadPercent; - uint16_t FllGfxclkSpreadFreq; - - I2cControllerConfig_t I2cControllers[I2C_CONTROLLER_NAME_COUNT]; - - uint32_t BoardReserved[10]; - - - uint32_t MmHubPadding[8]; - -} PPTable_t; - -typedef struct { - - uint16_t GfxclkAverageLpfTau; - uint16_t SocclkAverageLpfTau; - uint16_t UclkAverageLpfTau; - uint16_t GfxActivityLpfTau; - uint16_t UclkActivityLpfTau; - uint16_t SocketPowerLpfTau; - - - uint32_t MmHubPadding[8]; -} DriverSmuConfig_t; - -typedef struct { - - uint16_t GfxclkFmin; - uint16_t GfxclkFmax; - uint16_t GfxclkFreq1; - uint16_t GfxclkVolt1; - uint16_t GfxclkFreq2; - uint16_t GfxclkVolt2; - uint16_t GfxclkFreq3; - uint16_t GfxclkVolt3; - uint16_t UclkFmax; - int16_t OverDrivePct; - uint16_t FanMaximumRpm; - uint16_t FanMinimumPwm; - uint16_t FanTargetTemperature; - uint16_t MaxOpTemp; - uint16_t FanZeroRpmEnable; - uint16_t Padding; - -} OverDriveTable_t; - -typedef struct { - uint16_t CurrClock[PPCLK_COUNT]; - uint16_t AverageGfxclkFrequency; - uint16_t AverageSocclkFrequency; - uint16_t AverageUclkFrequency ; - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; - uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t Padding8 ; - uint16_t CurrSocketPower ; - uint16_t 
TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureHBM ; - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrSoc ; - uint16_t TemperatureVrMem0 ; - uint16_t TemperatureVrMem1 ; - uint16_t TemperatureLiquid ; - uint16_t TemperaturePlx ; - uint32_t ThrottlerStatus ; - - uint8_t LinkDpmLevel; - uint16_t AverageSocketPower; - uint8_t Padding; - - - uint32_t MmHubPadding[7]; -} SmuMetrics_t; - -typedef struct { - uint16_t MinClock; - uint16_t MaxClock; - uint16_t MinUclk; - uint16_t MaxUclk; - - uint8_t WmSetting; - uint8_t Padding[3]; -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 - -typedef enum { - WM_SOCCLK = 0, - WM_DCEFCLK, - WM_COUNT_PP, -} WM_CLOCK_e; - -typedef struct { - - WatermarkRowGeneric_t WatermarkRow[WM_COUNT_PP][NUM_WM_RANGES]; - - uint32_t MmHubPadding[7]; -} Watermarks_t; - -typedef struct { - uint16_t avgPsmCount[45]; - uint16_t minPsmCount[45]; - float avgPsmVoltage[45]; - float minPsmVoltage[45]; - - uint16_t avgScsPsmCount; - uint16_t minScsPsmCount; - float avgScsPsmVoltage; - float minScsPsmVoltage; - - - uint32_t MmHubPadding[6]; -} AvfsDebugTable_t; - -typedef struct { - uint8_t AvfsVersion; - uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; - - uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; - - uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; - - int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; - int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; - int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; - - int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; - int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; - int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; - - int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; - int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; - int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; - - int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; - int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; - int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; - - int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; - int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; - int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; - - uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT]; - uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; - uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; - - uint32_t VInversion[AVFS_VOLTAGE_COUNT]; - - - int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; - int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; - int32_t P2V_b[AVFS_VOLTAGE_COUNT]; - - uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; - - uint32_t EnabledAvfsModules; - - uint32_t MmHubPadding[7]; -} AvfsFuseOverride_t; - -typedef struct { - - uint8_t Gfx_ActiveHystLimit; - uint8_t Gfx_IdleHystLimit; - uint8_t Gfx_FPS; - uint8_t Gfx_MinActiveFreqType; - uint8_t Gfx_BoosterFreqType; - uint8_t Gfx_UseRlcBusy; - uint16_t Gfx_MinActiveFreq; - uint16_t Gfx_BoosterFreq; - uint16_t Gfx_PD_Data_time_constant; - uint32_t Gfx_PD_Data_limit_a; - uint32_t Gfx_PD_Data_limit_b; - uint32_t Gfx_PD_Data_limit_c; - uint32_t Gfx_PD_Data_error_coeff; - uint32_t Gfx_PD_Data_error_rate_coeff; - - uint8_t Soc_ActiveHystLimit; - uint8_t Soc_IdleHystLimit; - uint8_t Soc_FPS; - uint8_t Soc_MinActiveFreqType; - uint8_t Soc_BoosterFreqType; - uint8_t Soc_UseRlcBusy; - uint16_t Soc_MinActiveFreq; - uint16_t Soc_BoosterFreq; - uint16_t Soc_PD_Data_time_constant; - uint32_t Soc_PD_Data_limit_a; - uint32_t Soc_PD_Data_limit_b; - uint32_t Soc_PD_Data_limit_c; - uint32_t Soc_PD_Data_error_coeff; - uint32_t Soc_PD_Data_error_rate_coeff; - - uint8_t Mem_ActiveHystLimit; - uint8_t Mem_IdleHystLimit; - uint8_t Mem_FPS; - uint8_t Mem_MinActiveFreqType; - uint8_t 
Mem_BoosterFreqType; - uint8_t Mem_UseRlcBusy; - uint16_t Mem_MinActiveFreq; - uint16_t Mem_BoosterFreq; - uint16_t Mem_PD_Data_time_constant; - uint32_t Mem_PD_Data_limit_a; - uint32_t Mem_PD_Data_limit_b; - uint32_t Mem_PD_Data_limit_c; - uint32_t Mem_PD_Data_error_coeff; - uint32_t Mem_PD_Data_error_rate_coeff; - - uint8_t Fclk_ActiveHystLimit; - uint8_t Fclk_IdleHystLimit; - uint8_t Fclk_FPS; - uint8_t Fclk_MinActiveFreqType; - uint8_t Fclk_BoosterFreqType; - uint8_t Fclk_UseRlcBusy; - uint16_t Fclk_MinActiveFreq; - uint16_t Fclk_BoosterFreq; - uint16_t Fclk_PD_Data_time_constant; - uint32_t Fclk_PD_Data_limit_a; - uint32_t Fclk_PD_Data_limit_b; - uint32_t Fclk_PD_Data_limit_c; - uint32_t Fclk_PD_Data_error_coeff; - uint32_t Fclk_PD_Data_error_rate_coeff; - -} DpmActivityMonitorCoeffInt_t; - -#define TABLE_PPTABLE 0 -#define TABLE_WATERMARKS 1 -#define TABLE_AVFS 2 -#define TABLE_AVFS_PSM_DEBUG 3 -#define TABLE_AVFS_FUSE_OVERRIDE 4 -#define TABLE_PMSTATUSLOG 5 -#define TABLE_SMU_METRICS 6 -#define TABLE_DRIVER_SMU_CONFIG 7 -#define TABLE_ACTIVITY_MONITOR_COEFF 8 -#define TABLE_OVERDRIVE 9 -#define TABLE_COUNT 10 - - -#define UCLK_SWITCH_SLOW 0 -#define UCLK_SWITCH_FAST 1 - - -#define SQ_Enable_MASK 0x1 -#define SQ_IR_MASK 0x2 -#define SQ_PCC_MASK 0x4 -#define SQ_EDC_MASK 0x8 - -#define TCP_Enable_MASK 0x100 -#define TCP_IR_MASK 0x200 -#define TCP_PCC_MASK 0x400 -#define TCP_EDC_MASK 0x800 - -#define TD_Enable_MASK 0x10000 -#define TD_IR_MASK 0x20000 -#define TD_PCC_MASK 0x40000 -#define TD_EDC_MASK 0x80000 - -#define DB_Enable_MASK 0x1000000 -#define DB_IR_MASK 0x2000000 -#define DB_PCC_MASK 0x4000000 -#define DB_EDC_MASK 0x8000000 - -#define SQ_Enable_SHIFT 0 -#define SQ_IR_SHIFT 1 -#define SQ_PCC_SHIFT 2 -#define SQ_EDC_SHIFT 3 - -#define TCP_Enable_SHIFT 8 -#define TCP_IR_SHIFT 9 -#define TCP_PCC_SHIFT 10 -#define TCP_EDC_SHIFT 11 - -#define TD_Enable_SHIFT 16 -#define TD_IR_SHIFT 17 -#define TD_PCC_SHIFT 18 -#define TD_EDC_SHIFT 19 - -#define DB_Enable_SHIFT 24 -#define DB_IR_SHIFT 25 -#define DB_PCC_SHIFT 26 -#define DB_EDC_SHIFT 27 - -#define REMOVE_FMAX_MARGIN_BIT 0x0 -#define REMOVE_DCTOL_MARGIN_BIT 0x1 -#define REMOVE_PLATFORM_MARGIN_BIT 0x2 - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h deleted file mode 100644 index 43d43d6addc0..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h +++ /dev/null @@ -1,931 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef SMU11_DRIVER_IF_ARCTURUS_H -#define SMU11_DRIVER_IF_ARCTURUS_H - -// *** IMPORTANT *** -// SMU TEAM: Always increment the interface version if -// any structure is changed in this file -//#define SMU11_DRIVER_IF_VERSION 0x09 - -#define PPTABLE_ARCTURUS_SMU_VERSION 4 - -#define NUM_GFXCLK_DPM_LEVELS 16 -#define NUM_VCLK_DPM_LEVELS 8 -#define NUM_DCLK_DPM_LEVELS 8 -#define NUM_MP0CLK_DPM_LEVELS 2 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_UCLK_DPM_LEVELS 4 -#define NUM_FCLK_DPM_LEVELS 8 -#define NUM_XGMI_LEVELS 2 -#define NUM_XGMI_PSTATE_LEVELS 4 - -#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) -#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) -#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) -#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) -#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) -#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) -#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1) -#define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1) -#define MAX_XGMI_PSTATE_LEVEL (NUM_XGMI_PSTATE_LEVELS - 1) - -// Feature Control Defines -// DPM -#define FEATURE_DPM_PREFETCHER_BIT 0 -#define FEATURE_DPM_GFXCLK_BIT 1 -#define FEATURE_DPM_UCLK_BIT 2 -#define FEATURE_DPM_SOCCLK_BIT 3 -#define FEATURE_DPM_FCLK_BIT 4 -#define FEATURE_DPM_MP0CLK_BIT 5 -#define FEATURE_DPM_XGMI_BIT 6 -// Idle -#define FEATURE_DS_GFXCLK_BIT 7 -#define FEATURE_DS_SOCCLK_BIT 8 -#define FEATURE_DS_LCLK_BIT 9 -#define FEATURE_DS_FCLK_BIT 10 -#define FEATURE_DS_UCLK_BIT 11 -#define FEATURE_GFX_ULV_BIT 12 -#define FEATURE_DPM_VCN_BIT 13 -#define FEATURE_RSMU_SMN_CG_BIT 14 -#define FEATURE_WAFL_CG_BIT 15 -// Throttler/Response -#define FEATURE_PPT_BIT 16 -#define FEATURE_TDC_BIT 17 -#define FEATURE_APCC_PLUS_BIT 18 -#define FEATURE_VR0HOT_BIT 19 -#define FEATURE_VR1HOT_BIT 20 -#define FEATURE_FW_CTF_BIT 21 -#define FEATURE_FAN_CONTROL_BIT 22 -#define FEATURE_THERMAL_BIT 23 -// Other -#define FEATURE_OUT_OF_BAND_MONITOR_BIT 24 -#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25 -#define FEATURE_PER_PART_VMIN_BIT 26 - -#define FEATURE_SPARE_27_BIT 27 -#define FEATURE_SPARE_28_BIT 28 -#define FEATURE_SPARE_29_BIT 29 -#define FEATURE_SPARE_30_BIT 30 -#define FEATURE_SPARE_31_BIT 31 -#define FEATURE_SPARE_32_BIT 32 -#define FEATURE_SPARE_33_BIT 33 -#define FEATURE_SPARE_34_BIT 34 -#define FEATURE_SPARE_35_BIT 35 -#define FEATURE_SPARE_36_BIT 36 -#define FEATURE_SPARE_37_BIT 37 -#define FEATURE_SPARE_38_BIT 38 -#define FEATURE_SPARE_39_BIT 39 -#define FEATURE_SPARE_40_BIT 40 -#define FEATURE_SPARE_41_BIT 41 -#define FEATURE_SPARE_42_BIT 42 -#define FEATURE_SPARE_43_BIT 43 -#define FEATURE_SPARE_44_BIT 44 -#define FEATURE_SPARE_45_BIT 45 -#define FEATURE_SPARE_46_BIT 46 -#define FEATURE_SPARE_47_BIT 47 -#define FEATURE_SPARE_48_BIT 48 -#define FEATURE_SPARE_49_BIT 49 -#define FEATURE_SPARE_50_BIT 50 -#define FEATURE_SPARE_51_BIT 51 -#define FEATURE_SPARE_52_BIT 52 -#define FEATURE_SPARE_53_BIT 53 -#define FEATURE_SPARE_54_BIT 54 -#define FEATURE_SPARE_55_BIT 55 -#define FEATURE_SPARE_56_BIT 56 -#define FEATURE_SPARE_57_BIT 57 -#define FEATURE_SPARE_58_BIT 58 -#define FEATURE_SPARE_59_BIT 59 -#define FEATURE_SPARE_60_BIT 60 -#define FEATURE_SPARE_61_BIT 61 -#define FEATURE_SPARE_62_BIT 62 -#define 
FEATURE_SPARE_63_BIT 63 - -#define NUM_FEATURES 64 - - -#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT ) -#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT ) -#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT ) -#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT ) -#define FEATURE_DPM_FCLK_MASK (1 << FEATURE_DPM_FCLK_BIT ) -#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT ) -#define FEATURE_DPM_XGMI_MASK (1 << FEATURE_DPM_XGMI_BIT ) - -#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT ) -#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) -#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) -#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) -#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT ) -#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT ) -#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT ) -#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT ) -#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT ) - -#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) -#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) -#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT ) -#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) -#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) -#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) -#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) -#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) - -#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT ) -#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT ) -#define FEATURE_PER_PART_VMIN_MASK (1 << FEATURE_PER_PART_VMIN_BIT ) - - -//FIXME need updating -// Debug Overrides Bitmask -#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001 -#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002 - -// I2C Config Bit Defines -#define I2C_CONTROLLER_ENABLED 1 -#define I2C_CONTROLLER_DISABLED 0 - -// VR Mapping Bit Defines -#define VR_MAPPING_VR_SELECT_MASK 0x01 -#define VR_MAPPING_VR_SELECT_SHIFT 0x00 - -#define VR_MAPPING_PLANE_SELECT_MASK 0x02 -#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 - -// PSI Bit Defines -#define PSI_SEL_VR0_PLANE0_PSI0 0x01 -#define PSI_SEL_VR0_PLANE0_PSI1 0x02 -#define PSI_SEL_VR0_PLANE1_PSI0 0x04 -#define PSI_SEL_VR0_PLANE1_PSI1 0x08 -#define PSI_SEL_VR1_PLANE0_PSI0 0x10 -#define PSI_SEL_VR1_PLANE0_PSI1 0x20 -#define PSI_SEL_VR1_PLANE1_PSI0 0x40 -#define PSI_SEL_VR1_PLANE1_PSI1 0x80 - -// Throttler Control/Status Bits -#define THROTTLER_PADDING_BIT 0 -#define THROTTLER_TEMP_EDGE_BIT 1 -#define THROTTLER_TEMP_HOTSPOT_BIT 2 -#define THROTTLER_TEMP_MEM_BIT 3 -#define THROTTLER_TEMP_VR_GFX_BIT 4 -#define THROTTLER_TEMP_VR_MEM_BIT 5 -#define THROTTLER_TEMP_VR_SOC_BIT 6 -#define THROTTLER_TDC_GFX_BIT 7 -#define THROTTLER_TDC_SOC_BIT 8 -#define THROTTLER_PPT0_BIT 9 -#define THROTTLER_PPT1_BIT 10 -#define THROTTLER_PPT2_BIT 11 -#define THROTTLER_PPT3_BIT 12 -#define THROTTLER_PPM_BIT 13 -#define THROTTLER_FIT_BIT 14 -#define THROTTLER_APCC_BIT 15 -#define THROTTLER_VRHOT0_BIT 16 -#define THROTTLER_VRHOT1_BIT 17 - -// Table transfer status -#define TABLE_TRANSFER_OK 0x0 -#define TABLE_TRANSFER_FAILED 0xFF -#define TABLE_TRANSFER_PENDING 0xAB - -// Workload bits -#define WORKLOAD_PPLIB_DEFAULT_BIT 0 -#define WORKLOAD_PPLIB_POWER_SAVING_BIT 1 -#define WORKLOAD_PPLIB_VIDEO_BIT 2 -#define WORKLOAD_PPLIB_COMPUTE_BIT 3 -#define WORKLOAD_PPLIB_CUSTOM_BIT 4 -#define WORKLOAD_PPLIB_COUNT 5 - -//XGMI performance 
states -#define XGMI_STATE_D0 1 -#define XGMI_STATE_D3 0 - -#define NUM_I2C_CONTROLLERS 8 - -#define I2C_CONTROLLER_ENABLED 1 -#define I2C_CONTROLLER_DISABLED 0 - -#define MAX_SW_I2C_COMMANDS 8 - -typedef enum { - I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 - I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 - I2C_CONTROLLER_PORT_COUNT, -} I2cControllerPort_e; - -typedef enum { - I2C_CONTROLLER_NAME_VR_GFX = 0, - I2C_CONTROLLER_NAME_VR_SOC, - I2C_CONTROLLER_NAME_VR_MEM, - I2C_CONTROLLER_NAME_SPARE, - I2C_CONTROLLER_NAME_COUNT, -} I2cControllerName_e; - -typedef enum { - I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, - I2C_CONTROLLER_THROTTLER_VR_GFX, - I2C_CONTROLLER_THROTTLER_VR_SOC, - I2C_CONTROLLER_THROTTLER_VR_MEM, - I2C_CONTROLLER_THROTTLER_COUNT, -} I2cControllerThrottler_e; - -typedef enum { - I2C_CONTROLLER_PROTOCOL_VR_0, - I2C_CONTROLLER_PROTOCOL_VR_1, - I2C_CONTROLLER_PROTOCOL_TMP_0, - I2C_CONTROLLER_PROTOCOL_TMP_1, - I2C_CONTROLLER_PROTOCOL_SPARE_0, - I2C_CONTROLLER_PROTOCOL_SPARE_1, - I2C_CONTROLLER_PROTOCOL_COUNT, -} I2cControllerProtocol_e; - -typedef struct { - uint8_t Enabled; - uint8_t Speed; - uint8_t Padding[2]; - uint32_t SlaveAddress; - uint8_t ControllerPort; - uint8_t ControllerName; - uint8_t ThermalThrotter; - uint8_t I2cProtocol; -} I2cControllerConfig_t; - -typedef enum { - I2C_PORT_SVD_SCL = 0, - I2C_PORT_GPIO, -} I2cPort_e; - -typedef enum { - I2C_SPEED_FAST_50K = 0, //50 Kbits/s - I2C_SPEED_FAST_100K, //100 Kbits/s - I2C_SPEED_FAST_400K, //400 Kbits/s - I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) - I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) - I2C_SPEED_HIGH_2M, //2.3 Mbits/s - I2C_SPEED_COUNT, -} I2cSpeed_e; - -typedef enum { - I2C_CMD_READ = 0, - I2C_CMD_WRITE, - I2C_CMD_COUNT, -} I2cCmdType_e; - -#define CMDCONFIG_STOP_BIT 0 -#define CMDCONFIG_RESTART_BIT 1 - -#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) -#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) - -typedef struct { - uint8_t RegisterAddr; ////only valid for write, ignored for read - uint8_t Cmd; //Read(0) or Write(1) - uint8_t Data; //Return data for read. 
Data to send for write - uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command -} SwI2cCmd_t; //SW I2C Command Table - -typedef struct { - uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) - uint8_t I2CSpeed; //Slow(0) or Fast(1) - uint16_t SlaveAddress; - uint8_t NumCmds; //Number of commands - uint8_t Padding[3]; - - SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; - - uint32_t MmHubPadding[8]; // SMU internal use - -} SwI2cRequest_t; // SW I2C Request Table - -//D3HOT sequences -typedef enum { - BACO_SEQUENCE, - MSR_SEQUENCE, - BAMACO_SEQUENCE, - ULPS_SEQUENCE, - D3HOT_SEQUENCE_COUNT, -}D3HOTSequence_e; - -//THis is aligned with RSMU PGFSM Register Mapping -typedef enum { - PG_DYNAMIC_MODE = 0, - PG_STATIC_MODE, -} PowerGatingMode_e; - -//This is aligned with RSMU PGFSM Register Mapping -typedef enum { - PG_POWER_DOWN = 0, - PG_POWER_UP, -} PowerGatingSettings_e; - -typedef struct { - uint32_t a; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable - uint32_t c; // store in IEEE float format in this variable -} QuadraticInt_t; - -typedef struct { - uint32_t m; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable -} LinearInt_t; - -typedef struct { - uint32_t a; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable - uint32_t c; // store in IEEE float format in this variable -} DroopInt_t; - -typedef enum { - GFXCLK_SOURCE_PLL = 0, - GFXCLK_SOURCE_AFLL, - GFXCLK_SOURCE_COUNT, -} GfxclkSrc_e; - -typedef enum { - PPCLK_GFXCLK, - PPCLK_VCLK, - PPCLK_DCLK, - PPCLK_SOCCLK, - PPCLK_UCLK, - PPCLK_FCLK, - PPCLK_COUNT, -} PPCLK_e; - -typedef enum { - POWER_SOURCE_AC, - POWER_SOURCE_DC, - POWER_SOURCE_COUNT, -} POWER_SOURCE_e; - -typedef enum { - TEMP_EDGE, - TEMP_HOTSPOT, - TEMP_MEM, - TEMP_VR_GFX, - TEMP_VR_SOC, - TEMP_VR_MEM, - TEMP_COUNT -} TEMP_TYPE_e; - -typedef enum { - PPT_THROTTLER_PPT0, - PPT_THROTTLER_PPT1, - PPT_THROTTLER_PPT2, - PPT_THROTTLER_PPT3, - PPT_THROTTLER_COUNT -} PPT_THROTTLER_e; - -typedef enum { - VOLTAGE_MODE_AVFS = 0, - VOLTAGE_MODE_AVFS_SS, - VOLTAGE_MODE_SS, - VOLTAGE_MODE_COUNT, -} VOLTAGE_MODE_e; - -typedef enum { - AVFS_VOLTAGE_GFX = 0, - AVFS_VOLTAGE_SOC, - AVFS_VOLTAGE_COUNT, -} AVFS_VOLTAGE_TYPE_e; - -typedef enum { - GPIO_INT_POLARITY_ACTIVE_LOW = 0, - GPIO_INT_POLARITY_ACTIVE_HIGH, -} GpioIntPolarity_e; - -typedef enum { - MEMORY_TYPE_GDDR6 = 0, - MEMORY_TYPE_HBM, -} MemoryType_e; - -typedef enum { - PWR_CONFIG_TDP = 0, - PWR_CONFIG_TGP, - PWR_CONFIG_TCP_ESTIMATED, - PWR_CONFIG_TCP_MEASURED, -} PwrConfig_e; - -typedef enum { - XGMI_LINK_RATE_2 = 2, // 2Gbps - XGMI_LINK_RATE_4 = 4, // 4Gbps - XGMI_LINK_RATE_8 = 8, // 8Gbps - XGMI_LINK_RATE_12 = 12, // 12Gbps - XGMI_LINK_RATE_16 = 16, // 16Gbps - XGMI_LINK_RATE_17 = 17, // 17Gbps - XGMI_LINK_RATE_18 = 18, // 18Gbps - XGMI_LINK_RATE_19 = 19, // 19Gbps - XGMI_LINK_RATE_20 = 20, // 20Gbps - XGMI_LINK_RATE_21 = 21, // 21Gbps - XGMI_LINK_RATE_22 = 22, // 22Gbps - XGMI_LINK_RATE_23 = 23, // 23Gbps - XGMI_LINK_RATE_24 = 24, // 24Gbps - XGMI_LINK_RATE_25 = 25, // 25Gbps - XGMI_LINK_RATE_COUNT -} XGMI_LINK_RATE_e; - -typedef enum { - XGMI_LINK_WIDTH_1 = 1, // x1 - XGMI_LINK_WIDTH_2 = 2, // x2 - XGMI_LINK_WIDTH_4 = 4, // x4 - XGMI_LINK_WIDTH_8 = 8, // x8 - XGMI_LINK_WIDTH_9 = 9, // x9 - XGMI_LINK_WIDTH_16 = 16, // x16 - XGMI_LINK_WIDTH_COUNT -} XGMI_LINK_WIDTH_e; - -typedef struct { - uint8_t VoltageMode; // 0 - AVFS 
only, 1- min(AVFS,SS), 2-SS only - uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM - uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used - uint8_t padding; - LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) - QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) - uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin - uint16_t Padding16; -} DpmDescriptor_t; - -typedef struct { - uint32_t Version; - - // SECTION: Feature Enablement - uint32_t FeaturesToRun[2]; - - // SECTION: Infrastructure Limits - uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; - uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; - uint16_t TdcLimitSoc; // Amps - uint16_t TdcLimitSocTau; // Time constant of LPF in ms - uint16_t TdcLimitGfx; // Amps - uint16_t TdcLimitGfxTau; // Time constant of LPF in ms - - uint16_t TedgeLimit; // Celcius - uint16_t ThotspotLimit; // Celcius - uint16_t TmemLimit; // Celcius - uint16_t Tvr_gfxLimit; // Celcius - uint16_t Tvr_memLimit; // Celcius - uint16_t Tvr_socLimit; // Celcius - uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) - - uint16_t PpmPowerLimit; // Switch this this power limit when temperature is above PpmTempThreshold - uint16_t PpmTemperatureThreshold; - - // SECTION: Throttler settings - uint32_t ThrottlerControlMask; // See Throtter masks defines - - // SECTION: ULV Settings - uint16_t UlvVoltageOffsetGfx; // In mV(Q2) - uint16_t UlvPadding; // Padding - - uint8_t UlvGfxclkBypass; // 1 to turn off/bypass Gfxclk during ULV, 0 to leave Gfxclk on during ULV - uint8_t Padding234[3]; - - // SECTION: Voltage Control Parameters - uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX - uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC - uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX - uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC - - uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits - uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits - - //SECTION: DPM Config 1 - DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; - - uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz - - uint32_t Paddingclks[16]; - - // SECTION: DPM Config 2 - uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz - uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) - - // GFXCLK DPM - uint16_t GfxclkFidle; // In MHz - uint16_t GfxclkSlewRate; // for PLL babystepping??? - uint8_t Padding567[4]; - uint16_t GfxclkDsMaxFreq; // In MHz - uint8_t GfxclkSource; // 0 = PLL, 1 = AFLL - uint8_t Padding456; - - // GFXCLK Thermal DPM (formerly 'Boost' Settings) - uint16_t EnableTdpm; - uint16_t TdpmHighHystTemperature; - uint16_t TdpmLowHystTemperature; - uint16_t GfxclkFreqHighTempLimit; // High limit on GFXCLK when temperature is high, for reliability. 
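The FEATURE_*_BIT defines above cover 64 flags (NUM_FEATURES 64), while PPTable_t carries them as FeaturesToRun[2], i.e. two 32-bit words. A minimal sketch (not from the deleted header or the driver) of testing a flag on the consumer side, assuming the usual low-word-first packing; the helper name feature_enabled is illustrative only:

#include <stdbool.h>
#include <stdint.h>

/* Assumes bit N of the 64 feature flags lives in word N / 32, position N % 32. */
static bool feature_enabled(const uint32_t features_to_run[2], unsigned int bit)
{
	return (features_to_run[bit / 32] >> (bit % 32)) & 1u;
}

/* e.g. feature_enabled(pptable->FeaturesToRun, FEATURE_DPM_GFXCLK_BIT) */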
- - // SECTION: Fan Control - uint16_t FanStopTemp; //Celcius - uint16_t FanStartTemp; //Celcius - - uint16_t FanGainEdge; - uint16_t FanGainHotspot; - uint16_t FanGainVrGfx; - uint16_t FanGainVrSoc; - uint16_t FanGainVrMem; - uint16_t FanGainHbm; - uint16_t FanPwmMin; - uint16_t FanAcousticLimitRpm; - uint16_t FanThrottlingRpm; - uint16_t FanMaximumRpm; - uint16_t FanTargetTemperature; - uint16_t FanTargetGfxclk; - uint8_t FanZeroRpmEnable; - uint8_t FanTachEdgePerRev; - uint8_t FanTempInputSelect; - uint8_t padding8_Fan; - - // The following are AFC override parameters. Leave at 0 to use FW defaults. - int16_t FuzzyFan_ErrorSetDelta; - int16_t FuzzyFan_ErrorRateSetDelta; - int16_t FuzzyFan_PwmSetDelta; - uint16_t FuzzyFan_Reserved; - - - // SECTION: AVFS - // Overrides - uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; - uint8_t Padding8_Avfs[2]; - - QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve - DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb - DroopInt_t dBtcGbGfxAfll; // GHz->V BtcGb - DroopInt_t dBtcGbSoc; // GHz->V BtcGb - LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V - - QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V - - uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 - - uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; - uint8_t Padding8_GfxBtc[2]; - - uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 - uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 - - uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2 - - // SECTION: XGMI - uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3. - uint8_t XgmiDpmSpare[2]; - - // Temperature Dependent Vmin - uint16_t VDDGFX_TVmin; //Celcius - uint16_t VDDSOC_TVmin; //Celcius - uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 - uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 - uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 - uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 - - uint16_t VDDGFX_TVminHystersis; // Celcius - uint16_t VDDSOC_TVminHystersis; // Celcius - - - // SECTION: Advanced Options - uint32_t DebugOverrides; - QuadraticInt_t ReservedEquation0; - QuadraticInt_t ReservedEquation1; - QuadraticInt_t ReservedEquation2; - QuadraticInt_t ReservedEquation3; - - uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode - uint16_t PaddingUlv; // Padding - - // Total Power configuration, use defines from PwrConfig_e - uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured - uint8_t TotalPowerSpare1; - uint16_t TotalPowerSpare2; - - // APCC Settings - uint16_t PccThresholdLow; - uint16_t PccThresholdHigh; - uint32_t PaddingAPCC[6]; //FIXME pending SPEC - - // OOB Settings - uint16_t BasePerformanceCardPower; - uint16_t MaxPerformanceCardPower; - uint16_t BasePerformanceFrequencyCap; //In Mhz - uint16_t MaxPerformanceFrequencyCap; //In Mhz - - // Per-Part Vmin - uint16_t VDDGFX_VminLow; // mv Q2 - uint16_t VDDGFX_TVminLow; //Celcius - uint16_t VDDGFX_VminLow_HiTemp; // mv Q2 - uint16_t VDDGFX_VminLow_LoTemp; // mv Q2 - - // SECTION: Reserved - uint32_t Reserved[7]; - - // SECTION: BOARD PARAMETERS - - // SVI2 Board Parameters - uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. - uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. 
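Several of the PPTable_t fields above are fixed-point: voltages are "In mV(Q2)" (millivolts with 2 fractional bits) and the load-line resistances are in mOhms with 8 fractional bits. A minimal decoding sketch, with illustrative helper names that are not part of the header:

#include <stdint.h>

/* mV(Q2), 2 fractional bits -> whole millivolts (truncating). */
static inline unsigned int mv_q2_to_mv(uint16_t v)
{
	return v >> 2;
}

/* mOhm with 8 fractional bits -> micro-ohms. */
static inline unsigned int mohm_q8_to_uohm(uint16_t r)
{
	return ((unsigned int)r * 1000u) >> 8;
}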
- - uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields - uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields - uint8_t VddMemVrMapping; // Use VR_MAPPING* bitfields - uint8_t BoardVrMapping; // Use VR_MAPPING* bitfields - - uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN) - uint8_t Padding8_V[2]; - - // Telemetry Settings - uint16_t GfxMaxCurrent; // in Amps - int8_t GfxOffset; // in Amps - uint8_t Padding_TelemetryGfx; - - uint16_t SocMaxCurrent; // in Amps - int8_t SocOffset; // in Amps - uint8_t Padding_TelemetrySoc; - - uint16_t MemMaxCurrent; // in Amps - int8_t MemOffset; // in Amps - uint8_t Padding_TelemetryMem; - - uint16_t BoardMaxCurrent; // in Amps - int8_t BoardOffset; // in Amps - uint8_t Padding_TelemetryBoardInput; - - // GPIO Settings - uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event - uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event - uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event - uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event - - // GFXCLK PLL Spread Spectrum - uint8_t PllGfxclkSpreadEnabled; // on or off - uint8_t PllGfxclkSpreadPercent; // Q4.4 - uint16_t PllGfxclkSpreadFreq; // kHz - - // UCLK Spread Spectrum - uint8_t UclkSpreadEnabled; // on or off - uint8_t UclkSpreadPercent; // Q4.4 - uint16_t UclkSpreadFreq; // kHz - - // FCLK Spread Spectrum - uint8_t FclkSpreadEnabled; // on or off - uint8_t FclkSpreadPercent; // Q4.4 - uint16_t FclkSpreadFreq; // kHz - - // GFXCLK Fll Spread Spectrum - uint8_t FllGfxclkSpreadEnabled; // on or off - uint8_t FllGfxclkSpreadPercent; // Q4.4 - uint16_t FllGfxclkSpreadFreq; // kHz - - // I2C Controller Structure - I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; - - // Memory section - uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask. - - uint8_t DramBitWidth; // For DRAM use only. 
See Dram Bit width type defines - uint8_t PaddingMem[3]; - - // Total board power - uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power - uint16_t BoardPadding; - - // SECTION: XGMI Training - uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS]; - uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS]; - - uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS]; - uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS]; - - // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence - uint8_t GpioI2cScl; // Serial Clock - uint8_t GpioI2cSda; // Serial Data - uint16_t GpioPadding; - - // Platform input telemetry voltage coefficient - uint32_t BoardVoltageCoeffA; // decode by /1000 - uint32_t BoardVoltageCoeffB; // decode by /1000 - - uint32_t BoardReserved[7]; - - // Padding for MMHUB - do not modify this - uint32_t MmHubPadding[8]; // SMU internal use - -} PPTable_t; - -typedef struct { - // Time constant parameters for clock averages in ms - uint16_t GfxclkAverageLpfTau; - uint16_t SocclkAverageLpfTau; - uint16_t UclkAverageLpfTau; - uint16_t GfxActivityLpfTau; - uint16_t UclkActivityLpfTau; - - uint16_t SocketPowerLpfTau; - - uint16_t VcnClkAverageLpfTau; - uint16_t padding16; - - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} DriverSmuConfig_t; - -typedef struct { - uint16_t CurrClock[PPCLK_COUNT]; - uint16_t AverageGfxclkFrequency; - uint16_t AverageSocclkFrequency; - uint16_t AverageUclkFrequency ; - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; - uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t Padding8 ; - uint16_t AverageSocketPower ; - uint16_t TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureHBM ; - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrSoc ; - uint16_t TemperatureVrMem ; - uint32_t ThrottlerStatus ; - - uint16_t CurrFanSpeed ; - uint16_t AverageVclkFrequency ; - uint16_t AverageDclkFrequency ; - uint16_t VcnActivityPercentage ; - uint32_t EnergyAccumulator ; - - uint32_t Padding[2]; - - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} SmuMetrics_t; - - -typedef struct { - uint16_t avgPsmCount[75]; - uint16_t minPsmCount[75]; - float avgPsmVoltage[75]; - float minPsmVoltage[75]; - - uint32_t MmHubPadding[8]; // SMU internal use -} AvfsDebugTable_t; - -typedef struct { - uint8_t AvfsVersion; - uint8_t Padding; - uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; - - uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; - - uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; - - int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 - int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 - int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32 - - uint32_t 
AvfsTempCold[AVFS_VOLTAGE_COUNT]; - uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; - uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; - - uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits - - - int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32 - - uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units - - uint32_t EnabledAvfsModules[3]; - - uint32_t MmHubPadding[8]; // SMU internal use -} AvfsFuseOverride_t; - -typedef struct { - uint8_t Gfx_ActiveHystLimit; - uint8_t Gfx_IdleHystLimit; - uint8_t Gfx_FPS; - uint8_t Gfx_MinActiveFreqType; - uint8_t Gfx_BoosterFreqType; - uint8_t Gfx_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. - uint8_t Gfx_UseRlcBusy; - uint8_t PaddingGfx[3]; - uint16_t Gfx_MinActiveFreq; // MHz - uint16_t Gfx_BoosterFreq; // MHz - uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms - uint32_t Gfx_PD_Data_limit_a; // Q16 - uint32_t Gfx_PD_Data_limit_b; // Q16 - uint32_t Gfx_PD_Data_limit_c; // Q16 - uint32_t Gfx_PD_Data_error_coeff; // Q16 - uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 - - uint8_t Mem_ActiveHystLimit; - uint8_t Mem_IdleHystLimit; - uint8_t Mem_FPS; - uint8_t Mem_MinActiveFreqType; - uint8_t Mem_BoosterFreqType; - uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. - uint8_t Mem_UseRlcBusy; - uint8_t PaddingMem[3]; - uint16_t Mem_MinActiveFreq; // MHz - uint16_t Mem_BoosterFreq; // MHz - uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms - uint32_t Mem_PD_Data_limit_a; // Q16 - uint32_t Mem_PD_Data_limit_b; // Q16 - uint32_t Mem_PD_Data_limit_c; // Q16 - uint32_t Mem_PD_Data_error_coeff; // Q16 - uint32_t Mem_PD_Data_error_rate_coeff; // Q16 - - uint32_t Mem_UpThreshold_Limit; // Q16 - uint8_t Mem_UpHystLimit; - uint8_t Mem_DownHystLimit; - uint16_t Mem_Fps; - - uint32_t BusyThreshold; // Q16 - uint32_t BusyHyst; - uint32_t IdleHyst; - - uint32_t MmHubPadding[8]; // SMU internal use -} DpmActivityMonitorCoeffInt_t; - -// These defines are used with the following messages: -// SMC_MSG_TransferTableDram2Smu -// SMC_MSG_TransferTableSmu2Dram -#define TABLE_PPTABLE 0 -#define TABLE_AVFS 1 -#define TABLE_AVFS_PSM_DEBUG 2 -#define TABLE_AVFS_FUSE_OVERRIDE 3 -#define TABLE_PMSTATUSLOG 4 -#define TABLE_SMU_METRICS 5 -#define TABLE_DRIVER_SMU_CONFIG 6 -#define TABLE_OVERDRIVE 7 -#define TABLE_WAFL_XGMI_TOPOLOGY 8 -#define TABLE_I2C_COMMANDS 9 -#define TABLE_ACTIVITY_MONITOR_COEFF 10 -#define TABLE_COUNT 11 - -// These defines are used with the SMC_MSG_SetUclkFastSwitch message. -typedef enum { - DF_SWITCH_TYPE_FAST = 0, - DF_SWITCH_TYPE_SLOW, - DF_SWITCH_TYPE_COUNT, -} DF_SWITCH_TYPE_e; - -typedef enum { - DRAM_BIT_WIDTH_DISABLED = 0, - DRAM_BIT_WIDTH_X_8, - DRAM_BIT_WIDTH_X_16, - DRAM_BIT_WIDTH_X_32, - DRAM_BIT_WIDTH_X_64, // NOT USED. - DRAM_BIT_WIDTH_X_128, - DRAM_BIT_WIDTH_COUNT, -} DRAM_BIT_WIDTH_TYPE_e; - -#define REMOVE_FMAX_MARGIN_BIT 0x0 -#define REMOVE_DCTOL_MARGIN_BIT 0x1 -#define REMOVE_PLATFORM_MARGIN_BIT 0x2 - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h deleted file mode 100644 index 4884a4e1f261..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __SMU11_DRIVER_IF_CYAN_SKILLFISH_H__ -#define __SMU11_DRIVER_IF_CYAN_SKILLFISH_H__ - -// *** IMPORTANT *** -// Always increment the interface version if -// any structure is changed in this file -#define MP1_DRIVER_IF_VERSION 0x8 - -#define TABLE_BIOS_IF 0 // Called by BIOS -#define TABLE_WATERMARKS 1 // Called by Driver; defined here, but not used, for backward compatible -#define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging -#define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible -#define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible -#define TABLE_SMU_METRICS 6 // Called by Driver -#define TABLE_COUNT 7 - -typedef struct SmuMetricsTable_t { - //CPU status - uint16_t CoreFrequency[6]; //[MHz] - uint32_t CorePower[6]; //[mW] - uint16_t CoreTemperature[6]; //[centi-Celsius] - uint16_t L3Frequency[2]; //[MHz] - uint16_t L3Temperature[2]; //[centi-Celsius] - uint16_t C0Residency[6]; //Percentage - - // GFX status - uint16_t GfxclkFrequency; //[MHz] - uint16_t GfxTemperature; //[centi-Celsius] - - // SOC IP info - uint16_t SocclkFrequency; //[MHz] - uint16_t VclkFrequency; //[MHz] - uint16_t DclkFrequency; //[MHz] - uint16_t MemclkFrequency; //[MHz] - - // power, VF info for CPU/GFX telemetry rails, and then socket power total - uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX - uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX - uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX - uint32_t CurrentSocketPower; //[mW] - - uint16_t SocTemperature; //[centi-Celsius] - uint16_t EdgeTemperature; - uint16_t ThrottlerStatus; - uint16_t Spare; - -} SmuMetricsTable_t; - -typedef struct SmuMetrics_t { - SmuMetricsTable_t Current; - SmuMetricsTable_t Average; - uint32_t SampleStartTime; - uint32_t SampleStopTime; - uint32_t Accnt; -} SmuMetrics_t; - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h deleted file mode 100644 index 04752ade1016..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h +++ /dev/null @@ -1,1220 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __SMU11_DRIVER_IF_NAVI10_H__ -#define __SMU11_DRIVER_IF_NAVI10_H__ - -// *** IMPORTANT *** -// SMU TEAM: Always increment the interface version if -// any structure is changed in this file -// Be aware of that the version should be updated in -// smu_v11_0.h, maybe rename is also needed. -// #define SMU11_DRIVER_IF_VERSION 0x33 - -#define PPTABLE_NV10_SMU_VERSION 8 - -#define NUM_GFXCLK_DPM_LEVELS 16 -#define NUM_SMNCLK_DPM_LEVELS 2 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_MP0CLK_DPM_LEVELS 2 -#define NUM_DCLK_DPM_LEVELS 8 -#define NUM_VCLK_DPM_LEVELS 8 -#define NUM_DCEFCLK_DPM_LEVELS 8 -#define NUM_PHYCLK_DPM_LEVELS 8 -#define NUM_DISPCLK_DPM_LEVELS 8 -#define NUM_PIXCLK_DPM_LEVELS 8 -#define NUM_UCLK_DPM_LEVELS 4 -#define NUM_MP1CLK_DPM_LEVELS 2 -#define NUM_LINK_LEVELS 2 - - -#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) -#define MAX_SMNCLK_DPM_LEVEL (NUM_SMNCLK_DPM_LEVELS - 1) -#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) -#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) -#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) -#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) -#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) -#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) -#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) -#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) -#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) -#define MAX_MP1CLK_DPM_LEVEL (NUM_MP1CLK_DPM_LEVELS - 1) -#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) - -//Gemini Modes -#define PPSMC_GeminiModeNone 0 //Single GPU board -#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board -#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board - -// Feature Control Defines -// DPM -#define FEATURE_DPM_PREFETCHER_BIT 0 -#define FEATURE_DPM_GFXCLK_BIT 1 -#define FEATURE_DPM_GFX_PACE_BIT 2 -#define FEATURE_DPM_UCLK_BIT 3 -#define FEATURE_DPM_SOCCLK_BIT 4 -#define FEATURE_DPM_MP0CLK_BIT 5 -#define FEATURE_DPM_LINK_BIT 6 -#define FEATURE_DPM_DCEFCLK_BIT 7 -#define FEATURE_MEM_VDDCI_SCALING_BIT 8 -#define FEATURE_MEM_MVDD_SCALING_BIT 9 - -//Idle -#define FEATURE_DS_GFXCLK_BIT 10 -#define FEATURE_DS_SOCCLK_BIT 11 -#define FEATURE_DS_LCLK_BIT 12 -#define FEATURE_DS_DCEFCLK_BIT 13 -#define FEATURE_DS_UCLK_BIT 14 -#define FEATURE_GFX_ULV_BIT 15 -#define FEATURE_FW_DSTATE_BIT 16 -#define FEATURE_GFXOFF_BIT 17 -#define FEATURE_BACO_BIT 18 -#define 
FEATURE_VCN_PG_BIT 19 -#define FEATURE_JPEG_PG_BIT 20 -#define FEATURE_USB_PG_BIT 21 -#define FEATURE_RSMU_SMN_CG_BIT 22 -//Throttler/Response -#define FEATURE_PPT_BIT 23 -#define FEATURE_TDC_BIT 24 -#define FEATURE_GFX_EDC_BIT 25 -#define FEATURE_APCC_PLUS_BIT 26 -#define FEATURE_GTHR_BIT 27 -#define FEATURE_ACDC_BIT 28 -#define FEATURE_VR0HOT_BIT 29 -#define FEATURE_VR1HOT_BIT 30 -#define FEATURE_FW_CTF_BIT 31 -#define FEATURE_FAN_CONTROL_BIT 32 -#define FEATURE_THERMAL_BIT 33 -#define FEATURE_GFX_DCS_BIT 34 -//VF -#define FEATURE_RM_BIT 35 -#define FEATURE_LED_DISPLAY_BIT 36 -//Other -#define FEATURE_GFX_SS_BIT 37 -#define FEATURE_OUT_OF_BAND_MONITOR_BIT 38 -#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 39 - -#define FEATURE_MMHUB_PG_BIT 40 -#define FEATURE_ATHUB_PG_BIT 41 -#define FEATURE_APCC_DFLL_BIT 42 -#define FEATURE_SPARE_43_BIT 43 -#define FEATURE_SPARE_44_BIT 44 -#define FEATURE_SPARE_45_BIT 45 -#define FEATURE_SPARE_46_BIT 46 -#define FEATURE_SPARE_47_BIT 47 -#define FEATURE_SPARE_48_BIT 48 -#define FEATURE_SPARE_49_BIT 49 -#define FEATURE_SPARE_50_BIT 50 -#define FEATURE_SPARE_51_BIT 51 -#define FEATURE_SPARE_52_BIT 52 -#define FEATURE_SPARE_53_BIT 53 -#define FEATURE_SPARE_54_BIT 54 -#define FEATURE_SPARE_55_BIT 55 -#define FEATURE_SPARE_56_BIT 56 -#define FEATURE_SPARE_57_BIT 57 -#define FEATURE_SPARE_58_BIT 58 -#define FEATURE_SPARE_59_BIT 59 -#define FEATURE_SPARE_60_BIT 60 -#define FEATURE_SPARE_61_BIT 61 -#define FEATURE_SPARE_62_BIT 62 -#define FEATURE_SPARE_63_BIT 63 -#define NUM_FEATURES 64 - -// Debug Overrides Bitmask -#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 -#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 -#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_SOCCLK 0x00000004 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000008 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000010 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00000020 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00000040 -#define DPM_OVERRIDE_DISABLE_VOLT_LINK_DCE_SOCCLK 0x00000080 -#define DPM_OVERRIDE_DISABLE_VOLT_LINK_MP0_SOCCLK 0x00000100 -#define DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN 0x00000200 -#define DPM_OVERRIDE_DISABLE_MEMORY_TEMPERATURE_READ 0x00000400 - -// VR Mapping Bit Defines -#define VR_MAPPING_VR_SELECT_MASK 0x01 -#define VR_MAPPING_VR_SELECT_SHIFT 0x00 - -#define VR_MAPPING_PLANE_SELECT_MASK 0x02 -#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 - -// PSI Bit Defines -#define PSI_SEL_VR0_PLANE0_PSI0 0x01 -#define PSI_SEL_VR0_PLANE0_PSI1 0x02 -#define PSI_SEL_VR0_PLANE1_PSI0 0x04 -#define PSI_SEL_VR0_PLANE1_PSI1 0x08 -#define PSI_SEL_VR1_PLANE0_PSI0 0x10 -#define PSI_SEL_VR1_PLANE0_PSI1 0x20 -#define PSI_SEL_VR1_PLANE1_PSI0 0x40 -#define PSI_SEL_VR1_PLANE1_PSI1 0x80 - -// Throttler Control/Status Bits -#define THROTTLER_PADDING_BIT 0 -#define THROTTLER_TEMP_EDGE_BIT 1 -#define THROTTLER_TEMP_HOTSPOT_BIT 2 -#define THROTTLER_TEMP_MEM_BIT 3 -#define THROTTLER_TEMP_VR_GFX_BIT 4 -#define THROTTLER_TEMP_VR_MEM0_BIT 5 -#define THROTTLER_TEMP_VR_MEM1_BIT 6 -#define THROTTLER_TEMP_VR_SOC_BIT 7 -#define THROTTLER_TEMP_LIQUID0_BIT 8 -#define THROTTLER_TEMP_LIQUID1_BIT 9 -#define THROTTLER_TEMP_PLX_BIT 10 -#define THROTTLER_TEMP_SKIN_BIT 11 -#define THROTTLER_TDC_GFX_BIT 12 -#define THROTTLER_TDC_SOC_BIT 13 -#define THROTTLER_PPT0_BIT 14 -#define THROTTLER_PPT1_BIT 15 -#define THROTTLER_PPT2_BIT 16 -#define THROTTLER_PPT3_BIT 17 -#define THROTTLER_FIT_BIT 18 -#define THROTTLER_PPM_BIT 19 -#define THROTTLER_APCC_BIT 20 - -// FW DState Features Control Bits -#define 
FW_DSTATE_SOC_ULV_BIT 0 -#define FW_DSTATE_G6_HSR_BIT 1 -#define FW_DSTATE_G6_PHY_VDDCI_OFF_BIT 2 -#define FW_DSTATE_MP0_DS_BIT 3 -#define FW_DSTATE_SMN_DS_BIT 4 -#define FW_DSTATE_MP1_DS_BIT 5 -#define FW_DSTATE_MP1_WHISPER_MODE_BIT 6 -#define FW_DSTATE_LIV_MIN_BIT 7 -#define FW_DSTATE_SOC_PLL_PWRDN_BIT 8 - -#define FW_DSTATE_SOC_ULV_MASK (1 << FW_DSTATE_SOC_ULV_BIT ) -#define FW_DSTATE_G6_HSR_MASK (1 << FW_DSTATE_G6_HSR_BIT ) -#define FW_DSTATE_G6_PHY_VDDCI_OFF_MASK (1 << FW_DSTATE_G6_PHY_VDDCI_OFF_BIT ) -#define FW_DSTATE_MP1_DS_MASK (1 << FW_DSTATE_MP1_DS_BIT ) -#define FW_DSTATE_MP0_DS_MASK (1 << FW_DSTATE_MP0_DS_BIT ) -#define FW_DSTATE_SMN_DS_MASK (1 << FW_DSTATE_SMN_DS_BIT ) -#define FW_DSTATE_MP1_WHISPER_MODE_MASK (1 << FW_DSTATE_MP1_WHISPER_MODE_BIT ) -#define FW_DSTATE_LIV_MIN_MASK (1 << FW_DSTATE_LIV_MIN_BIT ) -#define FW_DSTATE_SOC_PLL_PWRDN_MASK (1 << FW_DSTATE_SOC_PLL_PWRDN_BIT ) - -//I2C Interface - -#define NUM_I2C_CONTROLLERS 8 - -#define I2C_CONTROLLER_ENABLED 1 -#define I2C_CONTROLLER_DISABLED 0 - -#define MAX_SW_I2C_COMMANDS 8 - -typedef enum { - I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 - I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 - I2C_CONTROLLER_PORT_COUNT, -} I2cControllerPort_e; - -typedef enum { - I2C_CONTROLLER_NAME_VR_GFX = 0, - I2C_CONTROLLER_NAME_VR_SOC, - I2C_CONTROLLER_NAME_VR_VDDCI, - I2C_CONTROLLER_NAME_VR_MVDD, - I2C_CONTROLLER_NAME_LIQUID0, - I2C_CONTROLLER_NAME_LIQUID1, - I2C_CONTROLLER_NAME_PLX, - I2C_CONTROLLER_NAME_SPARE, - I2C_CONTROLLER_NAME_COUNT, -} I2cControllerName_e; - -typedef enum { - I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, - I2C_CONTROLLER_THROTTLER_VR_GFX, - I2C_CONTROLLER_THROTTLER_VR_SOC, - I2C_CONTROLLER_THROTTLER_VR_VDDCI, - I2C_CONTROLLER_THROTTLER_VR_MVDD, - I2C_CONTROLLER_THROTTLER_LIQUID0, - I2C_CONTROLLER_THROTTLER_LIQUID1, - I2C_CONTROLLER_THROTTLER_PLX, - I2C_CONTROLLER_THROTTLER_COUNT, -} I2cControllerThrottler_e; - -typedef enum { - I2C_CONTROLLER_PROTOCOL_VR_0, - I2C_CONTROLLER_PROTOCOL_VR_1, - I2C_CONTROLLER_PROTOCOL_TMP_0, - I2C_CONTROLLER_PROTOCOL_TMP_1, - I2C_CONTROLLER_PROTOCOL_SPARE_0, - I2C_CONTROLLER_PROTOCOL_SPARE_1, - I2C_CONTROLLER_PROTOCOL_COUNT, -} I2cControllerProtocol_e; - -typedef struct { - uint8_t Enabled; - uint8_t Speed; - uint8_t Padding[2]; - uint32_t SlaveAddress; - uint8_t ControllerPort; - uint8_t ControllerName; - uint8_t ThermalThrotter; - uint8_t I2cProtocol; -} I2cControllerConfig_t; - -typedef enum { - I2C_PORT_SVD_SCL = 0, - I2C_PORT_GPIO, -} I2cPort_e; - -typedef enum { - I2C_SPEED_FAST_50K = 0, //50 Kbits/s - I2C_SPEED_FAST_100K, //100 Kbits/s - I2C_SPEED_FAST_400K, //400 Kbits/s - I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) - I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) - I2C_SPEED_HIGH_2M, //2.3 Mbits/s - I2C_SPEED_COUNT, -} I2cSpeed_e; - -typedef enum { - I2C_CMD_READ = 0, - I2C_CMD_WRITE, - I2C_CMD_COUNT, -} I2cCmdType_e; - -#define CMDCONFIG_STOP_BIT 0 -#define CMDCONFIG_RESTART_BIT 1 - -#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) -#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) - -typedef struct { - uint8_t RegisterAddr; ////only valid for write, ignored for read - uint8_t Cmd; //Read(0) or Write(1) - uint8_t Data; //Return data for read. 
Data to send for write - uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command -} SwI2cCmd_t; //SW I2C Command Table - -typedef struct { - uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) - uint8_t I2CSpeed; //Slow(0) or Fast(1) - uint16_t SlaveAddress; - uint8_t NumCmds; //Number of commands - uint8_t Padding[3]; - - SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; - - uint32_t MmHubPadding[8]; // SMU internal use - -} SwI2cRequest_t; // SW I2C Request Table - -//D3HOT sequences -typedef enum { - BACO_SEQUENCE, - MSR_SEQUENCE, - BAMACO_SEQUENCE, - ULPS_SEQUENCE, - D3HOT_SEQUENCE_COUNT, -}D3HOTSequence_e; - -//THis is aligned with RSMU PGFSM Register Mapping -typedef enum { - PG_DYNAMIC_MODE = 0, - PG_STATIC_MODE, -} PowerGatingMode_e; - -//This is aligned with RSMU PGFSM Register Mapping -typedef enum { - PG_POWER_DOWN = 0, - PG_POWER_UP, -} PowerGatingSettings_e; - -typedef struct { - uint32_t a; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable - uint32_t c; // store in IEEE float format in this variable -} QuadraticInt_t; - -typedef struct { - uint32_t m; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable -} LinearInt_t; - -typedef struct { - uint32_t a; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable - uint32_t c; // store in IEEE float format in this variable -} DroopInt_t; - -typedef enum { - GFXCLK_SOURCE_PLL = 0, - GFXCLK_SOURCE_DFLL, - GFXCLK_SOURCE_COUNT, -} GfxclkSrc_e; - -//Only Clks that have DPM descriptors are listed here -typedef enum { - PPCLK_GFXCLK = 0, - PPCLK_SOCCLK, - PPCLK_UCLK, - PPCLK_DCLK, - PPCLK_VCLK, - PPCLK_DCEFCLK, - PPCLK_DISPCLK, - PPCLK_PIXCLK, - PPCLK_PHYCLK, - PPCLK_COUNT, -} PPCLK_e; - -typedef enum { - POWER_SOURCE_AC, - POWER_SOURCE_DC, - POWER_SOURCE_COUNT, -} POWER_SOURCE_e; - -typedef enum { - PPT_THROTTLER_PPT0, - PPT_THROTTLER_PPT1, - PPT_THROTTLER_PPT2, - PPT_THROTTLER_PPT3, - PPT_THROTTLER_COUNT -} PPT_THROTTLER_e; - -typedef enum { - VOLTAGE_MODE_AVFS = 0, - VOLTAGE_MODE_AVFS_SS, - VOLTAGE_MODE_SS, - VOLTAGE_MODE_COUNT, -} VOLTAGE_MODE_e; - - -typedef enum { - AVFS_VOLTAGE_GFX = 0, - AVFS_VOLTAGE_SOC, - AVFS_VOLTAGE_COUNT, -} AVFS_VOLTAGE_TYPE_e; - -typedef enum { - UCLK_DIV_BY_1 = 0, - UCLK_DIV_BY_2, - UCLK_DIV_BY_4, - UCLK_DIV_BY_8, -} UCLK_DIV_e; - -typedef enum { - GPIO_INT_POLARITY_ACTIVE_LOW = 0, - GPIO_INT_POLARITY_ACTIVE_HIGH, -} GpioIntPolarity_e; - -typedef enum { - MEMORY_TYPE_GDDR6 = 0, - MEMORY_TYPE_HBM, -} MemoryType_e; - -typedef enum { - PWR_CONFIG_TDP = 0, - PWR_CONFIG_TGP, - PWR_CONFIG_TCP_ESTIMATED, - PWR_CONFIG_TCP_MEASURED, -} PwrConfig_e; - -typedef struct { - uint8_t VoltageMode; // 0 - AVFS only, 1- min(AVFS,SS), 2-SS only - uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM - uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used - uint8_t Padding; - LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) - QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) -} DpmDescriptor_t; - -typedef enum { - TEMP_EDGE, - TEMP_HOTSPOT, - TEMP_MEM, - TEMP_VR_GFX, - TEMP_VR_MEM0, - TEMP_VR_MEM1, - TEMP_VR_SOC, - TEMP_LIQUID0, - TEMP_LIQUID1, - TEMP_PLX, - TEMP_COUNT -} TEMP_e; - -//Out of band monitor status defines -//see SPEC //gpu/doc/soc_arch/spec/feature/SMBUS/SMBUS.xlsx -#define 
POWER_MANAGER_CONTROLLER_NOT_RUNNING 0 -#define POWER_MANAGER_CONTROLLER_RUNNING 1 - -#define POWER_MANAGER_CONTROLLER_BIT 0 -#define MAXIMUM_DPM_STATE_GFX_ENGINE_RESTRICTED_BIT 8 -#define GPU_DIE_TEMPERATURE_THROTTLING_BIT 9 -#define HBM_DIE_TEMPERATURE_THROTTLING_BIT 10 -#define TGP_THROTTLING_BIT 11 -#define PCC_THROTTLING_BIT 12 -#define HBM_TEMPERATURE_EXCEEDING_TEMPERATURE_LIMIT_BIT 13 -#define HBM_TEMPERATURE_EXCEEDING_MAX_MEMORY_TEMPERATURE_BIT 14 - -#define POWER_MANAGER_CONTROLLER_MASK (1 << POWER_MANAGER_CONTROLLER_BIT ) -#define MAXIMUM_DPM_STATE_GFX_ENGINE_RESTRICTED_MASK (1 << MAXIMUM_DPM_STATE_GFX_ENGINE_RESTRICTED_BIT ) -#define GPU_DIE_TEMPERATURE_THROTTLING_MASK (1 << GPU_DIE_TEMPERATURE_THROTTLING_BIT ) -#define HBM_DIE_TEMPERATURE_THROTTLING_MASK (1 << HBM_DIE_TEMPERATURE_THROTTLING_BIT ) -#define TGP_THROTTLING_MASK (1 << TGP_THROTTLING_BIT ) -#define PCC_THROTTLING_MASK (1 << PCC_THROTTLING_BIT ) -#define HBM_TEMPERATURE_EXCEEDING_TEMPERATURE_LIMIT_MASK (1 << HBM_TEMPERATURE_EXCEEDING_TEMPERATURE_LIMIT_BIT ) -#define HBM_TEMPERATURE_EXCEEDING_MAX_MEMORY_TEMPERATURE_MASK (1 << HBM_TEMPERATURE_EXCEEDING_MAX_MEMORY_TEMPERATURE_BIT) - -//This structure to be DMA to SMBUS Config register space -typedef struct { - uint8_t MinorInfoVersion; - uint8_t MajorInfoVersion; - uint8_t TableSize; - uint8_t Reserved; - - uint8_t Reserved1; - uint8_t RevID; - uint16_t DeviceID; - - uint16_t DieTemperatureLimit; - uint16_t FanTargetTemperature; - - uint16_t MemoryTemperatureLimit; - uint16_t MemoryTemperatureLimit1; - - uint16_t TGP; - uint16_t CardPower; - - uint32_t DieTemperatureRegisterOffset; - - uint32_t Reserved2; - - uint32_t Reserved3; - - uint32_t Status; - - uint16_t DieTemperature; - uint16_t CurrentMemoryTemperature; - - uint16_t MemoryTemperature; - uint8_t MemoryHotspotPosition; - uint8_t Reserved4; - - uint32_t BoardLevelEnergyAccumulator; -} OutOfBandMonitor_t; - -typedef struct { - uint32_t Version; - - // SECTION: Feature Enablement - uint32_t FeaturesToRun[2]; - - // SECTION: Infrastructure Limits - uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; - uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; - uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; - uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; - - uint16_t TdcLimitSoc; // Amps - uint16_t TdcLimitSocTau; // Time constant of LPF in ms - uint16_t TdcLimitGfx; // Amps - uint16_t TdcLimitGfxTau; // Time constant of LPF in ms - - uint16_t TedgeLimit; // Celcius - uint16_t ThotspotLimit; // Celcius - uint16_t TmemLimit; // Celcius - uint16_t Tvr_gfxLimit; // Celcius - uint16_t Tvr_mem0Limit; // Celcius - uint16_t Tvr_mem1Limit; // Celcius - uint16_t Tvr_socLimit; // Celcius - uint16_t Tliquid0Limit; // Celcius - uint16_t Tliquid1Limit; // Celcius - uint16_t TplxLimit; // Celcius - uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) - - uint16_t PpmPowerLimit; // Switch this this power limit when temperature is above PpmTempThreshold - uint16_t PpmTemperatureThreshold; - - // SECTION: Throttler settings - uint32_t ThrottlerControlMask; // See Throtter masks defines - - // SECTION: FW DSTATE Settings - uint32_t FwDStateMask; // See FW DState masks defines - - // SECTION: ULV Settings - uint16_t UlvVoltageOffsetSoc; // In mV(Q2) - uint16_t UlvVoltageOffsetGfx; // In mV(Q2) - - uint8_t GceaLinkMgrIdleThreshold; //Set by SMU FW during enablment of SOC_ULV. 
Controls delay for GFX SDP port disconnection during idle events - uint8_t paddingRlcUlvParams[3]; - - uint8_t UlvSmnclkDid; //DID for ULV mode. 0 means CLK will not be modified in ULV. - uint8_t UlvMp1clkDid; //DID for ULV mode. 0 means CLK will not be modified in ULV. - uint8_t UlvGfxclkBypass; // 1 to turn off/bypass Gfxclk during ULV, 0 to leave Gfxclk on during ULV - uint8_t Padding234; - - uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode - uint16_t MinVoltageUlvSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC in ULV mode - - - // SECTION: Voltage Control Parameters - uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX - uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC - uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX - uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC - - uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits - uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits - - //SECTION: DPM Config 1 - DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; - - uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; // In MHz - uint32_t Paddingclks[16]; - - uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz - uint16_t Padding8_Clks; - - uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS ]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 - - // SECTION: DPM Config 2 - uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz - uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) - uint16_t MemVddciVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) - uint16_t MemMvddVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) - // GFXCLK DPM - uint16_t GfxclkFgfxoffEntry; // in Mhz - uint16_t GfxclkFinit; // in Mhz - uint16_t GfxclkFidle; // in MHz - uint16_t GfxclkSlewRate; // for PLL babystepping??? - uint16_t GfxclkFopt; // in Mhz - uint8_t Padding567[2]; - uint16_t GfxclkDsMaxFreq; // in MHz - uint8_t GfxclkSource; // 0 = PLL, 1 = DFLL - uint8_t Padding456; - - // UCLK section - uint8_t LowestUclkReservedForUlv; // Set this to 1 if UCLK DPM0 is reserved for ULV-mode only - uint8_t paddingUclk[3]; - - uint8_t MemoryType; // 0-GDDR6, 1-HBM - uint8_t MemoryChannels; - uint8_t PaddingMem[2]; - - // Link DPM Settings - uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 - uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 - uint16_t LclkFreq[NUM_LINK_LEVELS]; - - // GFXCLK Thermal DPM (formerly 'Boost' Settings) - uint16_t EnableTdpm; - uint16_t TdpmHighHystTemperature; - uint16_t TdpmLowHystTemperature; - uint16_t GfxclkFreqHighTempLimit; // High limit on GFXCLK when temperature is high, for reliability. 
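
A minimal, illustrative sketch of how the fixed-point conventions used by the
voltage and load-line fields above decode, assuming mV(Q2) means millivolts
with two fractional bits and the Q8.8 load-line values are milliohms with
eight fractional bits, as the comments state (the helper names are not from
this header):

    /* Illustrative helpers for the mV(Q2) and mOhm(Q8.8) encodings above. */
    static inline unsigned int mv_q2_to_mv(uint16_t v_q2)
    {
            return v_q2 >> 2;               /* drop the two fractional bits */
    }

    static inline unsigned int mohm_q8_8_to_uohm(uint16_t r_q8_8)
    {
            return (r_q8_8 * 1000u) >> 8;   /* mOhm(Q8.8) -> microohm */
    }

Under that reading, a MinVoltageGfx value of 3000 corresponds to 750 mV.
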
- - // SECTION: Fan Control - uint16_t FanStopTemp; //Celcius - uint16_t FanStartTemp; //Celcius - - uint16_t FanGainEdge; - uint16_t FanGainHotspot; - uint16_t FanGainLiquid0; - uint16_t FanGainLiquid1; - uint16_t FanGainVrGfx; - uint16_t FanGainVrSoc; - uint16_t FanGainVrMem0; - uint16_t FanGainVrMem1; - uint16_t FanGainPlx; - uint16_t FanGainMem; - uint16_t FanPwmMin; - uint16_t FanAcousticLimitRpm; - uint16_t FanThrottlingRpm; - uint16_t FanMaximumRpm; - uint16_t FanTargetTemperature; - uint16_t FanTargetGfxclk; - uint8_t FanTempInputSelect; - uint8_t FanPadding; - uint8_t FanZeroRpmEnable; - uint8_t FanTachEdgePerRev; - //uint8_t padding8_Fan[2]; - - // The following are AFC override parameters. Leave at 0 to use FW defaults. - int16_t FuzzyFan_ErrorSetDelta; - int16_t FuzzyFan_ErrorRateSetDelta; - int16_t FuzzyFan_PwmSetDelta; - uint16_t FuzzyFan_Reserved; - - - // SECTION: AVFS - // Overrides - uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; - uint8_t Padding8_Avfs[2]; - - QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve - DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb - DroopInt_t dBtcGbGfxDfll; // GHz->V BtcGb - DroopInt_t dBtcGbSoc; // GHz->V BtcGb - LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V - - QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V - - uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 - - uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; - uint8_t Padding8_GfxBtc[2]; - - uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 - uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 - - // SECTION: Advanced Options - uint32_t DebugOverrides; - QuadraticInt_t ReservedEquation0; - QuadraticInt_t ReservedEquation1; - QuadraticInt_t ReservedEquation2; - QuadraticInt_t ReservedEquation3; - - // Total Power configuration, use defines from PwrConfig_e - uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured - uint8_t TotalPowerSpare1; - uint16_t TotalPowerSpare2; - - // APCC Settings - uint16_t PccThresholdLow; - uint16_t PccThresholdHigh; - uint32_t MGpuFanBoostLimitRpm; - uint32_t PaddingAPCC[5]; - - // Temperature Dependent Vmin - uint16_t VDDGFX_TVmin; //Celcius - uint16_t VDDSOC_TVmin; //Celcius - uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 - uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 - uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 - uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 - - uint16_t VDDGFX_TVminHystersis; // Celcius - uint16_t VDDSOC_TVminHystersis; // Celcius - - // BTC Setting - uint32_t BtcConfig; - - uint16_t SsFmin[10]; // PPtable value to function similar to VFTFmin for SS Curve; Size is PPCLK_COUNT rounded to nearest multiple of 2 - uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; - - // SECTION: Board Reserved - uint32_t Reserved[8]; - - // SECTION: BOARD PARAMETERS - // I2C Control - I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; - - // SVI2 Board Parameters - uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. - uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. 
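
The QuadraticInt_t, LinearInt_t and DroopInt_t coefficients used by the AVFS
section above are documented as IEEE floats carried in uint32_t fields. A
minimal sketch of evaluating such a GHz->V curve, purely for illustration
(kernel code would normally treat these values as opaque rather than doing
floating-point math):

    #include <stdint.h>
    #include <string.h>

    /* Reinterpret the uint32_t as the IEEE-754 float it is documented to
     * hold, then evaluate a*f^2 + b*f + c for a QuadraticInt_t curve. */
    static float u32_as_float(uint32_t bits)
    {
            float f;

            memcpy(&f, &bits, sizeof(f));
            return f;
    }

    static float quadratic_eval_ghz(const QuadraticInt_t *q, float f_ghz)
    {
            return u32_as_float(q->a) * f_ghz * f_ghz +
                   u32_as_float(q->b) * f_ghz +
                   u32_as_float(q->c);
    }
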
- - uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields - uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields - uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields - uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields - - uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN) - uint8_t Padding8_V; - - // Telemetry Settings - uint16_t GfxMaxCurrent; // in Amps - int8_t GfxOffset; // in Amps - uint8_t Padding_TelemetryGfx; - - uint16_t SocMaxCurrent; // in Amps - int8_t SocOffset; // in Amps - uint8_t Padding_TelemetrySoc; - - uint16_t Mem0MaxCurrent; // in Amps - int8_t Mem0Offset; // in Amps - uint8_t Padding_TelemetryMem0; - - uint16_t Mem1MaxCurrent; // in Amps - int8_t Mem1Offset; // in Amps - uint8_t Padding_TelemetryMem1; - - // GPIO Settings - uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching - uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching - uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event - uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event - - uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event - uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event - uint8_t GthrGpio; // GPIO pin configured for GTHR Event - uint8_t GthrPolarity; // replace GPIO polarity for GTHR - - // LED Display Settings - uint8_t LedPin0; // GPIO number for LedPin[0] - uint8_t LedPin1; // GPIO number for LedPin[1] - uint8_t LedPin2; // GPIO number for LedPin[2] - uint8_t padding8_4; - - // GFXCLK PLL Spread Spectrum - uint8_t PllGfxclkSpreadEnabled; // on or off - uint8_t PllGfxclkSpreadPercent; // Q4.4 - uint16_t PllGfxclkSpreadFreq; // kHz - - // GFXCLK DFLL Spread Spectrum - uint8_t DfllGfxclkSpreadEnabled; // on or off - uint8_t DfllGfxclkSpreadPercent; // Q4.4 - uint16_t DfllGfxclkSpreadFreq; // kHz - - // UCLK Spread Spectrum - uint8_t UclkSpreadEnabled; // on or off - uint8_t UclkSpreadPercent; // Q4.4 - uint16_t UclkSpreadFreq; // kHz - - // SOCCLK Spread Spectrum - uint8_t SoclkSpreadEnabled; // on or off - uint8_t SocclkSpreadPercent; // Q4.4 - uint16_t SocclkSpreadFreq; // kHz - - // Total board power - uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power - uint16_t BoardPadding; - - // Mvdd Svi2 Div Ratio Setting - uint32_t MvddRatio; // This is used for MVDD Vid workaround. 
It has 16 fractional bits (Q16.16) - - uint8_t RenesesLoadLineEnabled; - uint8_t GfxLoadlineResistance; - uint8_t SocLoadlineResistance; - uint8_t Padding8_Loadline; - - uint32_t BoardReserved[8]; - - // Padding for MMHUB - do not modify this - uint32_t MmHubPadding[8]; // SMU internal use - -} PPTable_t; - -typedef struct { - // Time constant parameters for clock averages in ms - uint16_t GfxclkAverageLpfTau; - uint16_t SocclkAverageLpfTau; - uint16_t UclkAverageLpfTau; - uint16_t GfxActivityLpfTau; - uint16_t UclkActivityLpfTau; - uint16_t SocketPowerLpfTau; - - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} DriverSmuConfig_t; - -typedef struct { - - uint16_t GfxclkFmin; // MHz - uint16_t GfxclkFmax; // MHz - uint16_t GfxclkFreq1; // MHz - uint16_t GfxclkVolt1; // mV (Q2) - uint16_t GfxclkFreq2; // MHz - uint16_t GfxclkVolt2; // mV (Q2) - uint16_t GfxclkFreq3; // MHz - uint16_t GfxclkVolt3; // mV (Q2) - uint16_t UclkFmax; // MHz - int16_t OverDrivePct; // % - uint16_t FanMaximumRpm; - uint16_t FanMinimumPwm; - uint16_t FanTargetTemperature; // Degree Celcius - uint16_t FanMode; - uint16_t FanMaxPwm; - uint16_t FanMinPwm; - uint16_t FanMaxTemp; // Degree Celcius - uint16_t FanMinTemp; // Degree Celcius - uint16_t MaxOpTemp; // Degree Celcius - uint16_t FanZeroRpmEnable; - - uint32_t MmHubPadding[6]; // SMU internal use - -} OverDriveTable_t; - -typedef struct { - uint16_t CurrClock[PPCLK_COUNT]; - uint16_t AverageGfxclkFrequency; - uint16_t AverageSocclkFrequency; - uint16_t AverageUclkFrequency ; - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; - uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t Padding8 ; - uint16_t AverageSocketPower ; - uint16_t TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureMem ; - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrMem0 ; - uint16_t TemperatureVrMem1 ; - uint16_t TemperatureVrSoc ; - uint16_t TemperatureLiquid0 ; - uint16_t TemperatureLiquid1 ; - uint16_t TemperaturePlx ; - uint16_t Padding16 ; - uint32_t ThrottlerStatus ; - - uint8_t LinkDpmLevel; - uint8_t Padding8_2; - uint16_t CurrFanSpeed; - - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} SmuMetrics_legacy_t; - -typedef struct { - uint16_t CurrClock[PPCLK_COUNT]; - uint16_t AverageGfxclkFrequencyPostDs; - uint16_t AverageSocclkFrequency; - uint16_t AverageUclkFrequencyPostDs; - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; - uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t Padding8 ; - uint16_t AverageSocketPower ; - uint16_t TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureMem ; - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrMem0 ; - uint16_t TemperatureVrMem1 ; - uint16_t TemperatureVrSoc ; - uint16_t TemperatureLiquid0 ; - uint16_t TemperatureLiquid1 ; - uint16_t TemperaturePlx ; - uint16_t Padding16 ; - uint32_t ThrottlerStatus ; - - uint8_t LinkDpmLevel; - uint8_t Padding8_2; - uint16_t CurrFanSpeed; - - uint16_t AverageGfxclkFrequencyPreDs; - uint16_t AverageUclkFrequencyPreDs; - uint8_t PcieRate; - uint8_t PcieWidth; - uint8_t Padding8_3[2]; - - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} SmuMetrics_t; - -typedef struct { - uint16_t CurrClock[PPCLK_COUNT]; - uint16_t AverageGfxclkFrequency; - uint16_t AverageSocclkFrequency; - uint16_t AverageUclkFrequency ; - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; 
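
The ThrottlerStatus word reported in these metrics structures is a bitmask
using the THROTTLER_*_BIT positions defined earlier in this header. A minimal,
illustrative check (the helper name is not from the driver):

    #include <stdbool.h>
    #include <stdint.h>

    /* True if the given throttler bit is asserted in a metrics sample,
     * e.g. THROTTLER_TEMP_HOTSPOT_BIT or THROTTLER_PPT0_BIT. */
    static bool throttler_active(uint32_t throttler_status, unsigned int bit)
    {
            return (throttler_status >> bit) & 0x1;
    }
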
- uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t Padding8 ; - uint16_t AverageSocketPower ; - uint16_t TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureMem ; - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrMem0 ; - uint16_t TemperatureVrMem1 ; - uint16_t TemperatureVrSoc ; - uint16_t TemperatureLiquid0 ; - uint16_t TemperatureLiquid1 ; - uint16_t TemperaturePlx ; - uint16_t Padding16 ; - uint32_t ThrottlerStatus ; - - uint8_t LinkDpmLevel; - uint8_t Padding8_2; - uint16_t CurrFanSpeed; - - uint32_t EnergyAccumulator; - uint16_t AverageVclkFrequency ; - uint16_t AverageDclkFrequency ; - uint16_t VcnActivityPercentage ; - uint16_t padding16_2; - - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} SmuMetrics_NV12_legacy_t; - -typedef struct { - uint16_t CurrClock[PPCLK_COUNT]; - uint16_t AverageGfxclkFrequencyPostDs; - uint16_t AverageSocclkFrequency; - uint16_t AverageUclkFrequencyPostDs; - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; - uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t Padding8 ; - uint16_t AverageSocketPower ; - uint16_t TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureMem ; - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrMem0 ; - uint16_t TemperatureVrMem1 ; - uint16_t TemperatureVrSoc ; - uint16_t TemperatureLiquid0 ; - uint16_t TemperatureLiquid1 ; - uint16_t TemperaturePlx ; - uint16_t Padding16 ; - uint32_t ThrottlerStatus ; - - uint8_t LinkDpmLevel; - uint8_t Padding8_2; - uint16_t CurrFanSpeed; - - uint16_t AverageVclkFrequency ; - uint16_t AverageDclkFrequency ; - uint16_t VcnActivityPercentage ; - uint16_t AverageGfxclkFrequencyPreDs; - uint16_t AverageUclkFrequencyPreDs; - uint8_t PcieRate; - uint8_t PcieWidth; - - uint32_t Padding32_1; - uint64_t EnergyAccumulator; - - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} SmuMetrics_NV12_t; - -typedef union SmuMetrics { - SmuMetrics_legacy_t nv10_legacy_metrics; - SmuMetrics_t nv10_metrics; - SmuMetrics_NV12_legacy_t nv12_legacy_metrics; - SmuMetrics_NV12_t nv12_metrics; -} SmuMetrics_NV1X_t; - -typedef struct { - uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) - uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) - uint16_t MinUclk; - uint16_t MaxUclk; - - uint8_t WmSetting; - uint8_t Padding[3]; - - uint32_t MmHubPadding[8]; // SMU internal use -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 - -typedef enum { - WM_SOCCLK = 0, - WM_DCEFCLK, - WM_COUNT, -} WM_CLOCK_e; - -typedef struct { - // Watermarks - WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; - - uint32_t MmHubPadding[8]; // SMU internal use -} Watermarks_t; - -typedef struct { - uint16_t avgPsmCount[28]; - uint16_t minPsmCount[28]; - float avgPsmVoltage[28]; - float minPsmVoltage[28]; - - uint32_t MmHubPadding[32]; // SMU internal use -} AvfsDebugTable_t_NV14; - -typedef struct { - uint16_t avgPsmCount[36]; - uint16_t minPsmCount[36]; - float avgPsmVoltage[36]; - float minPsmVoltage[36]; - - uint32_t MmHubPadding[8]; // SMU internal use -} AvfsDebugTable_t_NV10; - -typedef struct { - uint8_t AvfsVersion; - uint8_t Padding; - - uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; - - uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; - - uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; - uint8_t 
OverrideP2V[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; - - int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 - int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 - int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32 - - uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT]; - uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; - uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; - - uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits - - - int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32 - - uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units - - uint32_t EnabledAvfsModules[2]; //NV10 - 36 AVFS modules - - uint32_t MmHubPadding[8]; // SMU internal use -} AvfsFuseOverride_t; - -typedef struct { - - uint8_t Gfx_ActiveHystLimit; - uint8_t Gfx_IdleHystLimit; - uint8_t Gfx_FPS; - uint8_t Gfx_MinActiveFreqType; - uint8_t Gfx_BoosterFreqType; - uint8_t Gfx_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. - uint16_t Gfx_MinActiveFreq; // MHz - uint16_t Gfx_BoosterFreq; // MHz - uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms - uint32_t Gfx_PD_Data_limit_a; // Q16 - uint32_t Gfx_PD_Data_limit_b; // Q16 - uint32_t Gfx_PD_Data_limit_c; // Q16 - uint32_t Gfx_PD_Data_error_coeff; // Q16 - uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 - - uint8_t Soc_ActiveHystLimit; - uint8_t Soc_IdleHystLimit; - uint8_t Soc_FPS; - uint8_t Soc_MinActiveFreqType; - uint8_t Soc_BoosterFreqType; - uint8_t Soc_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. - uint16_t Soc_MinActiveFreq; // MHz - uint16_t Soc_BoosterFreq; // MHz - uint16_t Soc_PD_Data_time_constant; // Time constant of PD controller in ms - uint32_t Soc_PD_Data_limit_a; // Q16 - uint32_t Soc_PD_Data_limit_b; // Q16 - uint32_t Soc_PD_Data_limit_c; // Q16 - uint32_t Soc_PD_Data_error_coeff; // Q16 - uint32_t Soc_PD_Data_error_rate_coeff; // Q16 - - uint8_t Mem_ActiveHystLimit; - uint8_t Mem_IdleHystLimit; - uint8_t Mem_FPS; - uint8_t Mem_MinActiveFreqType; - uint8_t Mem_BoosterFreqType; - uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. 
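
Unlike the IEEE-float curve coefficients, the fuse values above are plain
signed binary fixed point: the VFT*/AvfsGb*/P2V coefficients are annotated
Q8.24 and Q12.12. A minimal conversion helper under that assumption, for
inspection only (an unsigned variant would handle the Q16 PD-controller
limits in the activity monitor table the same way):

    /* Convert a signed fixed-point value with 'frac_bits' fractional bits
     * (24 for Q8.24, 12 for Q12.12) to a double for inspection. */
    static double fixed_to_double(int32_t val, unsigned int frac_bits)
    {
            return (double)val / (double)(1u << frac_bits);
    }
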
- uint16_t Mem_MinActiveFreq; // MHz - uint16_t Mem_BoosterFreq; // MHz - uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms - uint32_t Mem_PD_Data_limit_a; // Q16 - uint32_t Mem_PD_Data_limit_b; // Q16 - uint32_t Mem_PD_Data_limit_c; // Q16 - uint32_t Mem_PD_Data_error_coeff; // Q16 - uint32_t Mem_PD_Data_error_rate_coeff; // Q16 - - uint32_t Mem_UpThreshold_Limit; // Q16 - uint8_t Mem_UpHystLimit; - uint8_t Mem_DownHystLimit; - uint16_t Mem_Fps; - - uint32_t MmHubPadding[8]; // SMU internal use - -} DpmActivityMonitorCoeffInt_t; - - -// Workload bits -#define WORKLOAD_PPLIB_DEFAULT_BIT 0 -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 -#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 -#define WORKLOAD_PPLIB_VIDEO_BIT 3 -#define WORKLOAD_PPLIB_VR_BIT 4 -#define WORKLOAD_PPLIB_COMPUTE_BIT 5 -#define WORKLOAD_PPLIB_CUSTOM_BIT 6 -#define WORKLOAD_PPLIB_COUNT 7 - - -// These defines are used with the following messages: -// SMC_MSG_TransferTableDram2Smu -// SMC_MSG_TransferTableSmu2Dram - -// Table transfer status -#define TABLE_TRANSFER_OK 0x0 -#define TABLE_TRANSFER_FAILED 0xFF - -// Table types -#define TABLE_PPTABLE 0 -#define TABLE_WATERMARKS 1 -#define TABLE_AVFS 2 -#define TABLE_AVFS_PSM_DEBUG 3 -#define TABLE_AVFS_FUSE_OVERRIDE 4 -#define TABLE_PMSTATUSLOG 5 -#define TABLE_SMU_METRICS 6 -#define TABLE_DRIVER_SMU_CONFIG 7 -#define TABLE_ACTIVITY_MONITOR_COEFF 8 -#define TABLE_OVERDRIVE 9 -#define TABLE_I2C_COMMANDS 10 -#define TABLE_PACE 11 -#define TABLE_COUNT 12 - -//RLC Pace Table total number of levels -#define RLC_PACE_TABLE_NUM_LEVELS 16 - -typedef struct { - float FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; - - uint32_t MmHubPadding[8]; // SMU internal use -} RlcPaceFlopsPerByteOverride_t; - -// These defines are used with the SMC_MSG_SetUclkFastSwitch message. -#define UCLK_SWITCH_SLOW 0 -#define UCLK_SWITCH_FAST 1 -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h deleted file mode 100644 index 63b8701fd466..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h +++ /dev/null @@ -1,1691 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef __SMU11_DRIVER_IF_SIENNA_CICHLID_H__ -#define __SMU11_DRIVER_IF_SIENNA_CICHLID_H__ - -// *** IMPORTANT *** -// SMU TEAM: Always increment the interface version if -// any structure is changed in this file -#define SMU11_DRIVER_IF_VERSION 0x3B - -#define PPTABLE_Sienna_Cichlid_SMU_VERSION 7 - -#define NUM_GFXCLK_DPM_LEVELS 16 -#define NUM_SMNCLK_DPM_LEVELS 2 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_MP0CLK_DPM_LEVELS 2 -#define NUM_DCLK_DPM_LEVELS 8 -#define NUM_VCLK_DPM_LEVELS 8 -#define NUM_DCEFCLK_DPM_LEVELS 8 -#define NUM_PHYCLK_DPM_LEVELS 8 -#define NUM_DISPCLK_DPM_LEVELS 8 -#define NUM_PIXCLK_DPM_LEVELS 8 -#define NUM_DTBCLK_DPM_LEVELS 8 -#define NUM_UCLK_DPM_LEVELS 4 -#define NUM_MP1CLK_DPM_LEVELS 2 -#define NUM_LINK_LEVELS 2 -#define NUM_FCLK_DPM_LEVELS 8 -#define NUM_XGMI_LEVELS 2 -#define NUM_XGMI_PSTATE_LEVELS 4 -#define NUM_OD_FAN_MAX_POINTS 6 - -#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) -#define MAX_SMNCLK_DPM_LEVEL (NUM_SMNCLK_DPM_LEVELS - 1) -#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) -#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) -#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) -#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) -#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) -#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) -#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) -#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) -#define MAX_DTBCLK_DPM_LEVEL (NUM_DTBCLK_DPM_LEVELS - 1) -#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) -#define MAX_MP1CLK_DPM_LEVEL (NUM_MP1CLK_DPM_LEVELS - 1) -#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) -#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1) - -//Gemini Modes -#define PPSMC_GeminiModeNone 0 //Single GPU board -#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board -#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board - -// Feature Control Defines -// DPM -#define FEATURE_DPM_PREFETCHER_BIT 0 -#define FEATURE_DPM_GFXCLK_BIT 1 -#define FEATURE_DPM_GFX_GPO_BIT 2 -#define FEATURE_DPM_UCLK_BIT 3 -#define FEATURE_DPM_FCLK_BIT 4 -#define FEATURE_DPM_SOCCLK_BIT 5 -#define FEATURE_DPM_MP0CLK_BIT 6 -#define FEATURE_DPM_LINK_BIT 7 -#define FEATURE_DPM_DCEFCLK_BIT 8 -#define FEATURE_DPM_XGMI_BIT 9 -#define FEATURE_MEM_VDDCI_SCALING_BIT 10 -#define FEATURE_MEM_MVDD_SCALING_BIT 11 - -//Idle -#define FEATURE_DS_GFXCLK_BIT 12 -#define FEATURE_DS_SOCCLK_BIT 13 -#define FEATURE_DS_FCLK_BIT 14 -#define FEATURE_DS_LCLK_BIT 15 -#define FEATURE_DS_DCEFCLK_BIT 16 -#define FEATURE_DS_UCLK_BIT 17 -#define FEATURE_GFX_ULV_BIT 18 -#define FEATURE_FW_DSTATE_BIT 19 -#define FEATURE_GFXOFF_BIT 20 -#define FEATURE_BACO_BIT 21 -#define FEATURE_MM_DPM_PG_BIT 22 -#define FEATURE_SPARE_23_BIT 23 -//Throttler/Response -#define FEATURE_PPT_BIT 24 -#define FEATURE_TDC_BIT 25 -#define FEATURE_APCC_PLUS_BIT 26 -#define FEATURE_GTHR_BIT 27 -#define FEATURE_ACDC_BIT 28 -#define FEATURE_VR0HOT_BIT 29 -#define FEATURE_VR1HOT_BIT 30 -#define FEATURE_FW_CTF_BIT 31 -#define FEATURE_FAN_CONTROL_BIT 32 -#define FEATURE_THERMAL_BIT 33 -#define FEATURE_GFX_DCS_BIT 34 -//VF -#define FEATURE_RM_BIT 35 -#define FEATURE_LED_DISPLAY_BIT 36 -//Other -#define FEATURE_GFX_SS_BIT 37 -#define FEATURE_OUT_OF_BAND_MONITOR_BIT 38 -#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 39 - -#define FEATURE_MMHUB_PG_BIT 40 -#define FEATURE_ATHUB_PG_BIT 41 -#define FEATURE_APCC_DFLL_BIT 42 -#define FEATURE_DF_SUPERV_BIT 43 -#define FEATURE_RSMU_SMN_CG_BIT 44 -#define 
FEATURE_DF_CSTATE_BIT 45 -#define FEATURE_2_STEP_PSTATE_BIT 46 -#define FEATURE_SMNCLK_DPM_BIT 47 -#define FEATURE_PERLINK_GMIDOWN_BIT 48 -#define FEATURE_GFX_EDC_BIT 49 -#define FEATURE_GFX_PER_PART_VMIN_BIT 50 -#define FEATURE_SMART_SHIFT_BIT 51 -#define FEATURE_APT_BIT 52 -#define FEATURE_SPARE_53_BIT 53 -#define FEATURE_SPARE_54_BIT 54 -#define FEATURE_SPARE_55_BIT 55 -#define FEATURE_SPARE_56_BIT 56 -#define FEATURE_SPARE_57_BIT 57 -#define FEATURE_SPARE_58_BIT 58 -#define FEATURE_SPARE_59_BIT 59 -#define FEATURE_SPARE_60_BIT 60 -#define FEATURE_SPARE_61_BIT 61 -#define FEATURE_SPARE_62_BIT 62 -#define FEATURE_SPARE_63_BIT 63 -#define NUM_FEATURES 64 - -//For use with feature control messages -typedef enum { - FEATURE_PWR_ALL, - FEATURE_PWR_S5, - FEATURE_PWR_BACO, - FEATURE_PWR_SOC, - FEATURE_PWR_GFX, - FEATURE_PWR_DOMAIN_COUNT, -} FEATURE_PWR_DOMAIN_e; - - -// Debug Overrides Bitmask -#define DPM_OVERRIDE_DISABLE_FCLK_PID 0x00000001 -#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 -#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000004 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_FCLK 0x00000008 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_FCLK 0x00000010 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00000020 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00000040 -#define DPM_OVERRIDE_DISABLE_VOLT_LINK_DCE_FCLK 0x00000080 -#define DPM_OVERRIDE_DISABLE_VOLT_LINK_MP0_SOCCLK 0x00000100 -#define DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN 0x00000200 -#define DPM_OVERRIDE_DISABLE_MEMORY_TEMPERATURE_READ 0x00000400 -#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCEFCLK 0x00000800 -#define DPM_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00001000 -#define DPM_OVERRIDE_DISABLE_VCN_PG 0x00002000 -#define DPM_OVERRIDE_DISABLE_FMAX_VMAX 0x00004000 - -// VR Mapping Bit Defines -#define VR_MAPPING_VR_SELECT_MASK 0x01 -#define VR_MAPPING_VR_SELECT_SHIFT 0x00 - -#define VR_MAPPING_PLANE_SELECT_MASK 0x02 -#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 - -// PSI Bit Defines -#define PSI_SEL_VR0_PLANE0_PSI0 0x01 -#define PSI_SEL_VR0_PLANE0_PSI1 0x02 -#define PSI_SEL_VR0_PLANE1_PSI0 0x04 -#define PSI_SEL_VR0_PLANE1_PSI1 0x08 -#define PSI_SEL_VR1_PLANE0_PSI0 0x10 -#define PSI_SEL_VR1_PLANE0_PSI1 0x20 -#define PSI_SEL_VR1_PLANE1_PSI0 0x40 -#define PSI_SEL_VR1_PLANE1_PSI1 0x80 - -// Throttler Control/Status Bits -#define THROTTLER_PADDING_BIT 0 -#define THROTTLER_TEMP_EDGE_BIT 1 -#define THROTTLER_TEMP_HOTSPOT_BIT 2 -#define THROTTLER_TEMP_MEM_BIT 3 -#define THROTTLER_TEMP_VR_GFX_BIT 4 -#define THROTTLER_TEMP_VR_MEM0_BIT 5 -#define THROTTLER_TEMP_VR_MEM1_BIT 6 -#define THROTTLER_TEMP_VR_SOC_BIT 7 -#define THROTTLER_TEMP_LIQUID0_BIT 8 -#define THROTTLER_TEMP_LIQUID1_BIT 9 -#define THROTTLER_TEMP_PLX_BIT 10 -#define THROTTLER_TDC_GFX_BIT 11 -#define THROTTLER_TDC_SOC_BIT 12 -#define THROTTLER_PPT0_BIT 13 -#define THROTTLER_PPT1_BIT 14 -#define THROTTLER_PPT2_BIT 15 -#define THROTTLER_PPT3_BIT 16 -#define THROTTLER_FIT_BIT 17 -#define THROTTLER_PPM_BIT 18 -#define THROTTLER_APCC_BIT 19 -#define THROTTLER_COUNT 20 - -// FW DState Features Control Bits -// FW DState Features Control Bits -#define FW_DSTATE_SOC_ULV_BIT 0 -#define FW_DSTATE_G6_HSR_BIT 1 -#define FW_DSTATE_G6_PHY_VDDCI_OFF_BIT 2 -#define FW_DSTATE_MP0_DS_BIT 3 -#define FW_DSTATE_SMN_DS_BIT 4 -#define FW_DSTATE_MP1_DS_BIT 5 -#define FW_DSTATE_MP1_WHISPER_MODE_BIT 6 -#define FW_DSTATE_SOC_LIV_MIN_BIT 7 -#define FW_DSTATE_SOC_PLL_PWRDN_BIT 8 -#define FW_DSTATE_MEM_PLL_PWRDN_BIT 9 -#define FW_DSTATE_OPTIMIZE_MALL_REFRESH_BIT 10 -#define 
FW_DSTATE_MEM_PSI_BIT 11 -#define FW_DSTATE_HSR_NON_STROBE_BIT 12 -#define FW_DSTATE_MP0_ENTER_WFI_BIT 13 - -#define FW_DSTATE_SOC_ULV_MASK (1 << FW_DSTATE_SOC_ULV_BIT ) -#define FW_DSTATE_G6_HSR_MASK (1 << FW_DSTATE_G6_HSR_BIT ) -#define FW_DSTATE_G6_PHY_VDDCI_OFF_MASK (1 << FW_DSTATE_G6_PHY_VDDCI_OFF_BIT ) -#define FW_DSTATE_MP1_DS_MASK (1 << FW_DSTATE_MP1_DS_BIT ) -#define FW_DSTATE_MP0_DS_MASK (1 << FW_DSTATE_MP0_DS_BIT ) -#define FW_DSTATE_SMN_DS_MASK (1 << FW_DSTATE_SMN_DS_BIT ) -#define FW_DSTATE_MP1_WHISPER_MODE_MASK (1 << FW_DSTATE_MP1_WHISPER_MODE_BIT ) -#define FW_DSTATE_SOC_LIV_MIN_MASK (1 << FW_DSTATE_SOC_LIV_MIN_BIT ) -#define FW_DSTATE_SOC_PLL_PWRDN_MASK (1 << FW_DSTATE_SOC_PLL_PWRDN_BIT ) -#define FW_DSTATE_MEM_PLL_PWRDN_MASK (1 << FW_DSTATE_MEM_PLL_PWRDN_BIT ) -#define FW_DSTATE_OPTIMIZE_MALL_REFRESH_MASK (1 << FW_DSTATE_OPTIMIZE_MALL_REFRESH_BIT ) -#define FW_DSTATE_MEM_PSI_MASK (1 << FW_DSTATE_MEM_PSI_BIT ) -#define FW_DSTATE_HSR_NON_STROBE_MASK (1 << FW_DSTATE_HSR_NON_STROBE_BIT ) -#define FW_DSTATE_MP0_ENTER_WFI_MASK (1 << FW_DSTATE_MP0_ENTER_WFI_BIT ) - -// GFX GPO Feature Contains PACE and DEM sub features -#define GFX_GPO_PACE_BIT 0 -#define GFX_GPO_DEM_BIT 1 - -#define GFX_GPO_PACE_MASK (1 << GFX_GPO_PACE_BIT) -#define GFX_GPO_DEM_MASK (1 << GFX_GPO_DEM_BIT ) - -#define GPO_UPDATE_REQ_UCLKDPM_MASK 0x1 -#define GPO_UPDATE_REQ_FCLKDPM_MASK 0x2 -#define GPO_UPDATE_REQ_MALLHIT_MASK 0x4 - - -//LED Display Mask & Control Bits -#define LED_DISPLAY_GFX_DPM_BIT 0 -#define LED_DISPLAY_PCIE_BIT 1 -#define LED_DISPLAY_ERROR_BIT 2 - -//RLC Pace Table total number of levels -#define RLC_PACE_TABLE_NUM_LEVELS 16 - -typedef enum { - DRAM_BIT_WIDTH_DISABLED = 0, - DRAM_BIT_WIDTH_X_8, - DRAM_BIT_WIDTH_X_16, - DRAM_BIT_WIDTH_X_32, - DRAM_BIT_WIDTH_X_64, // NOT USED. 
- DRAM_BIT_WIDTH_X_128, - DRAM_BIT_WIDTH_COUNT, -} DRAM_BIT_WIDTH_TYPE_e; - -//I2C Interface -#define NUM_I2C_CONTROLLERS 16 - -#define I2C_CONTROLLER_ENABLED 1 -#define I2C_CONTROLLER_DISABLED 0 - -#define MAX_SW_I2C_COMMANDS 24 - -typedef enum { - I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 - I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 - I2C_CONTROLLER_PORT_COUNT, -} I2cControllerPort_e; - -typedef enum { - I2C_CONTROLLER_NAME_VR_GFX = 0, - I2C_CONTROLLER_NAME_VR_SOC, - I2C_CONTROLLER_NAME_VR_VDDCI, - I2C_CONTROLLER_NAME_VR_MVDD, - I2C_CONTROLLER_NAME_LIQUID0, - I2C_CONTROLLER_NAME_LIQUID1, - I2C_CONTROLLER_NAME_PLX, - I2C_CONTROLLER_NAME_OTHER, - I2C_CONTROLLER_NAME_COUNT, -} I2cControllerName_e; - -typedef enum { - I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, - I2C_CONTROLLER_THROTTLER_VR_GFX, - I2C_CONTROLLER_THROTTLER_VR_SOC, - I2C_CONTROLLER_THROTTLER_VR_VDDCI, - I2C_CONTROLLER_THROTTLER_VR_MVDD, - I2C_CONTROLLER_THROTTLER_LIQUID0, - I2C_CONTROLLER_THROTTLER_LIQUID1, - I2C_CONTROLLER_THROTTLER_PLX, - I2C_CONTROLLER_THROTTLER_INA3221, - I2C_CONTROLLER_THROTTLER_COUNT, -} I2cControllerThrottler_e; - -typedef enum { - I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, - I2C_CONTROLLER_PROTOCOL_VR_IR35217, - I2C_CONTROLLER_PROTOCOL_TMP_TMP102A, - I2C_CONTROLLER_PROTOCOL_INA3221, - I2C_CONTROLLER_PROTOCOL_COUNT, -} I2cControllerProtocol_e; - -typedef struct { - uint8_t Enabled; - uint8_t Speed; - uint8_t SlaveAddress; - uint8_t ControllerPort; - uint8_t ControllerName; - uint8_t ThermalThrotter; - uint8_t I2cProtocol; - uint8_t PaddingConfig; -} I2cControllerConfig_t; - -typedef enum { - I2C_PORT_SVD_SCL = 0, - I2C_PORT_GPIO, -} I2cPort_e; - -typedef enum { - I2C_SPEED_FAST_50K = 0, //50 Kbits/s - I2C_SPEED_FAST_100K, //100 Kbits/s - I2C_SPEED_FAST_400K, //400 Kbits/s - I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) - I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) - I2C_SPEED_HIGH_2M, //2.3 Mbits/s - I2C_SPEED_COUNT, -} I2cSpeed_e; - -typedef enum { - I2C_CMD_READ = 0, - I2C_CMD_WRITE, - I2C_CMD_COUNT, -} I2cCmdType_e; - -typedef enum { - FAN_MODE_AUTO = 0, - FAN_MODE_MANUAL_LINEAR, -} FanMode_e; - -#define CMDCONFIG_STOP_BIT 0 -#define CMDCONFIG_RESTART_BIT 1 -#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write - -#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) -#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) -#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) - -typedef struct { - uint8_t ReadWriteData; //Return data for read. 
Data to send for write - uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write -} SwI2cCmd_t; //SW I2C Command Table - -typedef struct { - uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) - uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select - uint8_t SlaveAddress; //Slave address of device - uint8_t NumCmds; //Number of commands - - SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; -} SwI2cRequest_t; // SW I2C Request Table - -typedef struct { - SwI2cRequest_t SwI2cRequest; - - uint32_t Spare[8]; - uint32_t MmHubPadding[8]; // SMU internal use -} SwI2cRequestExternal_t; - -//D3HOT sequences -typedef enum { - BACO_SEQUENCE, - MSR_SEQUENCE, - BAMACO_SEQUENCE, - ULPS_SEQUENCE, - D3HOT_SEQUENCE_COUNT, -} D3HOTSequence_e; - -//THis is aligned with RSMU PGFSM Register Mapping -typedef enum { - PG_DYNAMIC_MODE = 0, - PG_STATIC_MODE, -} PowerGatingMode_e; - -//This is aligned with RSMU PGFSM Register Mapping -typedef enum { - PG_POWER_DOWN = 0, - PG_POWER_UP, -} PowerGatingSettings_e; - -typedef struct { - uint32_t a; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable - uint32_t c; // store in IEEE float format in this variable -} QuadraticInt_t; - -typedef struct { - uint32_t a; // store in fixed point, [31:20] signed integer, [19:0] fractional bits - uint32_t b; // store in fixed point, [31:20] signed integer, [19:0] fractional bits - uint32_t c; // store in fixed point, [31:20] signed integer, [19:0] fractional bits -} QuadraticFixedPoint_t; - -typedef struct { - uint32_t m; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable -} LinearInt_t; - -typedef struct { - uint32_t a; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable - uint32_t c; // store in IEEE float format in this variable -} DroopInt_t; - -//Piecewise linear droop model, Sienna_Cichlid currently used only for GFX DFLL -#define NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS 5 -typedef enum { - PIECEWISE_LINEAR_FUSED_MODEL = 0, - PIECEWISE_LINEAR_PP_MODEL, - QUADRATIC_PP_MODEL, - PERPART_PIECEWISE_LINEAR_PP_MODEL, -} DfllDroopModelSelect_e; - -typedef struct { - uint32_t Fset[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //in GHz, store in IEEE float format - uint32_t Vdroop[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //in V , store in IEEE float format -}PiecewiseLinearDroopInt_t; - -typedef enum { - GFXCLK_SOURCE_PLL = 0, - GFXCLK_SOURCE_DFLL, - GFXCLK_SOURCE_COUNT, -} GFXCLK_SOURCE_e; - -//Only Clks that have DPM descriptors are listed here -typedef enum { - PPCLK_GFXCLK = 0, - PPCLK_SOCCLK, - PPCLK_UCLK, - PPCLK_FCLK, - PPCLK_DCLK_0, - PPCLK_VCLK_0, - PPCLK_DCLK_1, - PPCLK_VCLK_1, - PPCLK_DCEFCLK, - PPCLK_DISPCLK, - PPCLK_PIXCLK, - PPCLK_PHYCLK, - PPCLK_DTBCLK, - PPCLK_COUNT, -} PPCLK_e; - -typedef enum { - VOLTAGE_MODE_AVFS = 0, - VOLTAGE_MODE_AVFS_SS, - VOLTAGE_MODE_SS, - VOLTAGE_MODE_COUNT, -} VOLTAGE_MODE_e; - - -typedef enum { - AVFS_VOLTAGE_GFX = 0, - AVFS_VOLTAGE_SOC, - AVFS_VOLTAGE_COUNT, -} AVFS_VOLTAGE_TYPE_e; - -typedef enum { - UCLK_DIV_BY_1 = 0, - UCLK_DIV_BY_2, - UCLK_DIV_BY_4, - UCLK_DIV_BY_8, -} UCLK_DIV_e; - -typedef enum { - GPIO_INT_POLARITY_ACTIVE_LOW = 0, - GPIO_INT_POLARITY_ACTIVE_HIGH, -} GpioIntPolarity_e; - -typedef enum { - PWR_CONFIG_TDP = 0, - PWR_CONFIG_TGP, - PWR_CONFIG_TCP_ESTIMATED, - PWR_CONFIG_TCP_MEASURED, -} PwrConfig_e; - -typedef enum { - 
XGMI_LINK_RATE_2 = 2, // 2Gbps - XGMI_LINK_RATE_4 = 4, // 4Gbps - XGMI_LINK_RATE_8 = 8, // 8Gbps - XGMI_LINK_RATE_12 = 12, // 12Gbps - XGMI_LINK_RATE_16 = 16, // 16Gbps - XGMI_LINK_RATE_17 = 17, // 17Gbps - XGMI_LINK_RATE_18 = 18, // 18Gbps - XGMI_LINK_RATE_19 = 19, // 19Gbps - XGMI_LINK_RATE_20 = 20, // 20Gbps - XGMI_LINK_RATE_21 = 21, // 21Gbps - XGMI_LINK_RATE_22 = 22, // 22Gbps - XGMI_LINK_RATE_23 = 23, // 23Gbps - XGMI_LINK_RATE_24 = 24, // 24Gbps - XGMI_LINK_RATE_25 = 25, // 25Gbps - XGMI_LINK_RATE_COUNT -} XGMI_LINK_RATE_e; - -typedef enum { - XGMI_LINK_WIDTH_1 = 0, // x1 - XGMI_LINK_WIDTH_2, // x2 - XGMI_LINK_WIDTH_4, // x4 - XGMI_LINK_WIDTH_8, // x8 - XGMI_LINK_WIDTH_9, // x9 - XGMI_LINK_WIDTH_16, // x16 - XGMI_LINK_WIDTH_COUNT -} XGMI_LINK_WIDTH_e; - -typedef struct { - uint8_t VoltageMode; // 0 - AVFS only, 1- min(AVFS,SS), 2-SS only - uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM - uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used - uint8_t Padding; - LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) - QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) - uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin - uint16_t Padding16; -} DpmDescriptor_t; - -typedef enum { - PPT_THROTTLER_PPT0, - PPT_THROTTLER_PPT1, - PPT_THROTTLER_PPT2, - PPT_THROTTLER_PPT3, - PPT_THROTTLER_COUNT -} PPT_THROTTLER_e; - -typedef enum { - TEMP_EDGE, - TEMP_HOTSPOT, - TEMP_MEM, - TEMP_VR_GFX, - TEMP_VR_MEM0, - TEMP_VR_MEM1, - TEMP_VR_SOC, - TEMP_LIQUID0, - TEMP_LIQUID1, - TEMP_PLX, - TEMP_COUNT, -} TEMP_e; - -typedef enum { - TDC_THROTTLER_GFX, - TDC_THROTTLER_SOC, - TDC_THROTTLER_COUNT -} TDC_THROTTLER_e; - -typedef enum { - CUSTOMER_VARIANT_ROW, - CUSTOMER_VARIANT_FALCON, - CUSTOMER_VARIANT_COUNT, -} CUSTOMER_VARIANT_e; - -// Used for 2-step UCLK DPM change workaround -typedef struct { - uint16_t Fmin; - uint16_t Fmax; -} UclkDpmChangeRange_t; - -typedef struct { - // MAJOR SECTION: SKU PARAMETERS - - uint32_t Version; - - // SECTION: Feature Enablement - uint32_t FeaturesToRun[NUM_FEATURES / 32]; - - // SECTION: Infrastructure Limits - uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // Watts - uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms - uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // Watts - uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms - - uint16_t TdcLimit[TDC_THROTTLER_COUNT]; // Amps - uint16_t TdcLimitTau[TDC_THROTTLER_COUNT]; // Time constant of LPF in ms - - uint16_t TemperatureLimit[TEMP_COUNT]; // Celcius - - uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) - - // SECTION: Power Configuration - uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured. 
Use defines from PwrConfig_e - uint8_t TotalPowerPadding[3]; - - // SECTION: APCC Settings - uint32_t ApccPlusResidencyLimit; - - //SECTION: SMNCLK DPM - uint16_t SmnclkDpmFreq [NUM_SMNCLK_DPM_LEVELS]; // in MHz - uint16_t SmnclkDpmVoltage [NUM_SMNCLK_DPM_LEVELS]; // mV(Q2) - - uint32_t PaddingAPCC; - uint16_t PerPartDroopVsetGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //In mV(Q2) - uint16_t PaddingPerPartDroop; - - // SECTION: Throttler settings - uint32_t ThrottlerControlMask; // See Throtter masks defines - - // SECTION: FW DSTATE Settings - uint32_t FwDStateMask; // See FW DState masks defines - - // SECTION: ULV Settings - uint16_t UlvVoltageOffsetSoc; // In mV(Q2) - uint16_t UlvVoltageOffsetGfx; // In mV(Q2) - - uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode - uint16_t MinVoltageUlvSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC in ULV mode - - uint16_t SocLIVmin; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC - uint16_t PaddingLIVmin; - - uint8_t GceaLinkMgrIdleThreshold; //Set by SMU FW during enablment of GFXOFF. Controls delay for GFX SDP port disconnection during idle events - uint8_t paddingRlcUlvParams[3]; - - // SECTION: Voltage Control Parameters - uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX - uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC - uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX - uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC - - uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits - uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits - - // SECTION: Temperature Dependent Vmin - uint16_t VDDGFX_TVmin; //Celcius - uint16_t VDDSOC_TVmin; //Celcius - uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 - uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 - uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 - uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 - - uint16_t VDDGFX_TVminHystersis; // Celcius - uint16_t VDDSOC_TVminHystersis; // Celcius - - //SECTION: DPM Config 1 - DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; - - uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz - uint32_t Paddingclks; - - DroopInt_t PerPartDroopModelGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //GHz ->Vstore in IEEE float format - - uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz - - uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS ]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 - - // Used for MALL performance boost - uint16_t FclkBoostFreq; // In Mhz - uint16_t FclkParamPadding; - - // SECTION: DPM Config 2 - uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz - uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) - uint16_t MemVddciVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) - uint16_t MemMvddVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) - // GFXCLK DPM - uint16_t GfxclkFgfxoffEntry; // in Mhz - uint16_t GfxclkFinit; // in 
Mhz - uint16_t GfxclkFidle; // in MHz - uint8_t GfxclkSource; // 0 = PLL, 1 = DFLL - uint8_t GfxclkPadding; - - // GFX GPO - uint8_t GfxGpoSubFeatureMask; // bit 0 = PACE, bit 1 = DEM - uint8_t GfxGpoEnabledWorkPolicyMask; //Any policy that GPO can be enabled - uint8_t GfxGpoDisabledWorkPolicyMask; //Any policy that GPO can be disabled - uint8_t GfxGpoPadding[1]; - uint32_t GfxGpoVotingAllow; //For indicating which feature changes should result in a GPO table recalculation - - uint32_t GfxGpoPadding32[4]; - - uint16_t GfxDcsFopt; // Optimal GFXCLK for DCS in Mhz - uint16_t GfxDcsFclkFopt; // Optimal FCLK for DCS in Mhz - uint16_t GfxDcsUclkFopt; // Optimal UCLK for DCS in Mhz - - uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase - - uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase - uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. - - uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS. - - uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase. - uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. - - uint32_t DcsParamPadding[5]; - - uint16_t FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; // Q8.8 - - // UCLK section - uint8_t LowestUclkReservedForUlv; // Set this to 1 if UCLK DPM0 is reserved for ULV-mode only - uint8_t PaddingMem[3]; - - uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 4 DPM states, 0-P0, 1-P1, 2-P2, 3-P3. - - // Used for 2-Step UCLK change workaround - UclkDpmChangeRange_t UclkDpmSrcFreqRange; // In Mhz - UclkDpmChangeRange_t UclkDpmTargFreqRange; // In Mhz - uint16_t UclkDpmMidstepFreq; // In Mhz - uint16_t UclkMidstepPadding; - - // Link DPM Settings - uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 - uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 - uint16_t LclkFreq[NUM_LINK_LEVELS]; - - // SECTION: Fan Control - uint16_t FanStopTemp; //Celcius - uint16_t FanStartTemp; //Celcius - - uint16_t FanGain[TEMP_COUNT]; - - uint16_t FanPwmMin; - uint16_t FanAcousticLimitRpm; - uint16_t FanThrottlingRpm; - uint16_t FanMaximumRpm; - uint16_t MGpuFanBoostLimitRpm; - uint16_t FanTargetTemperature; - uint16_t FanTargetGfxclk; - uint16_t FanPadding16; - uint8_t FanTempInputSelect; - uint8_t FanPadding; - uint8_t FanZeroRpmEnable; - uint8_t FanTachEdgePerRev; - - // The following are AFC override parameters. Leave at 0 to use FW defaults. 
- int16_t FuzzyFan_ErrorSetDelta; - int16_t FuzzyFan_ErrorRateSetDelta; - int16_t FuzzyFan_PwmSetDelta; - uint16_t FuzzyFan_Reserved; - - // SECTION: AVFS - // Overrides - uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; - uint8_t dBtcGbGfxDfllModelSelect; //0 -> fused piece-wise model, 1 -> piece-wise linear(PPTable), 2 -> quadratic model(PPTable) - uint8_t Padding8_Avfs; - - QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve - DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb - DroopInt_t dBtcGbGfxDfll; // GHz->V BtcGb - DroopInt_t dBtcGbSoc; // GHz->V BtcGb - LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V - - PiecewiseLinearDroopInt_t PiecewiseLinearDroopIntGfxDfll; //GHz ->Vstore in IEEE float format - - QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V - - uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 - - uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; - uint8_t Padding8_GfxBtc[2]; - - uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 - uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 - - uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2 - - // SECTION: XGMI - uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3. - uint8_t XgmiDpmSpare[2]; - - // SECTION: Advanced Options - uint32_t DebugOverrides; - QuadraticInt_t ReservedEquation0; - QuadraticInt_t ReservedEquation1; - QuadraticInt_t ReservedEquation2; - QuadraticInt_t ReservedEquation3; - - // SECTION: Sku Reserved - uint8_t CustomerVariant; - - //VC BTC parameters are only applicable to VDD_GFX domain - uint8_t VcBtcEnabled; - uint16_t VcBtcVminT0; // T0_VMIN - uint16_t VcBtcFixedVminAgingOffset; // FIXED_VMIN_AGING_OFFSET - uint16_t VcBtcVmin2PsmDegrationGb; // VMIN_TO_PSM_DEGRADATION_GB - uint32_t VcBtcPsmA; // A_PSM - uint32_t VcBtcPsmB; // B_PSM - uint32_t VcBtcVminA; // A_VMIN - uint32_t VcBtcVminB; // B_VMIN - - //GPIO Board feature - uint16_t LedGpio; //GeneriA GPIO flag used to control the radeon LEDs - uint16_t GfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages - - uint32_t SkuReserved[8]; - - - // MAJOR SECTION: BOARD PARAMETERS - - //SECTION: Gaming Clocks - uint32_t GamingClk[6]; - - // SECTION: I2C Control - I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; - - uint8_t GpioScl; // GPIO Number for SCL Line, used only for CKSVII2C1 - uint8_t GpioSda; // GPIO Number for SDA Line, used only for CKSVII2C1 - uint8_t FchUsbPdSlaveAddr; //For requesting USB PD controller S-states via FCH I2C when entering PME turn off - uint8_t I2cSpare[1]; - - // SECTION: SVI2 Board Parameters - uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields - uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields - uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields - uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields - - uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - uint8_t VddciUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - uint8_t MvddUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - - // SECTION: Telemetry Settings - uint16_t GfxMaxCurrent; // in Amps - int8_t GfxOffset; // in Amps - uint8_t Padding_TelemetryGfx; - - uint16_t SocMaxCurrent; // in Amps - int8_t SocOffset; // in Amps - uint8_t Padding_TelemetrySoc; - - uint16_t Mem0MaxCurrent; // in Amps - int8_t Mem0Offset; // in Amps - uint8_t Padding_TelemetryMem0; - - uint16_t Mem1MaxCurrent; // in Amps - int8_t Mem1Offset; // 
in Amps - uint8_t Padding_TelemetryMem1; - - uint32_t MvddRatio; // This is used for MVDD Svi2 Div Ratio workaround. It has 16 fractional bits (Q16.16) - - // SECTION: GPIO Settings - uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching - uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching - uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event - uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event - - uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event - uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event - uint8_t GthrGpio; // GPIO pin configured for GTHR Event - uint8_t GthrPolarity; // replace GPIO polarity for GTHR - - // LED Display Settings - uint8_t LedPin0; // GPIO number for LedPin[0] - uint8_t LedPin1; // GPIO number for LedPin[1] - uint8_t LedPin2; // GPIO number for LedPin[2] - uint8_t LedEnableMask; - - uint8_t LedPcie; // GPIO number for PCIE results - uint8_t LedError; // GPIO number for Error Cases - uint8_t LedSpare1[2]; - - // SECTION: Clock Spread Spectrum - - // GFXCLK PLL Spread Spectrum - uint8_t PllGfxclkSpreadEnabled; // on or off - uint8_t PllGfxclkSpreadPercent; // Q4.4 - uint16_t PllGfxclkSpreadFreq; // kHz - - // GFXCLK DFLL Spread Spectrum - uint8_t DfllGfxclkSpreadEnabled; // on or off - uint8_t DfllGfxclkSpreadPercent; // Q4.4 - uint16_t DfllGfxclkSpreadFreq; // kHz - - // UCLK Spread Spectrum - uint16_t UclkSpreadPadding; - uint16_t UclkSpreadFreq; // kHz - - // FCLK Spread Spectrum - uint8_t FclkSpreadEnabled; // on or off - uint8_t FclkSpreadPercent; // Q4.4 - uint16_t FclkSpreadFreq; // kHz - - // Section: Memory Config - uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask. - - uint8_t DramBitWidth; // For DRAM use only. See Dram Bit width type defines - uint8_t PaddingMem1[3]; - - // Section: Total Board Power - uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power - uint16_t BoardPowerPadding; - - // SECTION: XGMI Training - uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS]; - uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS]; - - uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS]; - uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS]; - - // SECTION: UMC feature flags - uint8_t HsrEnabled; - uint8_t VddqOffEnabled; - uint8_t PaddingUmcFlags[2]; - - // UCLK Spread Spectrum - uint8_t UclkSpreadPercent[16]; - - // SECTION: Board Reserved - uint32_t BoardReserved[11]; - - // SECTION: Structure Padding - - // Padding for MMHUB - do not modify this - uint32_t MmHubPadding[8]; // SMU internal use - -} PPTable_t; - -typedef struct { - // MAJOR SECTION: SKU PARAMETERS - - uint32_t Version; - - // SECTION: Feature Enablement - uint32_t FeaturesToRun[NUM_FEATURES / 32]; - - // SECTION: Infrastructure Limits - uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // Watts - uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms - uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // Watts - uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms - - uint16_t TdcLimit[TDC_THROTTLER_COUNT]; // Amps - uint16_t TdcLimitTau[TDC_THROTTLER_COUNT]; // Time constant of LPF in ms - - uint16_t TemperatureLimit[TEMP_COUNT]; // Celcius - - uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) - - // SECTION: Power Configuration - uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured. 
Use defines from PwrConfig_e - uint8_t TotalPowerPadding[3]; - - // SECTION: APCC Settings - uint32_t ApccPlusResidencyLimit; - - //SECTION: SMNCLK DPM - uint16_t SmnclkDpmFreq [NUM_SMNCLK_DPM_LEVELS]; // in MHz - uint16_t SmnclkDpmVoltage [NUM_SMNCLK_DPM_LEVELS]; // mV(Q2) - - uint32_t PaddingAPCC; - uint16_t PerPartDroopVsetGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //In mV(Q2) - uint16_t PaddingPerPartDroop; - - // SECTION: Throttler settings - uint32_t ThrottlerControlMask; // See Throtter masks defines - - // SECTION: FW DSTATE Settings - uint32_t FwDStateMask; // See FW DState masks defines - - // SECTION: ULV Settings - uint16_t UlvVoltageOffsetSoc; // In mV(Q2) - uint16_t UlvVoltageOffsetGfx; // In mV(Q2) - - uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode - uint16_t MinVoltageUlvSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC in ULV mode - - uint16_t SocLIVmin; - uint16_t SocLIVminoffset; - - uint8_t GceaLinkMgrIdleThreshold; //Set by SMU FW during enablment of GFXOFF. Controls delay for GFX SDP port disconnection during idle events - uint8_t paddingRlcUlvParams[3]; - - // SECTION: Voltage Control Parameters - uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX - uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC - uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX - uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC - - uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits - uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits - - // SECTION: Temperature Dependent Vmin - uint16_t VDDGFX_TVmin; //Celcius - uint16_t VDDSOC_TVmin; //Celcius - uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 - uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 - uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 - uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 - - uint16_t VDDGFX_TVminHystersis; // Celcius - uint16_t VDDSOC_TVminHystersis; // Celcius - - //SECTION: DPM Config 1 - DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; - - uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz - uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz - uint32_t Paddingclks; - - DroopInt_t PerPartDroopModelGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //GHz ->Vstore in IEEE float format - - uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz - - uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS ]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 - - // Used for MALL performance boost - uint16_t FclkBoostFreq; // In Mhz - uint16_t FclkParamPadding; - - // SECTION: DPM Config 2 - uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz - uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) - uint16_t MemVddciVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) - uint16_t MemMvddVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) - // GFXCLK DPM - uint16_t GfxclkFgfxoffEntry; // in Mhz - uint16_t GfxclkFinit; // in Mhz - uint16_t GfxclkFidle; // in MHz - uint8_t 
GfxclkSource; // 0 = PLL, 1 = DFLL - uint8_t GfxclkPadding; - - // GFX GPO - uint8_t GfxGpoSubFeatureMask; // bit 0 = PACE, bit 1 = DEM - uint8_t GfxGpoEnabledWorkPolicyMask; //Any policy that GPO can be enabled - uint8_t GfxGpoDisabledWorkPolicyMask; //Any policy that GPO can be disabled - uint8_t GfxGpoPadding[1]; - uint32_t GfxGpoVotingAllow; //For indicating which feature changes should result in a GPO table recalculation - - uint32_t GfxGpoPadding32[4]; - - uint16_t GfxDcsFopt; // Optimal GFXCLK for DCS in Mhz - uint16_t GfxDcsFclkFopt; // Optimal FCLK for DCS in Mhz - uint16_t GfxDcsUclkFopt; // Optimal UCLK for DCS in Mhz - - uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase - - uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase - uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. - - uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS. - - uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase. - uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. - - uint32_t DcsParamPadding[5]; - - uint16_t FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; // Q8.8 - - // UCLK section - uint8_t LowestUclkReservedForUlv; // Set this to 1 if UCLK DPM0 is reserved for ULV-mode only - uint8_t PaddingMem[3]; - - uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 4 DPM states, 0-P0, 1-P1, 2-P2, 3-P3. - - // Used for 2-Step UCLK change workaround - UclkDpmChangeRange_t UclkDpmSrcFreqRange; // In Mhz - UclkDpmChangeRange_t UclkDpmTargFreqRange; // In Mhz - uint16_t UclkDpmMidstepFreq; // In Mhz - uint16_t UclkMidstepPadding; - - // Link DPM Settings - uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 - uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 - uint16_t LclkFreq[NUM_LINK_LEVELS]; - - // SECTION: Fan Control - uint16_t FanStopTemp; //Celcius - uint16_t FanStartTemp; //Celcius - - uint16_t FanGain[TEMP_COUNT]; - - uint16_t FanPwmMin; - uint16_t FanAcousticLimitRpm; - uint16_t FanThrottlingRpm; - uint16_t FanMaximumRpm; - uint16_t MGpuFanBoostLimitRpm; - uint16_t FanTargetTemperature; - uint16_t FanTargetGfxclk; - uint16_t FanPadding16; - uint8_t FanTempInputSelect; - uint8_t FanPadding; - uint8_t FanZeroRpmEnable; - uint8_t FanTachEdgePerRev; - - // The following are AFC override parameters. Leave at 0 to use FW defaults. 
- int16_t FuzzyFan_ErrorSetDelta; - int16_t FuzzyFan_ErrorRateSetDelta; - int16_t FuzzyFan_PwmSetDelta; - uint16_t FuzzyFan_Reserved; - - // SECTION: AVFS - // Overrides - uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; - uint8_t dBtcGbGfxDfllModelSelect; //0 -> fused piece-wise model, 1 -> piece-wise linear(PPTable), 2 -> quadratic model(PPTable) - uint8_t Padding8_Avfs; - - QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve - DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb - DroopInt_t dBtcGbGfxDfll; // GHz->V BtcGb - DroopInt_t dBtcGbSoc; // GHz->V BtcGb - LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V - - PiecewiseLinearDroopInt_t PiecewiseLinearDroopIntGfxDfll; //GHz ->Vstore in IEEE float format - - QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V - - uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 - - uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; - uint8_t Padding8_GfxBtc[2]; - - uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 - uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 - - uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2 - - // SECTION: XGMI - uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3. - uint8_t XgmiDpmSpare[2]; - - // SECTION: Advanced Options - uint32_t DebugOverrides; - QuadraticInt_t ReservedEquation0; - QuadraticInt_t ReservedEquation1; - QuadraticInt_t ReservedEquation2; - QuadraticInt_t ReservedEquation3; - - // SECTION: Sku Reserved - uint8_t CustomerVariant; - - //VC BTC parameters are only applicable to VDD_GFX domain - uint8_t VcBtcEnabled; - uint16_t VcBtcVminT0; // T0_VMIN - uint16_t VcBtcFixedVminAgingOffset; // FIXED_VMIN_AGING_OFFSET - uint16_t VcBtcVmin2PsmDegrationGb; // VMIN_TO_PSM_DEGRADATION_GB - uint32_t VcBtcPsmA; // A_PSM - uint32_t VcBtcPsmB; // B_PSM - uint32_t VcBtcVminA; // A_VMIN - uint32_t VcBtcVminB; // B_VMIN - - //GPIO Board feature - uint16_t LedGpio; //GeneriA GPIO flag used to control the radeon LEDs - uint16_t GfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages - - uint32_t SkuReserved[63]; - - - - // MAJOR SECTION: BOARD PARAMETERS - - //SECTION: Gaming Clocks - uint32_t GamingClk[6]; - - // SECTION: I2C Control - I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; - - uint8_t GpioScl; // GPIO Number for SCL Line, used only for CKSVII2C1 - uint8_t GpioSda; // GPIO Number for SDA Line, used only for CKSVII2C1 - uint8_t FchUsbPdSlaveAddr; //For requesting USB PD controller S-states via FCH I2C when entering PME turn off - uint8_t I2cSpare[1]; - - // SECTION: SVI2 Board Parameters - uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields - uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields - uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields - uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields - - uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - uint8_t VddciUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - uint8_t MvddUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode - - // SECTION: Telemetry Settings - uint16_t GfxMaxCurrent; // in Amps - int8_t GfxOffset; // in Amps - uint8_t Padding_TelemetryGfx; - - uint16_t SocMaxCurrent; // in Amps - int8_t SocOffset; // in Amps - uint8_t Padding_TelemetrySoc; - - uint16_t Mem0MaxCurrent; // in Amps - int8_t Mem0Offset; // in Amps - uint8_t Padding_TelemetryMem0; - - uint16_t Mem1MaxCurrent; // in Amps - int8_t Mem1Offset; 
// in Amps - uint8_t Padding_TelemetryMem1; - - uint32_t MvddRatio; // This is used for MVDD Svi2 Div Ratio workaround. It has 16 fractional bits (Q16.16) - - // SECTION: GPIO Settings - uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching - uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching - uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event - uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event - - uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event - uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event - uint8_t GthrGpio; // GPIO pin configured for GTHR Event - uint8_t GthrPolarity; // replace GPIO polarity for GTHR - - // LED Display Settings - uint8_t LedPin0; // GPIO number for LedPin[0] - uint8_t LedPin1; // GPIO number for LedPin[1] - uint8_t LedPin2; // GPIO number for LedPin[2] - uint8_t LedEnableMask; - - uint8_t LedPcie; // GPIO number for PCIE results - uint8_t LedError; // GPIO number for Error Cases - uint8_t LedSpare1[2]; - - // SECTION: Clock Spread Spectrum - - // GFXCLK PLL Spread Spectrum - uint8_t PllGfxclkSpreadEnabled; // on or off - uint8_t PllGfxclkSpreadPercent; // Q4.4 - uint16_t PllGfxclkSpreadFreq; // kHz - - // GFXCLK DFLL Spread Spectrum - uint8_t DfllGfxclkSpreadEnabled; // on or off - uint8_t DfllGfxclkSpreadPercent; // Q4.4 - uint16_t DfllGfxclkSpreadFreq; // kHz - - // UCLK Spread Spectrum - uint16_t UclkSpreadPadding; - uint16_t UclkSpreadFreq; // kHz - - // FCLK Spread Spectrum - uint8_t FclkSpreadEnabled; // on or off - uint8_t FclkSpreadPercent; // Q4.4 - uint16_t FclkSpreadFreq; // kHz - - // Section: Memory Config - uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask. - - uint8_t DramBitWidth; // For DRAM use only. See Dram Bit width type defines - uint8_t PaddingMem1[3]; - - // Section: Total Board Power - uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power - uint16_t BoardPowerPadding; - - // SECTION: XGMI Training - uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS]; - uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS]; - - uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS]; - uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS]; - - // SECTION: UMC feature flags - uint8_t HsrEnabled; - uint8_t VddqOffEnabled; - uint8_t PaddingUmcFlags[2]; - - // UCLK Spread Spectrum - uint8_t UclkSpreadPercent[16]; - - // SECTION: Board Reserved - uint32_t BoardReserved[11]; - - // SECTION: Structure Padding - - // Padding for MMHUB - do not modify this - uint32_t MmHubPadding[8]; // SMU internal use - - -} PPTable_beige_goby_t; - -typedef struct { - // Time constant parameters for clock averages in ms - uint16_t GfxclkAverageLpfTau; - uint16_t FclkAverageLpfTau; - uint16_t UclkAverageLpfTau; - uint16_t GfxActivityLpfTau; - uint16_t UclkActivityLpfTau; - uint16_t SocketPowerLpfTau; - uint16_t VcnClkAverageLpfTau; - uint16_t padding16; -} DriverSmuConfig_t; - -typedef struct { - DriverSmuConfig_t DriverSmuConfig; - - uint32_t Spare[7]; - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} DriverSmuConfigExternal_t; - -typedef struct { - uint16_t GfxclkFmin; // MHz - uint16_t GfxclkFmax; // MHz - QuadraticInt_t CustomGfxVfCurve; // a: mV/MHz^2, b: mv/MHz, c: mV - uint16_t CustomCurveFmin; // MHz - uint16_t UclkFmin; // MHz - uint16_t UclkFmax; // MHz - int16_t OverDrivePct; // % - uint16_t FanMaximumRpm; - uint16_t FanMinimumPwm; - uint16_t FanAcousticLimitRpm; - uint16_t FanTargetTemperature; // Degree Celcius - uint8_t 
FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; - uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; - uint16_t MaxOpTemp; // Degree Celcius - int16_t VddGfxOffset; // in mV - uint8_t FanZeroRpmEnable; - uint8_t FanZeroRpmStopTemp; - uint8_t FanMode; - uint8_t Padding[1]; -} OverDriveTable_t; - -typedef struct { - OverDriveTable_t OverDriveTable; - uint32_t Spare[8]; - - uint32_t MmHubPadding[8]; // SMU internal use -} OverDriveTableExternal_t; - -typedef struct { - uint32_t CurrClock[PPCLK_COUNT]; - - uint16_t AverageGfxclkFrequencyPreDs; - uint16_t AverageGfxclkFrequencyPostDs; - uint16_t AverageFclkFrequencyPreDs; - uint16_t AverageFclkFrequencyPostDs; - uint16_t AverageUclkFrequencyPreDs ; - uint16_t AverageUclkFrequencyPostDs ; - - - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; - uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t Padding8 ; - uint16_t AverageSocketPower ; - uint16_t TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureMem ; - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrMem0 ; - uint16_t TemperatureVrMem1 ; - uint16_t TemperatureVrSoc ; - uint16_t TemperatureLiquid0 ; - uint16_t TemperatureLiquid1 ; - uint16_t TemperaturePlx ; - uint16_t Padding16 ; - uint32_t ThrottlerStatus ; - - uint8_t LinkDpmLevel; - uint8_t CurrFanPwm; - uint16_t CurrFanSpeed; - - //BACO metrics, PMFW-1721 - //metrics for D3hot entry/exit and driver ARM msgs - uint8_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; - uint8_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT]; - uint8_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT]; - - //PMFW-4362 - uint32_t EnergyAccumulator; - uint16_t AverageVclk0Frequency ; - uint16_t AverageDclk0Frequency ; - uint16_t AverageVclk1Frequency ; - uint16_t AverageDclk1Frequency ; - uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence - uint8_t PcieRate ; - uint8_t PcieWidth ; - uint16_t AverageGfxclkFrequencyTarget; - uint16_t Padding16_2; - -} SmuMetrics_t; - -typedef struct { - uint32_t CurrClock[PPCLK_COUNT]; - - uint16_t AverageGfxclkFrequencyPreDs; - uint16_t AverageGfxclkFrequencyPostDs; - uint16_t AverageFclkFrequencyPreDs; - uint16_t AverageFclkFrequencyPostDs; - uint16_t AverageUclkFrequencyPreDs ; - uint16_t AverageUclkFrequencyPostDs ; - - - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; - uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t Padding8 ; - uint16_t AverageSocketPower ; - uint16_t TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureMem ; - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrMem0 ; - uint16_t TemperatureVrMem1 ; - uint16_t TemperatureVrSoc ; - uint16_t TemperatureLiquid0 ; - uint16_t TemperatureLiquid1 ; - uint16_t TemperaturePlx ; - uint16_t Padding16 ; - uint32_t AccCnt ; - uint8_t ThrottlingPercentage[THROTTLER_COUNT]; - - - uint8_t LinkDpmLevel; - uint8_t CurrFanPwm; - uint16_t CurrFanSpeed; - - //BACO metrics, PMFW-1721 - //metrics for D3hot entry/exit and driver ARM msgs - uint8_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; - uint8_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT]; - uint8_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT]; - - //PMFW-4362 - uint32_t EnergyAccumulator; - uint16_t AverageVclk0Frequency ; - uint16_t AverageDclk0Frequency ; - uint16_t AverageVclk1Frequency ; - uint16_t AverageDclk1Frequency ; - uint16_t VcnActivityPercentage ; //place holder, David N. 
to provide full sequence - uint8_t PcieRate ; - uint8_t PcieWidth ; - uint16_t AverageGfxclkFrequencyTarget; - uint16_t Padding16_2; - -} SmuMetrics_V2_t; - -typedef struct { - union { - SmuMetrics_t SmuMetrics; - SmuMetrics_V2_t SmuMetrics_V2; - }; - uint32_t Spare[1]; - - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} SmuMetricsExternal_t; - -typedef struct { - uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) - uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) - uint16_t MinUclk; - uint16_t MaxUclk; - - uint8_t WmSetting; - uint8_t Flags; - uint8_t Padding[2]; - -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 - -typedef enum { - WM_SOCCLK = 0, - WM_DCEFCLK, - WM_COUNT, -} WM_CLOCK_e; - -typedef enum { - WATERMARKS_CLOCK_RANGE = 0, - WATERMARKS_DUMMY_PSTATE, - WATERMARKS_MALL, - WATERMARKS_COUNT, -} WATERMARKS_FLAGS_e; - -typedef struct { - // Watermarks - WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; -} Watermarks_t; - -typedef struct { - Watermarks_t Watermarks; - - uint32_t MmHubPadding[8]; // SMU internal use -} WatermarksExternal_t; - -typedef struct { - uint16_t avgPsmCount[67]; - uint16_t minPsmCount[67]; - float avgPsmVoltage[67]; - float minPsmVoltage[67]; -} AvfsDebugTable_t; - -typedef struct { - AvfsDebugTable_t AvfsDebugTable; - - uint32_t MmHubPadding[8]; // SMU internal use -} AvfsDebugTableExternal_t; - -typedef struct { - uint8_t AvfsVersion; - uint8_t Padding; - - uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; - - uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; - - uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT]; - uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; - - int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 - int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 - int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32 - - int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32 - - uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT]; - uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; - uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; - - uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits - - - int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 - int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 - int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32 - - uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units - - uint32_t EnabledAvfsModules[3]; //Sienna_Cichlid - 67 AVFS modules -} AvfsFuseOverride_t; - -typedef struct { - AvfsFuseOverride_t AvfsFuseOverride; - - uint32_t MmHubPadding[8]; // SMU internal use -} AvfsFuseOverrideExternal_t; - -typedef struct { - uint8_t Gfx_ActiveHystLimit; - uint8_t Gfx_IdleHystLimit; - uint8_t Gfx_FPS; - uint8_t Gfx_MinActiveFreqType; - uint8_t Gfx_BoosterFreqType; - uint8_t Gfx_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. 
- uint16_t Gfx_MinActiveFreq; // MHz - uint16_t Gfx_BoosterFreq; // MHz - uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms - uint32_t Gfx_PD_Data_limit_a; // Q16 - uint32_t Gfx_PD_Data_limit_b; // Q16 - uint32_t Gfx_PD_Data_limit_c; // Q16 - uint32_t Gfx_PD_Data_error_coeff; // Q16 - uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 - - uint8_t Fclk_ActiveHystLimit; - uint8_t Fclk_IdleHystLimit; - uint8_t Fclk_FPS; - uint8_t Fclk_MinActiveFreqType; - uint8_t Fclk_BoosterFreqType; - uint8_t Fclk_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. - uint16_t Fclk_MinActiveFreq; // MHz - uint16_t Fclk_BoosterFreq; // MHz - uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms - uint32_t Fclk_PD_Data_limit_a; // Q16 - uint32_t Fclk_PD_Data_limit_b; // Q16 - uint32_t Fclk_PD_Data_limit_c; // Q16 - uint32_t Fclk_PD_Data_error_coeff; // Q16 - uint32_t Fclk_PD_Data_error_rate_coeff; // Q16 - - uint8_t Mem_ActiveHystLimit; - uint8_t Mem_IdleHystLimit; - uint8_t Mem_FPS; - uint8_t Mem_MinActiveFreqType; - uint8_t Mem_BoosterFreqType; - uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. - uint16_t Mem_MinActiveFreq; // MHz - uint16_t Mem_BoosterFreq; // MHz - uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms - uint32_t Mem_PD_Data_limit_a; // Q16 - uint32_t Mem_PD_Data_limit_b; // Q16 - uint32_t Mem_PD_Data_limit_c; // Q16 - uint32_t Mem_PD_Data_error_coeff; // Q16 - uint32_t Mem_PD_Data_error_rate_coeff; // Q16 - - uint32_t Mem_UpThreshold_Limit; // Q16 - uint8_t Mem_UpHystLimit; - uint8_t Mem_DownHystLimit; - uint16_t Mem_Fps; - -} DpmActivityMonitorCoeffInt_t; - - -typedef struct { - DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt; - uint32_t MmHubPadding[8]; // SMU internal use -} DpmActivityMonitorCoeffIntExternal_t; - -// Workload bits -#define WORKLOAD_PPLIB_DEFAULT_BIT 0 -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 -#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 -#define WORKLOAD_PPLIB_VIDEO_BIT 3 -#define WORKLOAD_PPLIB_VR_BIT 4 -#define WORKLOAD_PPLIB_COMPUTE_BIT 5 -#define WORKLOAD_PPLIB_CUSTOM_BIT 6 -#define WORKLOAD_PPLIB_W3D_BIT 7 -#define WORKLOAD_PPLIB_COUNT 8 - - -// These defines are used with the following messages: -// SMC_MSG_TransferTableDram2Smu -// SMC_MSG_TransferTableSmu2Dram - -// Table transfer status -#define TABLE_TRANSFER_OK 0x0 -#define TABLE_TRANSFER_FAILED 0xFF - -// Table types -#define TABLE_PPTABLE 0 -#define TABLE_WATERMARKS 1 -#define TABLE_AVFS_PSM_DEBUG 2 -#define TABLE_AVFS_FUSE_OVERRIDE 3 -#define TABLE_PMSTATUSLOG 4 -#define TABLE_SMU_METRICS 5 -#define TABLE_DRIVER_SMU_CONFIG 6 -#define TABLE_ACTIVITY_MONITOR_COEFF 7 -#define TABLE_OVERDRIVE 8 -#define TABLE_I2C_COMMANDS 9 -#define TABLE_PACE 10 -#define TABLE_COUNT 11 - -typedef struct { - float FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; -} RlcPaceFlopsPerByteOverride_t; - -typedef struct { - RlcPaceFlopsPerByteOverride_t RlcPaceFlopsPerByteOverride; - - uint32_t MmHubPadding[8]; // SMU internal use -} RlcPaceFlopsPerByteOverrideExternal_t; - -// These defines are used with the SMC_MSG_SetUclkFastSwitch message. 
-#define UCLK_SWITCH_SLOW 0 -#define UCLK_SWITCH_FAST 1 -#define UCLK_SWITCH_DUMMY 2 -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h deleted file mode 100644 index 8361ebd8d876..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h +++ /dev/null @@ -1,282 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __SMU11_DRIVER_IF_VANGOGH_H__ -#define __SMU11_DRIVER_IF_VANGOGH_H__ - -// *** IMPORTANT *** -// SMU TEAM: Always increment the interface version if -// any structure is changed in this file -#define SMU13_DRIVER_IF_VERSION 3 - -typedef struct { - int32_t value; - uint32_t numFractionalBits; -} FloatInIntFormat_t; - -typedef enum { - DSPCLK_DCFCLK = 0, - DSPCLK_DISPCLK, - DSPCLK_PIXCLK, - DSPCLK_PHYCLK, - DSPCLK_COUNT, -} DSPCLK_e; - -typedef struct { - uint16_t Freq; // in MHz - uint16_t Vid; // min voltage in SVI2 VID -} DisplayClockTable_t; - -typedef struct { - uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz) - uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz) - uint16_t MinMclk; - uint16_t MaxMclk; - - uint8_t WmSetting; - uint8_t WmType; // Used for normal pstate change or memory retraining - uint8_t Padding[2]; -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 -#define WM_PSTATE_CHG 0 -#define WM_RETRAINING 1 - -typedef enum { - WM_SOCCLK = 0, - WM_DCFCLK, - WM_COUNT, -} WM_CLOCK_e; - -typedef struct { - // Watermarks - WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; - - uint32_t MmHubPadding[7]; // SMU internal use -} Watermarks_t; - -typedef enum { - CUSTOM_DPM_SETTING_GFXCLK, - CUSTOM_DPM_SETTING_CCLK, - CUSTOM_DPM_SETTING_FCLK_CCX, - CUSTOM_DPM_SETTING_FCLK_GFX, - CUSTOM_DPM_SETTING_FCLK_STALLS, - CUSTOM_DPM_SETTING_LCLK, - CUSTOM_DPM_SETTING_COUNT, -} CUSTOM_DPM_SETTING_e; - -typedef struct { - uint8_t ActiveHystLimit; - uint8_t IdleHystLimit; - uint8_t FPS; - uint8_t MinActiveFreqType; - FloatInIntFormat_t MinActiveFreq; - FloatInIntFormat_t PD_Data_limit; - FloatInIntFormat_t PD_Data_time_constant; - FloatInIntFormat_t PD_Data_error_coeff; - FloatInIntFormat_t PD_Data_error_rate_coeff; -} DpmActivityMonitorCoeffExt_t; - -typedef struct { - DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT]; -} CustomDpmSettings_t; - -#define NUM_DCFCLK_DPM_LEVELS 7 -#define NUM_DISPCLK_DPM_LEVELS 7 -#define 
NUM_DPPCLK_DPM_LEVELS 7 -#define NUM_SOCCLK_DPM_LEVELS 7 -#define NUM_ISPICLK_DPM_LEVELS 7 -#define NUM_ISPXCLK_DPM_LEVELS 7 -#define NUM_VCN_DPM_LEVELS 5 -#define NUM_FCLK_DPM_LEVELS 4 -#define NUM_SOC_VOLTAGE_LEVELS 8 - -typedef struct { - uint32_t fclk; - uint32_t memclk; - uint32_t voltage; -} df_pstate_t; - -typedef struct { - uint32_t vclk; - uint32_t dclk; -} vcn_clk_t; - -//Freq in MHz -//Voltage in milli volts with 2 fractional bits - -typedef struct { - uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; - uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; - uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; - uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; - uint32_t IspiClocks[NUM_ISPICLK_DPM_LEVELS]; - uint32_t IspxClocks[NUM_ISPXCLK_DPM_LEVELS]; - vcn_clk_t VcnClocks[NUM_VCN_DPM_LEVELS]; - - uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; - - df_pstate_t DfPstateTable[NUM_FCLK_DPM_LEVELS]; - - uint32_t MinGfxClk; - uint32_t MaxGfxClk; - - uint8_t NumDfPstatesEnabled; - uint8_t NumDcfclkLevelsEnabled; - uint8_t NumDispClkLevelsEnabled; //applies to both dispclk and dppclk - uint8_t NumSocClkLevelsEnabled; - - uint8_t IspClkLevelsEnabled; //applies to both ispiclk and ispxclk - uint8_t VcnClkLevelsEnabled; //applies to both vclk/dclk - uint8_t spare[2]; -} DpmClocks_t; - - -// Throttler Status Bitmask -#define THROTTLER_STATUS_BIT_SPL 0 -#define THROTTLER_STATUS_BIT_FPPT 1 -#define THROTTLER_STATUS_BIT_SPPT 2 -#define THROTTLER_STATUS_BIT_SPPT_APU 3 -#define THROTTLER_STATUS_BIT_THM_CORE 4 -#define THROTTLER_STATUS_BIT_THM_GFX 5 -#define THROTTLER_STATUS_BIT_THM_SOC 6 -#define THROTTLER_STATUS_BIT_TDC_VDD 7 -#define THROTTLER_STATUS_BIT_TDC_SOC 8 -#define THROTTLER_STATUS_BIT_TDC_GFX 9 -#define THROTTLER_STATUS_BIT_TDC_CVIP 10 - -typedef struct { - uint16_t GfxclkFrequency; //[MHz] - uint16_t SocclkFrequency; //[MHz] - uint16_t VclkFrequency; //[MHz] - uint16_t DclkFrequency; //[MHz] - uint16_t MemclkFrequency; //[MHz] - uint16_t spare; - - uint16_t GfxActivity; //[centi] - uint16_t UvdActivity; //[centi] - - uint16_t Voltage[3]; //[mV] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX - uint16_t Current[3]; //[mA] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX - uint16_t Power[3]; //[mW] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX - uint16_t CurrentSocketPower; //[mW] - - //3rd party tools in Windows need info in the case of APUs - uint16_t CoreFrequency[8]; //[MHz] - uint16_t CorePower[8]; //[mW] - uint16_t CoreTemperature[8]; //[centi-Celsius] - uint16_t L3Frequency[2]; //[MHz] - uint16_t L3Temperature[2]; //[centi-Celsius] - - uint16_t GfxTemperature; //[centi-Celsius] - uint16_t SocTemperature; //[centi-Celsius] - uint16_t EdgeTemperature; - uint16_t ThrottlerStatus; -} SmuMetrics_legacy_t; - -typedef struct { - uint16_t GfxclkFrequency; //[MHz] - uint16_t SocclkFrequency; //[MHz] - uint16_t VclkFrequency; //[MHz] - uint16_t DclkFrequency; //[MHz] - uint16_t MemclkFrequency; //[MHz] - uint16_t spare; - - uint16_t GfxActivity; //[centi] - uint16_t UvdActivity; //[centi] - uint16_t C0Residency[4]; //percentage - - uint16_t Voltage[3]; //[mV] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX - uint16_t Current[3]; //[mA] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX - uint16_t Power[3]; //[mW] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX - uint16_t CurrentSocketPower; //[mW] - - //3rd party tools in Windows need info in the case of APUs - uint16_t CoreFrequency[4]; //[MHz] - uint16_t CorePower[4]; //[mW] - uint16_t CoreTemperature[4]; //[centi-Celsius] - uint16_t L3Frequency[1]; //[MHz] - uint16_t L3Temperature[1]; //[centi-Celsius] - - 
uint16_t GfxTemperature; //[centi-Celsius] - uint16_t SocTemperature; //[centi-Celsius] - uint16_t EdgeTemperature; - uint16_t ThrottlerStatus; -} SmuMetricsTable_t; - -typedef struct { - SmuMetricsTable_t Current; - SmuMetricsTable_t Average; - //uint32_t AccCnt; - uint32_t SampleStartTime; - uint32_t SampleStopTime; -} SmuMetrics_t; - - -// Workload bits -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 -#define WORKLOAD_PPLIB_VIDEO_BIT 2 -#define WORKLOAD_PPLIB_VR_BIT 3 -#define WORKLOAD_PPLIB_COMPUTE_BIT 4 -#define WORKLOAD_PPLIB_CUSTOM_BIT 5 -#define WORKLOAD_PPLIB_COUNT 6 - -#define TABLE_BIOS_IF 0 // Called by BIOS -#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS -#define TABLE_CUSTOM_DPM 2 // Called by Driver -#define TABLE_SPARE1 3 -#define TABLE_DPMCLOCKS 4 // Called by Driver -#define TABLE_SPARE2 5 // Called by Tools -#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log -#define TABLE_SMU_METRICS 7 // Called by Driver -#define TABLE_COUNT 8 - -//ISP tile definitions -typedef enum { - TILE_ISPX = 0, // ISPX - TILE_ISPM, // ISPM - TILE_ISPC, // ISPCORE - TILE_ISPPRE, // ISPPRE - TILE_ISPPOST, // ISPPOST - TILE_MAX -} TILE_NUM_e; - -// Tile Selection (Based on arguments) -#define TILE_SEL_ISPX (1<<(TILE_ISPX)) -#define TILE_SEL_ISPM (1<<(TILE_ISPM)) -#define TILE_SEL_ISPC (1<<(TILE_ISPC)) -#define TILE_SEL_ISPPRE (1<<(TILE_ISPPRE)) -#define TILE_SEL_ISPPOST (1<<(TILE_ISPPOST)) - -// Mask for ISP tiles in PGFSM PWR Status Registers -//Bit[1:0] maps to ISPX, (ISPX) -//Bit[3:2] maps to ISPM, (ISPM) -//Bit[5:4] maps to ISPCORE, (ISPCORE) -//Bit[7:6] maps to ISPPRE, (ISPPRE) -//Bit[9:8] maps to POST, (ISPPOST -#define TILE_ISPX_MASK ((1<<0) | (1<<1)) -#define TILE_ISPM_MASK ((1<<2) | (1<<3)) -#define TILE_ISPC_MASK ((1<<4) | (1<<5)) -#define TILE_ISPPRE_MASK ((1<<6) | (1<<7)) -#define TILE_ISPPOST_MASK ((1<<8) | (1<<9)) - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h deleted file mode 100644 index e9315eb5b48e..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h +++ /dev/null @@ -1,232 +0,0 @@ -/* - * Copyright 2019 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef SMU12_DRIVER_IF_H -#define SMU12_DRIVER_IF_H - -// *** IMPORTANT *** -// SMU TEAM: Always increment the interface version if -// any structure is changed in this file -#define SMU12_DRIVER_IF_VERSION 14 - -typedef struct { - int32_t value; - uint32_t numFractionalBits; -} FloatInIntFormat_t; - -typedef enum { - DSPCLK_DCFCLK = 0, - DSPCLK_DISPCLK, - DSPCLK_PIXCLK, - DSPCLK_PHYCLK, - DSPCLK_COUNT, -} DSPCLK_e; - -typedef struct { - uint16_t Freq; // in MHz - uint16_t Vid; // min voltage in SVI2 VID -} DisplayClockTable_t; - -typedef struct { - uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz) - uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz) - uint16_t MinMclk; - uint16_t MaxMclk; - - uint8_t WmSetting; - uint8_t WmType; // Used for normal pstate change or memory retraining - uint8_t Padding[2]; -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 -#define WM_PSTATE_CHG 0 -#define WM_RETRAINING 1 - -typedef enum { - WM_SOCCLK = 0, - WM_DCFCLK, - WM_COUNT, -} WM_CLOCK_e; - -typedef struct { - // Watermarks - WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; - - uint32_t MmHubPadding[7]; // SMU internal use -} Watermarks_t; - -typedef enum { - CUSTOM_DPM_SETTING_GFXCLK, - CUSTOM_DPM_SETTING_CCLK, - CUSTOM_DPM_SETTING_FCLK_CCX, - CUSTOM_DPM_SETTING_FCLK_GFX, - CUSTOM_DPM_SETTING_FCLK_STALLS, - CUSTOM_DPM_SETTING_LCLK, - CUSTOM_DPM_SETTING_COUNT, -} CUSTOM_DPM_SETTING_e; - -typedef struct { - uint8_t ActiveHystLimit; - uint8_t IdleHystLimit; - uint8_t FPS; - uint8_t MinActiveFreqType; - FloatInIntFormat_t MinActiveFreq; - FloatInIntFormat_t PD_Data_limit; - FloatInIntFormat_t PD_Data_time_constant; - FloatInIntFormat_t PD_Data_error_coeff; - FloatInIntFormat_t PD_Data_error_rate_coeff; -} DpmActivityMonitorCoeffExt_t; - -typedef struct { - DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT]; -} CustomDpmSettings_t; - - -#define NUM_DCFCLK_DPM_LEVELS 8 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_FCLK_DPM_LEVELS 4 -#define NUM_MEMCLK_DPM_LEVELS 4 -#define NUM_VCN_DPM_LEVELS 8 - -typedef struct { - uint32_t Freq; // In MHz - uint32_t Vol; // Millivolts with 2 fractional bits -} DpmClock_t; - -typedef struct { - DpmClock_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; - DpmClock_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; - DpmClock_t FClocks[NUM_FCLK_DPM_LEVELS]; - DpmClock_t MemClocks[NUM_MEMCLK_DPM_LEVELS]; - DpmClock_t VClocks[NUM_VCN_DPM_LEVELS]; - DpmClock_t DClocks[NUM_VCN_DPM_LEVELS]; - - uint8_t NumDcfClkDpmEnabled; - uint8_t NumSocClkDpmEnabled; - uint8_t NumFClkDpmEnabled; - uint8_t NumMemClkDpmEnabled; - uint8_t NumVClkDpmEnabled; - uint8_t NumDClkDpmEnabled; - uint8_t spare[2]; -} DpmClocks_t; - - -typedef enum { - CLOCK_SMNCLK = 0, - CLOCK_SOCCLK, - CLOCK_MP0CLK, - CLOCK_MP1CLK, - CLOCK_MP2CLK, - CLOCK_VCLK, - CLOCK_LCLK, - CLOCK_DCLK, - CLOCK_ACLK, - CLOCK_ISPCLK, - CLOCK_SHUBCLK, - CLOCK_DISPCLK, - CLOCK_DPPCLK, - CLOCK_DPREFCLK, - CLOCK_DCFCLK, - CLOCK_FCLK, - CLOCK_UMCCLK, - CLOCK_GFXCLK, - CLOCK_COUNT, -} CLOCK_IDs_e; - -// Throttler Status Bitmask -#define THROTTLER_STATUS_BIT_SPL 0 -#define THROTTLER_STATUS_BIT_FPPT 1 -#define THROTTLER_STATUS_BIT_SPPT 2 -#define THROTTLER_STATUS_BIT_SPPT_APU 3 -#define THROTTLER_STATUS_BIT_THM_CORE 4 -#define THROTTLER_STATUS_BIT_THM_GFX 5 -#define THROTTLER_STATUS_BIT_THM_SOC 6 -#define THROTTLER_STATUS_BIT_TDC_VDD 7 -#define THROTTLER_STATUS_BIT_TDC_SOC 8 -#define THROTTLER_STATUS_BIT_PROCHOT_CPU 9 -#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10 -#define 
THROTTLER_STATUS_BIT_EDC_CPU 11 -#define THROTTLER_STATUS_BIT_EDC_GFX 12 - -typedef struct { - uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz] - - uint16_t AverageGfxclkFrequency; //[MHz] - uint16_t AverageSocclkFrequency; //[MHz] - uint16_t AverageVclkFrequency; //[MHz] - uint16_t AverageFclkFrequency; //[MHz] - - uint16_t AverageGfxActivity; //[centi] - uint16_t AverageUvdActivity; //[centi] - - uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC - uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC - uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC - - uint16_t FanPwm; //[milli] - uint16_t CurrentSocketPower; //[W] - - uint16_t CoreFrequency[8]; //[MHz] - uint16_t CorePower[8]; //[mW] - uint16_t CoreTemperature[8]; //[centi-Celsius] - uint16_t L3Frequency[2]; //[MHz] - uint16_t L3Temperature[2]; //[centi-Celsius] - - uint16_t GfxTemperature; //[centi-Celsius] - uint16_t SocTemperature; //[centi-Celsius] - uint16_t ThrottlerStatus; - uint16_t spare; - - uint16_t StapmOriginalLimit; //[W] - uint16_t StapmCurrentLimit; //[W] - uint16_t ApuPower; //[W] - uint16_t dGpuPower; //[W] - - uint16_t VddTdcValue; //[mA] - uint16_t SocTdcValue; //[mA] - uint16_t VddEdcValue; //[mA] - uint16_t SocEdcValue; //[mA] - uint16_t reserve[2]; -} SmuMetrics_t; - - -// Workload bits -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 -#define WORKLOAD_PPLIB_VIDEO_BIT 2 -#define WORKLOAD_PPLIB_VR_BIT 3 -#define WORKLOAD_PPLIB_COMPUTE_BIT 4 -#define WORKLOAD_PPLIB_CUSTOM_BIT 5 -#define WORKLOAD_PPLIB_COUNT 6 - -#define TABLE_BIOS_IF 0 // Called by BIOS -#define TABLE_WATERMARKS 1 // Called by Driver -#define TABLE_CUSTOM_DPM 2 // Called by Driver -#define TABLE_SPARE1 3 -#define TABLE_DPMCLOCKS 4 // Called by Driver -#define TABLE_MOMENTARY_PM 5 // Called by Tools -#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log -#define TABLE_SMU_METRICS 7 // Called by Driver -#define TABLE_COUNT 8 - - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h deleted file mode 100644 index 0f67c56c2863..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h +++ /dev/null @@ -1,538 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef SMU13_DRIVER_IF_ALDEBARAN_H -#define SMU13_DRIVER_IF_ALDEBARAN_H - -#define NUM_VCLK_DPM_LEVELS 8 -#define NUM_DCLK_DPM_LEVELS 8 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_LCLK_DPM_LEVELS 8 -#define NUM_UCLK_DPM_LEVELS 4 -#define NUM_FCLK_DPM_LEVELS 8 -#define NUM_XGMI_DPM_LEVELS 4 - -// Feature Control Defines -#define FEATURE_DATA_CALCULATIONS 0 -#define FEATURE_DPM_GFXCLK_BIT 1 -#define FEATURE_DPM_UCLK_BIT 2 -#define FEATURE_DPM_SOCCLK_BIT 3 -#define FEATURE_DPM_FCLK_BIT 4 -#define FEATURE_DPM_LCLK_BIT 5 -#define FEATURE_DPM_XGMI_BIT 6 -#define FEATURE_DS_GFXCLK_BIT 7 -#define FEATURE_DS_SOCCLK_BIT 8 -#define FEATURE_DS_LCLK_BIT 9 -#define FEATURE_DS_FCLK_BIT 10 -#define FEATURE_DS_UCLK_BIT 11 -#define FEATURE_GFX_SS_BIT 12 -#define FEATURE_DPM_VCN_BIT 13 -#define FEATURE_RSMU_SMN_CG_BIT 14 -#define FEATURE_WAFL_CG_BIT 15 -#define FEATURE_PPT_BIT 16 -#define FEATURE_TDC_BIT 17 -#define FEATURE_APCC_PLUS_BIT 18 -#define FEATURE_APCC_DFLL_BIT 19 -#define FEATURE_FW_CTF_BIT 20 -#define FEATURE_THERMAL_BIT 21 -#define FEATURE_OUT_OF_BAND_MONITOR_BIT 22 -#define FEATURE_SPARE_23_BIT 23 -#define FEATURE_XGMI_PER_LINK_PWR_DWN 24 -#define FEATURE_DF_CSTATE 25 -#define FEATURE_FUSE_CG_BIT 26 -#define FEATURE_MP1_CG_BIT 27 -#define FEATURE_SMUIO_CG_BIT 28 -#define FEATURE_THM_CG_BIT 29 -#define FEATURE_CLK_CG_BIT 30 -#define FEATURE_EDC_BIT 31 -#define FEATURE_SPARE_32_BIT 32 -#define FEATURE_SPARE_33_BIT 33 -#define FEATURE_SPARE_34_BIT 34 -#define FEATURE_SPARE_35_BIT 35 -#define FEATURE_SPARE_36_BIT 36 -#define FEATURE_SPARE_37_BIT 37 -#define FEATURE_SPARE_38_BIT 38 -#define FEATURE_SPARE_39_BIT 39 -#define FEATURE_SPARE_40_BIT 40 -#define FEATURE_SPARE_41_BIT 41 -#define FEATURE_SPARE_42_BIT 42 -#define FEATURE_SPARE_43_BIT 43 -#define FEATURE_SPARE_44_BIT 44 -#define FEATURE_SPARE_45_BIT 45 -#define FEATURE_SPARE_46_BIT 46 -#define FEATURE_SPARE_47_BIT 47 -#define FEATURE_SPARE_48_BIT 48 -#define FEATURE_SPARE_49_BIT 49 -#define FEATURE_SPARE_50_BIT 50 -#define FEATURE_SPARE_51_BIT 51 -#define FEATURE_SPARE_52_BIT 52 -#define FEATURE_SPARE_53_BIT 53 -#define FEATURE_SPARE_54_BIT 54 -#define FEATURE_SPARE_55_BIT 55 -#define FEATURE_SPARE_56_BIT 56 -#define FEATURE_SPARE_57_BIT 57 -#define FEATURE_SPARE_58_BIT 58 -#define FEATURE_SPARE_59_BIT 59 -#define FEATURE_SPARE_60_BIT 60 -#define FEATURE_SPARE_61_BIT 61 -#define FEATURE_SPARE_62_BIT 62 -#define FEATURE_SPARE_63_BIT 63 - -#define NUM_FEATURES 64 - -// I2C Config Bit Defines -#define I2C_CONTROLLER_ENABLED 1 -#define I2C_CONTROLLER_DISABLED 0 - -// Throttler Status Bits. 
-// These are aligned with the out of band monitor alarm bits for common throttlers -#define THROTTLER_PPT0_BIT 0 -#define THROTTLER_PPT1_BIT 1 -#define THROTTLER_TDC_GFX_BIT 2 -#define THROTTLER_TDC_SOC_BIT 3 -#define THROTTLER_TDC_HBM_BIT 4 -#define THROTTLER_SPARE_5 5 -#define THROTTLER_TEMP_GPU_BIT 6 -#define THROTTLER_TEMP_MEM_BIT 7 -#define THORTTLER_SPARE_8 8 -#define THORTTLER_SPARE_9 9 -#define THORTTLER_SPARE_10 10 -#define THROTTLER_TEMP_VR_GFX_BIT 11 -#define THROTTLER_TEMP_VR_SOC_BIT 12 -#define THROTTLER_TEMP_VR_MEM_BIT 13 -#define THORTTLER_SPARE_14 14 -#define THORTTLER_SPARE_15 15 -#define THORTTLER_SPARE_16 16 -#define THORTTLER_SPARE_17 17 -#define THORTTLER_SPARE_18 18 -#define THROTTLER_APCC_BIT 19 - -// Table transfer status -#define TABLE_TRANSFER_OK 0x0 -#define TABLE_TRANSFER_FAILED 0xFF -#define TABLE_TRANSFER_PENDING 0xAB - -//I2C Interface -#define NUM_I2C_CONTROLLERS 8 - -#define I2C_CONTROLLER_ENABLED 1 -#define I2C_CONTROLLER_DISABLED 0 - -#define MAX_SW_I2C_COMMANDS 24 - -#define ALDEBARAN_UMC_CHANNEL_NUM 32 - -typedef enum { - I2C_CONTROLLER_PORT_0, //CKSVII2C0 - I2C_CONTROLLER_PORT_1, //CKSVII2C1 - I2C_CONTROLLER_PORT_COUNT, -} I2cControllerPort_e; - -typedef enum { - I2C_CONTROLLER_THROTTLER_TYPE_NONE, - I2C_CONTROLLER_THROTTLER_VR_GFX0, - I2C_CONTROLLER_THROTTLER_VR_GFX1, - I2C_CONTROLLER_THROTTLER_VR_SOC, - I2C_CONTROLLER_THROTTLER_VR_MEM, - I2C_CONTROLLER_THROTTLER_COUNT, -} I2cControllerThrottler_e; - -typedef enum { - I2C_CONTROLLER_PROTOCOL_VR_MP2855, - I2C_CONTROLLER_PROTOCOL_COUNT, -} I2cControllerProtocol_e; - -typedef struct { - uint8_t Enabled; - uint8_t Speed; - uint8_t SlaveAddress; - uint8_t ControllerPort; - uint8_t ThermalThrotter; - uint8_t I2cProtocol; - uint8_t PaddingConfig[2]; -} I2cControllerConfig_t; - -typedef enum { - I2C_PORT_SVD_SCL, - I2C_PORT_GPIO, -} I2cPort_e; - -typedef enum { - I2C_SPEED_FAST_50K, //50 Kbits/s - I2C_SPEED_FAST_100K, //100 Kbits/s - I2C_SPEED_FAST_400K, //400 Kbits/s - I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) - I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) - I2C_SPEED_HIGH_2M, //2.3 Mbits/s - I2C_SPEED_COUNT, -} I2cSpeed_e; - -typedef enum { - I2C_CMD_READ, - I2C_CMD_WRITE, - I2C_CMD_COUNT, -} I2cCmdType_e; - -#define CMDCONFIG_STOP_BIT 0 -#define CMDCONFIG_RESTART_BIT 1 -#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write - -#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) -#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) -#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) - -typedef struct { - uint8_t ReadWriteData; //Return data for read. 
Data to send for write - uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write -} SwI2cCmd_t; //SW I2C Command Table - -typedef struct { - uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) - uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select - uint8_t SlaveAddress; //Slave address of device - uint8_t NumCmds; //Number of commands - SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; -} SwI2cRequest_t; // SW I2C Request Table - -typedef struct { - SwI2cRequest_t SwI2cRequest; - uint32_t Spare[8]; - uint32_t MmHubPadding[8]; // SMU internal use -} SwI2cRequestExternal_t; - -typedef struct { - uint32_t a; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable - uint32_t c; // store in IEEE float format in this variable -} QuadraticInt_t; - -typedef struct { - uint32_t m; // store in IEEE float format in this variable - uint32_t b; // store in IEEE float format in this variable -} LinearInt_t; - -typedef enum { - GFXCLK_SOURCE_PLL, - GFXCLK_SOURCE_DFLL, - GFXCLK_SOURCE_COUNT, -} GfxclkSrc_e; - -typedef enum { - PPCLK_GFXCLK, - PPCLK_VCLK, - PPCLK_DCLK, - PPCLK_SOCCLK, - PPCLK_UCLK, - PPCLK_FCLK, - PPCLK_LCLK, - PPCLK_COUNT, -} PPCLK_e; - -typedef enum { - GPIO_INT_POLARITY_ACTIVE_LOW, - GPIO_INT_POLARITY_ACTIVE_HIGH, -} GpioIntPolarity_e; - -//PPSMC_MSG_SetUclkDpmMode -typedef enum { - UCLK_DPM_MODE_BANDWIDTH, - UCLK_DPM_MODE_LATENCY, -} UCLK_DPM_MODE_e; - -typedef struct { - uint8_t StartupLevel; - uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used - uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin - LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) - QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) -} DpmDescriptor_t; - -typedef struct { - uint32_t Version; - - // SECTION: Feature Enablement - uint32_t FeaturesToRun[2]; - - // SECTION: Infrastructure Limits - uint16_t PptLimit; // Watts - uint16_t TdcLimitGfx; // Amps - uint16_t TdcLimitSoc; // Amps - uint16_t TdcLimitHbm; // Amps - uint16_t ThotspotLimit; // Celcius - uint16_t TmemLimit; // Celcius - uint16_t Tvr_gfxLimit; // Celcius - uint16_t Tvr_memLimit; // Celcius - uint16_t Tvr_socLimit; // Celcius - uint16_t PaddingLimit; - - // SECTION: Voltage Control Parameters - uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX - uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC - - //SECTION: DPM Config 1 - DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; - - uint8_t DidTableVclk[NUM_VCLK_DPM_LEVELS]; //PPCLK_VCLK - uint8_t DidTableDclk[NUM_DCLK_DPM_LEVELS]; //PPCLK_DCLK - uint8_t DidTableSocclk[NUM_SOCCLK_DPM_LEVELS]; //PPCLK_SOCCLK - uint8_t DidTableLclk[NUM_LCLK_DPM_LEVELS]; //PPCLK_LCLK - uint32_t FidTableFclk[NUM_FCLK_DPM_LEVELS]; //PPCLK_FCLK - uint8_t DidTableFclk[NUM_FCLK_DPM_LEVELS]; //PPCLK_FCLK - uint32_t FidTableUclk[NUM_UCLK_DPM_LEVELS]; //PPCLK_UCLK - uint8_t DidTableUclk[NUM_UCLK_DPM_LEVELS]; //PPCLK_UCLK - - uint32_t StartupFidPll0; //GFXAVFSCLK, SOCCLK, MP0CLK, MPIOCLK, DXIOCLK - uint32_t StartupFidPll4; //VCLK, DCLK, WAFLCLK - uint32_t StartupFidPll5; //SMNCLK, MP1CLK, LCLK - - uint8_t StartupSmnclkDid; - uint8_t StartupMp0clkDid; - uint8_t StartupMp1clkDid; - uint8_t StartupWaflclkDid; - uint8_t StartupGfxavfsclkDid; - uint8_t StartupMpioclkDid; - uint8_t StartupDxioclkDid; - uint8_t spare123; - - uint8_t 
StartupVidGpu0Svi0Plane0; //VDDCR_GFX0 - uint8_t StartupVidGpu0Svi0Plane1; //VDDCR_SOC - uint8_t StartupVidGpu0Svi1Plane0; //VDDCR_HBM - uint8_t StartupVidGpu0Svi1Plane1; //UNUSED [0 = plane is not used and should not be programmed] - - uint8_t StartupVidGpu1Svi0Plane0; //VDDCR_GFX1 - uint8_t StartupVidGpu1Svi0Plane1; //UNUSED [0 = plane is not used and should not be programmed] - uint8_t StartupVidGpu1Svi1Plane0; //UNUSED [0 = plane is not used and should not be programmed] - uint8_t StartupVidGpu1Svi1Plane1; //UNUSED [0 = plane is not used and should not be programmed] - - // GFXCLK DPM - uint16_t GfxclkFmax; // In MHz - uint16_t GfxclkFmin; // In MHz - uint16_t GfxclkFidle; // In MHz - uint16_t GfxclkFinit; // In MHz - uint8_t GfxclkSource; // GfxclkSrc_e [0 = PLL, 1 = DFLL] - uint8_t spare1[2]; - uint8_t StartupGfxclkDid; - uint32_t StartupGfxclkFid; - - // SECTION: AVFS - uint16_t GFX_Guardband_Freq[8]; // MHz [unsigned] - int16_t GFX_Guardband_Voltage_Cold[8]; // mV [signed] - int16_t GFX_Guardband_Voltage_Mid[8]; // mV [signed] - int16_t GFX_Guardband_Voltage_Hot[8]; // mV [signed] - - uint16_t SOC_Guardband_Freq[8]; // MHz [unsigned] - int16_t SOC_Guardband_Voltage_Cold[8]; // mV [signed] - int16_t SOC_Guardband_Voltage_Mid[8]; // mV [signed] - int16_t SOC_Guardband_Voltage_Hot[8]; // mV [signed] - - // VDDCR_GFX BTC - uint16_t DcBtcEnabled; - int16_t DcBtcMin; // mV [signed] - int16_t DcBtcMax; // mV [signed] - int16_t DcBtcGb; // mV [signed] - - // SECTION: XGMI - uint8_t XgmiLinkSpeed[NUM_XGMI_DPM_LEVELS]; //Gbps [EX: 32 = 32Gbps] - uint8_t XgmiLinkWidth[NUM_XGMI_DPM_LEVELS]; //Width [EX: 16 = x16] - uint8_t XgmiStartupLevel; - uint8_t spare12[3]; - - // GFX Vmin - uint16_t GFX_PPVmin_Enabled; - uint16_t GFX_Vmin_Plat_Offset_Hot; // mV - uint16_t GFX_Vmin_Plat_Offset_Cold; // mV - uint16_t GFX_Vmin_Hot_T0; // mV - uint16_t GFX_Vmin_Cold_T0; // mV - uint16_t GFX_Vmin_Hot_Eol; // mV - uint16_t GFX_Vmin_Cold_Eol; // mV - uint16_t GFX_Vmin_Aging_Offset; // mV - uint16_t GFX_Vmin_Temperature_Hot; // 'C - uint16_t GFX_Vmin_Temperature_Cold; // 'C - - // SOC Vmin - uint16_t SOC_PPVmin_Enabled; - uint16_t SOC_Vmin_Plat_Offset_Hot; // mV - uint16_t SOC_Vmin_Plat_Offset_Cold; // mV - uint16_t SOC_Vmin_Hot_T0; // mV - uint16_t SOC_Vmin_Cold_T0; // mV - uint16_t SOC_Vmin_Hot_Eol; // mV - uint16_t SOC_Vmin_Cold_Eol; // mV - uint16_t SOC_Vmin_Aging_Offset; // mV - uint16_t SOC_Vmin_Temperature_Hot; // 'C - uint16_t SOC_Vmin_Temperature_Cold; // 'C - - // APCC Settings - uint32_t ApccPlusResidencyLimit; //PCC residency % (0-100) - - // Determinism - uint16_t DeterminismVoltageOffset; //mV - uint16_t spare22; - - // reserved - uint32_t spare3[14]; - - // SECTION: BOARD PARAMETERS - // Telemetry Settings - uint16_t GfxMaxCurrent; // in Amps - int8_t GfxOffset; // in Amps - uint8_t Padding_TelemetryGfx; - - uint16_t SocMaxCurrent; // in Amps - int8_t SocOffset; // in Amps - uint8_t Padding_TelemetrySoc; - - uint16_t MemMaxCurrent; // in Amps - int8_t MemOffset; // in Amps - uint8_t Padding_TelemetryMem; - - uint16_t BoardMaxCurrent; // in Amps - int8_t BoardOffset; // in Amps - uint8_t Padding_TelemetryBoardInput; - - // Platform input telemetry voltage coefficient - uint32_t BoardVoltageCoeffA; // decode by /1000 - uint32_t BoardVoltageCoeffB; // decode by /1000 - - // GPIO Settings - uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event - uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event - uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event - uint8_t VR1HotPolarity; // 
GPIO polarity for VR1 HOT event - - // UCLK Spread Spectrum - uint8_t UclkSpreadEnabled; // on or off - uint8_t UclkSpreadPercent; // Q4.4 - uint16_t UclkSpreadFreq; // kHz - - // FCLK Spread Spectrum - uint8_t FclkSpreadEnabled; // on or off - uint8_t FclkSpreadPercent; // Q4.4 - uint16_t FclkSpreadFreq; // kHz - - // I2C Controller Structure - I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; - - // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence - uint8_t GpioI2cScl; // Serial Clock - uint8_t GpioI2cSda; // Serial Data - uint16_t spare5; - - uint16_t XgmiMaxCurrent; // in Amps - int8_t XgmiOffset; // in Amps - uint8_t Padding_TelemetryXgmi; - - uint16_t EdcPowerLimit; - uint16_t spare6; - - //reserved - uint32_t reserved[14]; - -} PPTable_t; - -typedef struct { - // Time constant parameters for clock averages in ms - uint16_t GfxclkAverageLpfTau; - uint16_t SocclkAverageLpfTau; - uint16_t UclkAverageLpfTau; - uint16_t GfxActivityLpfTau; - uint16_t UclkActivityLpfTau; - - uint16_t SocketPowerLpfTau; - - uint32_t Spare[8]; - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} DriverSmuConfig_t; - -typedef struct { - uint16_t CurrClock[PPCLK_COUNT]; - uint16_t Padding1 ; - uint16_t AverageGfxclkFrequency; - uint16_t AverageSocclkFrequency; - uint16_t AverageUclkFrequency ; - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; - uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t Padding8 ; - uint16_t AverageSocketPower ; - uint16_t TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureHBM ; // Max - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrSoc ; - uint16_t TemperatureVrMem ; - uint32_t ThrottlerStatus ; - - uint32_t PublicSerialNumLower32; - uint32_t PublicSerialNumUpper32; - uint16_t TemperatureAllHBM[4] ; - uint32_t GfxBusyAcc ; - uint32_t DramBusyAcc ; - uint32_t EnergyAcc64bitLow ; //15.259uJ resolution - uint32_t EnergyAcc64bitHigh ; - uint32_t TimeStampLow ; //10ns resolution - uint32_t TimeStampHigh ; - - // Padding - ignore - uint32_t MmHubPadding[8]; // SMU internal use -} SmuMetrics_t; - - -typedef struct { - uint16_t avgPsmCount[76]; - uint16_t minPsmCount[76]; - float avgPsmVoltage[76]; - float minPsmVoltage[76]; - - uint32_t MmHubPadding[8]; // SMU internal use -} AvfsDebugTable_t; - -typedef struct { - uint64_t mca_umc_status; - uint64_t mca_umc_addr; - uint16_t ce_count_lo_chip; - uint16_t ce_count_hi_chip; - - uint32_t eccPadding; -} EccInfo_t; - -typedef struct { - EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM]; -} EccInfoTable_t; - -// These defines are used with the following messages: -// SMC_MSG_TransferTableDram2Smu -// SMC_MSG_TransferTableSmu2Dram -#define TABLE_PPTABLE 0 -#define TABLE_AVFS_PSM_DEBUG 1 -#define TABLE_AVFS_FUSE_OVERRIDE 2 -#define TABLE_PMSTATUSLOG 3 -#define TABLE_SMU_METRICS 4 -#define TABLE_DRIVER_SMU_CONFIG 5 -#define TABLE_I2C_COMMANDS 6 -#define TABLE_ECCINFO 7 -#define TABLE_COUNT 8 - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h b/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h deleted file mode 100644 index 25540cb28208..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h +++ /dev/null @@ -1,222 +0,0 @@ -/* - * Copyright 2020 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __SMU13_DRIVER_IF_YELLOW_CARP_H__ -#define __SMU13_DRIVER_IF_YELLOW_CARP_H__ - -// *** IMPORTANT *** -// SMU TEAM: Always increment the interface version if -// any structure is changed in this file -#define SMU13_DRIVER_IF_VERSION 4 - -typedef struct { - int32_t value; - uint32_t numFractionalBits; -} FloatInIntFormat_t; - -typedef enum { - DSPCLK_DCFCLK = 0, - DSPCLK_DISPCLK, - DSPCLK_PIXCLK, - DSPCLK_PHYCLK, - DSPCLK_COUNT, -} DSPCLK_e; - -typedef struct { - uint16_t Freq; // in MHz - uint16_t Vid; // min voltage in SVI3 VID -} DisplayClockTable_t; - -typedef struct { - uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz) - uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz) - uint16_t MinMclk; - uint16_t MaxMclk; - - uint8_t WmSetting; - uint8_t WmType; // Used for normal pstate change or memory retraining - uint8_t Padding[2]; -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 -#define WM_PSTATE_CHG 0 -#define WM_RETRAINING 1 - -typedef enum { - WM_SOCCLK = 0, - WM_DCFCLK, - WM_COUNT, -} WM_CLOCK_e; - -typedef struct { - // Watermarks - WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; - - uint32_t MmHubPadding[7]; // SMU internal use -} Watermarks_t; - -typedef enum { - CUSTOM_DPM_SETTING_GFXCLK, - CUSTOM_DPM_SETTING_CCLK, - CUSTOM_DPM_SETTING_FCLK_CCX, - CUSTOM_DPM_SETTING_FCLK_GFX, - CUSTOM_DPM_SETTING_FCLK_STALLS, - CUSTOM_DPM_SETTING_LCLK, - CUSTOM_DPM_SETTING_COUNT, -} CUSTOM_DPM_SETTING_e; - -typedef struct { - uint8_t ActiveHystLimit; - uint8_t IdleHystLimit; - uint8_t FPS; - uint8_t MinActiveFreqType; - FloatInIntFormat_t MinActiveFreq; - FloatInIntFormat_t PD_Data_limit; - FloatInIntFormat_t PD_Data_time_constant; - FloatInIntFormat_t PD_Data_error_coeff; - FloatInIntFormat_t PD_Data_error_rate_coeff; -} DpmActivityMonitorCoeffExt_t; - -typedef struct { - DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT]; -} CustomDpmSettings_t; - -#define NUM_DCFCLK_DPM_LEVELS 8 -#define NUM_DISPCLK_DPM_LEVELS 8 -#define NUM_DPPCLK_DPM_LEVELS 8 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_VCN_DPM_LEVELS 8 -#define NUM_SOC_VOLTAGE_LEVELS 8 -#define NUM_DF_PSTATE_LEVELS 4 - -typedef struct { - uint32_t FClk; - uint32_t MemClk; - uint32_t Voltage; - uint8_t WckRatio; - uint8_t Spare[3]; -} DfPstateTable_t; - -//Freq in MHz -//Voltage in milli volts with 2 fractional bits -typedef struct { - uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; - 
uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; - uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; - uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; - uint32_t VClocks[NUM_VCN_DPM_LEVELS]; - uint32_t DClocks[NUM_VCN_DPM_LEVELS]; - uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; - DfPstateTable_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; - - uint8_t NumDcfClkLevelsEnabled; - uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk - uint8_t NumSocClkLevelsEnabled; - uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk - uint8_t NumDfPstatesEnabled; - uint8_t spare[3]; - - uint32_t MinGfxClk; - uint32_t MaxGfxClk; -} DpmClocks_t; - - -// Throttler Status Bitmask -#define THROTTLER_STATUS_BIT_SPL 0 -#define THROTTLER_STATUS_BIT_FPPT 1 -#define THROTTLER_STATUS_BIT_SPPT 2 -#define THROTTLER_STATUS_BIT_SPPT_APU 3 -#define THROTTLER_STATUS_BIT_THM_CORE 4 -#define THROTTLER_STATUS_BIT_THM_GFX 5 -#define THROTTLER_STATUS_BIT_THM_SOC 6 -#define THROTTLER_STATUS_BIT_TDC_VDD 7 -#define THROTTLER_STATUS_BIT_TDC_SOC 8 -#define THROTTLER_STATUS_BIT_PROCHOT_CPU 9 -#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10 -#define THROTTLER_STATUS_BIT_EDC_CPU 11 -#define THROTTLER_STATUS_BIT_EDC_GFX 12 - -typedef struct { - uint16_t GfxclkFrequency; //[MHz] - uint16_t SocclkFrequency; //[MHz] - uint16_t VclkFrequency; //[MHz] - uint16_t DclkFrequency; //[MHz] - uint16_t MemclkFrequency; //[MHz] - uint16_t spare; - - uint16_t GfxActivity; //[centi] - uint16_t UvdActivity; //[centi] - - uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC - uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC - uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC - - //3rd party tools in Windows need this info in the case of APUs - uint16_t CoreFrequency[8]; //[MHz] - uint16_t CorePower[8]; //[mW] - uint16_t CoreTemperature[8]; //[centi-Celsius] - uint16_t L3Frequency; //[MHz] - uint16_t L3Temperature; //[centi-Celsius] - - uint16_t GfxTemperature; //[centi-Celsius] - uint16_t SocTemperature; //[centi-Celsius] - uint16_t ThrottlerStatus; - - uint16_t CurrentSocketPower; //[mW] - uint16_t StapmOpnLimit; //[W] - uint16_t StapmCurrentLimit; //[W] - uint32_t ApuPower; //[mW] - uint32_t dGpuPower; //[mW] - - uint16_t VddTdcValue; //[mA] - uint16_t SocTdcValue; //[mA] - uint16_t VddEdcValue; //[mA] - uint16_t SocEdcValue; //[mA] - - uint16_t InfrastructureCpuMaxFreq; //[MHz] - uint16_t InfrastructureGfxMaxFreq; //[MHz] - - uint16_t SkinTemp; - uint16_t DeviceState; -} SmuMetrics_t; - - -// Workload bits -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 -#define WORKLOAD_PPLIB_VIDEO_BIT 2 -#define WORKLOAD_PPLIB_VR_BIT 3 -#define WORKLOAD_PPLIB_COMPUTE_BIT 4 -#define WORKLOAD_PPLIB_CUSTOM_BIT 5 -#define WORKLOAD_PPLIB_COUNT 6 - -#define TABLE_BIOS_IF 0 // Called by BIOS -#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS -#define TABLE_CUSTOM_DPM 2 // Called by Driver -#define TABLE_SPARE1 3 -#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS -#define TABLE_MOMENTARY_PM 5 // Called by Tools -#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log -#define TABLE_SMU_METRICS 7 // Called by Driver -#define TABLE_INFRASTRUCTURE_LIMITS 8 -#define TABLE_COUNT 9 - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/smu7.h b/drivers/gpu/drm/amd/pm/inc/smu7.h deleted file mode 100644 index e14072d45918..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/smu7.h +++ /dev/null @@ -1,189 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef SMU7_H -#define SMU7_H - -#pragma pack(push, 1) - -#define SMU7_CONTEXT_ID_SMC 1 -#define SMU7_CONTEXT_ID_VBIOS 2 - - -#define SMU7_CONTEXT_ID_SMC 1 -#define SMU7_CONTEXT_ID_VBIOS 2 - -#define SMU7_MAX_LEVELS_VDDC 8 -#define SMU7_MAX_LEVELS_VDDCI 4 -#define SMU7_MAX_LEVELS_MVDD 4 -#define SMU7_MAX_LEVELS_VDDNB 8 - -#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV -#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM -#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels -#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes. -#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD. -#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE. -#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP. -#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU. -#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table. 
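(Aside, not part of the patch: the SMU7_MAX_LEVELS_* values just removed are plain table-size bounds. A minimal sketch of how a host-side mirror of a few of those per-domain level tables could be dimensioned by them is below; the struct and field names are hypothetical, and only the literal constants are taken from the deleted smu7.h.)

/*
 * Hypothetical host-side mirror of a few SMU7 level tables, sized by the
 * literal SMU7_MAX_LEVELS_* bounds from the header removed above. Nothing
 * here exists in the kernel; it only illustrates what the constants bound.
 */
#include <stdint.h>

#define SMU7_MAX_LEVELS_VDDC	8	/* from the deleted smu7.h */
#define SMU7_MAX_LEVELS_UVD	8	/* VCLK/DCLK levels for UVD */
#define SMU7_MAX_LEVELS_VCE	8	/* ECLK levels for VCE */
#define SMU7_MAX_ENTRIES_SMIO	32	/* entries in the SMIO table */

struct smu7_host_mirror {				/* hypothetical */
	uint16_t vddc_mv[SMU7_MAX_LEVELS_VDDC];		/* candidate VDDC levels */
	uint32_t uvd_vclk_10khz[SMU7_MAX_LEVELS_UVD];	/* UVD VCLK per level */
	uint32_t vce_eclk_10khz[SMU7_MAX_LEVELS_VCE];	/* VCE ECLK per level */
	uint32_t smio_pattern[SMU7_MAX_ENTRIES_SMIO];	/* SMIO table entries */
};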
- -#define DPM_NO_LIMIT 0 -#define DPM_NO_UP 1 -#define DPM_GO_DOWN 2 -#define DPM_GO_UP 3 - -#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 -#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 - -#define GPIO_CLAMP_MODE_VRHOT 1 -#define GPIO_CLAMP_MODE_THERM 2 -#define GPIO_CLAMP_MODE_DC 4 - -#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 -#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7< -#include "amd_powerplay.h" -#include "hwmgr.h" - -enum SMU_TABLE { - SMU_UVD_TABLE = 0, - SMU_VCE_TABLE, - SMU_BIF_TABLE, -}; - -enum SMU_TYPE { - SMU_SoftRegisters = 0, - SMU_Discrete_DpmTable, -}; - -enum SMU_MEMBER { - HandshakeDisables = 0, - VoltageChangeTimeout, - AverageGraphicsActivity, - AverageMemoryActivity, - PreVBlankGap, - VBlankTimeout, - UcodeLoadStatus, - UvdBootLevel, - VceBootLevel, - LowSclkInterruptThreshold, - DRAM_LOG_ADDR_H, - DRAM_LOG_ADDR_L, - DRAM_LOG_PHY_ADDR_H, - DRAM_LOG_PHY_ADDR_L, - DRAM_LOG_BUFF_SIZE, -}; - - -enum SMU_MAC_DEFINITION { - SMU_MAX_LEVELS_GRAPHICS = 0, - SMU_MAX_LEVELS_MEMORY, - SMU_MAX_LEVELS_LINK, - SMU_MAX_ENTRIES_SMIO, - SMU_MAX_LEVELS_VDDC, - SMU_MAX_LEVELS_VDDGFX, - SMU_MAX_LEVELS_VDDCI, - SMU_MAX_LEVELS_MVDD, - SMU_UVD_MCLK_HANDSHAKE_DISABLE, -}; - -enum SMU9_TABLE_ID { - PPTABLE = 0, - WMTABLE, - AVFSTABLE, - TOOLSTABLE, - AVFSFUSETABLE -}; - -enum SMU10_TABLE_ID { - SMU10_WMTABLE = 0, - SMU10_CLOCKTABLE, -}; - -extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); - -extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr); - -extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp); - -extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, - uint16_t msg, uint32_t parameter, - uint32_t *resp); - -extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); - -extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); -extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr); -extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr); -extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); -extern int smum_init_smc_table(struct pp_hwmgr *hwmgr); -extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); -extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr); -extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); -extern uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, - uint32_t type, uint32_t member); -extern uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value); - -extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr); - -extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); - -extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting); - -extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); - -extern int smum_stop_smc(struct pp_hwmgr *hwmgr); - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h deleted file mode 100644 index 63631296d751..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h +++ /dev/null @@ -1,420 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef TONGA_PP_SMC_H -#define TONGA_PP_SMC_H - -#pragma pack(push, 1) - -#define PPSMC_SWSTATE_FLAG_DC 0x01 -#define PPSMC_SWSTATE_FLAG_UVD 0x02 -#define PPSMC_SWSTATE_FLAG_VCE 0x04 -#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08 - -#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 -#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 -#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff - -#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 -#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 -#define PPSMC_SYSTEMFLAG_GDDR5 0x04 - -#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 - -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 -#define PPSMC_SYSTEMFLAG_12CHANNEL 0x40 - - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 -#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 - -#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x10 -#define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17 0x20 -#define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17 0x40 - -/* Defines for DPM 2.0 */ -#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 -#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 -#define PPSMC_DPM2FLAGS_OCP 0x04 - -/* Defines for display watermark level */ - -#define PPSMC_DISPLAY_WATERMARK_LOW 0 -#define PPSMC_DISPLAY_WATERMARK_HIGH 1 - -/* In the HW performance level's state flags:*/ -#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 -#define PPSMC_STATEFLAG_POWERBOOST 0x02 -#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 -#define PPSMC_STATEFLAG_POWERSHIFT 0x08 -#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 -#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 -#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 - -/* Fan control algorithm:*/ -#define FDO_MODE_HARDWARE 0 -#define FDO_MODE_PIECE_WISE_LINEAR 1 - -enum FAN_CONTROL { - FAN_CONTROL_FUZZY, - FAN_CONTROL_TABLE -}; - -/* Return codes for driver to SMC communication.*/ - -#define PPSMC_Result_OK ((uint16_t)0x01) -#define PPSMC_Result_NoMore ((uint16_t)0x02) -#define PPSMC_Result_NotNow ((uint16_t)0x03) - -#define PPSMC_Result_Failed ((uint16_t)0xFF) -#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) -#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) - -typedef uint16_t PPSMC_Result; - -#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) - - -#define PPSMC_MSG_Halt ((uint16_t)0x10) -#define PPSMC_MSG_Resume ((uint16_t)0x11) -#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) -#define 
PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) -#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) -#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) -#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) -#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) -#define PPSMC_MSG_LevelUp ((uint16_t)0x18) -#define PPSMC_MSG_LevelDown ((uint16_t)0x19) -#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) -#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) - -#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) -#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) -#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) -#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) -#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) - -#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) -#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) -#define PPSMC_MSG_EnableCac ((uint16_t)0x53) -#define PPSMC_MSG_DisableCac ((uint16_t)0x54) -#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) -#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) -#define PPSMC_CACHistoryStart ((uint16_t)0x57) -#define PPSMC_CACHistoryStop ((uint16_t)0x58) -#define PPSMC_TDPClampingActive ((uint16_t)0x59) -#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) -#define PPSMC_StartFanControl ((uint16_t)0x5B) -#define PPSMC_StopFanControl ((uint16_t)0x5C) -#define PPSMC_NoDisplay ((uint16_t)0x5D) -#define PPSMC_HasDisplay ((uint16_t)0x5E) -#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) -#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) -#define PPSMC_MSG_EnableULV ((uint16_t)0x62) -#define PPSMC_MSG_DisableULV ((uint16_t)0x63) -#define PPSMC_MSG_EnterULV ((uint16_t)0x64) -#define PPSMC_MSG_ExitULV ((uint16_t)0x65) -#define PPSMC_PowerShiftActive ((uint16_t)0x6A) -#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) -#define PPSMC_OCPActive ((uint16_t)0x6C) -#define PPSMC_OCPInactive ((uint16_t)0x6D) -#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) -#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) -#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) -#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) -#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) -#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) -#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) -#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) -#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) -#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) -#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) -#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) -#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) -#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) -#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) -#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) - -#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) -#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) -#define PPSMC_FlushDataCache ((uint16_t)0x80) -#define PPSMC_FlushInstrCache ((uint16_t)0x81) - -#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) -#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) - -#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) - -#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) -#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) -#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) -#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) - -#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) -#define PPSMC_MSG_ChangeNearTDPLimit ((uint16_t)0x90) -#define PPSMC_MSG_ChangeSafePowerLimit ((uint16_t)0x91) - 
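(Aside, not part of the removed file: a minimal sketch of how a caller might interpret a PPSMC_Result using the result codes and the PPSMC_isERROR() test defined earlier in this header. The message-send path is omitted; smc_reply is just a pretend value.)

#include <stdint.h>
#include <stdio.h>

/* Result codes and error test copied from the deleted tonga_ppsmc.h. */
typedef uint16_t PPSMC_Result;
#define PPSMC_Result_OK		((uint16_t)0x01)
#define PPSMC_Result_NotNow	((uint16_t)0x03)
#define PPSMC_Result_Failed	((uint16_t)0xFF)
#define PPSMC_isERROR(x)	((uint16_t)0x80 & (x))

int main(void)
{
	PPSMC_Result smc_reply = PPSMC_Result_Failed;	/* pretend SMC reply */

	if (PPSMC_isERROR(smc_reply))			/* 0xFD-0xFF all set bit 0x80 */
		printf("SMC rejected the message: 0x%02x\n", (unsigned int)smc_reply);
	else if (smc_reply == PPSMC_Result_NotNow)
		printf("SMC busy, retry later\n");
	else
		printf("SMC accepted the message\n");

	return 0;
}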
-#define PPSMC_MSG_DPMStateSweepStart ((uint16_t)0x92) -#define PPSMC_MSG_DPMStateSweepStop ((uint16_t)0x93) - -#define PPSMC_MSG_OVRDDisableSCLKDS ((uint16_t)0x94) -#define PPSMC_MSG_CancelDisableOVRDSCLKDS ((uint16_t)0x95) -#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint16_t)0x96) -#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint16_t)0x97) -#define PPSMC_MSG_GPIO17 ((uint16_t)0x98) - -#define PPSMC_MSG_API_SetSvi2Volt_Vddc ((uint16_t)0x99) -#define PPSMC_MSG_API_SetSvi2Volt_Vddci ((uint16_t)0x9A) -#define PPSMC_MSG_API_SetSvi2Volt_Mvdd ((uint16_t)0x9B) -#define PPSMC_MSG_API_GetSvi2Volt_Vddc ((uint16_t)0x9C) -#define PPSMC_MSG_API_GetSvi2Volt_Vddci ((uint16_t)0x9D) -#define PPSMC_MSG_API_GetSvi2Volt_Mvdd ((uint16_t)0x9E) - -#define PPSMC_MSG_BREAK ((uint16_t)0xF8) - -/* Trinity Specific Messages*/ -#define PPSMC_MSG_Test ((uint16_t) 0x100) -#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101) -#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102) -#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103) -#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) -#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105) -#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106) -#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107) -#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108) -#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109) -#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a) -#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b) -#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e) -#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f) -#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110) -#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111) -#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112) -#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113) -#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114) -#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117) -#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118) -#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119) -#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a) -#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b) -#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c) -#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d) -#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e) -#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f) -#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120) -#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121) -#define PPSMC_MSG_PCIE_PHYPowerDown ((uint16_t) 0x122) -#define PPSMC_MSG_PCIE_PHYPowerUp ((uint16_t) 0x123) -#define PPSMC_MSG_UVD_DPM_Config ((uint16_t) 0x124) -#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122) -#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123) -#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124) -#define PPSMC_MSG_NBDPM_Config ((uint16_t) 0x125) -#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint16_t) 0x126) -#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint16_t) 0x127) -#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128) - -#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129) -#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A) -#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B) -#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C) -#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) -#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) -#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) -#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) -#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) -#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 
0x132) -#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) -#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134) -#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) -#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) -#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) -#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) -#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) -#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) -#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b) -#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c) -#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) -#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e) -#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f) -#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) -#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) -#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142) -#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143) -#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144) -#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) -#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) -#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) -#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) -#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) -#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) -#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b) - -#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c) -#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d) - -#define PPSMC_MSG_DPM_Enable ((uint16_t)0x14e) -#define PPSMC_MSG_DPM_Disable ((uint16_t)0x14f) -#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t)0x150) -#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t)0x151) -#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t)0x152) -#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t)0x153) -#define PPSMC_MSG_UVDDPM_Enable ((uint16_t)0x154) -#define PPSMC_MSG_UVDDPM_Disable ((uint16_t)0x155) -#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t)0x156) -#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t)0x157) -#define PPSMC_MSG_ACPDPM_Enable ((uint16_t)0x158) -#define PPSMC_MSG_ACPDPM_Disable ((uint16_t)0x159) -#define PPSMC_MSG_VCEDPM_Enable ((uint16_t)0x15a) -#define PPSMC_MSG_VCEDPM_Disable ((uint16_t)0x15b) -#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t)0x15c) - -#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) -#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) -#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) -#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160) -#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161) -#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) -#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163) -#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164) -#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165) -#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166) -#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) -#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168) -#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) -#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) -#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b) -#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t)0x16c) -#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t)0x16d) -#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t)0x16e) -#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t)0x16f) -#define PPSMC_MSG_PmStatusLogStart ((uint16_t)0x170) -#define PPSMC_MSG_PmStatusLogSample ((uint16_t)0x171) -#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172) -#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 
0x173) -#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174) -#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175) -#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176) -#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177) -#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178) -#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179) -#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a) -#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b) -#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c) -#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d) -#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e) -#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f) -#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180) -#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181) -#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182) -#define PPSMC_MSG_UVD_HANDSHAKE_OFF ((uint16_t) 0x183) -#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184) -#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) -#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) -#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) -#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) -#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) -#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) -#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) -#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) -#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D) -#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E) -#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) -#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) -#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) -#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192) -#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193) -#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194) -#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195) -#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207) -#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196) -#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208) -#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197) -#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198) -#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199) -#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) - -#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B) -#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) -#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) -#define PPSMC_MSG_Enable_PCC ((uint16_t) 0x19E) -#define PPSMC_MSG_Disable_PCC ((uint16_t) 0x19F) - -#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) -#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) -#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202) -#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203) -#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204) -#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) -#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206) -#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209) -#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A) - -#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240) -#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241) -#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242) -#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243) -#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244) -#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245) -#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246) - -#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 
0x250) -#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) -#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) -#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) -#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) -#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255) -#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256) -#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257) -#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258) -#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259) -#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A) -#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B) -#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C) -#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D) -#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260) -#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261) -#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262) -#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263) -#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264) -#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265) -#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266) -#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267) -#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268) -#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269) - -typedef uint16_t PPSMC_Msg; - -/* If the SMC firmware has an event status soft register this is what the individual bits mean.*/ -#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 -#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 -#define PPSMC_EVENT_STATUS_DC 0x00000004 -#define PPSMC_EVENT_STATUS_GPIO17 0x00000008 - - -#pragma pack(pop) -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h deleted file mode 100644 index 715b5a168831..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h +++ /dev/null @@ -1,144 +0,0 @@ -/* - * Copyright 2016 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef PP_SMC_H -#define PP_SMC_H - -#pragma pack(push, 1) - -#define SMU_UCODE_VERSION 0x001c0800 - -/* SMU Response Codes: */ -#define PPSMC_Result_OK 0x1 -#define PPSMC_Result_Failed 0xFF -#define PPSMC_Result_UnknownCmd 0xFE -#define PPSMC_Result_CmdRejectedPrereq 0xFD -#define PPSMC_Result_CmdRejectedBusy 0xFC - -typedef uint16_t PPSMC_Result; - -/* Message Definitions */ -#define PPSMC_MSG_TestMessage 0x1 -#define PPSMC_MSG_GetSmuVersion 0x2 -#define PPSMC_MSG_GetDriverIfVersion 0x3 -#define PPSMC_MSG_EnableSmuFeatures 0x4 -#define PPSMC_MSG_DisableSmuFeatures 0x5 -#define PPSMC_MSG_GetEnabledSmuFeatures 0x6 -#define PPSMC_MSG_SetWorkloadMask 0x7 -#define PPSMC_MSG_SetPptLimit 0x8 -#define PPSMC_MSG_SetDriverDramAddrHigh 0x9 -#define PPSMC_MSG_SetDriverDramAddrLow 0xA -#define PPSMC_MSG_SetToolsDramAddrHigh 0xB -#define PPSMC_MSG_SetToolsDramAddrLow 0xC -#define PPSMC_MSG_TransferTableSmu2Dram 0xD -#define PPSMC_MSG_TransferTableDram2Smu 0xE -#define PPSMC_MSG_UseDefaultPPTable 0xF -#define PPSMC_MSG_UseBackupPPTable 0x10 -#define PPSMC_MSG_RunBtc 0x11 -#define PPSMC_MSG_RequestI2CBus 0x12 -#define PPSMC_MSG_ReleaseI2CBus 0x13 -#define PPSMC_MSG_ConfigureTelemetry 0x14 -#define PPSMC_MSG_SetUlvIpMask 0x15 -#define PPSMC_MSG_SetSocVidOffset 0x16 -#define PPSMC_MSG_SetMemVidOffset 0x17 -#define PPSMC_MSG_GetSocVidOffset 0x18 -#define PPSMC_MSG_GetMemVidOffset 0x19 -#define PPSMC_MSG_SetFloorSocVoltage 0x1A -#define PPSMC_MSG_SoftReset 0x1B -#define PPSMC_MSG_StartBacoMonitor 0x1C -#define PPSMC_MSG_CancelBacoMonitor 0x1D -#define PPSMC_MSG_EnterBaco 0x1E -#define PPSMC_MSG_AllowLowGfxclkInterrupt 0x1F -#define PPSMC_MSG_SetLowGfxclkInterruptThreshold 0x20 -#define PPSMC_MSG_SetSoftMinGfxclkByIndex 0x21 -#define PPSMC_MSG_SetSoftMaxGfxclkByIndex 0x22 -#define PPSMC_MSG_GetCurrentGfxclkIndex 0x23 -#define PPSMC_MSG_SetSoftMinUclkByIndex 0x24 -#define PPSMC_MSG_SetSoftMaxUclkByIndex 0x25 -#define PPSMC_MSG_GetCurrentUclkIndex 0x26 -#define PPSMC_MSG_SetSoftMinUvdByIndex 0x27 -#define PPSMC_MSG_SetSoftMaxUvdByIndex 0x28 -#define PPSMC_MSG_GetCurrentUvdIndex 0x29 -#define PPSMC_MSG_SetSoftMinVceByIndex 0x2A -#define PPSMC_MSG_SetSoftMaxVceByIndex 0x2B -#define PPSMC_MSG_SetHardMinVceByIndex 0x2C -#define PPSMC_MSG_GetCurrentVceIndex 0x2D -#define PPSMC_MSG_SetSoftMinSocclkByIndex 0x2E -#define PPSMC_MSG_SetHardMinSocclkByIndex 0x2F -#define PPSMC_MSG_SetSoftMaxSocclkByIndex 0x30 -#define PPSMC_MSG_GetCurrentSocclkIndex 0x31 -#define PPSMC_MSG_SetMinLinkDpmByIndex 0x32 -#define PPSMC_MSG_GetCurrentLinkIndex 0x33 -#define PPSMC_MSG_GetAverageGfxclkFrequency 0x34 -#define PPSMC_MSG_GetAverageSocclkFrequency 0x35 -#define PPSMC_MSG_GetAverageUclkFrequency 0x36 -#define PPSMC_MSG_GetAverageGfxActivity 0x37 -#define PPSMC_MSG_GetTemperatureEdge 0x38 -#define PPSMC_MSG_GetTemperatureHotspot 0x39 -#define PPSMC_MSG_GetTemperatureHBM 0x3A -#define PPSMC_MSG_GetTemperatureVrSoc 0x3B -#define PPSMC_MSG_GetTemperatureVrMem 0x3C -#define PPSMC_MSG_GetTemperatureLiquid 0x3D -#define PPSMC_MSG_GetTemperaturePlx 0x3E -#define PPSMC_MSG_OverDriveSetPercentage 0x3F -#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x40 -#define PPSMC_MSG_SwitchToAC 0x41 -#define PPSMC_MSG_SetUclkFastSwitch 0x42 -#define PPSMC_MSG_SetUclkDownHyst 0x43 -#define PPSMC_MSG_RemoveDCClamp 0x44 -#define PPSMC_MSG_GfxDeviceDriverReset 0x45 -#define PPSMC_MSG_GetCurrentRpm 0x46 -#define PPSMC_MSG_SetVideoFps 0x47 -#define PPSMC_MSG_SetCustomGfxDpmParameters 0x48 -#define PPSMC_MSG_SetTjMax 0x49 -#define 
PPSMC_MSG_SetFanTemperatureTarget 0x4A -#define PPSMC_MSG_PrepareMp1ForUnload 0x4B -#define PPSMC_MSG_RequestDisplayClockByFreq 0x4C -#define PPSMC_MSG_GetClockFreqMHz 0x4D -#define PPSMC_MSG_DramLogSetDramAddrHigh 0x4E -#define PPSMC_MSG_DramLogSetDramAddrLow 0x4F -#define PPSMC_MSG_DramLogSetDramSize 0x50 -#define PPSMC_MSG_SetFanMaxRpm 0x51 -#define PPSMC_MSG_SetFanMinPwm 0x52 -#define PPSMC_MSG_ConfigureGfxDidt 0x55 -#define PPSMC_MSG_NumOfDisplays 0x56 -#define PPSMC_MSG_ReadSerialNumTop32 0x58 -#define PPSMC_MSG_ReadSerialNumBottom32 0x59 -#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x5A -#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x5B -#define PPSMC_MSG_RunAcgBtc 0x5C -#define PPSMC_MSG_RunAcgInClosedLoop 0x5D -#define PPSMC_MSG_RunAcgInOpenLoop 0x5E -#define PPSMC_MSG_InitializeAcg 0x5F -#define PPSMC_MSG_GetCurrPkgPwr 0x61 -#define PPSMC_MSG_GetAverageGfxclkActualFrequency 0x63 -#define PPSMC_MSG_SetPccThrottleLevel 0x67 -#define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68 -#define PPSMC_Message_Count 0x69 - - -typedef int PPSMC_Msg; - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h deleted file mode 100644 index b6ffd08784e7..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h +++ /dev/null @@ -1,767 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef VEGA12_SMU9_DRIVER_IF_H -#define VEGA12_SMU9_DRIVER_IF_H - -/**** IMPORTANT *** - * SMU TEAM: Always increment the interface version if - * any structure is changed in this file - */ -#define SMU9_DRIVER_IF_VERSION 0x10 - -#define PPTABLE_V12_SMU_VERSION 1 - -#define NUM_GFXCLK_DPM_LEVELS 16 -#define NUM_VCLK_DPM_LEVELS 8 -#define NUM_DCLK_DPM_LEVELS 8 -#define NUM_ECLK_DPM_LEVELS 8 -#define NUM_MP0CLK_DPM_LEVELS 2 -#define NUM_UCLK_DPM_LEVELS 4 -#define NUM_SOCCLK_DPM_LEVELS 8 -#define NUM_DCEFCLK_DPM_LEVELS 8 -#define NUM_DISPCLK_DPM_LEVELS 8 -#define NUM_PIXCLK_DPM_LEVELS 8 -#define NUM_PHYCLK_DPM_LEVELS 8 -#define NUM_LINK_LEVELS 2 - -#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) -#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) -#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) -#define MAX_ECLK_DPM_LEVEL (NUM_ECLK_DPM_LEVELS - 1) -#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) -#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) -#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) -#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) -#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) -#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) -#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) -#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) - - -#define PPSMC_GeminiModeNone 0 -#define PPSMC_GeminiModeMaster 1 -#define PPSMC_GeminiModeSlave 2 - - -#define FEATURE_DPM_PREFETCHER_BIT 0 -#define FEATURE_DPM_GFXCLK_BIT 1 -#define FEATURE_DPM_UCLK_BIT 2 -#define FEATURE_DPM_SOCCLK_BIT 3 -#define FEATURE_DPM_UVD_BIT 4 -#define FEATURE_DPM_VCE_BIT 5 -#define FEATURE_ULV_BIT 6 -#define FEATURE_DPM_MP0CLK_BIT 7 -#define FEATURE_DPM_LINK_BIT 8 -#define FEATURE_DPM_DCEFCLK_BIT 9 -#define FEATURE_DS_GFXCLK_BIT 10 -#define FEATURE_DS_SOCCLK_BIT 11 -#define FEATURE_DS_LCLK_BIT 12 -#define FEATURE_PPT_BIT 13 -#define FEATURE_TDC_BIT 14 -#define FEATURE_THERMAL_BIT 15 -#define FEATURE_GFX_PER_CU_CG_BIT 16 -#define FEATURE_RM_BIT 17 -#define FEATURE_DS_DCEFCLK_BIT 18 -#define FEATURE_ACDC_BIT 19 -#define FEATURE_VR0HOT_BIT 20 -#define FEATURE_VR1HOT_BIT 21 -#define FEATURE_FW_CTF_BIT 22 -#define FEATURE_LED_DISPLAY_BIT 23 -#define FEATURE_FAN_CONTROL_BIT 24 -#define FEATURE_GFX_EDC_BIT 25 -#define FEATURE_GFXOFF_BIT 26 -#define FEATURE_CG_BIT 27 -#define FEATURE_ACG_BIT 28 -#define FEATURE_SPARE_29_BIT 29 -#define FEATURE_SPARE_30_BIT 30 -#define FEATURE_SPARE_31_BIT 31 - -#define NUM_FEATURES 32 - -#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT ) -#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT ) -#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT ) -#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT ) -#define FEATURE_DPM_UVD_MASK (1 << FEATURE_DPM_UVD_BIT ) -#define FEATURE_DPM_VCE_MASK (1 << FEATURE_DPM_VCE_BIT ) -#define FEATURE_ULV_MASK (1 << FEATURE_ULV_BIT ) -#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT ) -#define FEATURE_DPM_LINK_MASK (1 << FEATURE_DPM_LINK_BIT ) -#define FEATURE_DPM_DCEFCLK_MASK (1 << FEATURE_DPM_DCEFCLK_BIT ) -#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT ) -#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) -#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) -#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) -#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) -#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) -#define FEATURE_GFX_PER_CU_CG_MASK (1 << FEATURE_GFX_PER_CU_CG_BIT ) -#define FEATURE_RM_MASK (1 
<< FEATURE_RM_BIT ) -#define FEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT ) -#define FEATURE_ACDC_MASK (1 << FEATURE_ACDC_BIT ) -#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) -#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) -#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) -#define FEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT ) -#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) -#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT ) -#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT ) -#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT ) -#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT) -#define FEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT ) -#define FEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT ) -#define FEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT ) - - -#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 -#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_SOCCLK 0x00000004 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_UCLK 0x00000008 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000010 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_UCLK 0x00000020 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000040 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_UCLK 0x00000080 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_SOCCLK 0x00000100 -#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_UCLK 0x00000200 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_SOCCLK 0x00000400 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_UCLK 0x00000800 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00001000 -#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00002000 -#define DPM_OVERRIDE_ENABLE_GFXOFF_GFXCLK_SWITCH 0x00004000 -#define DPM_OVERRIDE_ENABLE_GFXOFF_SOCCLK_SWITCH 0x00008000 -#define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000 - - -#define VR_MAPPING_VR_SELECT_MASK 0x01 -#define VR_MAPPING_VR_SELECT_SHIFT 0x00 - -#define VR_MAPPING_PLANE_SELECT_MASK 0x02 -#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 - - -#define PSI_SEL_VR0_PLANE0_PSI0 0x01 -#define PSI_SEL_VR0_PLANE0_PSI1 0x02 -#define PSI_SEL_VR0_PLANE1_PSI0 0x04 -#define PSI_SEL_VR0_PLANE1_PSI1 0x08 -#define PSI_SEL_VR1_PLANE0_PSI0 0x10 -#define PSI_SEL_VR1_PLANE0_PSI1 0x20 -#define PSI_SEL_VR1_PLANE1_PSI0 0x40 -#define PSI_SEL_VR1_PLANE1_PSI1 0x80 - - -#define THROTTLER_STATUS_PADDING_BIT 0 -#define THROTTLER_STATUS_TEMP_EDGE_BIT 1 -#define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2 -#define THROTTLER_STATUS_TEMP_HBM_BIT 3 -#define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4 -#define THROTTLER_STATUS_TEMP_VR_MEM_BIT 5 -#define THROTTLER_STATUS_TEMP_LIQUID_BIT 6 -#define THROTTLER_STATUS_TEMP_PLX_BIT 7 -#define THROTTLER_STATUS_TEMP_SKIN_BIT 8 -#define THROTTLER_STATUS_TDC_GFX_BIT 9 -#define THROTTLER_STATUS_TDC_SOC_BIT 10 -#define THROTTLER_STATUS_PPT_BIT 11 -#define THROTTLER_STATUS_FIT_BIT 12 -#define THROTTLER_STATUS_PPM_BIT 13 - - -#define TABLE_TRANSFER_OK 0x0 -#define TABLE_TRANSFER_FAILED 0xFF - - -#define WORKLOAD_DEFAULT_BIT 0 -#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 -#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 -#define WORKLOAD_PPLIB_VIDEO_BIT 3 -#define WORKLOAD_PPLIB_VR_BIT 4 -#define WORKLOAD_PPLIB_COMPUTE_BIT 5 -#define WORKLOAD_PPLIB_CUSTOM_BIT 6 -#define WORKLOAD_PPLIB_COUNT 7 - -typedef struct { - uint32_t a; - uint32_t b; - uint32_t c; -} QuadraticInt_t; - -typedef struct { - uint32_t m; - uint32_t b; -} LinearInt_t; - -typedef struct { - uint32_t a; - uint32_t b; - uint32_t c; -} DroopInt_t; - -typedef enum { - PPCLK_GFXCLK, - PPCLK_VCLK, - 
PPCLK_DCLK, - PPCLK_ECLK, - PPCLK_SOCCLK, - PPCLK_UCLK, - PPCLK_DCEFCLK, - PPCLK_DISPCLK, - PPCLK_PIXCLK, - PPCLK_PHYCLK, - PPCLK_COUNT, -} PPCLK_e; - -enum { - VOLTAGE_MODE_AVFS, - VOLTAGE_MODE_AVFS_SS, - VOLTAGE_MODE_SS, - VOLTAGE_MODE_COUNT, -}; - -typedef struct { - uint8_t VoltageMode; - uint8_t SnapToDiscrete; - uint8_t NumDiscreteLevels; - uint8_t padding; - LinearInt_t ConversionToAvfsClk; - QuadraticInt_t SsCurve; -} DpmDescriptor_t; - -typedef struct { - uint32_t Version; - - - uint32_t FeaturesToRun[2]; - - - uint16_t SocketPowerLimitAc0; - uint16_t SocketPowerLimitAc0Tau; - uint16_t SocketPowerLimitAc1; - uint16_t SocketPowerLimitAc1Tau; - uint16_t SocketPowerLimitAc2; - uint16_t SocketPowerLimitAc2Tau; - uint16_t SocketPowerLimitAc3; - uint16_t SocketPowerLimitAc3Tau; - uint16_t SocketPowerLimitDc; - uint16_t SocketPowerLimitDcTau; - uint16_t TdcLimitSoc; - uint16_t TdcLimitSocTau; - uint16_t TdcLimitGfx; - uint16_t TdcLimitGfxTau; - - uint16_t TedgeLimit; - uint16_t ThotspotLimit; - uint16_t ThbmLimit; - uint16_t Tvr_gfxLimit; - uint16_t Tvr_memLimit; - uint16_t Tliquid1Limit; - uint16_t Tliquid2Limit; - uint16_t TplxLimit; - uint32_t FitLimit; - - uint16_t PpmPowerLimit; - uint16_t PpmTemperatureThreshold; - - uint8_t MemoryOnPackage; - uint8_t padding8_limits[3]; - - - uint16_t UlvVoltageOffsetSoc; - uint16_t UlvVoltageOffsetGfx; - - uint8_t UlvSmnclkDid; - uint8_t UlvMp1clkDid; - uint8_t UlvGfxclkBypass; - uint8_t Padding234; - - - uint16_t MinVoltageGfx; - uint16_t MinVoltageSoc; - uint16_t MaxVoltageGfx; - uint16_t MaxVoltageSoc; - - uint16_t LoadLineResistance; - uint16_t LoadLine_padding; - - - DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; - - uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; - uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; - uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; - uint16_t FreqTableEclk [NUM_ECLK_DPM_LEVELS ]; - uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; - uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; - uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; - uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; - uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; - uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; - - uint16_t DcModeMaxFreq [PPCLK_COUNT ]; - - - uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; - uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; - - - uint16_t GfxclkFidle; - uint16_t GfxclkSlewRate; - uint16_t CksEnableFreq; - uint16_t Padding789; - QuadraticInt_t CksVoltageOffset; - uint16_t AcgThresholdFreqHigh; - uint16_t AcgThresholdFreqLow; - uint16_t GfxclkDsMaxFreq; - uint8_t Padding456[2]; - - - uint8_t LowestUclkReservedForUlv; - uint8_t Padding8_Uclk[3]; - - - uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; - uint8_t PcieLaneCount[NUM_LINK_LEVELS]; - uint16_t LclkFreq[NUM_LINK_LEVELS]; - - - uint16_t EnableTdpm; - uint16_t TdpmHighHystTemperature; - uint16_t TdpmLowHystTemperature; - uint16_t GfxclkFreqHighTempLimit; - - - uint16_t FanStopTemp; - uint16_t FanStartTemp; - - uint16_t FanGainEdge; - uint16_t FanGainHotspot; - uint16_t FanGainLiquid; - uint16_t FanGainVrVddc; - uint16_t FanGainVrMvdd; - uint16_t FanGainPlx; - uint16_t FanGainHbm; - uint16_t FanPwmMin; - uint16_t FanAcousticLimitRpm; - uint16_t FanThrottlingRpm; - uint16_t FanMaximumRpm; - uint16_t FanTargetTemperature; - uint16_t FanTargetGfxclk; - uint8_t FanZeroRpmEnable; - uint8_t FanTachEdgePerRev; - - - - int16_t FuzzyFan_ErrorSetDelta; - int16_t FuzzyFan_ErrorRateSetDelta; - int16_t FuzzyFan_PwmSetDelta; - uint16_t FuzzyFan_Reserved; - - - - - uint8_t 
OverrideAvfsGb; - uint8_t Padding8_Avfs[3]; - - QuadraticInt_t qAvfsGb; - DroopInt_t dBtcGbGfxCksOn; - DroopInt_t dBtcGbGfxCksOff; - DroopInt_t dBtcGbGfxAcg; - DroopInt_t dBtcGbSoc; - LinearInt_t qAgingGbGfx; - LinearInt_t qAgingGbSoc; - - QuadraticInt_t qStaticVoltageOffsetGfx; - QuadraticInt_t qStaticVoltageOffsetSoc; - - uint16_t DcTolGfx; - uint16_t DcTolSoc; - - uint8_t DcBtcGfxEnabled; - uint8_t DcBtcSocEnabled; - uint8_t Padding8_GfxBtc[2]; - - uint16_t DcBtcGfxMin; - uint16_t DcBtcGfxMax; - - uint16_t DcBtcSocMin; - uint16_t DcBtcSocMax; - - - - uint32_t DebugOverrides; - QuadraticInt_t ReservedEquation0; - QuadraticInt_t ReservedEquation1; - QuadraticInt_t ReservedEquation2; - QuadraticInt_t ReservedEquation3; - - uint16_t MinVoltageUlvGfx; - uint16_t MinVoltageUlvSoc; - - uint32_t Reserved[14]; - - - - uint8_t Liquid1_I2C_address; - uint8_t Liquid2_I2C_address; - uint8_t Vr_I2C_address; - uint8_t Plx_I2C_address; - - uint8_t Liquid_I2C_LineSCL; - uint8_t Liquid_I2C_LineSDA; - uint8_t Vr_I2C_LineSCL; - uint8_t Vr_I2C_LineSDA; - - uint8_t Plx_I2C_LineSCL; - uint8_t Plx_I2C_LineSDA; - uint8_t VrSensorPresent; - uint8_t LiquidSensorPresent; - - uint16_t MaxVoltageStepGfx; - uint16_t MaxVoltageStepSoc; - - uint8_t VddGfxVrMapping; - uint8_t VddSocVrMapping; - uint8_t VddMem0VrMapping; - uint8_t VddMem1VrMapping; - - uint8_t GfxUlvPhaseSheddingMask; - uint8_t SocUlvPhaseSheddingMask; - uint8_t ExternalSensorPresent; - uint8_t Padding8_V; - - - uint16_t GfxMaxCurrent; - int8_t GfxOffset; - uint8_t Padding_TelemetryGfx; - - uint16_t SocMaxCurrent; - int8_t SocOffset; - uint8_t Padding_TelemetrySoc; - - uint16_t Mem0MaxCurrent; - int8_t Mem0Offset; - uint8_t Padding_TelemetryMem0; - - uint16_t Mem1MaxCurrent; - int8_t Mem1Offset; - uint8_t Padding_TelemetryMem1; - - - uint8_t AcDcGpio; - uint8_t AcDcPolarity; - uint8_t VR0HotGpio; - uint8_t VR0HotPolarity; - - uint8_t VR1HotGpio; - uint8_t VR1HotPolarity; - uint8_t Padding1; - uint8_t Padding2; - - - - uint8_t LedPin0; - uint8_t LedPin1; - uint8_t LedPin2; - uint8_t padding8_4; - - - uint8_t PllGfxclkSpreadEnabled; - uint8_t PllGfxclkSpreadPercent; - uint16_t PllGfxclkSpreadFreq; - - uint8_t UclkSpreadEnabled; - uint8_t UclkSpreadPercent; - uint16_t UclkSpreadFreq; - - uint8_t SocclkSpreadEnabled; - uint8_t SocclkSpreadPercent; - uint16_t SocclkSpreadFreq; - - uint8_t AcgGfxclkSpreadEnabled; - uint8_t AcgGfxclkSpreadPercent; - uint16_t AcgGfxclkSpreadFreq; - - uint8_t Vr2_I2C_address; - uint8_t padding_vr2[3]; - - uint32_t BoardReserved[9]; - - - uint32_t MmHubPadding[7]; - -} PPTable_t; - -typedef struct { - - uint16_t GfxclkAverageLpfTau; - uint16_t SocclkAverageLpfTau; - uint16_t UclkAverageLpfTau; - uint16_t GfxActivityLpfTau; - uint16_t UclkActivityLpfTau; - - - uint32_t MmHubPadding[7]; -} DriverSmuConfig_t; - -typedef struct { - - uint16_t GfxclkFmin; - uint16_t GfxclkFmax; - uint16_t GfxclkFreq1; - uint16_t GfxclkOffsetVolt1; - uint16_t GfxclkFreq2; - uint16_t GfxclkOffsetVolt2; - uint16_t GfxclkFreq3; - uint16_t GfxclkOffsetVolt3; - uint16_t UclkFmax; - int16_t OverDrivePct; - uint16_t FanMaximumRpm; - uint16_t FanMinimumPwm; - uint16_t FanTargetTemperature; - uint16_t MaxOpTemp; - -} OverDriveTable_t; - -typedef struct { - uint16_t CurrClock[PPCLK_COUNT]; - uint16_t AverageGfxclkFrequency; - uint16_t AverageSocclkFrequency; - uint16_t AverageUclkFrequency ; - uint16_t AverageGfxActivity ; - uint16_t AverageUclkActivity ; - uint8_t CurrSocVoltageOffset ; - uint8_t CurrGfxVoltageOffset ; - uint8_t CurrMemVidOffset ; - uint8_t 
Padding8 ; - uint16_t CurrSocketPower ; - uint16_t TemperatureEdge ; - uint16_t TemperatureHotspot ; - uint16_t TemperatureHBM ; - uint16_t TemperatureVrGfx ; - uint16_t TemperatureVrMem ; - uint16_t TemperatureLiquid ; - uint16_t TemperaturePlx ; - uint32_t ThrottlerStatus ; - - uint8_t LinkDpmLevel; - uint8_t Padding[3]; - - - uint32_t MmHubPadding[7]; -} SmuMetrics_t; - -typedef struct { - uint16_t MinClock; - uint16_t MaxClock; - uint16_t MinUclk; - uint16_t MaxUclk; - - uint8_t WmSetting; - uint8_t Padding[3]; -} WatermarkRowGeneric_t; - -#define NUM_WM_RANGES 4 - -typedef enum { - WM_SOCCLK = 0, - WM_DCEFCLK, - WM_COUNT_PP, -} WM_CLOCK_e; - -typedef struct { - - WatermarkRowGeneric_t WatermarkRow[WM_COUNT_PP][NUM_WM_RANGES]; - - uint32_t MmHubPadding[7]; -} Watermarks_t; - -typedef struct { - uint16_t avgPsmCount[30]; - uint16_t minPsmCount[30]; - float avgPsmVoltage[30]; - float minPsmVoltage[30]; - - uint32_t MmHubPadding[7]; -} AvfsDebugTable_t; - -typedef struct { - uint8_t AvfsEn; - uint8_t AvfsVersion; - uint8_t OverrideVFT; - uint8_t OverrideAvfsGb; - - uint8_t OverrideTemperatures; - uint8_t OverrideVInversion; - uint8_t OverrideP2V; - uint8_t OverrideP2VCharzFreq; - - int32_t VFT0_m1; - int32_t VFT0_m2; - int32_t VFT0_b; - - int32_t VFT1_m1; - int32_t VFT1_m2; - int32_t VFT1_b; - - int32_t VFT2_m1; - int32_t VFT2_m2; - int32_t VFT2_b; - - int32_t AvfsGb0_m1; - int32_t AvfsGb0_m2; - int32_t AvfsGb0_b; - - int32_t AcBtcGb_m1; - int32_t AcBtcGb_m2; - int32_t AcBtcGb_b; - - uint32_t AvfsTempCold; - uint32_t AvfsTempMid; - uint32_t AvfsTempHot; - - uint32_t GfxVInversion; - uint32_t SocVInversion; - - int32_t P2V_m1; - int32_t P2V_m2; - int32_t P2V_b; - - uint32_t P2VCharzFreq; - - uint32_t EnabledAvfsModules; - - uint32_t MmHubPadding[7]; -} AvfsFuseOverride_t; - -typedef struct { - - uint8_t Gfx_ActiveHystLimit; - uint8_t Gfx_IdleHystLimit; - uint8_t Gfx_FPS; - uint8_t Gfx_MinActiveFreqType; - uint8_t Gfx_BoosterFreqType; - uint8_t Gfx_UseRlcBusy; - uint16_t Gfx_MinActiveFreq; - uint16_t Gfx_BoosterFreq; - uint16_t Gfx_PD_Data_time_constant; - uint32_t Gfx_PD_Data_limit_a; - uint32_t Gfx_PD_Data_limit_b; - uint32_t Gfx_PD_Data_limit_c; - uint32_t Gfx_PD_Data_error_coeff; - uint32_t Gfx_PD_Data_error_rate_coeff; - - uint8_t Soc_ActiveHystLimit; - uint8_t Soc_IdleHystLimit; - uint8_t Soc_FPS; - uint8_t Soc_MinActiveFreqType; - uint8_t Soc_BoosterFreqType; - uint8_t Soc_UseRlcBusy; - uint16_t Soc_MinActiveFreq; - uint16_t Soc_BoosterFreq; - uint16_t Soc_PD_Data_time_constant; - uint32_t Soc_PD_Data_limit_a; - uint32_t Soc_PD_Data_limit_b; - uint32_t Soc_PD_Data_limit_c; - uint32_t Soc_PD_Data_error_coeff; - uint32_t Soc_PD_Data_error_rate_coeff; - - uint8_t Mem_ActiveHystLimit; - uint8_t Mem_IdleHystLimit; - uint8_t Mem_FPS; - uint8_t Mem_MinActiveFreqType; - uint8_t Mem_BoosterFreqType; - uint8_t Mem_UseRlcBusy; - uint16_t Mem_MinActiveFreq; - uint16_t Mem_BoosterFreq; - uint16_t Mem_PD_Data_time_constant; - uint32_t Mem_PD_Data_limit_a; - uint32_t Mem_PD_Data_limit_b; - uint32_t Mem_PD_Data_limit_c; - uint32_t Mem_PD_Data_error_coeff; - uint32_t Mem_PD_Data_error_rate_coeff; - -} DpmActivityMonitorCoeffInt_t; - - - - -#define TABLE_PPTABLE 0 -#define TABLE_WATERMARKS 1 -#define TABLE_AVFS 2 -#define TABLE_AVFS_PSM_DEBUG 3 -#define TABLE_AVFS_FUSE_OVERRIDE 4 -#define TABLE_PMSTATUSLOG 5 -#define TABLE_SMU_METRICS 6 -#define TABLE_DRIVER_SMU_CONFIG 7 -#define TABLE_ACTIVITY_MONITOR_COEFF 8 -#define TABLE_OVERDRIVE 9 -#define TABLE_COUNT 10 - - -#define UCLK_SWITCH_SLOW 0 -#define 
UCLK_SWITCH_FAST 1 - - -#define SQ_Enable_MASK 0x1 -#define SQ_IR_MASK 0x2 -#define SQ_PCC_MASK 0x4 -#define SQ_EDC_MASK 0x8 - -#define TCP_Enable_MASK 0x100 -#define TCP_IR_MASK 0x200 -#define TCP_PCC_MASK 0x400 -#define TCP_EDC_MASK 0x800 - -#define TD_Enable_MASK 0x10000 -#define TD_IR_MASK 0x20000 -#define TD_PCC_MASK 0x40000 -#define TD_EDC_MASK 0x80000 - -#define DB_Enable_MASK 0x1000000 -#define DB_IR_MASK 0x2000000 -#define DB_PCC_MASK 0x4000000 -#define DB_EDC_MASK 0x8000000 - -#define SQ_Enable_SHIFT 0 -#define SQ_IR_SHIFT 1 -#define SQ_PCC_SHIFT 2 -#define SQ_EDC_SHIFT 3 - -#define TCP_Enable_SHIFT 8 -#define TCP_IR_SHIFT 9 -#define TCP_PCC_SHIFT 10 -#define TCP_EDC_SHIFT 11 - -#define TD_Enable_SHIFT 16 -#define TD_IR_SHIFT 17 -#define TD_PCC_SHIFT 18 -#define TD_EDC_SHIFT 19 - -#define DB_Enable_SHIFT 24 -#define DB_IR_SHIFT 25 -#define DB_PCC_SHIFT 26 -#define DB_EDC_SHIFT 27 - -#define REMOVE_FMAX_MARGIN_BIT 0x0 -#define REMOVE_DCTOL_MARGIN_BIT 0x1 -#define REMOVE_PLATFORM_MARGIN_BIT 0x2 - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h deleted file mode 100644 index f985c78d746a..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h +++ /dev/null @@ -1,123 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef VEGA12_PP_SMC_H -#define VEGA12_PP_SMC_H - -#pragma pack(push, 1) - -#define SMU_UCODE_VERSION 0x00270a00 - -/* SMU Response Codes: */ -#define PPSMC_Result_OK 0x1 -#define PPSMC_Result_Failed 0xFF -#define PPSMC_Result_UnknownCmd 0xFE -#define PPSMC_Result_CmdRejectedPrereq 0xFD -#define PPSMC_Result_CmdRejectedBusy 0xFC - -#define PPSMC_MSG_TestMessage 0x1 -#define PPSMC_MSG_GetSmuVersion 0x2 -#define PPSMC_MSG_GetDriverIfVersion 0x3 -#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 -#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 -#define PPSMC_MSG_EnableAllSmuFeatures 0x6 -#define PPSMC_MSG_DisableAllSmuFeatures 0x7 -#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 -#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 -#define PPSMC_MSG_DisableSmuFeaturesLow 0xA -#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB -#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC -#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD -#define PPSMC_MSG_SetWorkloadMask 0xE -#define PPSMC_MSG_SetPptLimit 0xF -#define PPSMC_MSG_SetDriverDramAddrHigh 0x10 -#define PPSMC_MSG_SetDriverDramAddrLow 0x11 -#define PPSMC_MSG_SetToolsDramAddrHigh 0x12 -#define PPSMC_MSG_SetToolsDramAddrLow 0x13 -#define PPSMC_MSG_TransferTableSmu2Dram 0x14 -#define PPSMC_MSG_TransferTableDram2Smu 0x15 -#define PPSMC_MSG_UseDefaultPPTable 0x16 -#define PPSMC_MSG_UseBackupPPTable 0x17 -#define PPSMC_MSG_RunBtc 0x18 -#define PPSMC_MSG_RequestI2CBus 0x19 -#define PPSMC_MSG_ReleaseI2CBus 0x1A -#define PPSMC_MSG_SetFloorSocVoltage 0x21 -#define PPSMC_MSG_SoftReset 0x22 -#define PPSMC_MSG_StartBacoMonitor 0x23 -#define PPSMC_MSG_CancelBacoMonitor 0x24 -#define PPSMC_MSG_EnterBaco 0x25 -#define PPSMC_MSG_SetSoftMinByFreq 0x26 -#define PPSMC_MSG_SetSoftMaxByFreq 0x27 -#define PPSMC_MSG_SetHardMinByFreq 0x28 -#define PPSMC_MSG_SetHardMaxByFreq 0x29 -#define PPSMC_MSG_GetMinDpmFreq 0x2A -#define PPSMC_MSG_GetMaxDpmFreq 0x2B -#define PPSMC_MSG_GetDpmFreqByIndex 0x2C -#define PPSMC_MSG_GetDpmClockFreq 0x2D -#define PPSMC_MSG_GetSsVoltageByDpm 0x2E -#define PPSMC_MSG_SetMemoryChannelConfig 0x2F -#define PPSMC_MSG_SetGeminiMode 0x30 -#define PPSMC_MSG_SetGeminiApertureHigh 0x31 -#define PPSMC_MSG_SetGeminiApertureLow 0x32 -#define PPSMC_MSG_SetMinLinkDpmByIndex 0x33 -#define PPSMC_MSG_OverridePcieParameters 0x34 -#define PPSMC_MSG_OverDriveSetPercentage 0x35 -#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x36 -#define PPSMC_MSG_ReenableAcDcInterrupt 0x37 -#define PPSMC_MSG_NotifyPowerSource 0x38 -#define PPSMC_MSG_SetUclkFastSwitch 0x39 -#define PPSMC_MSG_SetUclkDownHyst 0x3A -#define PPSMC_MSG_GfxDeviceDriverReset 0x3B -#define PPSMC_MSG_GetCurrentRpm 0x3C -#define PPSMC_MSG_SetVideoFps 0x3D -#define PPSMC_MSG_SetTjMax 0x3E -#define PPSMC_MSG_SetFanTemperatureTarget 0x3F -#define PPSMC_MSG_PrepareMp1ForUnload 0x40 -#define PPSMC_MSG_DramLogSetDramAddrHigh 0x41 -#define PPSMC_MSG_DramLogSetDramAddrLow 0x42 -#define PPSMC_MSG_DramLogSetDramSize 0x43 -#define PPSMC_MSG_SetFanMaxRpm 0x44 -#define PPSMC_MSG_SetFanMinPwm 0x45 -#define PPSMC_MSG_ConfigureGfxDidt 0x46 -#define PPSMC_MSG_NumOfDisplays 0x47 -#define PPSMC_MSG_RemoveMargins 0x48 -#define PPSMC_MSG_ReadSerialNumTop32 0x49 -#define PPSMC_MSG_ReadSerialNumBottom32 0x4A -#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B -#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C -#define PPSMC_MSG_RunAcgBtc 0x4D -#define PPSMC_MSG_InitializeAcg 0x4E -#define PPSMC_MSG_EnableAcgBtcTestMode 0x4F -#define PPSMC_MSG_EnableAcgSpreadSpectrum 0x50 -#define PPSMC_MSG_AllowGfxOff 0x51 -#define PPSMC_MSG_DisallowGfxOff 
0x52 -#define PPSMC_MSG_GetPptLimit 0x53 -#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x54 -#define PPSMC_Message_Count 0x56 - -typedef uint16_t PPSMC_Result; -typedef int PPSMC_Msg; - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h deleted file mode 100644 index 0c66f0fe1aaf..000000000000 --- a/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#ifndef VEGA20_PP_SMC_H -#define VEGA20_PP_SMC_H - -#pragma pack(push, 1) - -// SMU Response Codes: -#define PPSMC_Result_OK 0x1 -#define PPSMC_Result_Failed 0xFF -#define PPSMC_Result_UnknownCmd 0xFE -#define PPSMC_Result_CmdRejectedPrereq 0xFD -#define PPSMC_Result_CmdRejectedBusy 0xFC - -// Message Definitions: -#define PPSMC_MSG_TestMessage 0x1 -#define PPSMC_MSG_GetSmuVersion 0x2 -#define PPSMC_MSG_GetDriverIfVersion 0x3 -#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 -#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 -#define PPSMC_MSG_EnableAllSmuFeatures 0x6 -#define PPSMC_MSG_DisableAllSmuFeatures 0x7 -#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 -#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 -#define PPSMC_MSG_DisableSmuFeaturesLow 0xA -#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB -#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC -#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD -#define PPSMC_MSG_SetWorkloadMask 0xE -#define PPSMC_MSG_SetPptLimit 0xF -#define PPSMC_MSG_SetDriverDramAddrHigh 0x10 -#define PPSMC_MSG_SetDriverDramAddrLow 0x11 -#define PPSMC_MSG_SetToolsDramAddrHigh 0x12 -#define PPSMC_MSG_SetToolsDramAddrLow 0x13 -#define PPSMC_MSG_TransferTableSmu2Dram 0x14 -#define PPSMC_MSG_TransferTableDram2Smu 0x15 -#define PPSMC_MSG_UseDefaultPPTable 0x16 -#define PPSMC_MSG_UseBackupPPTable 0x17 -#define PPSMC_MSG_RunBtc 0x18 -#define PPSMC_MSG_RequestI2CBus 0x19 -#define PPSMC_MSG_ReleaseI2CBus 0x1A -#define PPSMC_MSG_SetFloorSocVoltage 0x21 -#define PPSMC_MSG_SoftReset 0x22 -#define PPSMC_MSG_StartBacoMonitor 0x23 -#define PPSMC_MSG_CancelBacoMonitor 0x24 -#define PPSMC_MSG_EnterBaco 0x25 -#define PPSMC_MSG_SetSoftMinByFreq 0x26 -#define PPSMC_MSG_SetSoftMaxByFreq 0x27 -#define PPSMC_MSG_SetHardMinByFreq 0x28 -#define PPSMC_MSG_SetHardMaxByFreq 0x29 -#define PPSMC_MSG_GetMinDpmFreq 0x2A -#define PPSMC_MSG_GetMaxDpmFreq 0x2B -#define PPSMC_MSG_GetDpmFreqByIndex 0x2C -#define PPSMC_MSG_GetDpmClockFreq 0x2D 
-#define PPSMC_MSG_GetSsVoltageByDpm 0x2E -#define PPSMC_MSG_SetMemoryChannelConfig 0x2F -#define PPSMC_MSG_SetGeminiMode 0x30 -#define PPSMC_MSG_SetGeminiApertureHigh 0x31 -#define PPSMC_MSG_SetGeminiApertureLow 0x32 -#define PPSMC_MSG_SetMinLinkDpmByIndex 0x33 -#define PPSMC_MSG_OverridePcieParameters 0x34 -#define PPSMC_MSG_OverDriveSetPercentage 0x35 -#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x36 -#define PPSMC_MSG_ReenableAcDcInterrupt 0x37 -#define PPSMC_MSG_NotifyPowerSource 0x38 -#define PPSMC_MSG_SetUclkFastSwitch 0x39 -#define PPSMC_MSG_SetUclkDownHyst 0x3A -//#define PPSMC_MSG_GfxDeviceDriverReset 0x3B -#define PPSMC_MSG_GetCurrentRpm 0x3C -#define PPSMC_MSG_SetVideoFps 0x3D -#define PPSMC_MSG_SetTjMax 0x3E -#define PPSMC_MSG_SetFanTemperatureTarget 0x3F -#define PPSMC_MSG_PrepareMp1ForUnload 0x40 -#define PPSMC_MSG_DramLogSetDramAddrHigh 0x41 -#define PPSMC_MSG_DramLogSetDramAddrLow 0x42 -#define PPSMC_MSG_DramLogSetDramSize 0x43 -#define PPSMC_MSG_SetFanMaxRpm 0x44 -#define PPSMC_MSG_SetFanMinPwm 0x45 -#define PPSMC_MSG_ConfigureGfxDidt 0x46 -#define PPSMC_MSG_NumOfDisplays 0x47 -#define PPSMC_MSG_RemoveMargins 0x48 -#define PPSMC_MSG_ReadSerialNumTop32 0x49 -#define PPSMC_MSG_ReadSerialNumBottom32 0x4A -#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B -#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C -#define PPSMC_MSG_WaflTest 0x4D -#define PPSMC_MSG_SetFclkGfxClkRatio 0x4E -// Unused ID 0x4F to 0x50 -#define PPSMC_MSG_AllowGfxOff 0x51 -#define PPSMC_MSG_DisallowGfxOff 0x52 -#define PPSMC_MSG_GetPptLimit 0x53 -#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x54 -#define PPSMC_MSG_GetDebugData 0x55 -#define PPSMC_MSG_SetXgmiMode 0x56 -#define PPSMC_MSG_RunAfllBtc 0x57 -#define PPSMC_MSG_ExitBaco 0x58 -#define PPSMC_MSG_PrepareMp1ForReset 0x59 -#define PPSMC_MSG_PrepareMp1ForShutdown 0x5A -#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D -#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F -#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60 -#define PPSMC_MSG_DFCstateControl 0x63 -#define PPSMC_Message_Count 0x64 - -typedef uint32_t PPSMC_Result; -typedef uint32_t PPSMC_Msg; - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile b/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile new file mode 100644 index 000000000000..baa4265d1daa --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile @@ -0,0 +1,32 @@ +# +# Copyright 2021 Advanced Micro Devices, Inc. +# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the "Software"), +# to deal in the Software without restriction, including without limitation +# the rights to use, copy, modify, merge, publish, distribute, sublicense, +# and/or sell copies of the Software, and to permit persons to whom the +# Software is furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL +# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR +# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, +# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR +# OTHER DEALINGS IN THE SOFTWARE. 
+# + +AMD_LEGACYDPM_PATH = ../pm/legacy-dpm + +LEGACYDPM_MGR-y = legacy_dpm.o + +LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o +LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o + +AMD_LEGACYDPM_POWER = $(addprefix $(AMD_LEGACYDPM_PATH)/,$(LEGACYDPM_MGR-y)) + +AMD_POWERPLAY_FILES += $(AMD_LEGACYDPM_POWER) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h new file mode 100644 index 000000000000..2fcc4b60153c --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h @@ -0,0 +1,29 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __CIK_DPM_H__ +#define __CIK_DPM_H__ + +extern const struct amdgpu_ip_block_version kv_smu_ip_block; + +#endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c new file mode 100644 index 000000000000..72824ef61edd --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -0,0 +1,3405 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "cikd.h" +#include "atom.h" +#include "amdgpu_atombios.h" +#include "amdgpu_dpm.h" +#include "kv_dpm.h" +#include "gfx_v7_0.h" +#include + +#include "smu/smu_7_0_0_d.h" +#include "smu/smu_7_0_0_sh_mask.h" + +#include "gca/gfx_7_2_d.h" +#include "gca/gfx_7_2_sh_mask.h" +#include "legacy_dpm.h" + +#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 +#define KV_MINIMUM_ENGINE_CLOCK 800 +#define SMC_RAM_END 0x40000 + +static const struct amd_pm_funcs kv_dpm_funcs; + +static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); +static int kv_enable_nb_dpm(struct amdgpu_device *adev, + bool enable); +static void kv_init_graphics_levels(struct amdgpu_device *adev); +static int kv_calculate_ds_divider(struct amdgpu_device *adev); +static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); +static int kv_calculate_dpm_settings(struct amdgpu_device *adev); +static void kv_enable_new_levels(struct amdgpu_device *adev); +static void kv_program_nbps_index_settings(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps); +static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); +static int kv_set_enabled_levels(struct amdgpu_device *adev); +static int kv_force_dpm_highest(struct amdgpu_device *adev); +static int kv_force_dpm_lowest(struct amdgpu_device *adev); +static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps, + struct amdgpu_ps *old_rps); +static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, + int min_temp, int max_temp); +static int kv_init_fps_limits(struct amdgpu_device *adev); + +static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); +static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); + + +static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, + struct sumo_vid_mapping_table *vid_mapping_table, + u32 vid_2bit) +{ + struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + if (vid_2bit < vddc_sclk_table->count) + return vddc_sclk_table->entries[vid_2bit].v; + else + return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) + return vid_mapping_table->entries[i].vid_7bit; + } + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; + } +} + +static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, + struct sumo_vid_mapping_table *vid_mapping_table, + u32 vid_7bit) +{ + struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 i; + + if (vddc_sclk_table && vddc_sclk_table->count) { + for (i = 0; i < vddc_sclk_table->count; i++) { + if (vddc_sclk_table->entries[i].v == vid_7bit) + return i; + } + return vddc_sclk_table->count - 1; + } else { + for (i = 0; i < vid_mapping_table->num_entries; i++) { + if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) + return vid_mapping_table->entries[i].vid_2bit; + } + + return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; + } +} + +static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) +{ +/* This bit selects who handles display phy powergating. + * Clear the bit to let atom handle it. + * Set it to let the driver handle it. + * For now we just let atom handle it. 
+ */ +#if 0 + u32 v = RREG32(mmDOUT_SCRATCH3); + + if (enable) + v |= 0x4; + else + v &= 0xFFFFFFFB; + + WREG32(mmDOUT_SCRATCH3, v); +#endif +} + +static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, + struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, + ATOM_AVAILABLE_SCLK_LIST *table) +{ + u32 i; + u32 n = 0; + u32 prev_sclk = 0; + + for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { + if (table[i].ulSupportedSCLK > prev_sclk) { + sclk_voltage_mapping_table->entries[n].sclk_frequency = + table[i].ulSupportedSCLK; + sclk_voltage_mapping_table->entries[n].vid_2bit = + table[i].usVoltageIndex; + prev_sclk = table[i].ulSupportedSCLK; + n++; + } + } + + sclk_voltage_mapping_table->num_max_dpm_entries = n; +} + +static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, + struct sumo_vid_mapping_table *vid_mapping_table, + ATOM_AVAILABLE_SCLK_LIST *table) +{ + u32 i, j; + + for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { + if (table[i].ulSupportedSCLK != 0) { + vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = + table[i].usVoltageID; + vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = + table[i].usVoltageIndex; + } + } + + for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { + if (vid_mapping_table->entries[i].vid_7bit == 0) { + for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { + if (vid_mapping_table->entries[j].vid_7bit != 0) { + vid_mapping_table->entries[i] = + vid_mapping_table->entries[j]; + vid_mapping_table->entries[j].vid_7bit = 0; + break; + } + } + + if (j == SUMO_MAX_NUMBER_VOLTAGES) + break; + } + } + + vid_mapping_table->num_entries = i; +} + +#if 0 +static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 1, 4, 1 }, + { 2, 5, 1 }, + { 3, 4, 2 }, + { 4, 1, 1 }, + { 5, 5, 2 }, + { 6, 6, 1 }, + { 7, 9, 2 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = +{ + { 0, 4, 1 }, + { 1, 4, 1 }, + { 2, 5, 1 }, + { 3, 4, 1 }, + { 4, 1, 1 }, + { 5, 5, 1 }, + { 6, 6, 1 }, + { 7, 9, 1 }, + { 8, 4, 1 }, + { 9, 2, 1 }, + { 10, 3, 1 }, + { 11, 6, 1 }, + { 12, 8, 2 }, + { 13, 1, 1 }, + { 14, 2, 1 }, + { 15, 3, 1 }, + { 16, 1, 1 }, + { 17, 4, 1 }, + { 18, 3, 1 }, + { 19, 1, 1 }, + { 20, 8, 1 }, + { 21, 5, 1 }, + { 22, 1, 1 }, + { 23, 1, 1 }, + { 24, 4, 1 }, + { 27, 6, 1 }, + { 28, 1, 1 }, + { 0xffffffff } +}; + +static const struct kv_lcac_config_reg sx0_cac_config_reg[] = +{ + { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc0_cac_config_reg[] = +{ + { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc1_cac_config_reg[] = +{ + { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc2_cac_config_reg[] = +{ + { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg mc3_cac_config_reg[] = +{ + { 0xc0400d54, 0x003e0000, 17, 
0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; + +static const struct kv_lcac_config_reg cpl_cac_config_reg[] = +{ + { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } +}; +#endif + +static const struct kv_pt_config_reg didt_config_kv[] = +{ + { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0x000000ff, 0, 0x0, 
KV_CONFIGREG_DIDT_IND }, + { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, + { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, + { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, + { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, + { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, + { 0xFFFFFFFF } +}; + +static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) +{ + struct kv_ps *ps = rps->ps_priv; + + return ps; +} + +static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +#if 0 +static void kv_program_local_cac_table(struct amdgpu_device *adev, + const struct kv_lcac_config_values *local_cac_table, + const struct kv_lcac_config_reg *local_cac_reg) +{ + u32 i, count, data; + const struct kv_lcac_config_values *values = local_cac_table; + + while (values->block_id != 0xffffffff) { + count = values->signal_id; + for (i = 0; i < count; i++) { + data = ((values->block_id << local_cac_reg->block_shift) & + local_cac_reg->block_mask); + data |= ((i << local_cac_reg->signal_shift) & + local_cac_reg->signal_mask); + data |= ((values->t << local_cac_reg->t_shift) & + local_cac_reg->t_mask); + data |= ((1 << local_cac_reg->enable_shift) & + local_cac_reg->enable_mask); + WREG32_SMC(local_cac_reg->cntl, data); + } + values++; + } +} +#endif + +static int kv_program_pt_config_registers(struct amdgpu_device *adev, + const struct kv_pt_config_reg *cac_config_regs) +{ + const struct kv_pt_config_reg *config_regs = cac_config_regs; + u32 data; + u32 cache = 0; + + if (config_regs == NULL) + return -EINVAL; + + while (config_regs->offset != 0xFFFFFFFF) { + if (config_regs->type == KV_CONFIGREG_CACHE) { + cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); + } else { + switch (config_regs->type) { + case KV_CONFIGREG_SMC_IND: + data = RREG32_SMC(config_regs->offset); + break; + case KV_CONFIGREG_DIDT_IND: + data = RREG32_DIDT(config_regs->offset); + break; + default: + data = RREG32(config_regs->offset); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + data |= cache; + cache = 0; + + switch (config_regs->type) { + case KV_CONFIGREG_SMC_IND: + WREG32_SMC(config_regs->offset, data); + break; + case KV_CONFIGREG_DIDT_IND: + WREG32_DIDT(config_regs->offset, data); + break; + default: + WREG32(config_regs->offset, data); + break; + } + } + config_regs++; + } + + return 0; +} + +static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 data; + + if (pi->caps_sq_ramping) { + data = RREG32_DIDT(ixDIDT_SQ_CTRL0); + if (enable) + data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_SQ_CTRL0, data); + } + + if (pi->caps_db_ramping) { + data = RREG32_DIDT(ixDIDT_DB_CTRL0); + if (enable) + data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_DB_CTRL0, data); + } + + if (pi->caps_td_ramping) { + 
data = RREG32_DIDT(ixDIDT_TD_CTRL0); + if (enable) + data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_TD_CTRL0, data); + } + + if (pi->caps_tcp_ramping) { + data = RREG32_DIDT(ixDIDT_TCP_CTRL0); + if (enable) + data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + else + data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; + WREG32_DIDT(ixDIDT_TCP_CTRL0, data); + } +} + +static int kv_enable_didt(struct amdgpu_device *adev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + if (pi->caps_sq_ramping || + pi->caps_db_ramping || + pi->caps_td_ramping || + pi->caps_tcp_ramping) { + amdgpu_gfx_rlc_enter_safe_mode(adev); + + if (enable) { + ret = kv_program_pt_config_registers(adev, didt_config_kv); + if (ret) { + amdgpu_gfx_rlc_exit_safe_mode(adev); + return ret; + } + } + + kv_do_enable_didt(adev, enable); + + amdgpu_gfx_rlc_exit_safe_mode(adev); + } + + return 0; +} + +#if 0 +static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (pi->caps_cac) { + WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); + WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); + kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); + + WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); + WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); + kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); + + WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); + WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); + kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); + + WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); + WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); + kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); + + WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); + WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); + kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); + + WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); + WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); + kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); + } +} +#endif + +static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret = 0; + + if (pi->caps_cac) { + if (enable) { + ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); + if (ret) + pi->cac_enabled = false; + else + pi->cac_enabled = true; + } else if (pi->cac_enabled) { + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); + pi->cac_enabled = false; + } + } + + return ret; +} + +static int kv_process_firmware_header(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, DpmTable), + &tmp, pi->sram_end); + + if (ret == 0) + pi->dpm_table_start = tmp; + + ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + + offsetof(SMU7_Firmware_Header, SoftRegisters), + &tmp, pi->sram_end); + + if (ret == 0) + pi->soft_regs_start = tmp; + + return ret; +} + +static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + pi->graphics_voltage_change_enable = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), + &pi->graphics_voltage_change_enable, + sizeof(u8), pi->sram_end); + + return ret; +} + +static int kv_set_dpm_interval(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = 
kv_get_pi(adev); + int ret; + + pi->graphics_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), + &pi->graphics_interval, + sizeof(u8), pi->sram_end); + + return ret; +} + +static int kv_set_dpm_boot_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), + &pi->graphics_boot_level, + sizeof(u8), pi->sram_end); + + return ret; +} + +static void kv_program_vc(struct amdgpu_device *adev) +{ + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); +} + +static void kv_clear_vc(struct amdgpu_device *adev) +{ + WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); +} + +static int kv_set_divider_value(struct amdgpu_device *adev, + u32 index, u32 sclk) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct atom_clock_dividers dividers; + int ret; + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + sclk, false, &dividers); + if (ret) + return ret; + + pi->graphics_level[index].SclkDid = (u8)dividers.post_div; + pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); + + return 0; +} + +static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, + u16 voltage) +{ + return 6200 - (voltage * 25); +} + +static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, + u32 vid_2bit) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 vid_8bit = kv_convert_vid2_to_vid7(adev, + &pi->sys_info.vid_mapping_table, + vid_2bit); + + return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); +} + + +static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; + pi->graphics_level[index].MinVddNb = + cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); + + return 0; +} + +static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->graphics_level[index].AT = cpu_to_be16((u16)at); + + return 0; +} + +static void kv_dpm_power_level_enable(struct amdgpu_device *adev, + u32 index, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; +} + +static void kv_start_dpm(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); + + tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; + WREG32_SMC(ixGENERAL_PWRMGT, tmp); + + amdgpu_kv_smc_dpm_enable(adev, true); +} + +static void kv_stop_dpm(struct amdgpu_device *adev) +{ + amdgpu_kv_smc_dpm_enable(adev, false); +} + +static void kv_start_am(struct amdgpu_device *adev) +{ + u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); + + sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | + SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); + sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; + + WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); +} + +static void kv_reset_am(struct amdgpu_device *adev) +{ + u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); + + sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | + SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); + + WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); +} + +static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) +{ + return amdgpu_kv_notify_message_to_smu(adev, freeze ?
+ PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); +} + +static int kv_force_lowest_valid(struct amdgpu_device *adev) +{ + return kv_force_dpm_lowest(adev); +} + +static int kv_unforce_levels(struct amdgpu_device *adev) +{ + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); + else + return kv_set_enabled_levels(adev); +} + +static int kv_update_sclk_t(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 low_sclk_interrupt_t = 0; + int ret = 0; + + if (pi->caps_sclk_throttle_low_notification) { + low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), + (u8 *)&low_sclk_interrupt_t, + sizeof(u32), pi->sram_end); + } + return ret; +} + +static int kv_program_bootup_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].clk == pi->boot_pl.sclk) + break; + } + + pi->graphics_boot_level = (u8)i; + kv_dpm_power_level_enable(adev, i, true); + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + if (table->num_max_dpm_entries == 0) + return -EINVAL; + + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) + break; + } + + pi->graphics_boot_level = (u8)i; + kv_dpm_power_level_enable(adev, i, true); + } + return 0; +} + +static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + pi->graphics_therm_throttle_enable = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), + &pi->graphics_therm_throttle_enable, + sizeof(u8), pi->sram_end); + + return ret; +} + +static int kv_upload_dpm_settings(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), + (u8 *)&pi->graphics_level, + sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, + pi->sram_end); + + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), + &pi->graphics_dpm_level_count, + sizeof(u8), pi->sram_end); + + return ret; +} + +static u32 kv_get_clock_difference(u32 a, u32 b) +{ + return (a >= b) ? 
a - b : b - a; +} + +static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 value; + + if (pi->caps_enable_dfs_bypass) { + if (kv_get_clock_difference(clk, 40000) < 200) + value = 3; + else if (kv_get_clock_difference(clk, 30000) < 200) + value = 2; + else if (kv_get_clock_difference(clk, 20000) < 200) + value = 7; + else if (kv_get_clock_difference(clk, 15000) < 200) + value = 6; + else if (kv_get_clock_difference(clk, 10000) < 200) + value = 8; + else + value = 0; + } else { + value = 0; + } + + return value; +} + +static int kv_populate_uvd_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_uvd_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + int ret; + u32 i; + + if (table == NULL || table->count == 0) + return 0; + + pi->uvd_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < table->entries[i].v)) + break; + + pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); + pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); + pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); + + pi->uvd_level[i].VClkBypassCntl = + (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); + pi->uvd_level[i].DClkBypassCntl = + (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].vclk, false, &dividers); + if (ret) + return ret; + pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].dclk, false, &dividers); + if (ret) + return ret; + pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; + + pi->uvd_level_count++; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), + (u8 *)&pi->uvd_level_count, + sizeof(u8), pi->sram_end); + if (ret) + return ret; + + pi->uvd_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UVDInterval), + &pi->uvd_interval, + sizeof(u8), pi->sram_end); + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UvdLevel), + (u8 *)&pi->uvd_level, + sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, + pi->sram_end); + + return ret; + +} + +static int kv_populate_vce_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + u32 i; + struct amdgpu_vce_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + + if (table == NULL || table->count == 0) + return 0; + + pi->vce_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + pi->high_voltage_t < table->entries[i].v) + break; + + pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); + pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); + + pi->vce_level[i].ClkBypassCntl = + (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].evclk, false, &dividers); + if (ret) + return ret; + pi->vce_level[i].Divider = (u8)dividers.post_div; + + pi->vce_level_count++; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, +
pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VceLevelCount), + (u8 *)&pi->vce_level_count, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + pi->vce_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VCEInterval), + (u8 *)&pi->vce_interval, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VceLevel), + (u8 *)&pi->vce_level, + sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, + pi->sram_end); + + return ret; +} + +static int kv_populate_samu_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + int ret; + u32 i; + + if (table == NULL || table->count == 0) + return 0; + + pi->samu_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + pi->high_voltage_t < table->entries[i].v) + break; + + pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); + pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); + + pi->samu_level[i].ClkBypassCntl = + (u8)kv_get_clk_bypass(adev, table->entries[i].clk); + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].clk, false, &dividers); + if (ret) + return ret; + pi->samu_level[i].Divider = (u8)dividers.post_div; + + pi->samu_level_count++; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), + (u8 *)&pi->samu_level_count, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + pi->samu_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SAMUInterval), + (u8 *)&pi->samu_interval, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SamuLevel), + (u8 *)&pi->samu_level, + sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, + pi->sram_end); + if (ret) + return ret; + + return ret; +} + + +static int kv_populate_acp_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + struct atom_clock_dividers dividers; + int ret; + u32 i; + + if (table == NULL || table->count == 0) + return 0; + + pi->acp_level_count = 0; + for (i = 0; i < table->count; i++) { + pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); + pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + table->entries[i].clk, false, &dividers); + if (ret) + return ret; + pi->acp_level[i].Divider = (u8)dividers.post_div; + + pi->acp_level_count++; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), + (u8 *)&pi->acp_level_count, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + pi->acp_interval = 1; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, ACPInterval), + (u8 *)&pi->acp_interval, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable,
AcpLevel), + (u8 *)&pi->acp_level, + sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, + pi->sram_end); + if (ret) + return ret; + + return ret; +} + +static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + for (i = 0; i < pi->graphics_dpm_level_count; i++) { + if (pi->caps_enable_dfs_bypass) { + if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) + pi->graphics_level[i].ClkBypassCntl = 3; + else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) + pi->graphics_level[i].ClkBypassCntl = 2; + else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) + pi->graphics_level[i].ClkBypassCntl = 7; + else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) + pi->graphics_level[i].ClkBypassCntl = 6; + else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) + pi->graphics_level[i].ClkBypassCntl = 8; + else + pi->graphics_level[i].ClkBypassCntl = 0; + } else { + pi->graphics_level[i].ClkBypassCntl = 0; + } + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + for (i = 0; i < pi->graphics_dpm_level_count; i++) { + if (pi->caps_enable_dfs_bypass) { + if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) + pi->graphics_level[i].ClkBypassCntl = 3; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) + pi->graphics_level[i].ClkBypassCntl = 2; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) + pi->graphics_level[i].ClkBypassCntl = 7; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) + pi->graphics_level[i].ClkBypassCntl = 6; + else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) + pi->graphics_level[i].ClkBypassCntl = 8; + else + pi->graphics_level[i].ClkBypassCntl = 0; + } else { + pi->graphics_level[i].ClkBypassCntl = 0; + } + } + } +} + +static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? 
+ PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); +} + +static void kv_reset_acp_boot_level(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->acp_boot_level = 0xff; +} + +static void kv_update_current_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct kv_ps *new_ps = kv_get_ps(rps); + struct kv_power_info *pi = kv_get_pi(adev); + + pi->current_rps = *rps; + pi->current_ps = *new_ps; + pi->current_rps.ps_priv = &pi->current_ps; + adev->pm.dpm.current_ps = &pi->current_rps; +} + +static void kv_update_requested_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct kv_ps *new_ps = kv_get_ps(rps); + struct kv_power_info *pi = kv_get_pi(adev); + + pi->requested_rps = *rps; + pi->requested_ps = *new_ps; + pi->requested_rps.ps_priv = &pi->requested_ps; + adev->pm.dpm.requested_ps = &pi->requested_rps; +} + +static void kv_dpm_enable_bapm(void *handle, bool enable) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + if (pi->bapm_enable) { + ret = amdgpu_kv_smc_bapm_enable(adev, enable); + if (ret) + DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + } +} + +static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) +{ + switch (sensor) { + case THERMAL_TYPE_RV6XX: + case THERMAL_TYPE_RV770: + case THERMAL_TYPE_EVERGREEN: + case THERMAL_TYPE_SUMO: + case THERMAL_TYPE_NI: + case THERMAL_TYPE_SI: + case THERMAL_TYPE_CI: + case THERMAL_TYPE_KV: + return true; + case THERMAL_TYPE_ADT7473_WITH_INTERNAL: + case THERMAL_TYPE_EMC2103_WITH_INTERNAL: + return false; /* need special handling */ + case THERMAL_TYPE_NONE: + case THERMAL_TYPE_EXTERNAL: + case THERMAL_TYPE_EXTERNAL_GPIO: + default: + return false; + } +} + +static int kv_dpm_enable(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret; + + ret = kv_process_firmware_header(adev); + if (ret) { + DRM_ERROR("kv_process_firmware_header failed\n"); + return ret; + } + kv_init_fps_limits(adev); + kv_init_graphics_levels(adev); + ret = kv_program_bootup_state(adev); + if (ret) { + DRM_ERROR("kv_program_bootup_state failed\n"); + return ret; + } + kv_calculate_dfs_bypass_settings(adev); + ret = kv_upload_dpm_settings(adev); + if (ret) { + DRM_ERROR("kv_upload_dpm_settings failed\n"); + return ret; + } + ret = kv_populate_uvd_table(adev); + if (ret) { + DRM_ERROR("kv_populate_uvd_table failed\n"); + return ret; + } + ret = kv_populate_vce_table(adev); + if (ret) { + DRM_ERROR("kv_populate_vce_table failed\n"); + return ret; + } + ret = kv_populate_samu_table(adev); + if (ret) { + DRM_ERROR("kv_populate_samu_table failed\n"); + return ret; + } + ret = kv_populate_acp_table(adev); + if (ret) { + DRM_ERROR("kv_populate_acp_table failed\n"); + return ret; + } + kv_program_vc(adev); +#if 0 + kv_initialize_hardware_cac_manager(adev); +#endif + kv_start_am(adev); + if (pi->enable_auto_thermal_throttling) { + ret = kv_enable_auto_thermal_throttling(adev); + if (ret) { + DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); + return ret; + } + } + ret = kv_enable_dpm_voltage_scaling(adev); + if (ret) { + DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); + return ret; + } + ret = kv_set_dpm_interval(adev); + if (ret) { + DRM_ERROR("kv_set_dpm_interval failed\n"); + return ret; + } + ret = kv_set_dpm_boot_state(adev); + if (ret) { + DRM_ERROR("kv_set_dpm_boot_state failed\n"); + return ret; + } + ret = kv_enable_ulv(adev, true); + if (ret) { + DRM_ERROR("kv_enable_ulv 
failed\n"); + return ret; + } + kv_start_dpm(adev); + ret = kv_enable_didt(adev, true); + if (ret) { + DRM_ERROR("kv_enable_didt failed\n"); + return ret; + } + ret = kv_enable_smc_cac(adev, true); + if (ret) { + DRM_ERROR("kv_enable_smc_cac failed\n"); + return ret; + } + + kv_reset_acp_boot_level(adev); + + ret = amdgpu_kv_smc_bapm_enable(adev, false); + if (ret) { + DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + return ret; + } + + if (adev->irq.installed && + kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { + ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); + if (ret) { + DRM_ERROR("kv_set_thermal_temperature_range failed\n"); + return ret; + } + amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); + amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); + } + + return ret; +} + +static void kv_dpm_disable(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); + amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, + AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); + + amdgpu_kv_smc_bapm_enable(adev, false); + + if (adev->asic_type == CHIP_MULLINS) + kv_enable_nb_dpm(adev, false); + + /* powerup blocks */ + kv_dpm_powergate_acp(adev, false); + kv_dpm_powergate_samu(adev, false); + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + if (pi->caps_uvd_pg) /* power on the UVD block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); + + kv_enable_smc_cac(adev, false); + kv_enable_didt(adev, false); + kv_clear_vc(adev); + kv_stop_dpm(adev); + kv_enable_ulv(adev, false); + kv_reset_am(adev); + + kv_update_current_ps(adev, adev->pm.dpm.boot_ps); +} + +#if 0 +static int kv_write_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 value) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, + (u8 *)&value, sizeof(u16), pi->sram_end); +} + +static int kv_read_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 *value) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, + value, pi->sram_end); +} +#endif + +static void kv_init_sclk_t(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->low_sclk_interrupt_t = 0; +} + +static int kv_init_fps_limits(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret = 0; + + if (pi->caps_fps) { + u16 tmp; + + tmp = 45; + pi->fps_high_t = cpu_to_be16(tmp); + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, FpsHighT), + (u8 *)&pi->fps_high_t, + sizeof(u16), pi->sram_end); + + tmp = 30; + pi->fps_low_t = cpu_to_be16(tmp); + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, FpsLowT), + (u8 *)&pi->fps_low_t, + sizeof(u16), pi->sram_end); + + } + return ret; +} + +static void kv_init_powergate_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->uvd_power_gated = false; + pi->vce_power_gated = false; + pi->samu_power_gated = false; + pi->acp_power_gated = false; + +} + +static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? 
+ PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); +} + +static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? + PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); +} + +static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? + PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); +} + +static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) +{ + return amdgpu_kv_notify_message_to_smu(adev, enable ? + PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); +} + +static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_uvd_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + int ret; + u32 mask; + + if (!gate) { + if (table->count) + pi->uvd_boot_level = table->count - 1; + else + pi->uvd_boot_level = 0; + + if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { + mask = 1 << pi->uvd_boot_level; + } else { + mask = 0x1f; + } + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), + (uint8_t *)&pi->uvd_boot_level, + sizeof(u8), pi->sram_end); + if (ret) + return ret; + + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_UVDDPM_SetEnabledMask, + mask); + } + + return kv_enable_uvd_dpm(adev, !gate); +} + +static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) +{ + u8 i; + struct amdgpu_vce_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].evclk >= evclk) + break; + } + + return i; +} + +static int kv_update_vce_dpm(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + struct amdgpu_ps *amdgpu_current_state) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_vce_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + int ret; + + if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { + if (pi->caps_stable_p_state) + pi->vce_boot_level = table->count - 1; + else + pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, VceBootLevel), + (u8 *)&pi->vce_boot_level, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + if (pi->caps_stable_p_state) + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_VCEDPM_SetEnabledMask, + (1 << pi->vce_boot_level)); + kv_enable_vce_dpm(adev, true); + } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { + kv_enable_vce_dpm(adev, false); + } + + return 0; +} + +static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + int ret; + + if (!gate) { + if (pi->caps_stable_p_state) + pi->samu_boot_level = table->count - 1; + else + pi->samu_boot_level = 0; + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), + (u8 *)&pi->samu_boot_level, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + if (pi->caps_stable_p_state) + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + 
PPSMC_MSG_SAMUDPM_SetEnabledMask, + (1 << pi->samu_boot_level)); + } + + return kv_enable_samu_dpm(adev, !gate); +} + +static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) +{ + u8 i; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].clk >= 0) /* XXX */ + break; + } + + if (i >= table->count) + i = table->count - 1; + + return i; +} + +static void kv_update_acp_boot_level(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u8 acp_boot_level; + + if (!pi->caps_stable_p_state) { + acp_boot_level = kv_get_acp_boot_level(adev); + if (acp_boot_level != pi->acp_boot_level) { + pi->acp_boot_level = acp_boot_level; + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ACPDPM_SetEnabledMask, + (1 << pi->acp_boot_level)); + } + } +} + +static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + int ret; + + if (!gate) { + if (pi->caps_stable_p_state) + pi->acp_boot_level = table->count - 1; + else + pi->acp_boot_level = kv_get_acp_boot_level(adev); + + ret = amdgpu_kv_copy_bytes_to_smc(adev, + pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), + (u8 *)&pi->acp_boot_level, + sizeof(u8), + pi->sram_end); + if (ret) + return ret; + + if (pi->caps_stable_p_state) + amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_ACPDPM_SetEnabledMask, + (1 << pi->acp_boot_level)); + } + + return kv_enable_acp_dpm(adev, !gate); +} + +static void kv_dpm_powergate_uvd(void *handle, bool gate) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + + pi->uvd_power_gated = gate; + + if (gate) { + /* stop the UVD block */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_GATE); + kv_update_uvd_dpm(adev, gate); + if (pi->caps_uvd_pg) + /* power off the UVD block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); + } else { + if (pi->caps_uvd_pg) + /* power on the UVD block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); + /* re-init the UVD block */ + kv_update_uvd_dpm(adev, gate); + + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, + AMD_PG_STATE_UNGATE); + } +} + +static void kv_dpm_powergate_vce(void *handle, bool gate) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + + pi->vce_power_gated = gate; + + if (gate) { + /* stop the VCE block */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_GATE); + kv_enable_vce_dpm(adev, false); + if (pi->caps_vce_pg) /* power off the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); + } else { + if (pi->caps_vce_pg) /* power on the VCE block */ + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); + kv_enable_vce_dpm(adev, true); + /* re-init the VCE block */ + amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, + AMD_PG_STATE_UNGATE); + } +} + + +static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (pi->samu_power_gated == gate) + return; + + pi->samu_power_gated = gate; + + if (gate) { + kv_update_samu_dpm(adev, true); + if (pi->caps_samu_pg) + 
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); + } else { + if (pi->caps_samu_pg) + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); + kv_update_samu_dpm(adev, false); + } +} + +static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (pi->acp_power_gated == gate) + return; + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return; + + pi->acp_power_gated = gate; + + if (gate) { + kv_update_acp_dpm(adev, true); + if (pi->caps_acp_pg) + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); + } else { + if (pi->caps_acp_pg) + amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); + kv_update_acp_dpm(adev, false); + } +} + +static void kv_set_valid_clock_range(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps) +{ + struct kv_ps *new_ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + for (i = 0; i < pi->graphics_dpm_level_count; i++) { + if ((table->entries[i].clk >= new_ps->levels[0].sclk) || + (i == (pi->graphics_dpm_level_count - 1))) { + pi->lowest_valid = i; + break; + } + } + + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) + break; + } + pi->highest_valid = i; + + if (pi->lowest_valid > pi->highest_valid) { + if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > + (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) + pi->highest_valid = pi->lowest_valid; + else + pi->lowest_valid = pi->highest_valid; + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { + if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || + i == (int)(pi->graphics_dpm_level_count - 1)) { + pi->lowest_valid = i; + break; + } + } + + for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { + if (table->entries[i].sclk_frequency <= + new_ps->levels[new_ps->num_levels - 1].sclk) + break; + } + pi->highest_valid = i; + + if (pi->lowest_valid > pi->highest_valid) { + if ((new_ps->levels[0].sclk - + table->entries[pi->highest_valid].sclk_frequency) > + (table->entries[pi->lowest_valid].sclk_frequency - + new_ps->levels[new_ps->num_levels -1].sclk)) + pi->highest_valid = pi->lowest_valid; + else + pi->lowest_valid = pi->highest_valid; + } + } +} + +static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps) +{ + struct kv_ps *new_ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(adev); + int ret = 0; + u8 clk_bypass_cntl; + + if (pi->caps_enable_dfs_bypass) { + clk_bypass_cntl = new_ps->need_dfs_bypass ? 
+ pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; + ret = amdgpu_kv_copy_bytes_to_smc(adev, + (pi->dpm_table_start + + offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + + (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + + offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), + &clk_bypass_cntl, + sizeof(u8), pi->sram_end); + } + + return ret; +} + +static int kv_enable_nb_dpm(struct amdgpu_device *adev, + bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + int ret = 0; + + if (enable) { + if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { + ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); + if (ret == 0) + pi->nb_dpm_enabled = true; + } + } else { + if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { + ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); + if (ret == 0) + pi->nb_dpm_enabled = false; + } + } + + return ret; +} + +static int kv_dpm_force_performance_level(void *handle, + enum amd_dpm_forced_level level) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (level == AMD_DPM_FORCED_LEVEL_HIGH) { + ret = kv_force_dpm_highest(adev); + if (ret) + return ret; + } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { + ret = kv_force_dpm_lowest(adev); + if (ret) + return ret; + } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { + ret = kv_unforce_levels(adev); + if (ret) + return ret; + } + + adev->pm.dpm.forced_level = level; + + return 0; +} + +static int kv_dpm_pre_set_power_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; + struct amdgpu_ps *new_ps = &requested_ps; + + kv_update_requested_ps(adev, new_ps); + + kv_apply_state_adjust_rules(adev, + &pi->requested_rps, + &pi->current_rps); + + return 0; +} + +static int kv_dpm_set_power_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_ps *new_ps = &pi->requested_rps; + struct amdgpu_ps *old_ps = &pi->current_rps; + int ret; + + if (pi->bapm_enable) { + ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); + if (ret) { + DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); + return ret; + } + } + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { + if (pi->enable_dpm) { + kv_set_valid_clock_range(adev, new_ps); + kv_update_dfs_bypass_settings(adev, new_ps); + ret = kv_calculate_ds_divider(adev); + if (ret) { + DRM_ERROR("kv_calculate_ds_divider failed\n"); + return ret; + } + kv_calculate_nbps_level_settings(adev); + kv_calculate_dpm_settings(adev); + kv_force_lowest_valid(adev); + kv_enable_new_levels(adev); + kv_upload_dpm_settings(adev); + kv_program_nbps_index_settings(adev, new_ps); + kv_unforce_levels(adev); + kv_set_enabled_levels(adev); + kv_force_lowest_valid(adev); + kv_unforce_levels(adev); + + ret = kv_update_vce_dpm(adev, new_ps, old_ps); + if (ret) { + DRM_ERROR("kv_update_vce_dpm failed\n"); + return ret; + } + kv_update_sclk_t(adev); + if (adev->asic_type == CHIP_MULLINS) + kv_enable_nb_dpm(adev, true); + } + } else { + if (pi->enable_dpm) { + kv_set_valid_clock_range(adev, new_ps); + kv_update_dfs_bypass_settings(adev, new_ps); + ret = kv_calculate_ds_divider(adev); + if (ret) { + DRM_ERROR("kv_calculate_ds_divider failed\n"); + return ret; + } + kv_calculate_nbps_level_settings(adev); + kv_calculate_dpm_settings(adev); + kv_freeze_sclk_dpm(adev, true); + 
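/* upload the new DPM tables while SCLK DPM is frozen, then unfreeze below */ +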
kv_upload_dpm_settings(adev); + kv_program_nbps_index_settings(adev, new_ps); + kv_freeze_sclk_dpm(adev, false); + kv_set_enabled_levels(adev); + ret = kv_update_vce_dpm(adev, new_ps, old_ps); + if (ret) { + DRM_ERROR("kv_update_vce_dpm failed\n"); + return ret; + } + kv_update_acp_boot_level(adev); + kv_update_sclk_t(adev); + kv_enable_nb_dpm(adev, true); + } + } + + return 0; +} + +static void kv_dpm_post_set_power_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_ps *new_ps = &pi->requested_rps; + + kv_update_current_ps(adev, new_ps); +} + +static void kv_dpm_setup_asic(struct amdgpu_device *adev) +{ + sumo_take_smu_control(adev, true); + kv_init_powergate_state(adev); + kv_init_sclk_t(adev); +} + +#if 0 +static void kv_dpm_reset_asic(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { + kv_force_lowest_valid(adev); + kv_init_graphics_levels(adev); + kv_program_bootup_state(adev); + kv_upload_dpm_settings(adev); + kv_force_lowest_valid(adev); + kv_unforce_levels(adev); + } else { + kv_init_graphics_levels(adev); + kv_program_bootup_state(adev); + kv_freeze_sclk_dpm(adev, true); + kv_upload_dpm_settings(adev); + kv_freeze_sclk_dpm(adev, false); + kv_set_enabled_level(adev, pi->graphics_boot_level); + } +} +#endif + +static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, + struct amdgpu_clock_and_voltage_limits *table) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { + int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; + table->sclk = + pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; + table->vddc = + kv_convert_2bit_index_to_voltage(adev, + pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); + } + + table->mclk = pi->sys_info.nbp_memory_clock[0]; +} + +static void kv_patch_voltage_values(struct amdgpu_device *adev) +{ + int i; + struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = + &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; + struct amdgpu_vce_clock_voltage_dependency_table *vce_table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + struct amdgpu_clock_voltage_dependency_table *samu_table = + &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; + struct amdgpu_clock_voltage_dependency_table *acp_table = + &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; + + if (uvd_table->count) { + for (i = 0; i < uvd_table->count; i++) + uvd_table->entries[i].v = + kv_convert_8bit_index_to_voltage(adev, + uvd_table->entries[i].v); + } + + if (vce_table->count) { + for (i = 0; i < vce_table->count; i++) + vce_table->entries[i].v = + kv_convert_8bit_index_to_voltage(adev, + vce_table->entries[i].v); + } + + if (samu_table->count) { + for (i = 0; i < samu_table->count; i++) + samu_table->entries[i].v = + kv_convert_8bit_index_to_voltage(adev, + samu_table->entries[i].v); + } + + if (acp_table->count) { + for (i = 0; i < acp_table->count; i++) + acp_table->entries[i].v = + kv_convert_8bit_index_to_voltage(adev, + acp_table->entries[i].v); + } + +} + +static void kv_construct_boot_state(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->boot_pl.sclk = pi->sys_info.bootup_sclk; + pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; + 
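/* the remaining boot level fields are set to fixed defaults */ +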
pi->boot_pl.ds_divider_index = 0; + pi->boot_pl.ss_divider_index = 0; + pi->boot_pl.allow_gnb_slow = 1; + pi->boot_pl.force_nbp_state = 0; + pi->boot_pl.display_wm = 0; + pi->boot_pl.vce_wm = 0; +} + +static int kv_force_dpm_highest(struct amdgpu_device *adev) +{ + int ret; + u32 enable_mask, i; + + ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); + if (ret) + return ret; + + for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { + if (enable_mask & (1 << i)) + break; + } + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); + else + return kv_set_enabled_level(adev, i); +} + +static int kv_force_dpm_lowest(struct amdgpu_device *adev) +{ + int ret; + u32 enable_mask, i; + + ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); + if (ret) + return ret; + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { + if (enable_mask & (1 << i)) + break; + } + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); + else + return kv_set_enabled_level(adev, i); +} + +static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, + u32 sclk, u32 min_sclk_in_sr) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + u32 temp; + u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); + + if (sclk < min) + return 0; + + if (!pi->caps_sclk_ds) + return 0; + + for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { + temp = sclk >> i; + if (temp >= min) + break; + } + + return (u8)i; +} + +static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + int i; + + if (table && table->count) { + for (i = table->count - 1; i >= 0; i--) { + if (pi->high_voltage_t && + (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= + pi->high_voltage_t)) { + *limit = i; + return 0; + } + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { + if (pi->high_voltage_t && + (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= + pi->high_voltage_t)) { + *limit = i; + return 0; + } + } + } + + *limit = 0; + return 0; +} + +static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps, + struct amdgpu_ps *old_rps) +{ + struct kv_ps *ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(adev); + u32 min_sclk = 10000; /* ??? 
*/ + u32 sclk, mclk = 0; + int i, limit; + bool force_high; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + u32 stable_p_state_sclk = 0; + struct amdgpu_clock_and_voltage_limits *max_limits = + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + + if (new_rps->vce_active) { + new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; + new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; + } else { + new_rps->evclk = 0; + new_rps->ecclk = 0; + } + + mclk = max_limits->mclk; + sclk = min_sclk; + + if (pi->caps_stable_p_state) { + stable_p_state_sclk = (max_limits->sclk * 75) / 100; + + for (i = table->count - 1; i >= 0; i--) { + if (stable_p_state_sclk >= table->entries[i].clk) { + stable_p_state_sclk = table->entries[i].clk; + break; + } + } + + if (i > 0) + stable_p_state_sclk = table->entries[0].clk; + + sclk = stable_p_state_sclk; + } + + if (new_rps->vce_active) { + if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) + sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; + } + + ps->need_dfs_bypass = true; + + for (i = 0; i < ps->num_levels; i++) { + if (ps->levels[i].sclk < sclk) + ps->levels[i].sclk = sclk; + } + + if (table && table->count) { + for (i = 0; i < ps->num_levels; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < + kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { + kv_get_high_voltage_limit(adev, &limit); + ps->levels[i].sclk = table->entries[limit].clk; + } + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + for (i = 0; i < ps->num_levels; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < + kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { + kv_get_high_voltage_limit(adev, &limit); + ps->levels[i].sclk = table->entries[limit].sclk_frequency; + } + } + } + + if (pi->caps_stable_p_state) { + for (i = 0; i < ps->num_levels; i++) { + ps->levels[i].sclk = stable_p_state_sclk; + } + } + + pi->video_start = new_rps->dclk || new_rps->vclk || + new_rps->evclk || new_rps->ecclk; + + if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == + ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) + pi->battery_state = true; + else + pi->battery_state = false; + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { + ps->dpm0_pg_nb_ps_lo = 0x1; + ps->dpm0_pg_nb_ps_hi = 0x0; + ps->dpmx_nb_ps_lo = 0x1; + ps->dpmx_nb_ps_hi = 0x0; + } else { + ps->dpm0_pg_nb_ps_lo = 0x3; + ps->dpm0_pg_nb_ps_hi = 0x0; + ps->dpmx_nb_ps_lo = 0x3; + ps->dpmx_nb_ps_hi = 0x0; + + if (pi->sys_info.nb_dpm_enable) { + force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || + pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || + pi->disable_nb_ps3_in_battery; + ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; + ps->dpm0_pg_nb_ps_hi = 0x2; + ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; + ps->dpmx_nb_ps_hi = 0x2; + } + } +} + +static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, + u32 index, bool enable) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; +} + +static int kv_calculate_ds_divider(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 sclk_in_sr = 10000; /* ??? 
*/ + u32 i; + + if (pi->lowest_valid > pi->highest_valid) + return -EINVAL; + + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { + pi->graphics_level[i].DeepSleepDivId = + kv_get_sleep_divider_id_from_clock(adev, + be32_to_cpu(pi->graphics_level[i].SclkFrequency), + sclk_in_sr); + } + return 0; +} + +static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + bool force_high; + struct amdgpu_clock_and_voltage_limits *max_limits = + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + u32 mclk = max_limits->mclk; + + if (pi->lowest_valid > pi->highest_valid) + return -EINVAL; + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { + pi->graphics_level[i].GnbSlow = 1; + pi->graphics_level[i].ForceNbPs1 = 0; + pi->graphics_level[i].UpH = 0; + } + + if (!pi->sys_info.nb_dpm_enable) + return 0; + + force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || + (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); + + if (force_high) { + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) + pi->graphics_level[i].GnbSlow = 0; + } else { + if (pi->battery_state) + pi->graphics_level[0].ForceNbPs1 = 1; + + pi->graphics_level[1].GnbSlow = 0; + pi->graphics_level[2].GnbSlow = 0; + pi->graphics_level[3].GnbSlow = 0; + pi->graphics_level[4].GnbSlow = 0; + } + } else { + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { + pi->graphics_level[i].GnbSlow = 1; + pi->graphics_level[i].ForceNbPs1 = 0; + pi->graphics_level[i].UpH = 0; + } + + if (pi->sys_info.nb_dpm_enable && pi->battery_state) { + pi->graphics_level[pi->lowest_valid].UpH = 0x28; + pi->graphics_level[pi->lowest_valid].GnbSlow = 0; + if (pi->lowest_valid != pi->highest_valid) + pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; + } + } + return 0; +} + +static int kv_calculate_dpm_settings(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + + if (pi->lowest_valid > pi->highest_valid) + return -EINVAL; + + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) + pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 
1 : 0; + + return 0; +} + +static void kv_init_graphics_levels(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + struct amdgpu_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; + + if (table && table->count) { + u32 vid_2bit; + + pi->graphics_dpm_level_count = 0; + for (i = 0; i < table->count; i++) { + if (pi->high_voltage_t && + (pi->high_voltage_t < + kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) + break; + + kv_set_divider_value(adev, i, table->entries[i].clk); + vid_2bit = kv_convert_vid7_to_vid2(adev, + &pi->sys_info.vid_mapping_table, + table->entries[i].v); + kv_set_vid(adev, i, vid_2bit); + kv_set_at(adev, i, pi->at[i]); + kv_dpm_power_level_enabled_for_throttle(adev, i, true); + pi->graphics_dpm_level_count++; + } + } else { + struct sumo_sclk_voltage_mapping_table *table = + &pi->sys_info.sclk_voltage_mapping_table; + + pi->graphics_dpm_level_count = 0; + for (i = 0; i < table->num_max_dpm_entries; i++) { + if (pi->high_voltage_t && + pi->high_voltage_t < + kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) + break; + + kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); + kv_set_vid(adev, i, table->entries[i].vid_2bit); + kv_set_at(adev, i, pi->at[i]); + kv_dpm_power_level_enabled_for_throttle(adev, i, true); + pi->graphics_dpm_level_count++; + } + } + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) + kv_dpm_power_level_enable(adev, i, false); +} + +static void kv_enable_new_levels(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i; + + for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { + if (i >= pi->lowest_valid && i <= pi->highest_valid) + kv_dpm_power_level_enable(adev, i, true); + } +} + +static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) +{ + u32 new_mask = (1 << level); + + return amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + new_mask); +} + +static int kv_set_enabled_levels(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + u32 i, new_mask = 0; + + for (i = pi->lowest_valid; i <= pi->highest_valid; i++) + new_mask |= (1 << i); + + return amdgpu_kv_send_msg_to_smc_with_parameter(adev, + PPSMC_MSG_SCLKDPM_SetEnabledMask, + new_mask); +} + +static void kv_program_nbps_index_settings(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps) +{ + struct kv_ps *new_ps = kv_get_ps(new_rps); + struct kv_power_info *pi = kv_get_pi(adev); + u32 nbdpmconfig1; + + if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) + return; + + if (pi->sys_info.nb_dpm_enable) { + nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); + nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | + NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | + NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | + NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); + nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | + (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | + (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | + (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); + WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); + } +} + +static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, + int min_temp, int max_temp) +{ + int low_temp = 0 * 1000; + int high_temp = 255 * 1000; + u32 tmp; + + if (low_temp < min_temp) + low_temp = min_temp; + if (high_temp > max_temp) + high_temp = max_temp; + if (high_temp < low_temp) { + 
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + return -EINVAL; + } + + tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | + CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); + tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | + ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); + WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); + + adev->pm.dpm.thermal.min_temp = low_temp; + adev->pm.dpm.thermal.max_temp = high_temp; + + return 0; +} + +union igp_info { + struct _ATOM_INTEGRATED_SYSTEM_INFO info; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; + struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; +}; + +static int kv_parse_sys_info_table(struct amdgpu_device *adev) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct amdgpu_mode_info *mode_info = &adev->mode_info; + int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); + union igp_info *igp_info; + u8 frev, crev; + u16 data_offset; + int i; + + if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) { + igp_info = (union igp_info *)(mode_info->atom_context->bios + + data_offset); + + if (crev != 8) { + DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); + return -EINVAL; + } + pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); + pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); + pi->sys_info.bootup_nb_voltage_index = + le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); + if (igp_info->info_8.ucHtcTmpLmt == 0) + pi->sys_info.htc_tmp_lmt = 203; + else + pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; + if (igp_info->info_8.ucHtcHystLmt == 0) + pi->sys_info.htc_hyst_lmt = 5; + else + pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; + if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { + DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); + } + + if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) + pi->sys_info.nb_dpm_enable = true; + else + pi->sys_info.nb_dpm_enable = false; + + for (i = 0; i < KV_NUM_NBPSTATES; i++) { + pi->sys_info.nbp_memory_clock[i] = + le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); + pi->sys_info.nbp_n_clock[i] = + le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); + } + if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & + SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) + pi->caps_enable_dfs_bypass = true; + + sumo_construct_sclk_voltage_mapping_table(adev, + &pi->sys_info.sclk_voltage_mapping_table, + igp_info->info_8.sAvail_SCLK); + + sumo_construct_vid_mapping_table(adev, + &pi->sys_info.vid_mapping_table, + igp_info->info_8.sAvail_SCLK); + + kv_construct_max_power_limits_table(adev, + &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); + } + return 0; +} + +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; +}; + +union pplib_clock_info { + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; +}; + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + 
struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static void kv_patch_boot_state(struct amdgpu_device *adev, + struct kv_ps *ps) +{ + struct kv_power_info *pi = kv_get_pi(adev); + + ps->num_levels = 1; + ps->levels[0] = pi->boot_pl; +} + +static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, + u8 table_rev) +{ + struct kv_ps *ps = kv_get_ps(rps); + + rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); + rps->class = le16_to_cpu(non_clock_info->usClassification); + rps->class2 = le16_to_cpu(non_clock_info->usClassification2); + + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { + rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); + rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); + } else { + rps->vclk = 0; + rps->dclk = 0; + } + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { + adev->pm.dpm.boot_ps = rps; + kv_patch_boot_state(adev, ps); + } + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + adev->pm.dpm.uvd_ps = rps; +} + +static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, int index, + union pplib_clock_info *clock_info) +{ + struct kv_power_info *pi = kv_get_pi(adev); + struct kv_ps *ps = kv_get_ps(rps); + struct kv_pl *pl = &ps->levels[index]; + u32 sclk; + + sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); + sclk |= clock_info->sumo.ucEngineClockHigh << 16; + pl->sclk = sclk; + pl->vddc_index = clock_info->sumo.vddcIndex; + + ps->num_levels = index + 1; + + if (pi->caps_sclk_ds) { + pl->ds_divider_index = 5; + pl->ss_divider_index = 5; + } +} + +static int kv_parse_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, k, non_clock_array_index, clock_array_index; + union pplib_clock_info *clock_info; + struct _StateArray *state_array; + struct _ClockInfoArray *clock_info_array; + struct _NonClockInfoArray *non_clock_info_array; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + u8 *power_state_offset; + struct kv_ps *ps; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + amdgpu_add_thermal_controller(adev); + + state_array = (struct _StateArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset)); + clock_info_array = (struct _ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); + non_clock_info_array = (struct _NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + + adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, + sizeof(struct amdgpu_ps), + GFP_KERNEL); + if (!adev->pm.dpm.ps) + return -ENOMEM; + power_state_offset = (u8 *)state_array->states; + for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; + power_state = (union pplib_power_state *)power_state_offset; + non_clock_array_index = power_state->v2.nonClockInfoIndex; + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); + if (ps == NULL) { + kfree(adev->pm.dpm.ps); + return -ENOMEM; 
+ } + adev->pm.dpm.ps[i].ps_priv = ps; + k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = idx[j]; + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) + break; + clock_info = (union pplib_clock_info *) + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); + kv_parse_pplib_clock_info(adev, + &adev->pm.dpm.ps[i], k, + clock_info); + k++; + } + kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], + non_clock_info, + non_clock_info_array->ucEntrySize); + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + } + adev->pm.dpm.num_ps = state_array->ucNumEntries; + + /* fill in the vce power states */ + for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { + u32 sclk; + clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); + sclk |= clock_info->sumo.ucEngineClockHigh << 16; + adev->pm.dpm.vce_states[i].sclk = sclk; + adev->pm.dpm.vce_states[i].mclk = 0; + } + + return 0; +} + +static int kv_dpm_init(struct amdgpu_device *adev) +{ + struct kv_power_info *pi; + int ret, i; + + pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); + if (pi == NULL) + return -ENOMEM; + adev->pm.dpm.priv = pi; + + ret = amdgpu_get_platform_caps(adev); + if (ret) + return ret; + + ret = amdgpu_parse_extended_power_table(adev); + if (ret) + return ret; + + for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) + pi->at[i] = TRINITY_AT_DFLT; + + pi->sram_end = SMC_RAM_END; + + pi->enable_nb_dpm = true; + + pi->caps_power_containment = true; + pi->caps_cac = true; + pi->enable_didt = false; + if (pi->enable_didt) { + pi->caps_sq_ramping = true; + pi->caps_db_ramping = true; + pi->caps_td_ramping = true; + pi->caps_tcp_ramping = true; + } + + if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) + pi->caps_sclk_ds = true; + else + pi->caps_sclk_ds = false; + + pi->enable_auto_thermal_throttling = true; + pi->disable_nb_ps3_in_battery = false; + if (amdgpu_bapm == 0) + pi->bapm_enable = false; + else + pi->bapm_enable = true; + pi->voltage_drop_t = 0; + pi->caps_sclk_throttle_low_notification = false; + pi->caps_fps = false; /* true? */ + pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; + pi->caps_uvd_dpm = true; + pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; + pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; + pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? 
true : false; + pi->caps_stable_p_state = false; + + ret = kv_parse_sys_info_table(adev); + if (ret) + return ret; + + kv_patch_voltage_values(adev); + kv_construct_boot_state(adev); + + ret = kv_parse_power_table(adev); + if (ret) + return ret; + + pi->enable_dpm = true; + + return 0; +} + +static void +kv_dpm_debugfs_print_current_performance_level(void *handle, + struct seq_file *m) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + u32 current_index = + (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; + u32 sclk, tmp; + u16 vddc; + + if (current_index >= SMU__NUM_SCLK_DPM_STATE) { + seq_printf(m, "invalid dpm profile %d\n", current_index); + } else { + sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); + tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & + SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> + SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; + vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); + seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); + seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); + seq_printf(m, "power level %d sclk: %u vddc: %u\n", + current_index, sclk, vddc); + } +} + +static void +kv_dpm_print_power_state(void *handle, void *request_ps) +{ + int i; + struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; + struct kv_ps *ps = kv_get_ps(rps); + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + amdgpu_dpm_print_class_info(rps->class, rps->class2); + amdgpu_dpm_print_cap_info(rps->caps); + printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + for (i = 0; i < ps->num_levels; i++) { + struct kv_pl *pl = &ps->levels[i]; + printk("\t\tpower level %d sclk: %u vddc: %u\n", + i, pl->sclk, + kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); + } + amdgpu_dpm_print_ps_status(adev, rps); +} + +static void kv_dpm_fini(struct amdgpu_device *adev) +{ + int i; + + for (i = 0; i < adev->pm.dpm.num_ps; i++) { + kfree(adev->pm.dpm.ps[i].ps_priv); + } + kfree(adev->pm.dpm.ps); + kfree(adev->pm.dpm.priv); + amdgpu_free_extended_power_table(adev); +} + +static void kv_dpm_display_configuration_changed(void *handle) +{ + +} + +static u32 kv_dpm_get_sclk(void *handle, bool low) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); + + if (low) + return requested_state->levels[0].sclk; + else + return requested_state->levels[requested_state->num_levels - 1].sclk; +} + +static u32 kv_dpm_get_mclk(void *handle, bool low) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + + return pi->sys_info.bootup_uma_clk; +} + +/* get temperature in millidegrees */ +static int kv_dpm_get_temp(void *handle) +{ + u32 temp; + int actual_temp = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + temp = RREG32_SMC(0xC0300E0C); + + if (temp) + actual_temp = (temp / 8) - 49; + else + actual_temp = 0; + + actual_temp = actual_temp * 1000; + + return actual_temp; +} + +static int kv_dpm_early_init(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->powerplay.pp_funcs = &kv_dpm_funcs; + adev->powerplay.pp_handle = adev; + kv_dpm_set_irq_funcs(adev); + + return 0; +} + +static int 
kv_dpm_late_init(void *handle) +{ + /* powerdown unused blocks for now */ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->pm.dpm_enabled) + return 0; + + kv_dpm_powergate_acp(adev, true); + kv_dpm_powergate_samu(adev, true); + + return 0; +} + +static int kv_dpm_sw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, + &adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, + &adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + /* default to balanced state */ + adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; + adev->pm.default_sclk = adev->clock.default_sclk; + adev->pm.default_mclk = adev->clock.default_mclk; + adev->pm.current_sclk = adev->clock.default_sclk; + adev->pm.current_mclk = adev->clock.default_mclk; + adev->pm.int_thermal_type = THERMAL_TYPE_NONE; + + if (amdgpu_dpm == 0) + return 0; + + INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); + mutex_lock(&adev->pm.mutex); + ret = kv_dpm_init(adev); + if (ret) + goto dpm_failed; + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + if (amdgpu_dpm == 1) + amdgpu_pm_print_power_states(adev); + mutex_unlock(&adev->pm.mutex); + DRM_INFO("amdgpu: dpm initialized\n"); + + return 0; + +dpm_failed: + kv_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + DRM_ERROR("amdgpu: dpm initialization failed\n"); + return ret; +} + +static int kv_dpm_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + flush_work(&adev->pm.dpm.thermal.work); + + mutex_lock(&adev->pm.mutex); + kv_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + + return 0; +} + +static int kv_dpm_hw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!amdgpu_dpm) + return 0; + + mutex_lock(&adev->pm.mutex); + kv_dpm_setup_asic(adev); + ret = kv_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + amdgpu_legacy_dpm_compute_clocks(adev); + return ret; +} + +static int kv_dpm_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + kv_dpm_disable(adev); + mutex_unlock(&adev->pm.mutex); + } + + return 0; +} + +static int kv_dpm_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + /* disable dpm */ + kv_dpm_disable(adev); + /* reset the power state */ + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); + } + return 0; +} + +static int kv_dpm_resume(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.dpm_enabled) { + /* asic init will reset to the boot state */ + mutex_lock(&adev->pm.mutex); + kv_dpm_setup_asic(adev); + ret = kv_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + if (adev->pm.dpm_enabled) + amdgpu_legacy_dpm_compute_clocks(adev); + } + return 0; +} + +static bool kv_dpm_is_idle(void *handle) +{ + return true; +} + +static int kv_dpm_wait_for_idle(void 
*handle) +{ + return 0; +} + + +static int kv_dpm_soft_reset(void *handle) +{ + return 0; +} + +static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned type, + enum amdgpu_interrupt_state state) +{ + u32 cg_thermal_int; + + switch (type) { + case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); + break; + default: + break; + } + break; + + case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); + cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; + WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); + break; + default: + break; + } + break; + + default: + break; + } + return 0; +} + +static int kv_dpm_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + bool queue_thermal = false; + + if (entry == NULL) + return -EINVAL; + + switch (entry->src_id) { + case 230: /* thermal low to high */ + DRM_DEBUG("IH: thermal low to high\n"); + adev->pm.dpm.thermal.high_to_low = false; + queue_thermal = true; + break; + case 231: /* thermal high to low */ + DRM_DEBUG("IH: thermal high to low\n"); + adev->pm.dpm.thermal.high_to_low = true; + queue_thermal = true; + break; + default: + break; + } + + if (queue_thermal) + schedule_work(&adev->pm.dpm.thermal.work); + + return 0; +} + +static int kv_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int kv_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, + const struct kv_pl *kv_cpl2) +{ + return ((kv_cpl1->sclk == kv_cpl2->sclk) && + (kv_cpl1->vddc_index == kv_cpl2->vddc_index) && + (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) && + (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); +} + +static int kv_check_state_equal(void *handle, + void *current_ps, + void *request_ps, + bool *equal) +{ + struct kv_ps *kv_cps; + struct kv_ps *kv_rps; + int i; + struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; + struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) + return -EINVAL; + + kv_cps = kv_get_ps(cps); + kv_rps = kv_get_ps(rps); + + if (kv_cps == NULL) { + *equal = false; + return 0; + } + + if (kv_cps->num_levels != kv_rps->num_levels) { + *equal = false; + return 0; + } + + for (i = 0; i < kv_cps->num_levels; i++) { + if (!kv_are_power_levels_equal(&(kv_cps->levels[i]), + &(kv_rps->levels[i]))) { + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); + *equal &= 
((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); + + return 0; +} + +static int kv_dpm_read_sensor(void *handle, int idx, + void *value, int *size) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct kv_power_info *pi = kv_get_pi(adev); + uint32_t sclk; + u32 pl_index = + (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> + TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; + + /* size must be at least 4 bytes for all sensors */ + if (*size < 4) + return -EINVAL; + + switch (idx) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + if (pl_index < SMU__NUM_SCLK_DPM_STATE) { + sclk = be32_to_cpu( + pi->graphics_level[pl_index].SclkFrequency); + *((uint32_t *)value) = sclk; + *size = 4; + return 0; + } + return -EINVAL; + case AMDGPU_PP_SENSOR_GPU_TEMP: + *((uint32_t *)value) = kv_dpm_get_temp(adev); + *size = 4; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static int kv_set_powergating_by_smu(void *handle, + uint32_t block_type, bool gate) +{ + switch (block_type) { + case AMD_IP_BLOCK_TYPE_UVD: + kv_dpm_powergate_uvd(handle, gate); + break; + case AMD_IP_BLOCK_TYPE_VCE: + kv_dpm_powergate_vce(handle, gate); + break; + default: + break; + } + return 0; +} + +static const struct amd_ip_funcs kv_dpm_ip_funcs = { + .name = "kv_dpm", + .early_init = kv_dpm_early_init, + .late_init = kv_dpm_late_init, + .sw_init = kv_dpm_sw_init, + .sw_fini = kv_dpm_sw_fini, + .hw_init = kv_dpm_hw_init, + .hw_fini = kv_dpm_hw_fini, + .suspend = kv_dpm_suspend, + .resume = kv_dpm_resume, + .is_idle = kv_dpm_is_idle, + .wait_for_idle = kv_dpm_wait_for_idle, + .soft_reset = kv_dpm_soft_reset, + .set_clockgating_state = kv_dpm_set_clockgating_state, + .set_powergating_state = kv_dpm_set_powergating_state, +}; + +const struct amdgpu_ip_block_version kv_smu_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 1, + .minor = 0, + .rev = 0, + .funcs = &kv_dpm_ip_funcs, +}; + +static const struct amd_pm_funcs kv_dpm_funcs = { + .pre_set_power_state = &kv_dpm_pre_set_power_state, + .set_power_state = &kv_dpm_set_power_state, + .post_set_power_state = &kv_dpm_post_set_power_state, + .display_configuration_changed = &kv_dpm_display_configuration_changed, + .get_sclk = &kv_dpm_get_sclk, + .get_mclk = &kv_dpm_get_mclk, + .print_power_state = &kv_dpm_print_power_state, + .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, + .force_performance_level = &kv_dpm_force_performance_level, + .set_powergating_by_smu = kv_set_powergating_by_smu, + .enable_bapm = &kv_dpm_enable_bapm, + .get_vce_clock_state = amdgpu_get_vce_clock_state, + .check_state_equal = kv_check_state_equal, + .read_sensor = &kv_dpm_read_sensor, + .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, +}; + +static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { + .set = kv_dpm_set_interrupt_state, + .process = kv_dpm_process_interrupt, +}; + +static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; + adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; +} diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h new file mode 100644 index 000000000000..6df0ed41317c --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h @@ -0,0 +1,229 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __KV_DPM_H__ +#define __KV_DPM_H__ + +#define SMU__NUM_SCLK_DPM_STATE 8 +#define SMU__NUM_MCLK_DPM_LEVELS 4 +#define SMU__NUM_LCLK_DPM_LEVELS 8 +#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */ +#include "smu7_fusion.h" +#include "ppsmc.h" + +#define SUMO_MAX_HARDWARE_POWERLEVELS 5 + +#define SUMO_MAX_NUMBER_VOLTAGES 4 + +struct sumo_vid_mapping_entry { + u16 vid_2bit; + u16 vid_7bit; +}; + +struct sumo_vid_mapping_table { + u32 num_entries; + struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES]; +}; + +struct sumo_sclk_voltage_mapping_entry { + u32 sclk_frequency; + u16 vid_2bit; + u16 rsv; +}; + +struct sumo_sclk_voltage_mapping_table { + u32 num_max_dpm_entries; + struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS]; +}; + +#define TRINITY_AT_DFLT 30 + +#define KV_NUM_NBPSTATES 4 + +enum kv_pt_config_reg_type { + KV_CONFIGREG_MMR = 0, + KV_CONFIGREG_SMC_IND, + KV_CONFIGREG_DIDT_IND, + KV_CONFIGREG_CACHE, + KV_CONFIGREG_MAX +}; + +struct kv_pt_config_reg { + u32 offset; + u32 mask; + u32 shift; + u32 value; + enum kv_pt_config_reg_type type; +}; + +struct kv_lcac_config_values { + u32 block_id; + u32 signal_id; + u32 t; +}; + +struct kv_lcac_config_reg { + u32 cntl; + u32 block_mask; + u32 block_shift; + u32 signal_mask; + u32 signal_shift; + u32 t_mask; + u32 t_shift; + u32 enable_mask; + u32 enable_shift; +}; + +struct kv_pl { + u32 sclk; + u8 vddc_index; + u8 ds_divider_index; + u8 ss_divider_index; + u8 allow_gnb_slow; + u8 force_nbp_state; + u8 display_wm; + u8 vce_wm; +}; + +struct kv_ps { + struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS]; + u32 num_levels; + bool need_dfs_bypass; + u8 dpm0_pg_nb_ps_lo; + u8 dpm0_pg_nb_ps_hi; + u8 dpmx_nb_ps_lo; + u8 dpmx_nb_ps_hi; +}; + +struct kv_sys_info { + u32 bootup_uma_clk; + u32 bootup_sclk; + u32 dentist_vco_freq; + u32 nb_dpm_enable; + u32 nbp_memory_clock[KV_NUM_NBPSTATES]; + u32 nbp_n_clock[KV_NUM_NBPSTATES]; + u16 bootup_nb_voltage_index; + u8 htc_tmp_lmt; + u8 htc_hyst_lmt; + struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table; + struct sumo_vid_mapping_table vid_mapping_table; + u32 uma_channel_number; +}; + +struct kv_power_info { + u32 at[SUMO_MAX_HARDWARE_POWERLEVELS]; + u32 voltage_drop_t; + struct kv_sys_info sys_info; + struct kv_pl boot_pl; + bool enable_nb_ps_policy; + bool disable_nb_ps3_in_battery; + bool video_start; + bool battery_state; + u32 lowest_valid; + u32 highest_valid; + 
u16 high_voltage_t; + bool cac_enabled; + bool bapm_enable; + /* smc offsets */ + u32 sram_end; + u32 dpm_table_start; + u32 soft_regs_start; + /* dpm SMU tables */ + u8 graphics_dpm_level_count; + u8 uvd_level_count; + u8 vce_level_count; + u8 acp_level_count; + u8 samu_level_count; + u16 fps_high_t; + SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE]; + SMU7_Fusion_ACPILevel acpi_level; + SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD]; + SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE]; + SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP]; + SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU]; + u8 uvd_boot_level; + u8 vce_boot_level; + u8 acp_boot_level; + u8 samu_boot_level; + u8 uvd_interval; + u8 vce_interval; + u8 acp_interval; + u8 samu_interval; + u8 graphics_boot_level; + u8 graphics_interval; + u8 graphics_therm_throttle_enable; + u8 graphics_voltage_change_enable; + u8 graphics_clk_slow_enable; + u8 graphics_clk_slow_divider; + u8 fps_low_t; + u32 low_sclk_interrupt_t; + bool uvd_power_gated; + bool vce_power_gated; + bool acp_power_gated; + bool samu_power_gated; + bool nb_dpm_enabled; + /* flags */ + bool enable_didt; + bool enable_dpm; + bool enable_auto_thermal_throttling; + bool enable_nb_dpm; + /* caps */ + bool caps_cac; + bool caps_power_containment; + bool caps_sq_ramping; + bool caps_db_ramping; + bool caps_td_ramping; + bool caps_tcp_ramping; + bool caps_sclk_throttle_low_notification; + bool caps_fps; + bool caps_uvd_dpm; + bool caps_uvd_pg; + bool caps_vce_pg; + bool caps_samu_pg; + bool caps_acp_pg; + bool caps_stable_p_state; + bool caps_enable_dfs_bypass; + bool caps_sclk_ds; + struct amdgpu_ps current_rps; + struct kv_ps current_ps; + struct amdgpu_ps requested_rps; + struct kv_ps requested_ps; +}; + +/* XXX are these ok? */ +#define KV_TEMP_RANGE_MIN (90 * 1000) +#define KV_TEMP_RANGE_MAX (120 * 1000) + +/* kv_smc.c */ +int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id); +int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask); +int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, u32 parameter); +int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 *value, u32 limit); +int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable); +int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable); +int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit); + +#endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c new file mode 100644 index 000000000000..2d9ab6b8be66 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c @@ -0,0 +1,218 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Alex Deucher + */ + +#include "amdgpu.h" +#include "cikd.h" +#include "kv_dpm.h" + +#include "smu/smu_7_0_0_d.h" +#include "smu/smu_7_0_0_sh_mask.h" + +int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id) +{ + u32 i; + u32 tmp = 0; + + WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK); + + for (i = 0; i < adev->usec_timeout; i++) { + if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0) + break; + udelay(1); + } + tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK; + + if (tmp != 1) { + if (tmp == 0xFF) + return -EINVAL; + else if (tmp == 0xFE) + return -EINVAL; + } + + return 0; +} + +int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask) +{ + int ret; + + ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask); + + if (ret == 0) + *enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0); + + return ret; +} + +int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, u32 parameter) +{ + + WREG32(mmSMC_MSG_ARG_0, parameter); + + return amdgpu_kv_notify_message_to_smu(adev, msg); +} + +static int kv_set_smc_sram_address(struct amdgpu_device *adev, + u32 smc_address, u32 limit) +{ + if (smc_address & 3) + return -EINVAL; + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(mmSMC_IND_INDEX_0, smc_address); + WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, + ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); + + return 0; +} + +int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 *value, u32 limit) +{ + int ret; + + ret = kv_set_smc_sram_address(adev, smc_address, limit); + if (ret) + return ret; + + *value = RREG32(mmSMC_IND_DATA_0); + return 0; +} + +int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable) +{ + if (enable) + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable); + else + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable); +} + +int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable) +{ + if (enable) + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM); + else + return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM); +} + +int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit) +{ + int ret; + u32 data, original_data, addr, extra_shift, t_byte, count, mask; + + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + t_byte = addr & 3; + + /* RMW for the initial bytes */ + if (t_byte != 0) { + addr -= t_byte; + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + original_data = RREG32(mmSMC_IND_DATA_0); + + data = 0; + mask = 0; + count = 4; + while (count > 0) { + if (t_byte > 0) { + mask = (mask << 8) | 0xff; + t_byte--; + } else if (byte_count > 0) { + data = (data << 8) + *src++; + byte_count--; + mask <<= 8; + } else { + data <<= 8; + mask = (mask << 8) | 0xff; + } + count--; + } + + data |= original_data & 
mask; + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + WREG32(mmSMC_IND_DATA_0, data); + + addr += 4; + } + + while (byte_count >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + WREG32(mmSMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + /* RMW for the final bytes */ + if (byte_count > 0) { + data = 0; + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + original_data = RREG32(mmSMC_IND_DATA_0); + + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* SMC address space is BE */ + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + + data |= (original_data & ~((~0UL) << extra_shift)); + + ret = kv_set_smc_sram_address(adev, addr, limit); + if (ret) + return ret; + + WREG32(mmSMC_IND_DATA_0, data); + } + return 0; +} + diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c new file mode 100644 index 000000000000..3c6ee493e410 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -0,0 +1,1080 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include "amdgpu.h" +#include "amdgpu_i2c.h" +#include "amdgpu_atombios.h" +#include "atom.h" +#include "amd_pcie.h" +#include "legacy_dpm.h" +#include "amdgpu_dpm_internal.h" +#include "amdgpu_display.h" + +#define amdgpu_dpm_pre_set_power_state(adev) \ + ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_post_set_power_state(adev) \ + ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_display_configuration_changed(adev) \ + ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_print_power_state(adev, ps) \ + ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps))) + +#define amdgpu_dpm_vblank_too_short(adev) \ + ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle)) + +#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ + ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) + +void amdgpu_dpm_print_class_info(u32 class, u32 class2) +{ + const char *s; + + switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { + case ATOM_PPLIB_CLASSIFICATION_UI_NONE: + default: + s = "none"; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: + s = "battery"; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: + s = "balanced"; + break; + case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: + s = "performance"; + break; + } + printk("\tui class: %s\n", s); + printk("\tinternal class:"); + if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && + (class2 == 0)) + pr_cont(" none"); + else { + if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) + pr_cont(" boot"); + if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) + pr_cont(" thermal"); + if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) + pr_cont(" limited_pwr"); + if (class & ATOM_PPLIB_CLASSIFICATION_REST) + pr_cont(" rest"); + if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) + pr_cont(" forced"); + if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) + pr_cont(" 3d_perf"); + if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) + pr_cont(" ovrdrv"); + if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + pr_cont(" uvd"); + if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) + pr_cont(" 3d_low"); + if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) + pr_cont(" acpi"); + if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) + pr_cont(" uvd_hd2"); + if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) + pr_cont(" uvd_hd"); + if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) + pr_cont(" uvd_sd"); + if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) + pr_cont(" limited_pwr2"); + if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) + pr_cont(" ulv"); + if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) + pr_cont(" uvd_mvc"); + } + pr_cont("\n"); +} + +void amdgpu_dpm_print_cap_info(u32 caps) +{ + printk("\tcaps:"); + if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) + pr_cont(" single_disp"); + if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) + pr_cont(" video"); + if (caps & ATOM_PPLIB_DISALLOW_ON_DC) + pr_cont(" no_dc"); + pr_cont("\n"); +} + +void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + printk("\tstatus:"); + if (rps == adev->pm.dpm.current_ps) + pr_cont(" c"); + if (rps == adev->pm.dpm.requested_ps) + pr_cont(" r"); + if (rps == adev->pm.dpm.boot_ps) + pr_cont(" b"); + pr_cont("\n"); +} + +void amdgpu_pm_print_power_states(struct amdgpu_device *adev) +{ + int i; + + if 
(adev->powerplay.pp_funcs->print_power_state == NULL) + return; + + for (i = 0; i < adev->pm.dpm.num_ps; i++) + amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); + +} + +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; + struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; + struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; +}; + +int amdgpu_get_platform_caps(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); + adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); + adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); + + return 0; +} + +union fan_info { + struct _ATOM_PPLIB_FANTABLE fan; + struct _ATOM_PPLIB_FANTABLE2 fan2; + struct _ATOM_PPLIB_FANTABLE3 fan3; +}; + +static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table, + ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) +{ + u32 size = atom_table->ucNumEntries * + sizeof(struct amdgpu_clock_voltage_dependency_entry); + int i; + ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; + + amdgpu_table->entries = kzalloc(size, GFP_KERNEL); + if (!amdgpu_table->entries) + return -ENOMEM; + + entry = &atom_table->entries[0]; + for (i = 0; i < atom_table->ucNumEntries; i++) { + amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | + (entry->ucClockHigh << 16); + amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); + } + amdgpu_table->count = atom_table->ucNumEntries; + + return 0; +} + +/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 + +int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + union power_info *power_info; + union fan_info *fan_info; + ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + int ret, i; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + /* fan table */ + if (le16_to_cpu(power_info->pplib.usTableSize) >= + sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { + if (power_info->pplib3.usFanTableOffset) { + fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + + 
le16_to_cpu(power_info->pplib3.usFanTableOffset)); + adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; + adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); + adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); + adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); + adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); + adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); + adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); + if (fan_info->fan.ucFanTableFormat >= 2) + adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); + else + adev->pm.dpm.fan.t_max = 10900; + adev->pm.dpm.fan.cycle_delay = 100000; + if (fan_info->fan.ucFanTableFormat >= 3) { + adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; + adev->pm.dpm.fan.default_max_fan_pwm = + le16_to_cpu(fan_info->fan3.usFanPWMMax); + adev->pm.dpm.fan.default_fan_output_sensitivity = 4836; + adev->pm.dpm.fan.fan_output_sensitivity = + le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); + } + adev->pm.dpm.fan.ucode_fan_control = true; + } + } + + /* clock dependancy tables, shedding tables */ + if (le16_to_cpu(power_info->pplib.usTableSize) >= + sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { + if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); + ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + dep_table); + if (ret) { + amdgpu_free_extended_power_table(adev); + return ret; + } + } + if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); + ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + dep_table); + if (ret) { + amdgpu_free_extended_power_table(adev); + return ret; + } + } + if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); + ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + dep_table); + if (ret) { + amdgpu_free_extended_power_table(adev); + return ret; + } + } + if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); + ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, + dep_table); + if (ret) { + amdgpu_free_extended_power_table(adev); + return ret; + } + } + if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { + ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = + (ATOM_PPLIB_Clock_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); + if (clk_v->ucNumEntries) { + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = + le16_to_cpu(clk_v->entries[0].usSclkLow) | + (clk_v->entries[0].ucSclkHigh << 16); + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = + le16_to_cpu(clk_v->entries[0].usMclkLow) | + (clk_v->entries[0].ucMclkHigh << 16); + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = + 
le16_to_cpu(clk_v->entries[0].usVddc); + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = + le16_to_cpu(clk_v->entries[0].usVddci); + } + } + if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { + ATOM_PPLIB_PhaseSheddingLimits_Table *psl = + (ATOM_PPLIB_PhaseSheddingLimits_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); + ATOM_PPLIB_PhaseSheddingLimits_Record *entry; + + adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = + kcalloc(psl->ucNumEntries, + sizeof(struct amdgpu_phase_shedding_limits_entry), + GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + + entry = &psl->entries[0]; + for (i = 0; i < psl->ucNumEntries; i++) { + adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = + le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); + adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = + le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); + adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); + } + adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = + psl->ucNumEntries; + } + } + + /* cac data */ + if (le16_to_cpu(power_info->pplib.usTableSize) >= + sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { + adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); + adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); + adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; + adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); + if (adev->pm.dpm.tdp_od_limit) + adev->pm.dpm.power_control = true; + else + adev->pm.dpm.power_control = false; + adev->pm.dpm.tdp_adjustment = 0; + adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); + adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); + adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); + if (power_info->pplib5.usCACLeakageTableOffset) { + ATOM_PPLIB_CAC_Leakage_Table *cac_table = + (ATOM_PPLIB_CAC_Leakage_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); + ATOM_PPLIB_CAC_Leakage_Record *entry; + u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); + adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + entry = &cac_table->entries[0]; + for (i = 0; i < cac_table->ucNumEntries; i++) { + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = + le16_to_cpu(entry->usVddc1); + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = + le16_to_cpu(entry->usVddc2); + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = + le16_to_cpu(entry->usVddc3); + } else { + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = + le16_to_cpu(entry->usVddc); + adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = + le32_to_cpu(entry->ulLeakageValue); + } + entry = (ATOM_PPLIB_CAC_Leakage_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); + } + 
adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; + } + } + + /* ext tables */ + if (le16_to_cpu(power_info->pplib.usTableSize) >= + sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { + ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && + ext_hdr->usVCETableOffset) { + VCEClockInfoArray *array = (VCEClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usVCETableOffset) + 1); + ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = + (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + + 1 + array->ucNumEntries * sizeof(VCEClockInfo)); + ATOM_PPLIB_VCE_State_Table *states = + (ATOM_PPLIB_VCE_State_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + + 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + + 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); + ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; + ATOM_PPLIB_VCE_State_Record *state_entry; + VCEClockInfo *vce_clk; + u32 size = limits->numEntries * + sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = + limits->numEntries; + entry = &limits->entries[0]; + state_entry = &states->entries[0]; + for (i = 0; i < limits->numEntries; i++) { + vce_clk = (VCEClockInfo *) + ((u8 *)&array->entries[0] + + (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = + le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = + le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); + adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); + } + adev->pm.dpm.num_of_vce_states = + states->numEntries > AMD_MAX_VCE_LEVELS ? 
+ AMD_MAX_VCE_LEVELS : states->numEntries; + for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { + vce_clk = (VCEClockInfo *) + ((u8 *)&array->entries[0] + + (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); + adev->pm.dpm.vce_states[i].evclk = + le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); + adev->pm.dpm.vce_states[i].ecclk = + le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); + adev->pm.dpm.vce_states[i].clk_idx = + state_entry->ucClockInfoIndex & 0x3f; + adev->pm.dpm.vce_states[i].pstate = + (state_entry->ucClockInfoIndex & 0xc0) >> 6; + state_entry = (ATOM_PPLIB_VCE_State_Record *) + ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); + } + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && + ext_hdr->usUVDTableOffset) { + UVDClockInfoArray *array = (UVDClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); + ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = + (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + + 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); + ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; + u32 size = limits->numEntries * + sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = + limits->numEntries; + entry = &limits->entries[0]; + for (i = 0; i < limits->numEntries; i++) { + UVDClockInfo *uvd_clk = (UVDClockInfo *) + ((u8 *)&array->entries[0] + + (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = + le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = + le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); + adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); + } + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && + ext_hdr->usSAMUTableOffset) { + ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = + (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); + ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; + u32 size = limits->numEntries * + sizeof(struct amdgpu_clock_voltage_dependency_entry); + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = + limits->numEntries; + entry = &limits->entries[0]; + for (i = 0; i < limits->numEntries; i++) { + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = + le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); + adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(entry->usVoltage); + entry = 
(ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); + } + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && + ext_hdr->usPPMTableOffset) { + ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPPMTableOffset)); + adev->pm.dpm.dyn_state.ppm_table = + kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.ppm_table) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; + adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = + le16_to_cpu(ppm->usCpuCoreNumber); + adev->pm.dpm.dyn_state.ppm_table->platform_tdp = + le32_to_cpu(ppm->ulPlatformTDP); + adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = + le32_to_cpu(ppm->ulSmallACPlatformTDP); + adev->pm.dpm.dyn_state.ppm_table->platform_tdc = + le32_to_cpu(ppm->ulPlatformTDC); + adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = + le32_to_cpu(ppm->ulSmallACPlatformTDC); + adev->pm.dpm.dyn_state.ppm_table->apu_tdp = + le32_to_cpu(ppm->ulApuTDP); + adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = + le32_to_cpu(ppm->ulDGpuTDP); + adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = + le32_to_cpu(ppm->ulDGpuUlvPower); + adev->pm.dpm.dyn_state.ppm_table->tj_max = + le32_to_cpu(ppm->ulTjmax); + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && + ext_hdr->usACPTableOffset) { + ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = + (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usACPTableOffset) + 1); + ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; + u32 size = limits->numEntries * + sizeof(struct amdgpu_clock_voltage_dependency_entry); + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = + kzalloc(size, GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = + limits->numEntries; + entry = &limits->entries[0]; + for (i = 0; i < limits->numEntries; i++) { + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = + le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); + adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = + le16_to_cpu(entry->usVoltage); + entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) + ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); + } + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && + ext_hdr->usPowerTuneTableOffset) { + u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + ATOM_PowerTune_Table *pt; + adev->pm.dpm.dyn_state.cac_tdp_table = + kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.cac_tdp_table) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + if (rev > 0) { + ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = + ppt->usMaximumPowerDeliveryLimit; + pt = &ppt->power_tune_table; + } else { + ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) + 
(mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); + adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; + pt = &ppt->power_tune_table; + } + adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); + adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = + le16_to_cpu(pt->usConfigurableTDP); + adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); + adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = + le16_to_cpu(pt->usBatteryPowerLimit); + adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = + le16_to_cpu(pt->usSmallPowerLimit); + adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = + le16_to_cpu(pt->usLowCACLeakage); + adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = + le16_to_cpu(pt->usHighCACLeakage); + } + if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) && + ext_hdr->usSclkVddgfxTableOffset) { + dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset)); + ret = amdgpu_parse_clk_voltage_dep_table( + &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk, + dep_table); + if (ret) { + kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); + return ret; + } + } + } + + return 0; +} + +void amdgpu_free_extended_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state; + + kfree(dyn_state->vddc_dependency_on_sclk.entries); + kfree(dyn_state->vddci_dependency_on_mclk.entries); + kfree(dyn_state->vddc_dependency_on_mclk.entries); + kfree(dyn_state->mvdd_dependency_on_mclk.entries); + kfree(dyn_state->cac_leakage_table.entries); + kfree(dyn_state->phase_shedding_limits_table.entries); + kfree(dyn_state->ppm_table); + kfree(dyn_state->cac_tdp_table); + kfree(dyn_state->vce_clock_voltage_dependency_table.entries); + kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); + kfree(dyn_state->samu_clock_voltage_dependency_table.entries); + kfree(dyn_state->acp_clock_voltage_dependency_table.entries); + kfree(dyn_state->vddgfx_dependency_on_sclk.entries); +} + +static const char *pp_lib_thermal_controller_names[] = { + "NONE", + "lm63", + "adm1032", + "adm1030", + "max6649", + "lm64", + "f75375", + "RV6xx", + "RV770", + "adt7473", + "NONE", + "External GPIO", + "Evergreen", + "emc2103", + "Sumo", + "Northern Islands", + "Southern Islands", + "lm96163", + "Sea Islands", + "Kaveri/Kabini", +}; + +void amdgpu_add_thermal_controller(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + ATOM_PPLIB_POWERPLAYTABLE *power_table; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + ATOM_PPLIB_THERMALCONTROLLER *controller; + struct amdgpu_i2c_bus_rec i2c_bus; + u16 data_offset; + u8 frev, crev; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return; + power_table = (ATOM_PPLIB_POWERPLAYTABLE *) + (mode_info->atom_context->bios + data_offset); + controller = &power_table->sThermalController; + + /* add the i2c bus for thermal/fan chip */ + if (controller->ucType > 0) { + if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) + adev->pm.no_fan = true; + adev->pm.fan_pulses_per_revolution = + controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; + if (adev->pm.fan_pulses_per_revolution) { + adev->pm.fan_min_rpm = controller->ucFanMinRPM; + 
adev->pm.fan_max_rpm = controller->ucFanMaxRPM; + } + if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_RV770; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_NI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_SI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_CI; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { + DRM_INFO("Internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_KV; + } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { + DRM_INFO("External GPIO thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { + DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; + } else if (controller->ucType == + ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { + DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; + } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { + DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", + pp_lib_thermal_controller_names[controller->ucType], + controller->ucI2cAddress >> 1, + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); + adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; + i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine); + adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus); + if (adev->pm.i2c_bus) { + struct i2c_board_info info = { }; + const char *name = pp_lib_thermal_controller_names[controller->ucType]; + info.addr = controller->ucI2cAddress >> 1; + strlcpy(info.type, name, sizeof(info.type)); + i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); + } + } else { + DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", + controller->ucType, + controller->ucI2cAddress >> 1, + (controller->ucFanParameters & + ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); + } + } +} + +struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (idx < adev->pm.dpm.num_of_vce_states) + return &adev->pm.dpm.vce_states[idx]; + + return NULL; +} + +static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, + enum amd_pm_state_type dpm_state) +{ + int i; + struct amdgpu_ps *ps; + u32 ui_class; + bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? + true : false; + + /* check if the vblank period is too short to adjust the mclk */ + if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { + if (amdgpu_dpm_vblank_too_short(adev)) + single_display = false; + } + + /* certain older asics have a separare 3D performance state, + * so try that first if the user selected performance + */ + if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) + dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; + /* balanced states don't exist at the moment */ + if (dpm_state == POWER_STATE_TYPE_BALANCED) + dpm_state = POWER_STATE_TYPE_PERFORMANCE; + +restart_search: + /* Pick the best power state based on current conditions */ + for (i = 0; i < adev->pm.dpm.num_ps; i++) { + ps = &adev->pm.dpm.ps[i]; + ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; + switch (dpm_state) { + /* user states */ + case POWER_STATE_TYPE_BATTERY: + if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { + if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { + if (single_display) + return ps; + } else + return ps; + } + break; + case POWER_STATE_TYPE_BALANCED: + if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { + if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { + if (single_display) + return ps; + } else + return ps; + } + break; + case POWER_STATE_TYPE_PERFORMANCE: + if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { + if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { + if (single_display) + return ps; + } else + return ps; + } + break; + /* internal states */ + case POWER_STATE_TYPE_INTERNAL_UVD: + if (adev->pm.dpm.uvd_ps) + return adev->pm.dpm.uvd_ps; + else + break; + case POWER_STATE_TYPE_INTERNAL_UVD_SD: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_UVD_HD: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_UVD_HD2: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_UVD_MVC: + if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_BOOT: + return adev->pm.dpm.boot_ps; + case POWER_STATE_TYPE_INTERNAL_THERMAL: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_ACPI: + if 
(ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_ULV: + if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) + return ps; + break; + case POWER_STATE_TYPE_INTERNAL_3DPERF: + if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) + return ps; + break; + default: + break; + } + } + /* use a fallback state if we didn't match */ + switch (dpm_state) { + case POWER_STATE_TYPE_INTERNAL_UVD_SD: + dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; + goto restart_search; + case POWER_STATE_TYPE_INTERNAL_UVD_HD: + case POWER_STATE_TYPE_INTERNAL_UVD_HD2: + case POWER_STATE_TYPE_INTERNAL_UVD_MVC: + if (adev->pm.dpm.uvd_ps) { + return adev->pm.dpm.uvd_ps; + } else { + dpm_state = POWER_STATE_TYPE_PERFORMANCE; + goto restart_search; + } + case POWER_STATE_TYPE_INTERNAL_THERMAL: + dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; + goto restart_search; + case POWER_STATE_TYPE_INTERNAL_ACPI: + dpm_state = POWER_STATE_TYPE_BATTERY; + goto restart_search; + case POWER_STATE_TYPE_BATTERY: + case POWER_STATE_TYPE_BALANCED: + case POWER_STATE_TYPE_INTERNAL_3DPERF: + dpm_state = POWER_STATE_TYPE_PERFORMANCE; + goto restart_search; + default: + break; + } + + return NULL; +} + +static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) +{ + struct amdgpu_ps *ps; + enum amd_pm_state_type dpm_state; + int ret; + bool equal = false; + + /* if dpm init failed */ + if (!adev->pm.dpm_enabled) + return 0; + + if (adev->pm.dpm.user_state != adev->pm.dpm.state) { + /* add other state override checks here */ + if ((!adev->pm.dpm.thermal_active) && + (!adev->pm.dpm.uvd_active)) + adev->pm.dpm.state = adev->pm.dpm.user_state; + } + dpm_state = adev->pm.dpm.state; + + ps = amdgpu_dpm_pick_power_state(adev, dpm_state); + if (ps) + adev->pm.dpm.requested_ps = ps; + else + return -EINVAL; + + if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { + printk("switching from power state:\n"); + amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); + printk("switching to power state:\n"); + amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); + } + + /* update whether vce is active */ + ps->vce_active = adev->pm.dpm.vce_active; + if (adev->powerplay.pp_funcs->display_configuration_changed) + amdgpu_dpm_display_configuration_changed(adev); + + ret = amdgpu_dpm_pre_set_power_state(adev); + if (ret) + return ret; + + if (adev->powerplay.pp_funcs->check_state_equal) { + if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) + equal = false; + } + + if (equal) + return 0; + + if (adev->powerplay.pp_funcs->set_power_state) + adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle); + + amdgpu_dpm_post_set_power_state(adev); + + adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; + adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; + + if (adev->powerplay.pp_funcs->force_performance_level) { + if (adev->pm.dpm.thermal_active) { + enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; + /* force low perf level for thermal */ + amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); + /* save the user's level */ + adev->pm.dpm.forced_level = level; + } else { + /* otherwise, user selected level */ + amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); + } + } + + return 0; +} + +void amdgpu_legacy_dpm_compute_clocks(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + int i = 0; + + if 
(adev->mode_info.num_crtc) + amdgpu_display_bandwidth_update(adev); + + for (i = 0; i < AMDGPU_MAX_RINGS; i++) { + struct amdgpu_ring *ring = adev->rings[i]; + if (ring && ring->sched.ready) + amdgpu_fence_wait_empty(ring); + } + + amdgpu_dpm_get_active_displays(adev); + + amdgpu_dpm_change_power_state_locked(adev); +} + +void amdgpu_dpm_thermal_work_handler(struct work_struct *work) +{ + struct amdgpu_device *adev = + container_of(work, struct amdgpu_device, + pm.dpm.thermal.work); + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + /* switch to the thermal state */ + enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; + int temp, size = sizeof(temp); + + if (!adev->pm.dpm_enabled) + return; + + if (!pp_funcs->read_sensor(adev->powerplay.pp_handle, + AMDGPU_PP_SENSOR_GPU_TEMP, + (void *)&temp, + &size)) { + if (temp < adev->pm.dpm.thermal.min_temp) + /* switch back the user state */ + dpm_state = adev->pm.dpm.user_state; + } else { + if (adev->pm.dpm.thermal.high_to_low) + /* switch back the user state */ + dpm_state = adev->pm.dpm.user_state; + } + + if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) + adev->pm.dpm.thermal_active = true; + else + adev->pm.dpm.thermal_active = false; + + adev->pm.dpm.state = dpm_state; + + amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle); +} diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h new file mode 100644 index 000000000000..93bd3973330c --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h @@ -0,0 +1,38 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __LEGACY_DPM_H__ +#define __LEGACY_DPM_H__ + +void amdgpu_dpm_print_class_info(u32 class, u32 class2); +void amdgpu_dpm_print_cap_info(u32 caps); +void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, + struct amdgpu_ps *rps); +int amdgpu_get_platform_caps(struct amdgpu_device *adev); +int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); +void amdgpu_free_extended_power_table(struct amdgpu_device *adev); +void amdgpu_add_thermal_controller(struct amdgpu_device *adev); +struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx); +void amdgpu_pm_print_power_states(struct amdgpu_device *adev); +void amdgpu_legacy_dpm_compute_clocks(void *handle); +void amdgpu_dpm_thermal_work_handler(struct work_struct *work); +#endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h b/drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h new file mode 100644 index 000000000000..8463245f424f --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h @@ -0,0 +1,200 @@ +/* + * Copyright 2011 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef PP_SMC_H +#define PP_SMC_H + +#pragma pack(push, 1) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 +#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO 0x40 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 +#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x02 + +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +#define PPSMC_Result_OK ((uint8_t)0x01) +#define PPSMC_Result_Failed ((uint8_t)0xFF) + +typedef uint8_t PPSMC_Result; + +#define PPSMC_MSG_Halt ((uint8_t)0x10) +#define PPSMC_MSG_Resume ((uint8_t)0x11) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint8_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint8_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint8_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint8_t)0x16) +#define PPSMC_MSG_RunningOnAC ((uint8_t)0x17) +#define PPSMC_MSG_SwitchToSwState ((uint8_t)0x20) +#define PPSMC_MSG_SwitchToInitialState ((uint8_t)0x40) +#define PPSMC_MSG_NoForcedLevel ((uint8_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint8_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint8_t)0x43) +#define PPSMC_MSG_SwitchToMinimumPower ((uint8_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint8_t)0x52) +#define PPSMC_MSG_EnableCac ((uint8_t)0x53) +#define PPSMC_MSG_DisableCac ((uint8_t)0x54) +#define PPSMC_TDPClampingActive ((uint8_t)0x59) +#define PPSMC_TDPClampingInactive ((uint8_t)0x5A) +#define PPSMC_StartFanControl ((uint8_t)0x5B) +#define PPSMC_StopFanControl ((uint8_t)0x5C) +#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) +#define PPSMC_NoDisplay ((uint8_t)0x5D) +#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) +#define PPSMC_HasDisplay ((uint8_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) +#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61) +#define PPSMC_MSG_EnableULV ((uint8_t)0x62) +#define PPSMC_MSG_DisableULV ((uint8_t)0x63) +#define PPSMC_MSG_EnterULV ((uint8_t)0x64) +#define PPSMC_MSG_ExitULV ((uint8_t)0x65) +#define PPSMC_CACLongTermAvgEnable ((uint8_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint8_t)0x6F) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint8_t)0x7A) +#define PPSMC_FlushDataCache ((uint8_t)0x80) +#define PPSMC_MSG_SetEnabledLevels ((uint8_t)0x82) +#define PPSMC_MSG_SetForcedLevels ((uint8_t)0x83) +#define PPSMC_MSG_ResetToDefaults ((uint8_t)0x84) +#define PPSMC_MSG_EnableDTE ((uint8_t)0x87) +#define PPSMC_MSG_DisableDTE ((uint8_t)0x88) +#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) +#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) +#define 
PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) + +/* CI/KV/KB */ +#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) +#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) +#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) +#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) +#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) +#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) +#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) +#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) +#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) +#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) +#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) +#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) +#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) +#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) +#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) +#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) +#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) +#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) +#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150) +#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151) +#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) +#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) +#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) +#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157) +#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158) +#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) +#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) +#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) +#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) +#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) +#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) +#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) +#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) +#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) +#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) +#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) +#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) +#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) +#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) +#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) +#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) +#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) +#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) +#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) +#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) + +#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) +#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) + +#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) +#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) + +/* TN */ +#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102) +#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104) +#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108) +#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109) +#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a) +#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e) +#define PPSMC_MSG_VCEPowerON 
((uint32_t) 0x10f) +#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112) +#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) +#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) +#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120) +#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121) +#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) + +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) +#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) + +typedef uint16_t PPSMC_Msg; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h new file mode 100644 index 000000000000..055321f61ca7 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h @@ -0,0 +1,127 @@ +/* + * Copyright 2011 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __R600_DPM_H__ +#define __R600_DPM_H__ + +#define R600_ASI_DFLT 10000 +#define R600_BSP_DFLT 0x41EB +#define R600_BSU_DFLT 0x2 +#define R600_AH_DFLT 5 +#define R600_RLP_DFLT 25 +#define R600_RMP_DFLT 65 +#define R600_LHP_DFLT 40 +#define R600_LMP_DFLT 15 +#define R600_TD_DFLT 0 +#define R600_UTC_DFLT_00 0x24 +#define R600_UTC_DFLT_01 0x22 +#define R600_UTC_DFLT_02 0x22 +#define R600_UTC_DFLT_03 0x22 +#define R600_UTC_DFLT_04 0x22 +#define R600_UTC_DFLT_05 0x22 +#define R600_UTC_DFLT_06 0x22 +#define R600_UTC_DFLT_07 0x22 +#define R600_UTC_DFLT_08 0x22 +#define R600_UTC_DFLT_09 0x22 +#define R600_UTC_DFLT_10 0x22 +#define R600_UTC_DFLT_11 0x22 +#define R600_UTC_DFLT_12 0x22 +#define R600_UTC_DFLT_13 0x22 +#define R600_UTC_DFLT_14 0x22 +#define R600_DTC_DFLT_00 0x24 +#define R600_DTC_DFLT_01 0x22 +#define R600_DTC_DFLT_02 0x22 +#define R600_DTC_DFLT_03 0x22 +#define R600_DTC_DFLT_04 0x22 +#define R600_DTC_DFLT_05 0x22 +#define R600_DTC_DFLT_06 0x22 +#define R600_DTC_DFLT_07 0x22 +#define R600_DTC_DFLT_08 0x22 +#define R600_DTC_DFLT_09 0x22 +#define R600_DTC_DFLT_10 0x22 +#define R600_DTC_DFLT_11 0x22 +#define R600_DTC_DFLT_12 0x22 +#define R600_DTC_DFLT_13 0x22 +#define R600_DTC_DFLT_14 0x22 +#define R600_VRC_DFLT 0x0000C003 +#define R600_VOLTAGERESPONSETIME_DFLT 1000 +#define R600_BACKBIASRESPONSETIME_DFLT 1000 +#define R600_VRU_DFLT 0x3 +#define R600_SPLLSTEPTIME_DFLT 0x1000 +#define R600_SPLLSTEPUNIT_DFLT 0x3 +#define R600_TPU_DFLT 0 +#define R600_TPC_DFLT 0x200 +#define R600_SSTU_DFLT 0 +#define R600_SST_DFLT 0x00C8 +#define R600_GICST_DFLT 0x200 +#define R600_FCT_DFLT 0x0400 +#define R600_FCTU_DFLT 0 +#define R600_CTXCGTT3DRPHC_DFLT 0x20 +#define R600_CTXCGTT3DRSDC_DFLT 0x40 +#define R600_VDDC3DOORPHC_DFLT 0x100 +#define R600_VDDC3DOORSDC_DFLT 0x7 +#define R600_VDDC3DOORSU_DFLT 0 +#define R600_MPLLLOCKTIME_DFLT 100 +#define R600_MPLLRESETTIME_DFLT 150 +#define R600_VCOSTEPPCT_DFLT 20 +#define R600_ENDINGVCOSTEPPCT_DFLT 5 +#define R600_REFERENCEDIVIDER_DFLT 4 + +#define R600_PM_NUMBER_OF_TC 15 +#define R600_PM_NUMBER_OF_SCLKS 20 +#define R600_PM_NUMBER_OF_MCLKS 4 +#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4 +#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3 + +/* XXX are these ok? */ +#define R600_TEMP_RANGE_MIN (90 * 1000) +#define R600_TEMP_RANGE_MAX (120 * 1000) + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + +enum r600_power_level { + R600_POWER_LEVEL_LOW = 0, + R600_POWER_LEVEL_MEDIUM = 1, + R600_POWER_LEVEL_HIGH = 2, + R600_POWER_LEVEL_CTXSW = 3, +}; + +enum r600_td { + R600_TD_AUTO, + R600_TD_UP, + R600_TD_DOWN, +}; + +enum r600_display_watermark { + R600_DISPLAY_WATERMARK_LOW = 0, + R600_DISPLAY_WATERMARK_HIGH = 1, +}; + +enum r600_display_gap +{ + R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, + R600_PM_DISPLAY_GAP_VBLANK = 1, + R600_PM_DISPLAY_GAP_WATERMARK = 2, + R600_PM_DISPLAY_GAP_IGNORE = 3, +}; +#endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c new file mode 100644 index 000000000000..9f8cc81cb7ca --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -0,0 +1,8153 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#include +#include + +#include "amdgpu.h" +#include "amdgpu_pm.h" +#include "amdgpu_dpm.h" +#include "amdgpu_atombios.h" +#include "amd_pcie.h" +#include "sid.h" +#include "r600_dpm.h" +#include "si_dpm.h" +#include "atom.h" +#include "../include/pptable.h" +#include +#include +#include +#include + +#define MC_CG_ARB_FREQ_F0 0x0a +#define MC_CG_ARB_FREQ_F1 0x0b +#define MC_CG_ARB_FREQ_F2 0x0c +#define MC_CG_ARB_FREQ_F3 0x0d + +#define SMC_RAM_END 0x20000 + +#define SCLK_MIN_DEEPSLEEP_FREQ 1350 + + +/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 +#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 + +#define BIOS_SCRATCH_4 0x5cd + +MODULE_FIRMWARE("amdgpu/tahiti_smc.bin"); +MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin"); +MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/verde_smc.bin"); +MODULE_FIRMWARE("amdgpu/verde_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/oland_smc.bin"); +MODULE_FIRMWARE("amdgpu/oland_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/hainan_smc.bin"); +MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin"); +MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin"); + +static const struct amd_pm_funcs si_dpm_funcs; + +union power_info { + struct _ATOM_POWERPLAY_INFO info; + struct _ATOM_POWERPLAY_INFO_V2 info_2; + struct _ATOM_POWERPLAY_INFO_V3 info_3; + struct _ATOM_PPLIB_POWERPLAYTABLE pplib; + struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; + struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; + struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; + struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; +}; + +union fan_info { + struct _ATOM_PPLIB_FANTABLE fan; + struct _ATOM_PPLIB_FANTABLE2 fan2; + struct _ATOM_PPLIB_FANTABLE3 fan3; +}; + +union pplib_clock_info { + struct _ATOM_PPLIB_R600_CLOCK_INFO r600; + struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; + struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; + struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; + struct _ATOM_PPLIB_SI_CLOCK_INFO si; +}; + +enum si_dpm_auto_throttle_src { + SI_DPM_AUTO_THROTTLE_SRC_THERMAL, + SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL +}; + +enum si_dpm_event_src { + SI_DPM_EVENT_SRC_ANALOG = 0, + SI_DPM_EVENT_SRC_EXTERNAL = 1, + SI_DPM_EVENT_SRC_DIGITAL = 2, + SI_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, + SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 +}; + 
+static const u32 r600_utc[R600_PM_NUMBER_OF_TC] = +{ + R600_UTC_DFLT_00, + R600_UTC_DFLT_01, + R600_UTC_DFLT_02, + R600_UTC_DFLT_03, + R600_UTC_DFLT_04, + R600_UTC_DFLT_05, + R600_UTC_DFLT_06, + R600_UTC_DFLT_07, + R600_UTC_DFLT_08, + R600_UTC_DFLT_09, + R600_UTC_DFLT_10, + R600_UTC_DFLT_11, + R600_UTC_DFLT_12, + R600_UTC_DFLT_13, + R600_UTC_DFLT_14, +}; + +static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = +{ + R600_DTC_DFLT_00, + R600_DTC_DFLT_01, + R600_DTC_DFLT_02, + R600_DTC_DFLT_03, + R600_DTC_DFLT_04, + R600_DTC_DFLT_05, + R600_DTC_DFLT_06, + R600_DTC_DFLT_07, + R600_DTC_DFLT_08, + R600_DTC_DFLT_09, + R600_DTC_DFLT_10, + R600_DTC_DFLT_11, + R600_DTC_DFLT_12, + R600_DTC_DFLT_13, + R600_DTC_DFLT_14, +}; + +static const struct si_cac_config_reg cac_weights_tahiti[] = +{ + { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x0, 
SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg lcac_tahiti[] = +{ + { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, + { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x119, 0x0001fffe, 1, 0x8, 
SISLANDS_CACCONFIG_CGIND }, + { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } + +}; + +static const struct si_cac_config_reg cac_override_tahiti[] = +{ + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_tahiti = +{ + ((1 << 16) | 27027), + 6, + 0, + 4, + 95, + { + 0UL, + 0UL, + 4521550UL, + 309631529UL, + -1270850L, + 4513710L, + 40 + }, + 595000000UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_dte_data dte_data_tahiti = +{ + { 1159409, 0, 0, 0, 0 }, + { 777, 0, 0, 0, 0 }, + 2, + 54000, + 127000, + 25, + 2, + 10, + 13, + { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 }, + { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 }, + { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 }, + 85, + false +}; + +static const struct si_dte_data dte_data_tahiti_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 
0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_new_zealand = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 }, + { 0x29B, 0x3E9, 0x537, 0x7D2, 0 }, + 0x5, + 0xAFC8, + 0x69, + 0x32, + 1, + 0, + 0x10, + { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 }, + 85, + true +}; + +static const struct si_dte_data dte_data_aruba_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_malta = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_cac_config_reg cac_weights_pitcairn[] = +{ + { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 
0x1f, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg lcac_pitcairn[] = +{ + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9e, 0x00000001, 0, 0x1, 
SISLANDS_CACCONFIG_CGIND }, + { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, 
SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_override_pitcairn[] = +{ + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_pitcairn = +{ + ((1 << 16) | 27027), + 5, + 0, + 6, + 100, + { + 51600000UL, + 1800000UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_dte_data dte_data_pitcairn = +{ + { 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0 }, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + 0, + false +}; + +static const struct si_dte_data dte_data_curacao_xt = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_curacao_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_neptune_xt = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 45000, + 100, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_cac_config_reg cac_weights_chelsea_pro[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, 
SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_chelsea_xt[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + 
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_heathrow[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, 
SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND 
}, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_cape_verde[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 
0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg lcac_cape_verde[] = +{ + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 
0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_override_cape_verde[] = +{ + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_cape_verde = +{ + ((1 << 16) | 0x6993), + 5, + 0, + 7, + 105, + { + 0UL, + 0UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_dte_data dte_data_cape_verde = +{ + { 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0 }, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + 0, + false +}; + +static const struct si_dte_data dte_data_venus_xtx = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 }, + 5, + 55000, + 0x69, + 0xA, + 1, + 0, + 0x3, + { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data 
dte_data_venus_xt = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 }, + 5, + 55000, + 0x69, + 0xA, + 1, + 0, + 0x3, + { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_venus_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 }, + 5, + 55000, + 0x69, + 0xA, + 1, + 0, + 0x3, + { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_cac_config_reg cac_weights_oland[] = +{ + { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 
+ { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_mars_pro[] = +{ + { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_mars_xt[] = +{ + { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_oland_pro[] = +{ + { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7E, 
SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_weights_oland_xt[] = +{ + { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x32, 
SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg lcac_oland[] = +{ + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x1, 
SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg lcac_mars_pro[] = +{ + { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, + { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, + { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_cac_config_reg cac_override_oland[] = +{ + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_oland = +{ + ((1 << 16) | 0x6993), + 5, + 0, + 7, + 105, + { + 0UL, + 0UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_powertune_data powertune_data_mars_pro = +{ + ((1 << 16) | 0x6993), + 5, + 0, + 7, + 105, + { + 0UL, + 0UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static const struct si_dte_data dte_data_oland = +{ + { 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0 }, + 0, + 0, + 0, + 0, + 0, + 0, + 
0, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, + 0, + false +}; + +static const struct si_dte_data dte_data_mars_pro = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 55000, + 105, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + +static const struct si_dte_data dte_data_sun_xt = +{ + { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, + { 0x0, 0x0, 0x0, 0x0, 0x0 }, + 5, + 55000, + 105, + 0xA, + 1, + 0, + 0x10, + { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, + { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, + { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, + 90, + true +}; + + +static const struct si_cac_config_reg cac_weights_hainan[] = +{ + { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND }, + { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND }, + { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND }, + { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND }, + { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND }, + { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND }, + { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND }, + { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND }, + { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND }, + { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND }, + { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND }, + { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND }, + { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND }, + { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND }, + { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND }, + { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND }, + { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND }, + { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, + { 
0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND }, + { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, + { 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND }, + { 0xFFFFFFFF } +}; + +static const struct si_powertune_data powertune_data_hainan = +{ + ((1 << 16) | 0x6993), + 5, + 0, + 9, + 105, + { + 0UL, + 0UL, + 7194395UL, + 309631529UL, + -1270850L, + 4513710L, + 100 + }, + 117830498UL, + 12, + { + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0 + }, + true +}; + +static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev); +static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev); +static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev); +static struct si_ps *si_get_ps(struct amdgpu_ps *rps); + +static int si_populate_voltage_value(struct amdgpu_device *adev, + const struct atom_voltage_table *table, + u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage); +static int si_get_std_voltage_value(struct amdgpu_device *adev, + SISLANDS_SMC_VOLTAGE_VALUE *voltage, + u16 *std_voltage); +static int si_write_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 value); +static int si_convert_power_level_to_smc(struct amdgpu_device *adev, + struct rv7xx_pl *pl, + SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level); +static int si_calculate_sclk_params(struct amdgpu_device *adev, + u32 engine_clock, + SISLANDS_SMC_SCLK_VALUE *sclk); + +static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev); +static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev); +static void si_dpm_set_irq_funcs(struct amdgpu_device *adev); + +static struct si_power_info *si_get_pi(struct amdgpu_device *adev) +{ + struct si_power_info *pi = adev->pm.dpm.priv; + return pi; +} + +static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff, + u16 v, s32 t, u32 ileakage, u32 *leakage) +{ + s64 kt, kv, leakage_w, i_leakage, vddc; + s64 temperature, t_slope, t_intercept, av, bv, t_ref; + s64 tmp; + + i_leakage = div64_s64(drm_int2fixp(ileakage), 100); + vddc = div64_s64(drm_int2fixp(v), 1000); + temperature = div64_s64(drm_int2fixp(t), 1000); + + t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000); + t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000); + av = div64_s64(drm_int2fixp(coeff->av), 100000000); + bv = div64_s64(drm_int2fixp(coeff->bv), 100000000); + t_ref = 
drm_int2fixp(coeff->t_ref); + + tmp = drm_fixp_mul(t_slope, vddc) + t_intercept; + kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature)); + kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref))); + kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc))); + + leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); + + *leakage = drm_fixp2int(leakage_w * 1000); +} + +static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev, + const struct ni_leakage_coeffients *coeff, + u16 v, + s32 t, + u32 i_leakage, + u32 *leakage) +{ + si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage); +} + +static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff, + const u32 fixed_kt, u16 v, + u32 ileakage, u32 *leakage) +{ + s64 kt, kv, leakage_w, i_leakage, vddc; + + i_leakage = div64_s64(drm_int2fixp(ileakage), 100); + vddc = div64_s64(drm_int2fixp(v), 1000); + + kt = div64_s64(drm_int2fixp(fixed_kt), 100000000); + kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000), + drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc))); + + leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc); + + *leakage = drm_fixp2int(leakage_w * 1000); +} + +static void si_calculate_leakage_for_v(struct amdgpu_device *adev, + const struct ni_leakage_coeffients *coeff, + const u32 fixed_kt, + u16 v, + u32 i_leakage, + u32 *leakage) +{ + si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage); +} + + +static void si_update_dte_from_pl2(struct amdgpu_device *adev, + struct si_dte_data *dte_data) +{ + u32 p_limit1 = adev->pm.dpm.tdp_limit; + u32 p_limit2 = adev->pm.dpm.near_tdp_limit; + u32 k = dte_data->k; + u32 t_max = dte_data->max_t; + u32 t_split[5] = { 10, 15, 20, 25, 30 }; + u32 t_0 = dte_data->t0; + u32 i; + + if (p_limit2 != 0 && p_limit2 <= p_limit1) { + dte_data->tdep_count = 3; + + for (i = 0; i < k; i++) { + dte_data->r[i] = + (t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) / + (p_limit2 * (u32)100); + } + + dte_data->tdep_r[1] = dte_data->r[4] * 2; + + for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) { + dte_data->tdep_r[i] = dte_data->r[4]; + } + } else { + DRM_ERROR("Invalid PL2! 
DTE will not be updated.\n"); + } +} + +static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev) +{ + struct ni_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +static struct si_ps *si_get_ps(struct amdgpu_ps *aps) +{ + struct si_ps *ps = aps->ps_priv; + + return ps; +} + +static void si_initialize_powertune_defaults(struct amdgpu_device *adev) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + bool update_dte_from_pl2 = false; + + if (adev->asic_type == CHIP_TAHITI) { + si_pi->cac_weights = cac_weights_tahiti; + si_pi->lcac_config = lcac_tahiti; + si_pi->cac_override = cac_override_tahiti; + si_pi->powertune_data = &powertune_data_tahiti; + si_pi->dte_data = dte_data_tahiti; + + switch (adev->pdev->device) { + case 0x6798: + si_pi->dte_data.enable_dte_by_default = true; + break; + case 0x6799: + si_pi->dte_data = dte_data_new_zealand; + break; + case 0x6790: + case 0x6791: + case 0x6792: + case 0x679E: + si_pi->dte_data = dte_data_aruba_pro; + update_dte_from_pl2 = true; + break; + case 0x679B: + si_pi->dte_data = dte_data_malta; + update_dte_from_pl2 = true; + break; + case 0x679A: + si_pi->dte_data = dte_data_tahiti_pro; + update_dte_from_pl2 = true; + break; + default: + if (si_pi->dte_data.enable_dte_by_default == true) + DRM_ERROR("DTE is not enabled!\n"); + break; + } + } else if (adev->asic_type == CHIP_PITCAIRN) { + si_pi->cac_weights = cac_weights_pitcairn; + si_pi->lcac_config = lcac_pitcairn; + si_pi->cac_override = cac_override_pitcairn; + si_pi->powertune_data = &powertune_data_pitcairn; + + switch (adev->pdev->device) { + case 0x6810: + case 0x6818: + si_pi->dte_data = dte_data_curacao_xt; + update_dte_from_pl2 = true; + break; + case 0x6819: + case 0x6811: + si_pi->dte_data = dte_data_curacao_pro; + update_dte_from_pl2 = true; + break; + case 0x6800: + case 0x6806: + si_pi->dte_data = dte_data_neptune_xt; + update_dte_from_pl2 = true; + break; + default: + si_pi->dte_data = dte_data_pitcairn; + break; + } + } else if (adev->asic_type == CHIP_VERDE) { + si_pi->lcac_config = lcac_cape_verde; + si_pi->cac_override = cac_override_cape_verde; + si_pi->powertune_data = &powertune_data_cape_verde; + + switch (adev->pdev->device) { + case 0x683B: + case 0x683F: + case 0x6829: + case 0x6835: + si_pi->cac_weights = cac_weights_cape_verde_pro; + si_pi->dte_data = dte_data_cape_verde; + break; + case 0x682C: + si_pi->cac_weights = cac_weights_cape_verde_pro; + si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; + break; + case 0x6825: + case 0x6827: + si_pi->cac_weights = cac_weights_heathrow; + si_pi->dte_data = dte_data_cape_verde; + break; + case 0x6824: + case 0x682D: + si_pi->cac_weights = cac_weights_chelsea_xt; + si_pi->dte_data = dte_data_cape_verde; + break; + case 0x682F: + si_pi->cac_weights = cac_weights_chelsea_pro; + si_pi->dte_data = dte_data_cape_verde; + break; + case 0x6820: + si_pi->cac_weights = cac_weights_heathrow; + si_pi->dte_data = dte_data_venus_xtx; + break; + case 0x6821: + si_pi->cac_weights = cac_weights_heathrow; + si_pi->dte_data = dte_data_venus_xt; + break; + case 0x6823: + case 0x682B: + case 0x6822: + case 0x682A: + si_pi->cac_weights = cac_weights_chelsea_pro; + si_pi->dte_data = dte_data_venus_pro; + break; + default: + si_pi->cac_weights = cac_weights_cape_verde; + si_pi->dte_data = dte_data_cape_verde; + break; + } + } 
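/*
 * Annotation (illustrative sketch, not part of the original patch): the
 * asic_type branches around this point pick the CAC weight, LCAC, CAC
 * override, powertune and DTE tables, with inner switches on the PCI device
 * ID.  Where update_dte_from_pl2 is set, si_update_dte_from_pl2() above
 * rescales the DTE filter coefficients from the board's PL2 limit, roughly
 *
 *   r[i] = t_split[i] * (max_t - t0 / 1000) * 2^14 / (100 * near_tdp_limit)
 *
 * The leakage helpers defined earlier evaluate, in drm_fixed 32.32 fixed
 * point,
 *
 *   leakage = I_leak * kt * kv * Vddc, where
 *   kt = exp((t_slope * Vddc + t_intercept) * T)
 *        / exp((t_slope * Vddc + t_intercept) * T_ref)
 *   kv = av * exp(bv * Vddc)
 *
 * with t_slope, t_intercept, av and bv pre-divided by 1e8, I_leak by 100,
 * Vddc and T by 1000, and the result multiplied by 1000 before conversion
 * back to an integer.
 */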
else if (adev->asic_type == CHIP_OLAND) { + si_pi->lcac_config = lcac_mars_pro; + si_pi->cac_override = cac_override_oland; + si_pi->powertune_data = &powertune_data_mars_pro; + si_pi->dte_data = dte_data_mars_pro; + + switch (adev->pdev->device) { + case 0x6601: + case 0x6621: + case 0x6603: + case 0x6605: + si_pi->cac_weights = cac_weights_mars_pro; + update_dte_from_pl2 = true; + break; + case 0x6600: + case 0x6606: + case 0x6620: + case 0x6604: + si_pi->cac_weights = cac_weights_mars_xt; + update_dte_from_pl2 = true; + break; + case 0x6611: + case 0x6613: + case 0x6608: + si_pi->cac_weights = cac_weights_oland_pro; + update_dte_from_pl2 = true; + break; + case 0x6610: + si_pi->cac_weights = cac_weights_oland_xt; + update_dte_from_pl2 = true; + break; + default: + si_pi->cac_weights = cac_weights_oland; + si_pi->lcac_config = lcac_oland; + si_pi->cac_override = cac_override_oland; + si_pi->powertune_data = &powertune_data_oland; + si_pi->dte_data = dte_data_oland; + break; + } + } else if (adev->asic_type == CHIP_HAINAN) { + si_pi->cac_weights = cac_weights_hainan; + si_pi->lcac_config = lcac_oland; + si_pi->cac_override = cac_override_oland; + si_pi->powertune_data = &powertune_data_hainan; + si_pi->dte_data = dte_data_sun_xt; + update_dte_from_pl2 = true; + } else { + DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n"); + return; + } + + ni_pi->enable_power_containment = false; + ni_pi->enable_cac = false; + ni_pi->enable_sq_ramping = false; + si_pi->enable_dte = false; + + if (si_pi->powertune_data->enable_powertune_by_default) { + ni_pi->enable_power_containment = true; + ni_pi->enable_cac = true; + if (si_pi->dte_data.enable_dte_by_default) { + si_pi->enable_dte = true; + if (update_dte_from_pl2) + si_update_dte_from_pl2(adev, &si_pi->dte_data); + + } + ni_pi->enable_sq_ramping = true; + } + + ni_pi->driver_calculate_cac_leakage = true; + ni_pi->cac_configuration_required = true; + + if (ni_pi->cac_configuration_required) { + ni_pi->support_cac_long_term_average = true; + si_pi->dyn_powertune_data.l2_lta_window_size = + si_pi->powertune_data->l2_lta_window_size_default; + si_pi->dyn_powertune_data.lts_truncate = + si_pi->powertune_data->lts_truncate_default; + } else { + ni_pi->support_cac_long_term_average = false; + si_pi->dyn_powertune_data.l2_lta_window_size = 0; + si_pi->dyn_powertune_data.lts_truncate = 0; + } + + si_pi->dyn_powertune_data.disable_uvd_powertune = false; +} + +static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev) +{ + return 1; +} + +static u32 si_calculate_cac_wintime(struct amdgpu_device *adev) +{ + u32 xclk; + u32 wintime; + u32 cac_window; + u32 cac_window_size; + + xclk = amdgpu_asic_get_xclk(adev); + + if (xclk == 0) + return 0; + + cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK; + cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF); + + wintime = (cac_window_size * 100) / xclk; + + return wintime; +} + +static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor) +{ + return power_in_watts; +} + +static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev, + bool adjust_polarity, + u32 tdp_adjustment, + u32 *tdp_limit, + u32 *near_tdp_limit) +{ + u32 adjustment_delta, max_tdp_limit; + + if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit) + return -EINVAL; + + max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100; + + if (adjust_polarity) { + *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; + *near_tdp_limit = 
adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit); + } else { + *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; + adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit; + if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted) + *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta; + else + *near_tdp_limit = 0; + } + + if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit)) + return -EINVAL; + if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit)) + return -EINVAL; + + return 0; +} + +static int si_populate_smc_tdp_limits(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + + if (ni_pi->enable_power_containment) { + SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; + PP_SIslands_PAPMParameters *papm_parm; + struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table; + u32 scaling_factor = si_get_smc_power_scaling_factor(adev); + u32 tdp_limit; + u32 near_tdp_limit; + int ret; + + if (scaling_factor == 0) + return -EINVAL; + + memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); + + ret = si_calculate_adjusted_tdp_limits(adev, + false, /* ??? */ + adev->pm.dpm.tdp_adjustment, + &tdp_limit, + &near_tdp_limit); + if (ret) + return ret; + + smc_table->dpm2Params.TDPLimit = + cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000); + smc_table->dpm2Params.NearTDPLimit = + cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000); + smc_table->dpm2Params.SafePowerLimit = + cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); + + ret = amdgpu_si_copy_bytes_to_smc(adev, + (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) + + offsetof(PP_SIslands_DPM2Parameters, TDPLimit)), + (u8 *)(&(smc_table->dpm2Params.TDPLimit)), + sizeof(u32) * 3, + si_pi->sram_end); + if (ret) + return ret; + + if (si_pi->enable_ppm) { + papm_parm = &si_pi->papm_parm; + memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters)); + papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp); + papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max); + papm_parm->dGPU_T_Warning = cpu_to_be32(95); + papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5); + papm_parm->PlatformPowerLimit = 0xffffffff; + papm_parm->NearTDPLimitPAPM = 0xffffffff; + + ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start, + (u8 *)papm_parm, + sizeof(PP_SIslands_PAPMParameters), + si_pi->sram_end); + if (ret) + return ret; + } + } + return 0; +} + +static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + + if (ni_pi->enable_power_containment) { + SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; + u32 scaling_factor = si_get_smc_power_scaling_factor(adev); + int ret; + + memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); + + smc_table->dpm2Params.NearTDPLimit = + cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000); + smc_table->dpm2Params.SafePowerLimit = + cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); + + ret = amdgpu_si_copy_bytes_to_smc(adev, + (si_pi->state_table_start + + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) 
+ + offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)), + (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)), + sizeof(u32) * 2, + si_pi->sram_end); + if (ret) + return ret; + } + + return 0; +} + +static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev, + const u16 prev_std_vddc, + const u16 curr_std_vddc) +{ + u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN; + u64 prev_vddc = (u64)prev_std_vddc; + u64 curr_vddc = (u64)curr_std_vddc; + u64 pwr_efficiency_ratio, n, d; + + if ((prev_vddc == 0) || (curr_vddc == 0)) + return 0; + + n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000); + d = prev_vddc * prev_vddc; + pwr_efficiency_ratio = div64_u64(n, d); + + if (pwr_efficiency_ratio > (u64)0xFFFF) + return 0; + + return (u16)pwr_efficiency_ratio; +} + +static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + if (si_pi->dyn_powertune_data.disable_uvd_powertune && + amdgpu_state->vclk && amdgpu_state->dclk) + return true; + + return false; +} + +struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev) +{ + struct evergreen_power_info *pi = adev->pm.dpm.priv; + + return pi; +} + +static int si_populate_power_containment_values(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + SISLANDS_SMC_VOLTAGE_VALUE vddc; + u32 prev_sclk; + u32 max_sclk; + u32 min_sclk; + u16 prev_std_vddc; + u16 curr_std_vddc; + int i; + u16 pwr_efficiency_ratio; + u8 max_ps_percent; + bool disable_uvd_power_tune; + int ret; + + if (ni_pi->enable_power_containment == false) + return 0; + + if (state->performance_level_count == 0) + return -EINVAL; + + if (smc_state->levelCount != state->performance_level_count) + return -EINVAL; + + disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state); + + smc_state->levels[0].dpm2.MaxPS = 0; + smc_state->levels[0].dpm2.NearTDPDec = 0; + smc_state->levels[0].dpm2.AboveSafeInc = 0; + smc_state->levels[0].dpm2.BelowSafeInc = 0; + smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0; + + for (i = 1; i < state->performance_level_count; i++) { + prev_sclk = state->performance_levels[i-1].sclk; + max_sclk = state->performance_levels[i].sclk; + if (i == 1) + max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M; + else + max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H; + + if (prev_sclk > max_sclk) + return -EINVAL; + + if ((max_ps_percent == 0) || + (prev_sclk == max_sclk) || + disable_uvd_power_tune) + min_sclk = max_sclk; + else if (i == 1) + min_sclk = prev_sclk; + else + min_sclk = (prev_sclk * (u32)max_ps_percent) / 100; + + if (min_sclk < state->performance_levels[0].sclk) + min_sclk = state->performance_levels[0].sclk; + + if (min_sclk == 0) + return -EINVAL; + + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + state->performance_levels[i-1].vddc, &vddc); + if (ret) + return ret; + + ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc); + if (ret) + return ret; + + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + state->performance_levels[i].vddc, &vddc); + if (ret) + return ret; + + ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc); + if (ret) + return ret; + + pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev, + prev_std_vddc, 
curr_std_vddc); + + smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk); + smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC; + smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC; + smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC; + smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio); + } + + return 0; +} + +static int si_populate_sq_ramping_values(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + u32 sq_power_throttle, sq_power_throttle2; + bool enable_sq_ramping = ni_pi->enable_sq_ramping; + int i; + + if (state->performance_level_count == 0) + return -EINVAL; + + if (smc_state->levelCount != state->performance_level_count) + return -EINVAL; + + if (adev->pm.dpm.sq_ramping_threshold == 0) + return -EINVAL; + + if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT)) + enable_sq_ramping = false; + + if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT)) + enable_sq_ramping = false; + + if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT)) + enable_sq_ramping = false; + + if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) + enable_sq_ramping = false; + + if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) + enable_sq_ramping = false; + + for (i = 0; i < state->performance_level_count; i++) { + sq_power_throttle = 0; + sq_power_throttle2 = 0; + + if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) && + enable_sq_ramping) { + sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER); + sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER); + sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA); + sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE); + sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO); + } else { + sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK; + sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + } + + smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle); + smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2); + } + + return 0; +} + +static int si_enable_power_containment(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + bool enable) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + PPSMC_Result smc_result; + int ret = 0; + + if (ni_pi->enable_power_containment) { + if (enable) { + if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive); + if (smc_result != PPSMC_Result_OK) { + ret = -EINVAL; + ni_pi->pc_enabled = false; + } else { + ni_pi->pc_enabled = true; + } + } + } else { + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive); + if (smc_result != PPSMC_Result_OK) + ret = -EINVAL; + ni_pi->pc_enabled = false; + } + } + + return ret; +} + +static int si_initialize_smc_dte_tables(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + int ret = 0; + struct si_dte_data *dte_data = &si_pi->dte_data; + Smc_SIslands_DTE_Configuration *dte_tables = NULL; + u32 table_size; + u8 tdep_count; + u32 i; + + if (dte_data == NULL) + si_pi->enable_dte = false; + + if 
(si_pi->enable_dte == false) + return 0; + + if (dte_data->k <= 0) + return -EINVAL; + + dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL); + if (dte_tables == NULL) { + si_pi->enable_dte = false; + return -ENOMEM; + } + + table_size = dte_data->k; + + if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES) + table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES; + + tdep_count = dte_data->tdep_count; + if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE) + tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; + + dte_tables->K = cpu_to_be32(table_size); + dte_tables->T0 = cpu_to_be32(dte_data->t0); + dte_tables->MaxT = cpu_to_be32(dte_data->max_t); + dte_tables->WindowSize = dte_data->window_size; + dte_tables->temp_select = dte_data->temp_select; + dte_tables->DTE_mode = dte_data->dte_mode; + dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold); + + if (tdep_count > 0) + table_size--; + + for (i = 0; i < table_size; i++) { + dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]); + dte_tables->R[i] = cpu_to_be32(dte_data->r[i]); + } + + dte_tables->Tdep_count = tdep_count; + + for (i = 0; i < (u32)tdep_count; i++) { + dte_tables->T_limits[i] = dte_data->t_limits[i]; + dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]); + dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]); + } + + ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start, + (u8 *)dte_tables, + sizeof(Smc_SIslands_DTE_Configuration), + si_pi->sram_end); + kfree(dte_tables); + + return ret; +} + +static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev, + u16 *max, u16 *min) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct amdgpu_cac_leakage_table *table = + &adev->pm.dpm.dyn_state.cac_leakage_table; + u32 i; + u32 v0_loadline; + + if (table == NULL) + return -EINVAL; + + *max = 0; + *min = 0xFFFF; + + for (i = 0; i < table->count; i++) { + if (table->entries[i].vddc > *max) + *max = table->entries[i].vddc; + if (table->entries[i].vddc < *min) + *min = table->entries[i].vddc; + } + + if (si_pi->powertune_data->lkge_lut_v0_percent > 100) + return -EINVAL; + + v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100; + + if (v0_loadline > 0xFFFFUL) + return -EINVAL; + + *min = (u16)v0_loadline; + + if ((*min > *max) || (*max == 0) || (*min == 0)) + return -EINVAL; + + return 0; +} + +static u16 si_get_cac_std_voltage_step(u16 max, u16 min) +{ + return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) / + SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; +} + +static int si_init_dte_leakage_table(struct amdgpu_device *adev, + PP_SIslands_CacConfig *cac_tables, + u16 vddc_max, u16 vddc_min, u16 vddc_step, + u16 t0, u16 t_step) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 leakage; + unsigned int i, j; + s32 t; + u32 smc_leakage; + u32 scaling_factor; + u16 voltage; + + scaling_factor = si_get_smc_power_scaling_factor(adev); + + for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) { + t = (1000 * (i * t_step + t0)); + + for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { + voltage = vddc_max - (vddc_step * j); + + si_calculate_leakage_for_v_and_t(adev, + &si_pi->powertune_data->leakage_coefficients, + voltage, + t, + si_pi->dyn_powertune_data.cac_leakage, + &leakage); + + smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; + + if (smc_leakage > 0xFFFF) + smc_leakage = 0xFFFF; + + cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] 
= + cpu_to_be16((u16)smc_leakage); + } + } + return 0; +} + +static int si_init_simplified_leakage_table(struct amdgpu_device *adev, + PP_SIslands_CacConfig *cac_tables, + u16 vddc_max, u16 vddc_min, u16 vddc_step) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 leakage; + unsigned int i, j; + u32 smc_leakage; + u32 scaling_factor; + u16 voltage; + + scaling_factor = si_get_smc_power_scaling_factor(adev); + + for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { + voltage = vddc_max - (vddc_step * j); + + si_calculate_leakage_for_v(adev, + &si_pi->powertune_data->leakage_coefficients, + si_pi->powertune_data->fixed_kt, + voltage, + si_pi->dyn_powertune_data.cac_leakage, + &leakage); + + smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; + + if (smc_leakage > 0xFFFF) + smc_leakage = 0xFFFF; + + for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) + cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] = + cpu_to_be16((u16)smc_leakage); + } + return 0; +} + +static int si_initialize_smc_cac_tables(struct amdgpu_device *adev) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + PP_SIslands_CacConfig *cac_tables = NULL; + u16 vddc_max, vddc_min, vddc_step; + u16 t0, t_step; + u32 load_line_slope, reg; + int ret = 0; + u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100; + + if (ni_pi->enable_cac == false) + return 0; + + cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL); + if (!cac_tables) + return -ENOMEM; + + reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK; + reg |= CAC_WINDOW(si_pi->powertune_data->cac_window); + WREG32(CG_CAC_CTRL, reg); + + si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage; + si_pi->dyn_powertune_data.dc_pwr_value = + si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0]; + si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev); + si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default; + + si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000; + + ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min); + if (ret) + goto done_free; + + vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min); + vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)); + t_step = 4; + t0 = 60; + + if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage) + ret = si_init_dte_leakage_table(adev, cac_tables, + vddc_max, vddc_min, vddc_step, + t0, t_step); + else + ret = si_init_simplified_leakage_table(adev, cac_tables, + vddc_max, vddc_min, vddc_step); + if (ret) + goto done_free; + + load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100; + + cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size); + cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate; + cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n; + cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min); + cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step); + cac_tables->R_LL = cpu_to_be32(load_line_slope); + cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime); + cac_tables->calculation_repeats = cpu_to_be32(2); + cac_tables->dc_cac = cpu_to_be32(0); + cac_tables->log2_PG_LKG_SCALE = 12; + cac_tables->cac_temp = si_pi->powertune_data->operating_temp; + cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0); + cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step); + + ret = amdgpu_si_copy_bytes_to_smc(adev, 
si_pi->cac_table_start, + (u8 *)cac_tables, + sizeof(PP_SIslands_CacConfig), + si_pi->sram_end); + + if (ret) + goto done_free; + + ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us); + +done_free: + if (ret) { + ni_pi->enable_cac = false; + ni_pi->enable_power_containment = false; + } + + kfree(cac_tables); + + return ret; +} + +static int si_program_cac_config_registers(struct amdgpu_device *adev, + const struct si_cac_config_reg *cac_config_regs) +{ + const struct si_cac_config_reg *config_regs = cac_config_regs; + u32 data = 0, offset; + + if (!config_regs) + return -EINVAL; + + while (config_regs->offset != 0xFFFFFFFF) { + switch (config_regs->type) { + case SISLANDS_CACCONFIG_CGIND: + offset = SMC_CG_IND_START + config_regs->offset; + if (offset < SMC_CG_IND_END) + data = RREG32_SMC(offset); + break; + default: + data = RREG32(config_regs->offset); + break; + } + + data &= ~config_regs->mask; + data |= ((config_regs->value << config_regs->shift) & config_regs->mask); + + switch (config_regs->type) { + case SISLANDS_CACCONFIG_CGIND: + offset = SMC_CG_IND_START + config_regs->offset; + if (offset < SMC_CG_IND_END) + WREG32_SMC(offset, data); + break; + default: + WREG32(config_regs->offset, data); + break; + } + config_regs++; + } + return 0; +} + +static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + int ret; + + if ((ni_pi->enable_cac == false) || + (ni_pi->cac_configuration_required == false)) + return 0; + + ret = si_program_cac_config_registers(adev, si_pi->lcac_config); + if (ret) + return ret; + ret = si_program_cac_config_registers(adev, si_pi->cac_override); + if (ret) + return ret; + ret = si_program_cac_config_registers(adev, si_pi->cac_weights); + if (ret) + return ret; + + return 0; +} + +static int si_enable_smc_cac(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + bool enable) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + PPSMC_Result smc_result; + int ret = 0; + + if (ni_pi->enable_cac) { + if (enable) { + if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { + if (ni_pi->support_cac_long_term_average) { + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable); + if (smc_result != PPSMC_Result_OK) + ni_pi->support_cac_long_term_average = false; + } + + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac); + if (smc_result != PPSMC_Result_OK) { + ret = -EINVAL; + ni_pi->cac_enabled = false; + } else { + ni_pi->cac_enabled = true; + } + + if (si_pi->enable_dte) { + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE); + if (smc_result != PPSMC_Result_OK) + ret = -EINVAL; + } + } + } else if (ni_pi->cac_enabled) { + if (si_pi->enable_dte) + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE); + + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac); + + ni_pi->cac_enabled = false; + + if (ni_pi->support_cac_long_term_average) + smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable); + } + } + return ret; +} + +static int si_init_smc_spll_table(struct amdgpu_device *adev) +{ + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + SMC_SISLANDS_SPLL_DIV_TABLE *spll_table; + SISLANDS_SMC_SCLK_VALUE sclk_params; + u32 fb_div, p_div; + u32 clk_s, clk_v; + u32 sclk = 0; + int ret = 0; + u32 tmp; + int i; + 
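/*
 * Annotation (illustrative sketch, not part of the original patch): the CAC
 * setup completed just above fills cac_lkge_lut[][] with a leakage table
 * whose rows are temperature steps (t0 + i * t_step) and whose columns span
 * the recomputed vddc_min..vddc_max range in vddc_step increments, each
 * entry divided by 4 and clamped to 0xFFFF.  The loop below builds the
 * 256-entry SPLL divider table instead: for every engine clock step (sclk
 * advancing by 512 per entry) it extracts p_div, fb_div, clk_s and clk_v
 * from the computed SPLL register values, verifies that each field fits its
 * SMC_SISLANDS_SPLL_DIV_TABLE_*_MASK, packs them into spll_table->freq[i]
 * and spll_table->ss[i] as big-endian words, and finally copies the table
 * to SMC RAM at spll_table_start.
 */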
+ if (si_pi->spll_table_start == 0) + return -EINVAL; + + spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL); + if (spll_table == NULL) + return -ENOMEM; + + for (i = 0; i < 256; i++) { + ret = si_calculate_sclk_params(adev, sclk, &sclk_params); + if (ret) + break; + p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT; + fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT; + clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT; + clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT; + + fb_div &= ~0x00001FFF; + fb_div >>= 1; + clk_v >>= 6; + + if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT)) + ret = -EINVAL; + if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) + ret = -EINVAL; + if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) + ret = -EINVAL; + if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) + ret = -EINVAL; + + if (ret) + break; + + tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) | + ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK); + spll_table->freq[i] = cpu_to_be32(tmp); + + tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) | + ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK); + spll_table->ss[i] = cpu_to_be32(tmp); + + sclk += 512; + } + + + if (!ret) + ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start, + (u8 *)spll_table, + sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), + si_pi->sram_end); + + if (ret) + ni_pi->enable_power_containment = false; + + kfree(spll_table); + + return ret; +} + +static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev, + u16 vce_voltage) +{ + u16 highest_leakage = 0; + struct si_power_info *si_pi = si_get_pi(adev); + int i; + + for (i = 0; i < si_pi->leakage_voltage.count; i++){ + if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage) + highest_leakage = si_pi->leakage_voltage.entries[i].voltage; + } + + if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage)) + return highest_leakage; + + return vce_voltage; +} + +static int si_get_vce_clock_voltage(struct amdgpu_device *adev, + u32 evclk, u32 ecclk, u16 *voltage) +{ + u32 i; + int ret = -EINVAL; + struct amdgpu_vce_clock_voltage_dependency_table *table = + &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; + + if (((evclk == 0) && (ecclk == 0)) || + (table && (table->count == 0))) { + *voltage = 0; + return 0; + } + + for (i = 0; i < table->count; i++) { + if ((evclk <= table->entries[i].evclk) && + (ecclk <= table->entries[i].ecclk)) { + *voltage = table->entries[i].v; + ret = 0; + break; + } + } + + /* if no match return the highest voltage */ + if (ret) + *voltage = table->entries[table->count - 1].v; + + *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage); + + return ret; +} + +static bool si_dpm_vblank_too_short(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); + /* we never hit the non-gddr5 limit so disable it */ + u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 
450 : 0; + + if (vblank_time < switch_limit) + return true; + else + return false; + +} + +static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev, + u32 arb_freq_src, u32 arb_freq_dest) +{ + u32 mc_arb_dram_timing; + u32 mc_arb_dram_timing2; + u32 burst_time; + u32 mc_cg_config; + + switch (arb_freq_src) { + case MC_CG_ARB_FREQ_F0: + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING); + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT; + break; + case MC_CG_ARB_FREQ_F1: + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1); + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1); + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT; + break; + case MC_CG_ARB_FREQ_F2: + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2); + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; + break; + case MC_CG_ARB_FREQ_F3: + mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); + mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); + burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; + break; + default: + return -EINVAL; + } + + switch (arb_freq_dest) { + case MC_CG_ARB_FREQ_F0: + WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); + WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); + WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); + break; + case MC_CG_ARB_FREQ_F1: + WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); + WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); + WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK); + break; + case MC_CG_ARB_FREQ_F2: + WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing); + WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2); + WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK); + break; + case MC_CG_ARB_FREQ_F3: + WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing); + WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2); + WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK); + break; + default: + return -EINVAL; + } + + mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F; + WREG32(MC_CG_CONFIG, mc_cg_config); + WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK); + + return 0; +} + +static void ni_update_current_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct si_ps *new_ps = si_get_ps(rps); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct ni_power_info *ni_pi = ni_get_pi(adev); + + eg_pi->current_rps = *rps; + ni_pi->current_ps = *new_ps; + eg_pi->current_rps.ps_priv = &ni_pi->current_ps; + adev->pm.dpm.current_ps = &eg_pi->current_rps; +} + +static void ni_update_requested_ps(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + struct si_ps *new_ps = si_get_ps(rps); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct ni_power_info *ni_pi = ni_get_pi(adev); + + eg_pi->requested_rps = *rps; + ni_pi->requested_ps = *new_ps; + eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps; + adev->pm.dpm.requested_ps = &eg_pi->requested_rps; +} + +static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, + struct amdgpu_ps *new_ps, + struct amdgpu_ps *old_ps) +{ + struct si_ps *new_state = si_get_ps(new_ps); + struct si_ps *current_state = si_get_ps(old_ps); + + if ((new_ps->vclk == old_ps->vclk) && + (new_ps->dclk == old_ps->dclk)) + return; + + if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >= + 
current_state->performance_levels[current_state->performance_level_count - 1].sclk) + return; + + amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk); +} + +static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev, + struct amdgpu_ps *new_ps, + struct amdgpu_ps *old_ps) +{ + struct si_ps *new_state = si_get_ps(new_ps); + struct si_ps *current_state = si_get_ps(old_ps); + + if ((new_ps->vclk == old_ps->vclk) && + (new_ps->dclk == old_ps->dclk)) + return; + + if (new_state->performance_levels[new_state->performance_level_count - 1].sclk < + current_state->performance_levels[current_state->performance_level_count - 1].sclk) + return; + + amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk); +} + +static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage) +{ + unsigned int i; + + for (i = 0; i < table->count; i++) + if (voltage <= table->entries[i].value) + return table->entries[i].value; + + return table->entries[table->count - 1].value; +} + +static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks, + u32 max_clock, u32 requested_clock) +{ + unsigned int i; + + if ((clocks == NULL) || (clocks->count == 0)) + return (requested_clock < max_clock) ? requested_clock : max_clock; + + for (i = 0; i < clocks->count; i++) { + if (clocks->values[i] >= requested_clock) + return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock; + } + + return (clocks->values[clocks->count - 1] < max_clock) ? + clocks->values[clocks->count - 1] : max_clock; +} + +static u32 btc_get_valid_mclk(struct amdgpu_device *adev, + u32 max_mclk, u32 requested_mclk) +{ + return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values, + max_mclk, requested_mclk); +} + +static u32 btc_get_valid_sclk(struct amdgpu_device *adev, + u32 max_sclk, u32 requested_sclk) +{ + return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values, + max_sclk, requested_sclk); +} + +static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table, + u32 *max_clock) +{ + u32 i, clock = 0; + + if ((table == NULL) || (table->count == 0)) { + *max_clock = clock; + return; + } + + for (i = 0; i < table->count; i++) { + if (clock < table->entries[i].clk) + clock = table->entries[i].clk; + } + *max_clock = clock; +} + +static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table, + u32 clock, u16 max_voltage, u16 *voltage) +{ + u32 i; + + if ((table == NULL) || (table->count == 0)) + return; + + for (i= 0; i < table->count; i++) { + if (clock <= table->entries[i].clk) { + if (*voltage < table->entries[i].v) + *voltage = (u16)((table->entries[i].v < max_voltage) ? + table->entries[i].v : max_voltage); + return; + } + } + + *voltage = (*voltage > max_voltage) ? 
*voltage : max_voltage; +} + +static void btc_adjust_clock_combinations(struct amdgpu_device *adev, + const struct amdgpu_clock_and_voltage_limits *max_limits, + struct rv7xx_pl *pl) +{ + + if ((pl->mclk == 0) || (pl->sclk == 0)) + return; + + if (pl->mclk == pl->sclk) + return; + + if (pl->mclk > pl->sclk) { + if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio) + pl->sclk = btc_get_valid_sclk(adev, + max_limits->sclk, + (pl->mclk + + (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) / + adev->pm.dpm.dyn_state.mclk_sclk_ratio); + } else { + if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta) + pl->mclk = btc_get_valid_mclk(adev, + max_limits->mclk, + pl->sclk - + adev->pm.dpm.dyn_state.sclk_mclk_delta); + } +} + +static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, + u16 max_vddc, u16 max_vddci, + u16 *vddc, u16 *vddci) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + u16 new_voltage; + + if ((0 == *vddc) || (0 == *vddci)) + return; + + if (*vddc > *vddci) { + if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { + new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table, + (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta)); + *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci; + } + } else { + if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { + new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table, + (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta)); + *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc; + } + } +} + +static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, + u32 *p, u32 *u) +{ + u32 b_c = 0; + u32 i_c; + u32 tmp; + + i_c = (i * r_c) / 100; + tmp = i_c >> p_b; + + while (tmp) { + b_c++; + tmp >>= 1; + } + + *u = (b_c + 1) / 2; + *p = i_c / (1 << (2 * (*u))); +} + +static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) +{ + u32 k, a, ah, al; + u32 t1; + + if ((fl == 0) || (fh == 0) || (fl > fh)) + return -EINVAL; + + k = (100 * fh) / fl; + t1 = (t * (k - 100)); + a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); + a = (a + 5) / 10; + ah = ((a * t) + 5000) / 10000; + al = a - ah; + + *th = t - ah; + *tl = t + al; + + return 0; +} + +static bool r600_is_uvd_state(u32 class, u32 class2) +{ + if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + return true; + if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) + return true; + if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) + return true; + if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) + return true; + if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) + return true; + return false; +} + +static u8 rv770_get_memory_module_index(struct amdgpu_device *adev) +{ + return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff); +} + +static void rv770_get_max_vddc(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + u16 vddc; + + if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc)) + pi->max_vddc = 0; + else + pi->max_vddc = vddc; +} + +static void rv770_get_engine_memory_ss(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct amdgpu_atom_ss ss; + + pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_ENGINE_SS, 0); + pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_MEMORY_SS, 0); + + if (pi->sclk_ss || pi->mclk_ss) + pi->dynamic_ss = true; + else + pi->dynamic_ss = false; +} + + +static void si_apply_state_adjust_rules(struct amdgpu_device *adev, + struct amdgpu_ps *rps) +{ + 
struct si_ps *ps = si_get_ps(rps); + struct amdgpu_clock_and_voltage_limits *max_limits; + bool disable_mclk_switching = false; + bool disable_sclk_switching = false; + u32 mclk, sclk; + u16 vddc, vddci, min_vce_voltage = 0; + u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; + u32 max_sclk = 0, max_mclk = 0; + int i; + + if (adev->asic_type == CHIP_HAINAN) { + if ((adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0xC3) || + (adev->pdev->device == 0x6664) || + (adev->pdev->device == 0x6665) || + (adev->pdev->device == 0x6667)) { + max_sclk = 75000; + } + if ((adev->pdev->revision == 0xC3) || + (adev->pdev->device == 0x6665)) { + max_sclk = 60000; + max_mclk = 80000; + } + } else if (adev->asic_type == CHIP_OLAND) { + if ((adev->pdev->revision == 0xC7) || + (adev->pdev->revision == 0x80) || + (adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0x87) || + (adev->pdev->device == 0x6604) || + (adev->pdev->device == 0x6605)) { + max_sclk = 75000; + } + } + + if (rps->vce_active) { + rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; + rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; + si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk, + &min_vce_voltage); + } else { + rps->evclk = 0; + rps->ecclk = 0; + } + + if ((adev->pm.dpm.new_active_crtc_count > 1) || + si_dpm_vblank_too_short(adev)) + disable_mclk_switching = true; + + if (rps->vclk || rps->dclk) { + disable_mclk_switching = true; + disable_sclk_switching = true; + } + + if (adev->pm.ac_power) + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + else + max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; + + for (i = ps->performance_level_count - 2; i >= 0; i--) { + if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc) + ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc; + } + if (adev->pm.ac_power == false) { + for (i = 0; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].mclk > max_limits->mclk) + ps->performance_levels[i].mclk = max_limits->mclk; + if (ps->performance_levels[i].sclk > max_limits->sclk) + ps->performance_levels[i].sclk = max_limits->sclk; + if (ps->performance_levels[i].vddc > max_limits->vddc) + ps->performance_levels[i].vddc = max_limits->vddc; + if (ps->performance_levels[i].vddci > max_limits->vddci) + ps->performance_levels[i].vddci = max_limits->vddci; + } + } + + /* limit clocks to max supported clocks based on voltage dependency tables */ + btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + &max_sclk_vddc); + btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &max_mclk_vddci); + btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &max_mclk_vddc); + + for (i = 0; i < ps->performance_level_count; i++) { + if (max_sclk_vddc) { + if (ps->performance_levels[i].sclk > max_sclk_vddc) + ps->performance_levels[i].sclk = max_sclk_vddc; + } + if (max_mclk_vddci) { + if (ps->performance_levels[i].mclk > max_mclk_vddci) + ps->performance_levels[i].mclk = max_mclk_vddci; + } + if (max_mclk_vddc) { + if (ps->performance_levels[i].mclk > max_mclk_vddc) + ps->performance_levels[i].mclk = max_mclk_vddc; + } + if (max_mclk) { + if (ps->performance_levels[i].mclk > max_mclk) + ps->performance_levels[i].mclk = max_mclk; + } + if (max_sclk) { + if (ps->performance_levels[i].sclk > max_sclk) + ps->performance_levels[i].sclk 
= max_sclk; + } + } + + /* XXX validate the min clocks required for display */ + + if (disable_mclk_switching) { + mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; + vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; + } else { + mclk = ps->performance_levels[0].mclk; + vddci = ps->performance_levels[0].vddci; + } + + if (disable_sclk_switching) { + sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; + vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; + } else { + sclk = ps->performance_levels[0].sclk; + vddc = ps->performance_levels[0].vddc; + } + + if (rps->vce_active) { + if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) + sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; + if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk) + mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk; + } + + /* adjusted low state */ + ps->performance_levels[0].sclk = sclk; + ps->performance_levels[0].mclk = mclk; + ps->performance_levels[0].vddc = vddc; + ps->performance_levels[0].vddci = vddci; + + if (disable_sclk_switching) { + sclk = ps->performance_levels[0].sclk; + for (i = 1; i < ps->performance_level_count; i++) { + if (sclk < ps->performance_levels[i].sclk) + sclk = ps->performance_levels[i].sclk; + } + for (i = 0; i < ps->performance_level_count; i++) { + ps->performance_levels[i].sclk = sclk; + ps->performance_levels[i].vddc = vddc; + } + } else { + for (i = 1; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) + ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; + if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) + ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; + } + } + + if (disable_mclk_switching) { + mclk = ps->performance_levels[0].mclk; + for (i = 1; i < ps->performance_level_count; i++) { + if (mclk < ps->performance_levels[i].mclk) + mclk = ps->performance_levels[i].mclk; + } + for (i = 0; i < ps->performance_level_count; i++) { + ps->performance_levels[i].mclk = mclk; + ps->performance_levels[i].vddci = vddci; + } + } else { + for (i = 1; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk) + ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk; + if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci) + ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci; + } + } + + for (i = 0; i < ps->performance_level_count; i++) + btc_adjust_clock_combinations(adev, max_limits, + &ps->performance_levels[i]); + + for (i = 0; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].vddc < min_vce_voltage) + ps->performance_levels[i].vddc = min_vce_voltage; + btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, + ps->performance_levels[i].sclk, + max_limits->vddc, &ps->performance_levels[i].vddc); + btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + ps->performance_levels[i].mclk, + max_limits->vddci, &ps->performance_levels[i].vddci); + btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + ps->performance_levels[i].mclk, + max_limits->vddc, &ps->performance_levels[i].vddc); + btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, + adev->clock.current_dispclk, + max_limits->vddc, 
&ps->performance_levels[i].vddc); + } + + for (i = 0; i < ps->performance_level_count; i++) { + btc_apply_voltage_delta_rules(adev, + max_limits->vddc, max_limits->vddci, + &ps->performance_levels[i].vddc, + &ps->performance_levels[i].vddci); + } + + ps->dc_compatible = true; + for (i = 0; i < ps->performance_level_count; i++) { + if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) + ps->dc_compatible = false; + } +} + +#if 0 +static int si_read_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 *value) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + return amdgpu_si_read_smc_sram_dword(adev, + si_pi->soft_regs_start + reg_offset, value, + si_pi->sram_end); +} +#endif + +static int si_write_smc_soft_register(struct amdgpu_device *adev, + u16 reg_offset, u32 value) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + return amdgpu_si_write_smc_sram_dword(adev, + si_pi->soft_regs_start + reg_offset, + value, si_pi->sram_end); +} + +static bool si_is_special_1gb_platform(struct amdgpu_device *adev) +{ + bool ret = false; + u32 tmp, width, row, column, bank, density; + bool is_memory_gddr5, is_special; + + tmp = RREG32(MC_SEQ_MISC0); + is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT)); + is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT)) + & (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT)); + + WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb); + width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32; + + tmp = RREG32(MC_ARB_RAMCFG); + row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10; + column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8; + bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2; + + density = (1 << (row + column - 20 + bank)) * width; + + if ((adev->pdev->device == 0x6819) && + is_memory_gddr5 && is_special && (density == 0x400)) + ret = true; + + return ret; +} + +static void si_get_leakage_vddc(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u16 vddc, count = 0; + int i, ret; + + for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) { + ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i); + + if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) { + si_pi->leakage_voltage.entries[count].voltage = vddc; + si_pi->leakage_voltage.entries[count].leakage_index = + SISLANDS_LEAKAGE_INDEX0 + i; + count++; + } + } + si_pi->leakage_voltage.count = count; +} + +static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev, + u32 index, u16 *leakage_voltage) +{ + struct si_power_info *si_pi = si_get_pi(adev); + int i; + + if (leakage_voltage == NULL) + return -EINVAL; + + if ((index & 0xff00) != 0xff00) + return -EINVAL; + + if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1) + return -EINVAL; + + if (index < SISLANDS_LEAKAGE_INDEX0) + return -EINVAL; + + for (i = 0; i < si_pi->leakage_voltage.count; i++) { + if (si_pi->leakage_voltage.entries[i].leakage_index == index) { + *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage; + return 0; + } + } + return -EAGAIN; +} + +static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + bool want_thermal_protection; + enum si_dpm_event_src dpm_event_src; + + switch (sources) { + case 0: + default: + want_thermal_protection = false; + 
break; + case (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL): + want_thermal_protection = true; + dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL; + break; + case (1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL): + want_thermal_protection = true; + dpm_event_src = SI_DPM_EVENT_SRC_EXTERNAL; + break; + case ((1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | + (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL)): + want_thermal_protection = true; + dpm_event_src = SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; + break; + } + + if (want_thermal_protection) { + WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK); + if (pi->thermal_protection) + WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); + } else { + WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); + } +} + +static void si_enable_auto_throttle_source(struct amdgpu_device *adev, + enum si_dpm_auto_throttle_src source, + bool enable) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + + if (enable) { + if (!(pi->active_auto_throttle_sources & (1 << source))) { + pi->active_auto_throttle_sources |= 1 << source; + si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); + } + } else { + if (pi->active_auto_throttle_sources & (1 << source)) { + pi->active_auto_throttle_sources &= ~(1 << source); + si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); + } + } +} + +static void si_start_dpm(struct amdgpu_device *adev) +{ + WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); +} + +static void si_stop_dpm(struct amdgpu_device *adev) +{ + WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); +} + +static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable) +{ + if (enable) + WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); + else + WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); + +} + +#if 0 +static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev, + u32 thermal_level) +{ + PPSMC_Result ret; + + if (thermal_level == 0) { + ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); + if (ret == PPSMC_Result_OK) + return 0; + else + return -EINVAL; + } + return 0; +} + +static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev) +{ + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true); +} +#endif + +#if 0 +static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power) +{ + if (ac_power) + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ? + 0 : -EINVAL; + + return 0; +} +#endif + +static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, + PPSMC_Msg msg, u32 parameter) +{ + WREG32(SMC_SCRATCH0, parameter); + return amdgpu_si_send_msg_to_smc(adev, msg); +} + +static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev) +{ + if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK) + return -EINVAL; + + return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ? 
+ 0 : -EINVAL; +} + +static int si_dpm_force_performance_level(void *handle, + enum amd_dpm_forced_level level) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ps *rps = adev->pm.dpm.current_ps; + struct si_ps *ps = si_get_ps(rps); + u32 levels = ps->performance_level_count; + + if (level == AMD_DPM_FORCED_LEVEL_HIGH) { + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) + return -EINVAL; + + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) + return -EINVAL; + } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) + return -EINVAL; + + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) + return -EINVAL; + } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) + return -EINVAL; + + if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) + return -EINVAL; + } + + adev->pm.dpm.forced_level = level; + + return 0; +} + +#if 0 +static int si_set_boot_state(struct amdgpu_device *adev) +{ + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} +#endif + +static int si_set_powergating_by_smu(void *handle, + uint32_t block_type, + bool gate) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + switch (block_type) { + case AMD_IP_BLOCK_TYPE_UVD: + if (!gate) { + adev->pm.dpm.uvd_active = true; + adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; + } else { + adev->pm.dpm.uvd_active = false; + } + + amdgpu_legacy_dpm_compute_clocks(handle); + break; + case AMD_IP_BLOCK_TYPE_VCE: + if (!gate) { + adev->pm.dpm.vce_active = true; + /* XXX select vce level based on ring/task */ + adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; + } else { + adev->pm.dpm.vce_active = false; + } + + amdgpu_legacy_dpm_compute_clocks(handle); + break; + default: + break; + } + return 0; +} + +static int si_set_sw_state(struct amdgpu_device *adev) +{ + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int si_halt_smc(struct amdgpu_device *adev) +{ + if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK) + return -EINVAL; + + return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int si_resume_smc(struct amdgpu_device *adev) +{ + if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK) + return -EINVAL; + + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ? 
+ 0 : -EINVAL; +} + +static void si_dpm_start_smc(struct amdgpu_device *adev) +{ + amdgpu_si_program_jump_on_start(adev); + amdgpu_si_start_smc(adev); + amdgpu_si_smc_clock(adev, true); +} + +static void si_dpm_stop_smc(struct amdgpu_device *adev) +{ + amdgpu_si_reset_smc(adev); + amdgpu_si_smc_clock(adev, false); +} + +static int si_process_firmware_header(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_stateTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->state_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_softRegisters, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->soft_regs_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->mc_reg_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_fanTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->fan_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->arb_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->cac_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->dte_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_spllTable, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->spll_table_start = tmp; + + ret = amdgpu_si_read_smc_sram_dword(adev, + SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + + SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + si_pi->papm_cfg_table_start = tmp; + + return ret; +} + +static void si_read_clock_registers(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL); + si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2); + si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3); + si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4); + si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM); + si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2); + si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); + si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); + si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); + si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); + si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); + si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); + si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); + 
si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); + si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); +} + +static void si_enable_thermal_protection(struct amdgpu_device *adev, + bool enable) +{ + if (enable) + WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); + else + WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); +} + +static void si_enable_acpi_power_management(struct amdgpu_device *adev) +{ + WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); +} + +#if 0 +static int si_enter_ulp_state(struct amdgpu_device *adev) +{ + WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); + + udelay(25000); + + return 0; +} + +static int si_exit_ulp_state(struct amdgpu_device *adev) +{ + int i; + + WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); + + udelay(7000); + + for (i = 0; i < adev->usec_timeout; i++) { + if (RREG32(SMC_RESP_0) == 1) + break; + udelay(1000); + } + + return 0; +} +#endif + +static int si_notify_smc_display_change(struct amdgpu_device *adev, + bool has_display) +{ + PPSMC_Msg msg = has_display ? + PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; + + return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static void si_program_response_times(struct amdgpu_device *adev) +{ + u32 voltage_response_time, acpi_delay_time, vbi_time_out; + u32 vddc_dly, acpi_dly, vbi_dly; + u32 reference_clock; + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1); + + voltage_response_time = (u32)adev->pm.dpm.voltage_response_time; + + if (voltage_response_time == 0) + voltage_response_time = 1000; + + acpi_delay_time = 15000; + vbi_time_out = 100000; + + reference_clock = amdgpu_asic_get_xclk(adev); + + vddc_dly = (voltage_response_time * reference_clock) / 100; + acpi_dly = (acpi_delay_time * reference_clock) / 100; + vbi_dly = (vbi_time_out * reference_clock) / 100; + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA); +} + +static void si_program_ds_registers(struct amdgpu_device *adev) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + u32 tmp; + + /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */ + if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0) + tmp = 0x10; + else + tmp = 0x1; + + if (eg_pi->sclk_deep_sleep) { + WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK); + WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR, + ~AUTOSCALE_ON_SS_CLEAR); + } +} + +static void si_program_display_gap(struct amdgpu_device *adev) +{ + u32 tmp, pipe; + int i; + + tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK); + if (adev->pm.dpm.new_active_crtc_count > 0) + tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); + else + tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE); + + if (adev->pm.dpm.new_active_crtc_count > 1) + tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); + else + tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE); + + WREG32(CG_DISPLAY_GAP_CNTL, tmp); + + tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); + pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; + + if ((adev->pm.dpm.new_active_crtc_count > 0) && + (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) { + /* find the first active crtc */ + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if 
(adev->pm.dpm.new_active_crtcs & (1 << i)) + break; + } + if (i == adev->mode_info.num_crtc) + pipe = 0; + else + pipe = i; + + tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK; + tmp |= DCCG_DISP1_SLOW_SELECT(pipe); + WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp); + } + + /* Setting this to false forces the performance state to low if the crtcs are disabled. + * This can be a problem on PowerXpress systems or if you want to use the card + * for offscreen rendering or compute if there are no crtcs enabled. + */ + si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0); +} + +static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + + if (enable) { + if (pi->sclk_ss) + WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN); + } else { + WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN); + WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN); + } +} + +static void si_setup_bsp(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + u32 xclk = amdgpu_asic_get_xclk(adev); + + r600_calculate_u_and_p(pi->asi, + xclk, + 16, + &pi->bsp, + &pi->bsu); + + r600_calculate_u_and_p(pi->pasi, + xclk, + 16, + &pi->pbsp, + &pi->pbsu); + + + pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); + pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); + + WREG32(CG_BSP, pi->dsp); +} + +static void si_program_git(struct amdgpu_device *adev) +{ + WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK); +} + +static void si_program_tp(struct amdgpu_device *adev) +{ + int i; + enum r600_td td = R600_TD_DFLT; + + for (i = 0; i < R600_PM_NUMBER_OF_TC; i++) + WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i]))); + + if (td == R600_TD_AUTO) + WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); + else + WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); + + if (td == R600_TD_UP) + WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); + + if (td == R600_TD_DOWN) + WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); +} + +static void si_program_tpp(struct amdgpu_device *adev) +{ + WREG32(CG_TPC, R600_TPC_DFLT); +} + +static void si_program_sstp(struct amdgpu_device *adev) +{ + WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); +} + +static void si_enable_display_gap(struct amdgpu_device *adev) +{ + u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); + + tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); + tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | + DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); + + tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); + tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | + DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); + WREG32(CG_DISPLAY_GAP_CNTL, tmp); +} + +static void si_program_vc(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + + WREG32(CG_FTV, pi->vrc); +} + +static void si_clear_vc(struct amdgpu_device *adev) +{ + WREG32(CG_FTV, 0); +} + +static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) +{ + u8 mc_para_index; + + if (memory_clock < 10000) + mc_para_index = 0; + else if (memory_clock >= 80000) + mc_para_index = 0x0f; + else + mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1); + return mc_para_index; +} + +static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) +{ + u8 mc_para_index; + + if (strobe_mode) { + if (memory_clock < 12500) + mc_para_index = 0x00; + else if (memory_clock > 47500) + mc_para_index = 0x0f; + else + mc_para_index = (u8)((memory_clock - 10000) / 2500); + } else { + if 
(memory_clock < 65000) + mc_para_index = 0x00; + else if (memory_clock > 135000) + mc_para_index = 0x0f; + else + mc_para_index = (u8)((memory_clock - 60000) / 5000); + } + return mc_para_index; +} + +static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + bool strobe_mode = false; + u8 result = 0; + + if (mclk <= pi->mclk_strobe_mode_threshold) + strobe_mode = true; + + if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) + result = si_get_mclk_frequency_ratio(mclk, strobe_mode); + else + result = si_get_ddr3_mclk_frequency_ratio(mclk); + + if (strobe_mode) + result |= SISLANDS_SMC_STROBE_ENABLE; + + return result; +} + +static int si_upload_firmware(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + amdgpu_si_reset_smc(adev); + amdgpu_si_smc_clock(adev, false); + + return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end); +} + +static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev, + const struct atom_voltage_table *table, + const struct amdgpu_phase_shedding_limits_table *limits) +{ + u32 data, num_bits, num_levels; + + if ((table == NULL) || (limits == NULL)) + return false; + + data = table->mask_low; + + num_bits = hweight32(data); + + if (num_bits == 0) + return false; + + num_levels = (1 << num_bits); + + if (table->count != num_levels) + return false; + + if (limits->count != (num_levels - 1)) + return false; + + return true; +} + +static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev, + u32 max_voltage_steps, + struct atom_voltage_table *voltage_table) +{ + unsigned int i, diff; + + if (voltage_table->count <= max_voltage_steps) + return; + + diff = voltage_table->count - max_voltage_steps; + + for (i= 0; i < max_voltage_steps; i++) + voltage_table->entries[i] = voltage_table->entries[i + diff]; + + voltage_table->count = max_voltage_steps; +} + +static int si_get_svi2_voltage_table(struct amdgpu_device *adev, + struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table, + struct atom_voltage_table *voltage_table) +{ + u32 i; + + if (voltage_dependency_table == NULL) + return -EINVAL; + + voltage_table->mask_low = 0; + voltage_table->phase_delay = 0; + + voltage_table->count = voltage_dependency_table->count; + for (i = 0; i < voltage_table->count; i++) { + voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; + voltage_table->entries[i].smio_low = 0; + } + + return 0; +} + +static int si_construct_voltage_tables(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + int ret; + + if (pi->voltage_control) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table); + if (ret) + return ret; + + if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) + si_trim_voltage_table_to_fit_state_table(adev, + SISLANDS_MAX_NO_VREG_STEPS, + &eg_pi->vddc_voltage_table); + } else if (si_pi->voltage_control_svi2) { + ret = si_get_svi2_voltage_table(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, + &eg_pi->vddc_voltage_table); + if (ret) + return ret; + } else { + return -EINVAL; + } + + if (eg_pi->vddci_control) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI, + VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table); + if (ret) + return ret; + + if (eg_pi->vddci_voltage_table.count 
> SISLANDS_MAX_NO_VREG_STEPS) + si_trim_voltage_table_to_fit_state_table(adev, + SISLANDS_MAX_NO_VREG_STEPS, + &eg_pi->vddci_voltage_table); + } + if (si_pi->vddci_control_svi2) { + ret = si_get_svi2_voltage_table(adev, + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, + &eg_pi->vddci_voltage_table); + if (ret) + return ret; + } + + if (pi->mvdd_control) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC, + VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table); + + if (ret) { + pi->mvdd_control = false; + return ret; + } + + if (si_pi->mvdd_voltage_table.count == 0) { + pi->mvdd_control = false; + return -EINVAL; + } + + if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) + si_trim_voltage_table_to_fit_state_table(adev, + SISLANDS_MAX_NO_VREG_STEPS, + &si_pi->mvdd_voltage_table); + } + + if (si_pi->vddc_phase_shed_control) { + ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, + VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table); + if (ret) + si_pi->vddc_phase_shed_control = false; + + if ((si_pi->vddc_phase_shed_table.count == 0) || + (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS)) + si_pi->vddc_phase_shed_control = false; + } + + return 0; +} + +static void si_populate_smc_voltage_table(struct amdgpu_device *adev, + const struct atom_voltage_table *voltage_table, + SISLANDS_SMC_STATETABLE *table) +{ + unsigned int i; + + for (i = 0; i < voltage_table->count; i++) + table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); +} + +static int si_populate_smc_voltage_tables(struct amdgpu_device *adev, + SISLANDS_SMC_STATETABLE *table) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + u8 i; + + if (si_pi->voltage_control_svi2) { + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc, + si_pi->svc_gpio_id); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd, + si_pi->svd_gpio_id); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type, + 2); + } else { + if (eg_pi->vddc_voltage_table.count) { + si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table); + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = + cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); + + for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) { + if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { + table->maxVDDCIndexInPPTable = i; + break; + } + } + } + + if (eg_pi->vddci_voltage_table.count) { + si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table); + + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = + cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); + } + + + if (si_pi->mvdd_voltage_table.count) { + si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table); + + table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = + cpu_to_be32(si_pi->mvdd_voltage_table.mask_low); + } + + if (si_pi->vddc_phase_shed_control) { + if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) { + si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table); + + table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] = + cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, + 
(u32)si_pi->vddc_phase_shed_table.phase_delay); + } else { + si_pi->vddc_phase_shed_control = false; + } + } + } + + return 0; +} + +static int si_populate_voltage_value(struct amdgpu_device *adev, + const struct atom_voltage_table *table, + u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage) +{ + unsigned int i; + + for (i = 0; i < table->count; i++) { + if (value <= table->entries[i].value) { + voltage->index = (u8)i; + voltage->value = cpu_to_be16(table->entries[i].value); + break; + } + } + + if (i >= table->count) + return -EINVAL; + + return 0; +} + +static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk, + SISLANDS_SMC_VOLTAGE_VALUE *voltage) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + + if (pi->mvdd_control) { + if (mclk <= pi->mvdd_split_frequency) + voltage->index = 0; + else + voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1; + + voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value); + } + return 0; +} + +static int si_get_std_voltage_value(struct amdgpu_device *adev, + SISLANDS_SMC_VOLTAGE_VALUE *voltage, + u16 *std_voltage) +{ + u16 v_index; + bool voltage_found = false; + *std_voltage = be16_to_cpu(voltage->value); + + if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) { + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) { + if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) + return -EINVAL; + + for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { + if (be16_to_cpu(voltage->value) == + (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { + voltage_found = true; + if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) + *std_voltage = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; + else + *std_voltage = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; + break; + } + } + + if (!voltage_found) { + for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { + if (be16_to_cpu(voltage->value) <= + (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { + voltage_found = true; + if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) + *std_voltage = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; + else + *std_voltage = + adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; + break; + } + } + } + } else { + if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count) + *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc; + } + } + + return 0; +} + +static int si_populate_std_voltage_value(struct amdgpu_device *adev, + u16 value, u8 index, + SISLANDS_SMC_VOLTAGE_VALUE *voltage) +{ + voltage->index = index; + voltage->value = cpu_to_be16(value); + + return 0; +} + +static int si_populate_phase_shedding_value(struct amdgpu_device *adev, + const struct amdgpu_phase_shedding_limits_table *limits, + u16 voltage, u32 sclk, u32 mclk, + SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage) +{ + unsigned int i; + + for (i = 0; i < limits->count; i++) { + if ((voltage <= limits->entries[i].voltage) && + (sclk <= limits->entries[i].sclk) && + (mclk <= limits->entries[i].mclk)) + break; + } + + smc_voltage->phase_settings = (u8)i; + + return 0; +} + +static int si_init_arb_table_index(struct amdgpu_device *adev) +{ 
+ struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + tmp &= 0x00FFFFFF; + tmp |= MC_CG_ARB_FREQ_F1 << 24; + + return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start, + tmp, si_pi->sram_end); +} + +static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev) +{ + return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); +} + +static int si_reset_to_default(struct amdgpu_device *adev) +{ + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? + 0 : -EINVAL; +} + +static int si_force_switch_to_arb_f0(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + int ret; + + ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start, + &tmp, si_pi->sram_end); + if (ret) + return ret; + + tmp = (tmp >> 24) & 0xff; + + if (tmp == MC_CG_ARB_FREQ_F0) + return 0; + + return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0); +} + +static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev, + u32 engine_clock) +{ + u32 dram_rows; + u32 dram_refresh_rate; + u32 mc_arb_rfsh_rate; + u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; + + if (tmp >= 4) + dram_rows = 16384; + else + dram_rows = 1 << (tmp + 10); + + dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); + mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; + + return mc_arb_rfsh_rate; +} + +static int si_populate_memory_timing_parameters(struct amdgpu_device *adev, + struct rv7xx_pl *pl, + SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs) +{ + u32 dram_timing; + u32 dram_timing2; + u32 burst_time; + + arb_regs->mc_arb_rfsh_rate = + (u8)si_calculate_memory_refresh_rate(adev, pl->sclk); + + amdgpu_atombios_set_engine_dram_timings(adev, + pl->sclk, + pl->mclk); + + dram_timing = RREG32(MC_ARB_DRAM_TIMING); + dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); + burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; + + arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing); + arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2); + arb_regs->mc_arb_burst_time = (u8)burst_time; + + return 0; +} + +static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + unsigned int first_arb_set) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 }; + int i, ret = 0; + + for (i = 0; i < state->performance_level_count; i++) { + ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs); + if (ret) + break; + ret = amdgpu_si_copy_bytes_to_smc(adev, + si_pi->arb_table_start + + offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) + + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i), + (u8 *)&arb_regs, + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet), + si_pi->sram_end); + if (ret) + break; + } + + return ret; +} + +static int si_program_memory_timing_parameters(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state) +{ + return si_do_program_memory_timing_parameters(adev, amdgpu_new_state, + SISLANDS_DRIVER_STATE_ARB_INDEX); +} + +static int si_populate_initial_mvdd_value(struct amdgpu_device *adev, + struct SISLANDS_SMC_VOLTAGE_VALUE *voltage) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct 
si_power_info *si_pi = si_get_pi(adev); + + if (pi->mvdd_control) + return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table, + si_pi->mvdd_bootup_value, voltage); + + return 0; +} + +static int si_populate_smc_initial_state(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_initial_state, + SISLANDS_SMC_STATETABLE *table) +{ + struct si_ps *initial_state = si_get_ps(amdgpu_initial_state); + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + u32 reg; + int ret; + + table->initialState.level.mclk.vDLL_CNTL = + cpu_to_be32(si_pi->clock_registers.dll_cntl); + table->initialState.level.mclk.vMCLK_PWRMGT_CNTL = + cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl); + table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL = + cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl); + table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL = + cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl); + table->initialState.level.mclk.vMPLL_FUNC_CNTL = + cpu_to_be32(si_pi->clock_registers.mpll_func_cntl); + table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 = + cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1); + table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 = + cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2); + table->initialState.level.mclk.vMPLL_SS = + cpu_to_be32(si_pi->clock_registers.mpll_ss1); + table->initialState.level.mclk.vMPLL_SS2 = + cpu_to_be32(si_pi->clock_registers.mpll_ss2); + + table->initialState.level.mclk.mclk_value = + cpu_to_be32(initial_state->performance_levels[0].mclk); + + table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL = + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl); + table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2); + table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3); + table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = + cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4); + table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM = + cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum); + table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = + cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2); + + table->initialState.level.sclk.sclk_value = + cpu_to_be32(initial_state->performance_levels[0].sclk); + + table->initialState.level.arbRefreshState = + SISLANDS_INITIAL_STATE_ARB_INDEX; + + table->initialState.level.ACIndex = 0; + + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + initial_state->performance_levels[0].vddc, + &table->initialState.level.vddc); + + if (!ret) { + u16 std_vddc; + + ret = si_get_std_voltage_value(adev, + &table->initialState.level.vddc, + &std_vddc); + if (!ret) + si_populate_std_voltage_value(adev, std_vddc, + table->initialState.level.vddc.index, + &table->initialState.level.std_vddc); + } + + if (eg_pi->vddci_control) + si_populate_voltage_value(adev, + &eg_pi->vddci_voltage_table, + initial_state->performance_levels[0].vddci, + &table->initialState.level.vddci); + + if (si_pi->vddc_phase_shed_control) + si_populate_phase_shedding_value(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, + initial_state->performance_levels[0].vddc, + initial_state->performance_levels[0].sclk, + initial_state->performance_levels[0].mclk, + &table->initialState.level.vddc); + + si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd); + + 
reg = CG_R(0xffff) | CG_L(0); + table->initialState.level.aT = cpu_to_be32(reg); + table->initialState.level.bSP = cpu_to_be32(pi->dsp); + table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen; + + if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { + table->initialState.level.strobeMode = + si_get_strobe_mode_settings(adev, + initial_state->performance_levels[0].mclk); + + if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold) + table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG; + else + table->initialState.level.mcFlags = 0; + } + + table->initialState.levelCount = 1; + + table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; + + table->initialState.level.dpm2.MaxPS = 0; + table->initialState.level.dpm2.NearTDPDec = 0; + table->initialState.level.dpm2.AboveSafeInc = 0; + table->initialState.level.dpm2.BelowSafeInc = 0; + table->initialState.level.dpm2.PwrEfficiencyRatio = 0; + + reg = MIN_POWER_MASK | MAX_POWER_MASK; + table->initialState.level.SQPowerThrottle = cpu_to_be32(reg); + + reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg); + + return 0; +} + +static enum si_pcie_gen si_gen_pcie_gen_support(struct amdgpu_device *adev, + u32 sys_mask, + enum si_pcie_gen asic_gen, + enum si_pcie_gen default_gen) +{ + switch (asic_gen) { + case SI_PCIE_GEN1: + return SI_PCIE_GEN1; + case SI_PCIE_GEN2: + return SI_PCIE_GEN2; + case SI_PCIE_GEN3: + return SI_PCIE_GEN3; + default: + if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) && + (default_gen == SI_PCIE_GEN3)) + return SI_PCIE_GEN3; + else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) && + (default_gen == SI_PCIE_GEN2)) + return SI_PCIE_GEN2; + else + return SI_PCIE_GEN1; + } + return SI_PCIE_GEN1; +} + +static int si_populate_smc_acpi_state(struct amdgpu_device *adev, + SISLANDS_SMC_STATETABLE *table) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl; + u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2; + u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3; + u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4; + u32 dll_cntl = si_pi->clock_registers.dll_cntl; + u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl; + u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl; + u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl; + u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1; + u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2; + u32 reg; + int ret; + + table->ACPIState = table->initialState; + + table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; + + if (pi->acpi_vddc) { + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + pi->acpi_vddc, &table->ACPIState.level.vddc); + if (!ret) { + u16 std_vddc; + + ret = si_get_std_voltage_value(adev, + &table->ACPIState.level.vddc, &std_vddc); + if (!ret) + si_populate_std_voltage_value(adev, std_vddc, + table->ACPIState.level.vddc.index, + &table->ACPIState.level.std_vddc); + } + table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen; + + if (si_pi->vddc_phase_shed_control) { + si_populate_phase_shedding_value(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, + pi->acpi_vddc, + 0, + 
0, + &table->ACPIState.level.vddc); + } + } else { + ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, + pi->min_vddc_in_table, &table->ACPIState.level.vddc); + if (!ret) { + u16 std_vddc; + + ret = si_get_std_voltage_value(adev, + &table->ACPIState.level.vddc, &std_vddc); + + if (!ret) + si_populate_std_voltage_value(adev, std_vddc, + table->ACPIState.level.vddc.index, + &table->ACPIState.level.std_vddc); + } + table->ACPIState.level.gen2PCIE = + (u8)si_gen_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + SI_PCIE_GEN1); + + if (si_pi->vddc_phase_shed_control) + si_populate_phase_shedding_value(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, + pi->min_vddc_in_table, + 0, + 0, + &table->ACPIState.level.vddc); + } + + if (pi->acpi_vddc) { + if (eg_pi->acpi_vddci) + si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, + eg_pi->acpi_vddci, + &table->ACPIState.level.vddci); + } + + mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET; + mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); + + dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); + + spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= SCLK_MUX_SEL(4); + + table->ACPIState.level.mclk.vDLL_CNTL = + cpu_to_be32(dll_cntl); + table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL = + cpu_to_be32(mclk_pwrmgt_cntl); + table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL = + cpu_to_be32(mpll_ad_func_cntl); + table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL = + cpu_to_be32(mpll_dq_func_cntl); + table->ACPIState.level.mclk.vMPLL_FUNC_CNTL = + cpu_to_be32(mpll_func_cntl); + table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 = + cpu_to_be32(mpll_func_cntl_1); + table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 = + cpu_to_be32(mpll_func_cntl_2); + table->ACPIState.level.mclk.vMPLL_SS = + cpu_to_be32(si_pi->clock_registers.mpll_ss1); + table->ACPIState.level.mclk.vMPLL_SS2 = + cpu_to_be32(si_pi->clock_registers.mpll_ss2); + + table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL = + cpu_to_be32(spll_func_cntl); + table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = + cpu_to_be32(spll_func_cntl_2); + table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = + cpu_to_be32(spll_func_cntl_3); + table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = + cpu_to_be32(spll_func_cntl_4); + + table->ACPIState.level.mclk.mclk_value = 0; + table->ACPIState.level.sclk.sclk_value = 0; + + si_populate_mvdd_value(adev, 0, &table->ACPIState.level.mvdd); + + if (eg_pi->dynamic_ac_timing) + table->ACPIState.level.ACIndex = 0; + + table->ACPIState.level.dpm2.MaxPS = 0; + table->ACPIState.level.dpm2.NearTDPDec = 0; + table->ACPIState.level.dpm2.AboveSafeInc = 0; + table->ACPIState.level.dpm2.BelowSafeInc = 0; + table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0; + + reg = MIN_POWER_MASK | MAX_POWER_MASK; + table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg); + + reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; + table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg); + + return 0; +} + +static int si_populate_ulv_state(struct amdgpu_device *adev, + struct SISLANDS_SMC_SWSTATE_SINGLE *state) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + u32 sclk_in_sr = 1350; /* ??? 
*/ + int ret; + + ret = si_convert_power_level_to_smc(adev, &ulv->pl, + &state->level); + if (!ret) { + if (eg_pi->sclk_deep_sleep) { + if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ) + state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS; + else + state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE; + } + if (ulv->one_pcie_lane_in_ulv) + state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1; + state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX); + state->level.ACIndex = 1; + state->level.std_vddc = state->level.vddc; + state->levelCount = 1; + + state->flags |= PPSMC_SWSTATE_FLAG_DC; + } + + return ret; +} + +static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 }; + int ret; + + ret = si_populate_memory_timing_parameters(adev, &ulv->pl, + &arb_regs); + if (ret) + return ret; + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay, + ulv->volt_change_delay); + + ret = amdgpu_si_copy_bytes_to_smc(adev, + si_pi->arb_table_start + + offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) + + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX, + (u8 *)&arb_regs, + sizeof(SMC_SIslands_MCArbDramTimingRegisterSet), + si_pi->sram_end); + + return ret; +} + +static void si_get_mvdd_configuration(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + + pi->mvdd_split_frequency = 30000; +} + +static int si_init_smc_table(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps; + const struct si_ulv_param *ulv = &si_pi->ulv; + SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable; + int ret; + u32 lane_width; + u32 vr_hot_gpio; + + si_populate_smc_voltage_tables(adev, table); + + switch (adev->pm.int_thermal_type) { + case THERMAL_TYPE_SI: + case THERMAL_TYPE_EMC2103_WITH_INTERNAL: + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; + break; + case THERMAL_TYPE_NONE: + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; + break; + default: + table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; + break; + } + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) + table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) { + if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819)) + table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; + } + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) + table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; + + if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) + table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY) + table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH; + + if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) { + table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO; + vr_hot_gpio = adev->pm.dpm.backbias_response_time; + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio, + vr_hot_gpio); + } + + ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table); + if (ret) + return ret; + + ret = si_populate_smc_acpi_state(adev, table); + if (ret) + return ret; + + table->driverState.flags = table->initialState.flags; + 
table->driverState.levelCount = table->initialState.levelCount; + table->driverState.levels[0] = table->initialState.level; + + ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state, + SISLANDS_INITIAL_STATE_ARB_INDEX); + if (ret) + return ret; + + if (ulv->supported && ulv->pl.vddc) { + ret = si_populate_ulv_state(adev, &table->ULVState); + if (ret) + return ret; + + ret = si_program_ulv_memory_timing_parameters(adev); + if (ret) + return ret; + + WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control); + WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); + + lane_width = amdgpu_get_pcie_lanes(adev); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); + } else { + table->ULVState = table->initialState; + } + + return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start, + (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE), + si_pi->sram_end); +} + +static int si_calculate_sclk_params(struct amdgpu_device *adev, + u32 engine_clock, + SISLANDS_SMC_SCLK_VALUE *sclk) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct atom_clock_dividers dividers; + u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl; + u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2; + u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3; + u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2; + u64 tmp; + u32 reference_clock = adev->clock.spll.reference_freq; + u32 reference_divider; + u32 fbdiv; + int ret; + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + engine_clock, false, &dividers); + if (ret) + return ret; + + reference_divider = 1 + dividers.ref_div; + + tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384; + do_div(tmp, reference_clock); + fbdiv = (u32) tmp; + + spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK); + spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div); + spll_func_cntl |= SPLL_PDIV_A(dividers.post_div); + + spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; + spll_func_cntl_2 |= SCLK_MUX_SEL(2); + + spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; + spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); + spll_func_cntl_3 |= SPLL_DITHEN; + + if (pi->sclk_ss) { + struct amdgpu_atom_ss ss; + u32 vco_freq = engine_clock * dividers.post_div; + + if (amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_ENGINE_SS, vco_freq)) { + u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); + u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); + + cg_spll_spread_spectrum &= ~CLK_S_MASK; + cg_spll_spread_spectrum |= CLK_S(clk_s); + cg_spll_spread_spectrum |= SSEN; + + cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; + cg_spll_spread_spectrum_2 |= CLK_V(clk_v); + } + } + + sclk->sclk_value = engine_clock; + sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl; + sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2; + sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3; + sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4; + sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum; + sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2; + + return 0; +} + +static int si_populate_sclk_value(struct amdgpu_device *adev, + u32 engine_clock, + SISLANDS_SMC_SCLK_VALUE *sclk) +{ + SISLANDS_SMC_SCLK_VALUE sclk_tmp; + int ret; + + ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp); + if (!ret)
{ + sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value); + sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL); + sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2); + sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3); + sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4); + sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM); + sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2); + } + + return ret; +} + +static int si_populate_mclk_value(struct amdgpu_device *adev, + u32 engine_clock, + u32 memory_clock, + SISLANDS_SMC_MCLK_VALUE *mclk, + bool strobe_mode, + bool dll_state_on) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + u32 dll_cntl = si_pi->clock_registers.dll_cntl; + u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl; + u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl; + u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl; + u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1; + u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2; + u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1; + u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2; + struct atom_mpll_param mpll_param; + int ret; + + ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param); + if (ret) + return ret; + + mpll_func_cntl &= ~BWCTRL_MASK; + mpll_func_cntl |= BWCTRL(mpll_param.bwcntl); + + mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK); + mpll_func_cntl_1 |= CLKF(mpll_param.clkf) | + CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode); + + mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; + mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); + + if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { + mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); + mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | + YCLK_POST_DIV(mpll_param.post_div); + } + + if (pi->mclk_ss) { + struct amdgpu_atom_ss ss; + u32 freq_nom; + u32 tmp; + u32 reference_clock = adev->clock.mpll.reference_freq; + + if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) + freq_nom = memory_clock * 4; + else + freq_nom = memory_clock * 2; + + tmp = freq_nom / reference_clock; + tmp = tmp * tmp; + if (amdgpu_atombios_get_asic_ss_info(adev, &ss, + ASIC_INTERNAL_MEMORY_SS, freq_nom)) { + u32 clks = reference_clock * 5 / ss.rate; + u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); + + mpll_ss1 &= ~CLKV_MASK; + mpll_ss1 |= CLKV(clkv); + + mpll_ss2 &= ~CLKS_MASK; + mpll_ss2 |= CLKS(clks); + } + } + + mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; + mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed); + + if (dll_state_on) + mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB; + else + mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); + + mclk->mclk_value = cpu_to_be32(memory_clock); + mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl); + mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1); + mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2); + mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); + mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); + mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); + mclk->vDLL_CNTL = cpu_to_be32(dll_cntl); + mclk->vMPLL_SS = cpu_to_be32(mpll_ss1); + mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2); + + return 0; +} + 
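A minimal, self-contained sketch (illustration only, not part of the diff) of the feedback-divider arithmetic performed by si_calculate_sclk_params() above: the PLL feedback ratio engine_clock * (1 + ref_div) * post_div / reference_clock is scaled by 16384 (2^14) before being programmed via SPLL_FB_DIV with dithering enabled. All input values below are hypothetical, the clocks are assumed to be in 10 kHz units, and plain 64-bit division stands in for the kernel's do_div():

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t engine_clock = 80000;      /* assumed 800 MHz target sclk, 10 kHz units */
	uint64_t reference_clock = 2700;    /* assumed 27 MHz SPLL reference, 10 kHz units */
	uint64_t ref_div = 2, post_div = 2; /* hypothetical divider query result */
	uint64_t reference_divider = 1 + ref_div;

	/* feedback ratio scaled by 16384, mirroring the fbdiv computation above */
	uint64_t fbdiv = engine_clock * reference_divider * post_div * 16384 /
			 reference_clock;

	printf("fbdiv = %llu, effective feedback ratio ~ %.2f\n",
	       (unsigned long long)fbdiv, (double)fbdiv / 16384.0);
	return 0;
}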
+static void si_populate_smc_sp(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct si_ps *ps = si_get_ps(amdgpu_state); + struct rv7xx_power_info *pi = rv770_get_pi(adev); + int i; + + for (i = 0; i < ps->performance_level_count - 1; i++) + smc_state->levels[i].bSP = cpu_to_be32(pi->dsp); + + smc_state->levels[ps->performance_level_count - 1].bSP = + cpu_to_be32(pi->psp); +} + +static int si_convert_power_level_to_smc(struct amdgpu_device *adev, + struct rv7xx_pl *pl, + SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + int ret; + bool dll_state_on; + u16 std_vddc; + bool gmc_pg = false; + + if (eg_pi->pcie_performance_request && + (si_pi->force_pcie_gen != SI_PCIE_GEN_INVALID)) + level->gen2PCIE = (u8)si_pi->force_pcie_gen; + else + level->gen2PCIE = (u8)pl->pcie_gen; + + ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk); + if (ret) + return ret; + + level->mcFlags = 0; + + if (pi->mclk_stutter_mode_threshold && + (pl->mclk <= pi->mclk_stutter_mode_threshold) && + !eg_pi->uvd_enabled && + (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && + (adev->pm.dpm.new_active_crtc_count <= 2)) { + level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN; + + if (gmc_pg) + level->mcFlags |= SISLANDS_SMC_MC_PG_EN; + } + + if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { + if (pl->mclk > pi->mclk_edc_enable_threshold) + level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG; + + if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold) + level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG; + + level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk); + + if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) { + if (si_get_mclk_frequency_ratio(pl->mclk, true) >= + ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf)) + dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; + else + dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false; + } else { + dll_state_on = false; + } + } else { + level->strobeMode = si_get_strobe_mode_settings(adev, + pl->mclk); + + dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? 
true : false; + } + + ret = si_populate_mclk_value(adev, + pl->sclk, + pl->mclk, + &level->mclk, + (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on); + if (ret) + return ret; + + ret = si_populate_voltage_value(adev, + &eg_pi->vddc_voltage_table, + pl->vddc, &level->vddc); + if (ret) + return ret; + + + ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc); + if (ret) + return ret; + + ret = si_populate_std_voltage_value(adev, std_vddc, + level->vddc.index, &level->std_vddc); + if (ret) + return ret; + + if (eg_pi->vddci_control) { + ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, + pl->vddci, &level->vddci); + if (ret) + return ret; + } + + if (si_pi->vddc_phase_shed_control) { + ret = si_populate_phase_shedding_value(adev, + &adev->pm.dpm.dyn_state.phase_shedding_limits_table, + pl->vddc, + pl->sclk, + pl->mclk, + &level->vddc); + if (ret) + return ret; + } + + level->MaxPoweredUpCU = si_pi->max_cu; + + ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd); + + return ret; +} + +static int si_populate_smc_t(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + u32 a_t; + u32 t_l, t_h; + u32 high_bsp; + int i, ret; + + if (state->performance_level_count >= 9) + return -EINVAL; + + if (state->performance_level_count < 2) { + a_t = CG_R(0xffff) | CG_L(0); + smc_state->levels[0].aT = cpu_to_be32(a_t); + return 0; + } + + smc_state->levels[0].aT = cpu_to_be32(0); + + for (i = 0; i <= state->performance_level_count - 2; i++) { + ret = r600_calculate_at( + (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1), + 100 * R600_AH_DFLT, + state->performance_levels[i + 1].sclk, + state->performance_levels[i].sclk, + &t_l, + &t_h); + + if (ret) { + t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT; + t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT; + } + + a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK; + a_t |= CG_R(t_l * pi->bsp / 20000); + smc_state->levels[i].aT = cpu_to_be32(a_t); + + high_bsp = (i == state->performance_level_count - 2) ? + pi->pbsp : pi->bsp; + a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000); + smc_state->levels[i + 1].aT = cpu_to_be32(a_t); + } + + return 0; +} + +static int si_disable_ulv(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + + if (ulv->supported) + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? + 0 : -EINVAL; + + return 0; +} + +static bool si_is_state_ulv_compatible(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + const struct si_power_info *si_pi = si_get_pi(adev); + const struct si_ulv_param *ulv = &si_pi->ulv; + const struct si_ps *state = si_get_ps(amdgpu_state); + int i; + + if (state->performance_levels[0].mclk != ulv->pl.mclk) + return false; + + /* XXX validate against display requirements! 
*/ + + for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) { + if (adev->clock.current_dispclk <= + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) { + if (ulv->pl.vddc < + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v) + return false; + } + } + + if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0)) + return false; + + return true; +} + +static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state) +{ + const struct si_power_info *si_pi = si_get_pi(adev); + const struct si_ulv_param *ulv = &si_pi->ulv; + + if (ulv->supported) { + if (si_is_state_ulv_compatible(adev, amdgpu_new_state)) + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? + 0 : -EINVAL; + } + return 0; +} + +static int si_convert_power_state_to_smc(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SISLANDS_SMC_SWSTATE *smc_state) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct ni_power_info *ni_pi = ni_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ps *state = si_get_ps(amdgpu_state); + int i, ret; + u32 threshold; + u32 sclk_in_sr = 1350; /* ??? */ + + if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS) + return -EINVAL; + + threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100; + + if (amdgpu_state->vclk && amdgpu_state->dclk) { + eg_pi->uvd_enabled = true; + if (eg_pi->smu_uvd_hs) + smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD; + } else { + eg_pi->uvd_enabled = false; + } + + if (state->dc_compatible) + smc_state->flags |= PPSMC_SWSTATE_FLAG_DC; + + smc_state->levelCount = 0; + for (i = 0; i < state->performance_level_count; i++) { + if (eg_pi->sclk_deep_sleep) { + if ((i == 0) || si_pi->sclk_deep_sleep_above_low) { + if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ) + smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS; + else + smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE; + } + } + + ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i], + &smc_state->levels[i]); + smc_state->levels[i].arbRefreshState = + (u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i); + + if (ret) + return ret; + + if (ni_pi->enable_power_containment) + smc_state->levels[i].displayWatermark = + (state->performance_levels[i].sclk < threshold) ? + PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH; + else + smc_state->levels[i].displayWatermark = (i < 2) ? 
+ PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH; + + if (eg_pi->dynamic_ac_timing) + smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i; + else + smc_state->levels[i].ACIndex = 0; + + smc_state->levelCount++; + } + + si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_watermark_threshold, + threshold / 512); + + si_populate_smc_sp(adev, amdgpu_state, smc_state); + + ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state); + if (ret) + ni_pi->enable_power_containment = false; + + ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state); + if (ret) + ni_pi->enable_sq_ramping = false; + + return si_populate_smc_t(adev, amdgpu_state, smc_state); +} + +static int si_upload_sw_state(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ps *new_state = si_get_ps(amdgpu_new_state); + int ret; + u32 address = si_pi->state_table_start + + offsetof(SISLANDS_SMC_STATETABLE, driverState); + SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState; + size_t state_size = struct_size(smc_state, levels, + new_state->performance_level_count); + memset(smc_state, 0, state_size); + + ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state); + if (ret) + return ret; + + return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state, + state_size, si_pi->sram_end); +} + +static int si_upload_ulv_state(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + int ret = 0; + + if (ulv->supported && ulv->pl.vddc) { + u32 address = si_pi->state_table_start + + offsetof(SISLANDS_SMC_STATETABLE, ULVState); + struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState; + u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE); + + memset(smc_state, 0, state_size); + + ret = si_populate_ulv_state(adev, smc_state); + if (!ret) + ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state, + state_size, si_pi->sram_end); + } + + return ret; +} + +static int si_upload_smc_data(struct amdgpu_device *adev) +{ + struct amdgpu_crtc *amdgpu_crtc = NULL; + int i; + + if (adev->pm.dpm.new_active_crtc_count == 0) + return 0; + + for (i = 0; i < adev->mode_info.num_crtc; i++) { + if (adev->pm.dpm.new_active_crtcs & (1 << i)) { + amdgpu_crtc = adev->mode_info.crtcs[i]; + break; + } + } + + if (amdgpu_crtc == NULL) + return 0; + + if (amdgpu_crtc->line_time <= 0) + return 0; + + if (si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_crtc_index, + amdgpu_crtc->crtc_id) != PPSMC_Result_OK) + return 0; + + if (si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min, + amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK) + return 0; + + if (si_write_smc_soft_register(adev, + SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max, + amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK) + return 0; + + return 0; +} + +static int si_set_mc_special_registers(struct amdgpu_device *adev, + struct si_mc_reg_table *table) +{ + u8 i, j, k; + u32 temp_reg; + + for (i = 0, j = table->last; i < table->last; i++) { + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + switch (table->mc_reg_address[i].s1) { + case MC_SEQ_MISC1: + temp_reg = RREG32(MC_PMG_CMD_EMRS); + table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS; + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP; + for (k = 0; k < table->num_entries; k++) + 
table->mc_reg_table_entry[k].mc_data[j] = + ((temp_reg & 0xffff0000)) | + ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); + j++; + + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + temp_reg = RREG32(MC_PMG_CMD_MRS); + table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS; + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP; + for (k = 0; k < table->num_entries; k++) { + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) + table->mc_reg_table_entry[k].mc_data[j] |= 0x100; + } + j++; + + if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) { + if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD; + table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD; + for (k = 0; k < table->num_entries; k++) + table->mc_reg_table_entry[k].mc_data[j] = + (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; + j++; + } + break; + case MC_SEQ_RESERVE_M: + temp_reg = RREG32(MC_PMG_CMD_MRS1); + table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1; + table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP; + for(k = 0; k < table->num_entries; k++) + table->mc_reg_table_entry[k].mc_data[j] = + (temp_reg & 0xffff0000) | + (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); + j++; + break; + default: + break; + } + } + + table->last = j; + + return 0; +} + +static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) +{ + bool result = true; + switch (in_reg) { + case MC_SEQ_RAS_TIMING: + *out_reg = MC_SEQ_RAS_TIMING_LP; + break; + case MC_SEQ_CAS_TIMING: + *out_reg = MC_SEQ_CAS_TIMING_LP; + break; + case MC_SEQ_MISC_TIMING: + *out_reg = MC_SEQ_MISC_TIMING_LP; + break; + case MC_SEQ_MISC_TIMING2: + *out_reg = MC_SEQ_MISC_TIMING2_LP; + break; + case MC_SEQ_RD_CTL_D0: + *out_reg = MC_SEQ_RD_CTL_D0_LP; + break; + case MC_SEQ_RD_CTL_D1: + *out_reg = MC_SEQ_RD_CTL_D1_LP; + break; + case MC_SEQ_WR_CTL_D0: + *out_reg = MC_SEQ_WR_CTL_D0_LP; + break; + case MC_SEQ_WR_CTL_D1: + *out_reg = MC_SEQ_WR_CTL_D1_LP; + break; + case MC_PMG_CMD_EMRS: + *out_reg = MC_SEQ_PMG_CMD_EMRS_LP; + break; + case MC_PMG_CMD_MRS: + *out_reg = MC_SEQ_PMG_CMD_MRS_LP; + break; + case MC_PMG_CMD_MRS1: + *out_reg = MC_SEQ_PMG_CMD_MRS1_LP; + break; + case MC_SEQ_PMG_TIMING: + *out_reg = MC_SEQ_PMG_TIMING_LP; + break; + case MC_PMG_CMD_MRS2: + *out_reg = MC_SEQ_PMG_CMD_MRS2_LP; + break; + case MC_SEQ_WR_CTL_2: + *out_reg = MC_SEQ_WR_CTL_2_LP; + break; + default: + result = false; + break; + } + + return result; +} + +static void si_set_valid_flag(struct si_mc_reg_table *table) +{ + u8 i, j; + + for (i = 0; i < table->last; i++) { + for (j = 1; j < table->num_entries; j++) { + if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) { + table->valid_flag |= 1 << i; + break; + } + } + } +} + +static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table) +{ + u32 i; + u16 address; + + for (i = 0; i < table->last; i++) + table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 
+ address : table->mc_reg_address[i].s1; + +} + +static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, + struct si_mc_reg_table *si_table) +{ + u8 i, j; + + if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + return -EINVAL; + if (table->num_entries > MAX_AC_TIMING_ENTRIES) + return -EINVAL; + + for (i = 0; i < table->last; i++) + si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; + si_table->last = table->last; + + for (i = 0; i < table->num_entries; i++) { + si_table->mc_reg_table_entry[i].mclk_max = + table->mc_reg_table_entry[i].mclk_max; + for (j = 0; j < table->last; j++) { + si_table->mc_reg_table_entry[i].mc_data[j] = + table->mc_reg_table_entry[i].mc_data[j]; + } + } + si_table->num_entries = table->num_entries; + + return 0; +} + +static int si_initialize_mc_reg_table(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + struct atom_mc_reg_table *table; + struct si_mc_reg_table *si_table = &si_pi->mc_reg_table; + u8 module_index = rv770_get_memory_module_index(adev); + int ret; + + table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); + if (!table) + return -ENOMEM; + + WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); + WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); + WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); + WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); + WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); + WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); + WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); + WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); + WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); + WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); + WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); + WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); + WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); + WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); + + ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); + if (ret) + goto init_mc_done; + + ret = si_copy_vbios_mc_reg_table(table, si_table); + if (ret) + goto init_mc_done; + + si_set_s0_mc_reg_index(si_table); + + ret = si_set_mc_special_registers(adev, si_table); + if (ret) + goto init_mc_done; + + si_set_valid_flag(si_table); + +init_mc_done: + kfree(table); + + return ret; + +} + +static void si_populate_mc_reg_addresses(struct amdgpu_device *adev, + SMC_SIslands_MCRegisters *mc_reg_table) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 i, j; + + for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) { + if (si_pi->mc_reg_table.valid_flag & (1 << j)) { + if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) + break; + mc_reg_table->address[i].s0 = + cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0); + mc_reg_table->address[i].s1 = + cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1); + i++; + } + } + mc_reg_table->last = (u8)i; +} + +static void si_convert_mc_registers(const struct si_mc_reg_entry *entry, + SMC_SIslands_MCRegisterSet *data, + u32 num_entries, u32 valid_flag) +{ + u32 i, j; + + for(i = 0, j = 0; j < num_entries; j++) { + if (valid_flag & (1 << j)) { + data->value[i] = cpu_to_be32(entry->mc_data[j]); + i++; + } + } +} + +static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev, + struct rv7xx_pl *pl, + SMC_SIslands_MCRegisterSet *mc_reg_table_data) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 i = 0; + + for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) { + if 
(pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) + break; + } + + if ((i == si_pi->mc_reg_table.num_entries) && (i > 0)) + --i; + + si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i], + mc_reg_table_data, si_pi->mc_reg_table.last, + si_pi->mc_reg_table.valid_flag); +} + +static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state, + SMC_SIslands_MCRegisters *mc_reg_table) +{ + struct si_ps *state = si_get_ps(amdgpu_state); + int i; + + for (i = 0; i < state->performance_level_count; i++) { + si_convert_mc_reg_table_entry_to_smc(adev, + &state->performance_levels[i], + &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]); + } +} + +static int si_populate_mc_reg_table(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_boot_state) +{ + struct si_ps *boot_state = si_get_ps(amdgpu_boot_state); + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ulv_param *ulv = &si_pi->ulv; + SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; + + memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); + + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1); + + si_populate_mc_reg_addresses(adev, smc_mc_reg_table); + + si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0], + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]); + + si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT], + si_pi->mc_reg_table.last, + si_pi->mc_reg_table.valid_flag); + + if (ulv->supported && ulv->pl.vddc != 0) + si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl, + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]); + else + si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], + &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT], + si_pi->mc_reg_table.last, + si_pi->mc_reg_table.valid_flag); + + si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table); + + return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start, + (u8 *)smc_mc_reg_table, + sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end); +} + +static int si_upload_mc_reg_table(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state) +{ + struct si_ps *new_state = si_get_ps(amdgpu_new_state); + struct si_power_info *si_pi = si_get_pi(adev); + u32 address = si_pi->mc_reg_table_start + + offsetof(SMC_SIslands_MCRegisters, + data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]); + SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; + + memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); + + si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table); + + return amdgpu_si_copy_bytes_to_smc(adev, address, + (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT], + sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count, + si_pi->sram_end); +} + +static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable) +{ + if (enable) + WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); + else + WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); +} + +static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_state) +{ + struct si_ps *state = si_get_ps(amdgpu_state); + int i; + u16 pcie_speed, max_speed = 0; + + for (i = 0; i < state->performance_level_count; i++) { + pcie_speed = state->performance_levels[i].pcie_gen; + 
if (max_speed < pcie_speed) + max_speed = pcie_speed; + } + return max_speed; +} + +static u16 si_get_current_pcie_speed(struct amdgpu_device *adev) +{ + u32 speed_cntl; + + speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; + speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; + + return (u16)speed_cntl; +} + +static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + struct amdgpu_ps *amdgpu_current_state) +{ + struct si_power_info *si_pi = si_get_pi(adev); + enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); + enum si_pcie_gen current_link_speed; + + if (si_pi->force_pcie_gen == SI_PCIE_GEN_INVALID) + current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state); + else + current_link_speed = si_pi->force_pcie_gen; + + si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID; + si_pi->pspp_notify_required = false; + if (target_link_speed > current_link_speed) { + switch (target_link_speed) { +#if defined(CONFIG_ACPI) + case SI_PCIE_GEN3: + if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) + break; + si_pi->force_pcie_gen = SI_PCIE_GEN2; + if (current_link_speed == SI_PCIE_GEN2) + break; + fallthrough; + case SI_PCIE_GEN2: + if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) + break; + fallthrough; +#endif + default: + si_pi->force_pcie_gen = si_get_current_pcie_speed(adev); + break; + } + } else { + if (target_link_speed < current_link_speed) + si_pi->pspp_notify_required = true; + } +} + +static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + struct amdgpu_ps *amdgpu_current_state) +{ + struct si_power_info *si_pi = si_get_pi(adev); + enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); + u8 request; + + if (si_pi->pspp_notify_required) { + if (target_link_speed == SI_PCIE_GEN3) + request = PCIE_PERF_REQ_PECI_GEN3; + else if (target_link_speed == SI_PCIE_GEN2) + request = PCIE_PERF_REQ_PECI_GEN2; + else + request = PCIE_PERF_REQ_PECI_GEN1; + + if ((request == PCIE_PERF_REQ_PECI_GEN1) && + (si_get_current_pcie_speed(adev) > 0)) + return; + +#if defined(CONFIG_ACPI) + amdgpu_acpi_pcie_performance_request(adev, request, false); +#endif + } +} + +#if 0 +static int si_ds_request(struct amdgpu_device *adev, + bool ds_status_on, u32 count_write) +{ + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + + if (eg_pi->sclk_deep_sleep) { + if (ds_status_on) + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) == + PPSMC_Result_OK) ? + 0 : -EINVAL; + else + return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) == + PPSMC_Result_OK) ? 
0 : -EINVAL; + } + return 0; +} +#endif + +static void si_set_max_cu_value(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + + if (adev->asic_type == CHIP_VERDE) { + switch (adev->pdev->device) { + case 0x6820: + case 0x6825: + case 0x6821: + case 0x6823: + case 0x6827: + si_pi->max_cu = 10; + break; + case 0x682D: + case 0x6824: + case 0x682F: + case 0x6826: + si_pi->max_cu = 8; + break; + case 0x6828: + case 0x6830: + case 0x6831: + case 0x6838: + case 0x6839: + case 0x683D: + si_pi->max_cu = 10; + break; + case 0x683B: + case 0x683F: + case 0x6829: + si_pi->max_cu = 8; + break; + default: + si_pi->max_cu = 0; + break; + } + } else { + si_pi->max_cu = 0; + } +} + +static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev, + struct amdgpu_clock_voltage_dependency_table *table) +{ + u32 i; + int j; + u16 leakage_voltage; + + if (table) { + for (i = 0; i < table->count; i++) { + switch (si_get_leakage_voltage_from_leakage_index(adev, + table->entries[i].v, + &leakage_voltage)) { + case 0: + table->entries[i].v = leakage_voltage; + break; + case -EAGAIN: + return -EINVAL; + case -EINVAL: + default: + break; + } + } + + for (j = (table->count - 2); j >= 0; j--) { + table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ? + table->entries[j].v : table->entries[j + 1].v; + } + } + return 0; +} + +static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev) +{ + int ret = 0; + + ret = si_patch_single_dependency_table_based_on_leakage(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk); + if (ret) + DRM_ERROR("Could not patch vddc_on_sclk leakage table\n"); + ret = si_patch_single_dependency_table_based_on_leakage(adev, + &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk); + if (ret) + DRM_ERROR("Could not patch vddc_on_mclk leakage table\n"); + ret = si_patch_single_dependency_table_based_on_leakage(adev, + &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk); + if (ret) + DRM_ERROR("Could not patch vddci_on_mclk leakage table\n"); + return ret; +} + +static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev, + struct amdgpu_ps *amdgpu_new_state, + struct amdgpu_ps *amdgpu_current_state) +{ + u32 lane_width; + u32 new_lane_width = + ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + u32 current_lane_width = + ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; + + if (new_lane_width != current_lane_width) { + amdgpu_set_pcie_lanes(adev, new_lane_width); + lane_width = amdgpu_get_pcie_lanes(adev); + si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); + } +} + +static void si_dpm_setup_asic(struct amdgpu_device *adev) +{ + si_read_clock_registers(adev); + si_enable_acpi_power_management(adev); +} + +static int si_thermal_enable_alert(struct amdgpu_device *adev, + bool enable) +{ + u32 thermal_int = RREG32(CG_THERMAL_INT); + + if (enable) { + PPSMC_Result result; + + thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); + WREG32(CG_THERMAL_INT, thermal_int); + result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); + if (result != PPSMC_Result_OK) { + DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); + return -EINVAL; + } + } else { + thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; + WREG32(CG_THERMAL_INT, thermal_int); + } + + return 0; +} + +static int si_thermal_set_temperature_range(struct 
amdgpu_device *adev, + int min_temp, int max_temp) +{ + int low_temp = 0 * 1000; + int high_temp = 255 * 1000; + + if (low_temp < min_temp) + low_temp = min_temp; + if (high_temp > max_temp) + high_temp = max_temp; + if (high_temp < low_temp) { + DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); + return -EINVAL; + } + + WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); + WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); + WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); + + adev->pm.dpm.thermal.min_temp = low_temp; + adev->pm.dpm.thermal.max_temp = high_temp; + + return 0; +} + +static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + + if (si_pi->fan_ctrl_is_in_default_mode) { + tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; + si_pi->fan_ctrl_default_mode = tmp; + tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; + si_pi->t_min = tmp; + si_pi->fan_ctrl_is_in_default_mode = false; + } + + tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; + tmp |= TMIN(0); + WREG32(CG_FDO_CTRL2, tmp); + + tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; + tmp |= FDO_PWM_MODE(mode); + WREG32(CG_FDO_CTRL2, tmp); +} + +static int si_thermal_setup_fan_table(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE }; + u32 duty100; + u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; + u16 fdo_min, slope1, slope2; + u32 reference_clock, tmp; + int ret; + u64 tmp64; + + if (!si_pi->fan_table_start) { + adev->pm.dpm.fan.ucode_fan_control = false; + return 0; + } + + duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + + if (duty100 == 0) { + adev->pm.dpm.fan.ucode_fan_control = false; + return 0; + } + + tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100; + do_div(tmp64, 10000); + fdo_min = (u16)tmp64; + + t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min; + t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med; + + pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min; + pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med; + + slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); + slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); + + fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100); + fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100); + fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100); + fan_table.slope1 = cpu_to_be16(slope1); + fan_table.slope2 = cpu_to_be16(slope2); + fan_table.fdo_min = cpu_to_be16(fdo_min); + fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst); + fan_table.hys_up = cpu_to_be16(1); + fan_table.hys_slope = cpu_to_be16(1); + fan_table.temp_resp_lim = cpu_to_be16(5); + reference_clock = amdgpu_asic_get_xclk(adev); + + fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay * + reference_clock) / 1600); + fan_table.fdo_max = cpu_to_be16((u16)duty100); + + tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; + fan_table.temp_src = (uint8_t)tmp; + + ret = amdgpu_si_copy_bytes_to_smc(adev, + si_pi->fan_table_start, + (u8 *)(&fan_table), + sizeof(fan_table), + si_pi->sram_end); + + if (ret) { + DRM_ERROR("Failed to load fan table to the SMC."); + adev->pm.dpm.fan.ucode_fan_control = false; + } + + return ret; +} + +static 
int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + PPSMC_Result ret; + + ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl); + if (ret == PPSMC_Result_OK) { + si_pi->fan_is_controlled_by_smc = true; + return 0; + } else { + return -EINVAL; + } +} + +static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + PPSMC_Result ret; + + ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl); + + if (ret == PPSMC_Result_OK) { + si_pi->fan_is_controlled_by_smc = false; + return 0; + } else { + return -EINVAL; + } +} + +static int si_dpm_get_fan_speed_pwm(void *handle, + u32 *speed) +{ + u32 duty, duty100; + u64 tmp64; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.no_fan) + return -ENOENT; + + duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (u64)duty * 255; + do_div(tmp64, duty100); + *speed = MIN((u32)tmp64, 255); + + return 0; +} + +static int si_dpm_set_fan_speed_pwm(void *handle, + u32 speed) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + u32 duty, duty100; + u64 tmp64; + + if (adev->pm.no_fan) + return -ENOENT; + + if (si_pi->fan_is_controlled_by_smc) + return -EINVAL; + + if (speed > 255) + return -EINVAL; + + duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; + + if (duty100 == 0) + return -EINVAL; + + tmp64 = (u64)speed * duty100; + do_div(tmp64, 255); + duty = (u32)tmp64; + + tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; + tmp |= FDO_STATIC_DUTY(duty); + WREG32(CG_FDO_CTRL0, tmp); + + return 0; +} + +static void si_dpm_set_fan_control_mode(void *handle, u32 mode) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (mode) { + /* stop auto-manage */ + if (adev->pm.dpm.fan.ucode_fan_control) + si_fan_ctrl_stop_smc_fan_control(adev); + si_fan_ctrl_set_static_mode(adev, mode); + } else { + /* restart auto-manage */ + if (adev->pm.dpm.fan.ucode_fan_control) + si_thermal_start_smc_fan_control(adev); + else + si_fan_ctrl_set_default_mode(adev); + } +} + +static u32 si_dpm_get_fan_control_mode(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + + if (si_pi->fan_is_controlled_by_smc) + return 0; + + tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; + return (tmp >> FDO_PWM_MODE_SHIFT); +} + +#if 0 +static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev, + u32 *speed) +{ + u32 tach_period; + u32 xclk = amdgpu_asic_get_xclk(adev); + + if (adev->pm.no_fan) + return -ENOENT; + + if (adev->pm.fan_pulses_per_revolution == 0) + return -ENOENT; + + tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; + if (tach_period == 0) + return -ENOENT; + + *speed = 60 * xclk * 10000 / tach_period; + + return 0; +} + +static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev, + u32 speed) +{ + u32 tach_period, tmp; + u32 xclk = amdgpu_asic_get_xclk(adev); + + if (adev->pm.no_fan) + return -ENOENT; + + if (adev->pm.fan_pulses_per_revolution == 0) + return -ENOENT; + + if ((speed < adev->pm.fan_min_rpm) || + (speed > adev->pm.fan_max_rpm)) + return -EINVAL; + + if (adev->pm.dpm.fan.ucode_fan_control) + 
si_fan_ctrl_stop_smc_fan_control(adev); + + tach_period = 60 * xclk * 10000 / (8 * speed); + tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; + tmp |= TARGET_PERIOD(tach_period); + WREG32(CG_TACH_CTRL, tmp); + + si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM); + + return 0; +} +#endif + +static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev) +{ + struct si_power_info *si_pi = si_get_pi(adev); + u32 tmp; + + if (!si_pi->fan_ctrl_is_in_default_mode) { + tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; + tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode); + WREG32(CG_FDO_CTRL2, tmp); + + tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; + tmp |= TMIN(si_pi->t_min); + WREG32(CG_FDO_CTRL2, tmp); + si_pi->fan_ctrl_is_in_default_mode = true; + } +} + +static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev) +{ + if (adev->pm.dpm.fan.ucode_fan_control) { + si_fan_ctrl_start_smc_fan_control(adev); + si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC); + } +} + +static void si_thermal_initialize(struct amdgpu_device *adev) +{ + u32 tmp; + + if (adev->pm.fan_pulses_per_revolution) { + tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; + tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1); + WREG32(CG_TACH_CTRL, tmp); + } + + tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; + tmp |= TACH_PWM_RESP_RATE(0x28); + WREG32(CG_FDO_CTRL2, tmp); +} + +static int si_thermal_start_thermal_controller(struct amdgpu_device *adev) +{ + int ret; + + si_thermal_initialize(adev); + ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); + if (ret) + return ret; + ret = si_thermal_enable_alert(adev, true); + if (ret) + return ret; + if (adev->pm.dpm.fan.ucode_fan_control) { + ret = si_halt_smc(adev); + if (ret) + return ret; + ret = si_thermal_setup_fan_table(adev); + if (ret) + return ret; + ret = si_resume_smc(adev); + if (ret) + return ret; + si_thermal_start_smc_fan_control(adev); + } + + return 0; +} + +static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev) +{ + if (!adev->pm.no_fan) { + si_fan_ctrl_set_default_mode(adev); + si_fan_ctrl_stop_smc_fan_control(adev); + } +} + +static int si_dpm_enable(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; + int ret; + + if (amdgpu_si_is_smc_running(adev)) + return -EINVAL; + if (pi->voltage_control || si_pi->voltage_control_svi2) + si_enable_voltage_control(adev, true); + if (pi->mvdd_control) + si_get_mvdd_configuration(adev); + if (pi->voltage_control || si_pi->voltage_control_svi2) { + ret = si_construct_voltage_tables(adev); + if (ret) { + DRM_ERROR("si_construct_voltage_tables failed\n"); + return ret; + } + } + if (eg_pi->dynamic_ac_timing) { + ret = si_initialize_mc_reg_table(adev); + if (ret) + eg_pi->dynamic_ac_timing = false; + } + if (pi->dynamic_ss) + si_enable_spread_spectrum(adev, true); + if (pi->thermal_protection) + si_enable_thermal_protection(adev, true); + si_setup_bsp(adev); + si_program_git(adev); + si_program_tp(adev); + si_program_tpp(adev); + si_program_sstp(adev); + si_enable_display_gap(adev); + si_program_vc(adev); + ret = si_upload_firmware(adev); + if (ret) { + DRM_ERROR("si_upload_firmware failed\n"); + return ret; + } + ret = si_process_firmware_header(adev); + if (ret) { + DRM_ERROR("si_process_firmware_header failed\n"); + return ret; + } + ret = 
si_initial_switch_from_arb_f0_to_f1(adev); + if (ret) { + DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n"); + return ret; + } + ret = si_init_smc_table(adev); + if (ret) { + DRM_ERROR("si_init_smc_table failed\n"); + return ret; + } + ret = si_init_smc_spll_table(adev); + if (ret) { + DRM_ERROR("si_init_smc_spll_table failed\n"); + return ret; + } + ret = si_init_arb_table_index(adev); + if (ret) { + DRM_ERROR("si_init_arb_table_index failed\n"); + return ret; + } + if (eg_pi->dynamic_ac_timing) { + ret = si_populate_mc_reg_table(adev, boot_ps); + if (ret) { + DRM_ERROR("si_populate_mc_reg_table failed\n"); + return ret; + } + } + ret = si_initialize_smc_cac_tables(adev); + if (ret) { + DRM_ERROR("si_initialize_smc_cac_tables failed\n"); + return ret; + } + ret = si_initialize_hardware_cac_manager(adev); + if (ret) { + DRM_ERROR("si_initialize_hardware_cac_manager failed\n"); + return ret; + } + ret = si_initialize_smc_dte_tables(adev); + if (ret) { + DRM_ERROR("si_initialize_smc_dte_tables failed\n"); + return ret; + } + ret = si_populate_smc_tdp_limits(adev, boot_ps); + if (ret) { + DRM_ERROR("si_populate_smc_tdp_limits failed\n"); + return ret; + } + ret = si_populate_smc_tdp_limits_2(adev, boot_ps); + if (ret) { + DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n"); + return ret; + } + si_program_response_times(adev); + si_program_ds_registers(adev); + si_dpm_start_smc(adev); + ret = si_notify_smc_display_change(adev, false); + if (ret) { + DRM_ERROR("si_notify_smc_display_change failed\n"); + return ret; + } + si_enable_sclk_control(adev, true); + si_start_dpm(adev); + + si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, true); + si_thermal_start_thermal_controller(adev); + + ni_update_current_ps(adev, boot_ps); + + return 0; +} + +static int si_set_temperature_range(struct amdgpu_device *adev) +{ + int ret; + + ret = si_thermal_enable_alert(adev, false); + if (ret) + return ret; + ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); + if (ret) + return ret; + ret = si_thermal_enable_alert(adev, true); + if (ret) + return ret; + + return ret; +} + +static void si_dpm_disable(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; + + if (!amdgpu_si_is_smc_running(adev)) + return; + si_thermal_stop_thermal_controller(adev); + si_disable_ulv(adev); + si_clear_vc(adev); + if (pi->thermal_protection) + si_enable_thermal_protection(adev, false); + si_enable_power_containment(adev, boot_ps, false); + si_enable_smc_cac(adev, boot_ps, false); + si_enable_spread_spectrum(adev, false); + si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, false); + si_stop_dpm(adev); + si_reset_to_default(adev); + si_dpm_stop_smc(adev); + si_force_switch_to_arb_f0(adev); + + ni_update_current_ps(adev, boot_ps); +} + +static int si_dpm_pre_set_power_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; + struct amdgpu_ps *new_ps = &requested_ps; + + ni_update_requested_ps(adev, new_ps); + si_apply_state_adjust_rules(adev, &eg_pi->requested_rps); + + return 0; +} + +static int si_power_control_set_level(struct amdgpu_device *adev) +{ + struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps; + int ret; + + ret = si_restrict_performance_levels_before_switch(adev); + if (ret) + return ret; + ret = 
si_halt_smc(adev); + if (ret) + return ret; + ret = si_populate_smc_tdp_limits(adev, new_ps); + if (ret) + return ret; + ret = si_populate_smc_tdp_limits_2(adev, new_ps); + if (ret) + return ret; + ret = si_resume_smc(adev); + if (ret) + return ret; + ret = si_set_sw_state(adev); + if (ret) + return ret; + return 0; +} + +static void si_set_vce_clock(struct amdgpu_device *adev, + struct amdgpu_ps *new_rps, + struct amdgpu_ps *old_rps) +{ + if ((old_rps->evclk != new_rps->evclk) || + (old_rps->ecclk != new_rps->ecclk)) { + /* Turn the clocks on when encoding, off otherwise */ + if (new_rps->evclk || new_rps->ecclk) { + /* Place holder for future VCE1.0 porting to amdgpu + vce_v1_0_enable_mgcg(adev, false, false);*/ + } else { + /* Place holder for future VCE1.0 porting to amdgpu + vce_v1_0_enable_mgcg(adev, true, false); + amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);*/ + } + } +} + +static int si_dpm_set_power_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct amdgpu_ps *new_ps = &eg_pi->requested_rps; + struct amdgpu_ps *old_ps = &eg_pi->current_rps; + int ret; + + ret = si_disable_ulv(adev); + if (ret) { + DRM_ERROR("si_disable_ulv failed\n"); + return ret; + } + ret = si_restrict_performance_levels_before_switch(adev); + if (ret) { + DRM_ERROR("si_restrict_performance_levels_before_switch failed\n"); + return ret; + } + if (eg_pi->pcie_performance_request) + si_request_link_speed_change_before_state_change(adev, new_ps, old_ps); + ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps); + ret = si_enable_power_containment(adev, new_ps, false); + if (ret) { + DRM_ERROR("si_enable_power_containment failed\n"); + return ret; + } + ret = si_enable_smc_cac(adev, new_ps, false); + if (ret) { + DRM_ERROR("si_enable_smc_cac failed\n"); + return ret; + } + ret = si_halt_smc(adev); + if (ret) { + DRM_ERROR("si_halt_smc failed\n"); + return ret; + } + ret = si_upload_sw_state(adev, new_ps); + if (ret) { + DRM_ERROR("si_upload_sw_state failed\n"); + return ret; + } + ret = si_upload_smc_data(adev); + if (ret) { + DRM_ERROR("si_upload_smc_data failed\n"); + return ret; + } + ret = si_upload_ulv_state(adev); + if (ret) { + DRM_ERROR("si_upload_ulv_state failed\n"); + return ret; + } + if (eg_pi->dynamic_ac_timing) { + ret = si_upload_mc_reg_table(adev, new_ps); + if (ret) { + DRM_ERROR("si_upload_mc_reg_table failed\n"); + return ret; + } + } + ret = si_program_memory_timing_parameters(adev, new_ps); + if (ret) { + DRM_ERROR("si_program_memory_timing_parameters failed\n"); + return ret; + } + si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps); + + ret = si_resume_smc(adev); + if (ret) { + DRM_ERROR("si_resume_smc failed\n"); + return ret; + } + ret = si_set_sw_state(adev); + if (ret) { + DRM_ERROR("si_set_sw_state failed\n"); + return ret; + } + ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps); + si_set_vce_clock(adev, new_ps, old_ps); + if (eg_pi->pcie_performance_request) + si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps); + ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps); + if (ret) { + DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n"); + return ret; + } + ret = si_enable_smc_cac(adev, new_ps, true); + if (ret) { + DRM_ERROR("si_enable_smc_cac failed\n"); + return ret; + } + ret = si_enable_power_containment(adev, new_ps, true); + if (ret) { + DRM_ERROR("si_enable_power_containment failed\n"); + return ret; 
+ } + + ret = si_power_control_set_level(adev); + if (ret) { + DRM_ERROR("si_power_control_set_level failed\n"); + return ret; + } + + return 0; +} + +static void si_dpm_post_set_power_state(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct amdgpu_ps *new_ps = &eg_pi->requested_rps; + + ni_update_current_ps(adev, new_ps); +} + +#if 0 +void si_dpm_reset_asic(struct amdgpu_device *adev) +{ + si_restrict_performance_levels_before_switch(adev); + si_disable_ulv(adev); + si_set_boot_state(adev); +} +#endif + +static void si_dpm_display_configuration_changed(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + si_program_display_gap(adev); +} + + +static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, + u8 table_rev) +{ + rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); + rps->class = le16_to_cpu(non_clock_info->usClassification); + rps->class2 = le16_to_cpu(non_clock_info->usClassification2); + + if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { + rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); + rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); + } else if (r600_is_uvd_state(rps->class, rps->class2)) { + rps->vclk = RV770_DEFAULT_VCLK_FREQ; + rps->dclk = RV770_DEFAULT_DCLK_FREQ; + } else { + rps->vclk = 0; + rps->dclk = 0; + } + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) + adev->pm.dpm.boot_ps = rps; + if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) + adev->pm.dpm.uvd_ps = rps; +} + +static void si_parse_pplib_clock_info(struct amdgpu_device *adev, + struct amdgpu_ps *rps, int index, + union pplib_clock_info *clock_info) +{ + struct rv7xx_power_info *pi = rv770_get_pi(adev); + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_power_info *si_pi = si_get_pi(adev); + struct si_ps *ps = si_get_ps(rps); + u16 leakage_voltage; + struct rv7xx_pl *pl = &ps->performance_levels[index]; + int ret; + + ps->performance_level_count = index + 1; + + pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow); + pl->sclk |= clock_info->si.ucEngineClockHigh << 16; + pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); + pl->mclk |= clock_info->si.ucMemoryClockHigh << 16; + + pl->vddc = le16_to_cpu(clock_info->si.usVDDC); + pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); + pl->flags = le32_to_cpu(clock_info->si.ulFlags); + pl->pcie_gen = si_gen_pcie_gen_support(adev, + si_pi->sys_pcie_mask, + si_pi->boot_pcie_gen, + clock_info->si.ucPCIEGen); + + /* patch up vddc if necessary */ + ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, + &leakage_voltage); + if (ret == 0) + pl->vddc = leakage_voltage; + + if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { + pi->acpi_vddc = pl->vddc; + eg_pi->acpi_vddci = pl->vddci; + si_pi->acpi_pcie_gen = pl->pcie_gen; + } + + if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && + index == 0) { + /* XXX disable for A0 tahiti */ + si_pi->ulv.supported = false; + si_pi->ulv.pl = *pl; + si_pi->ulv.one_pcie_lane_in_ulv = false; + si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; + si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT; + si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT; + } + + if (pi->min_vddc_in_table > pl->vddc) + pi->min_vddc_in_table = pl->vddc; + + if (pi->max_vddc_in_table < pl->vddc) + pi->max_vddc_in_table = pl->vddc; + + /* patch up boot state */ + if 
(rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { + u16 vddc, vddci, mvdd; + amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd); + pl->mclk = adev->clock.default_mclk; + pl->sclk = adev->clock.default_sclk; + pl->vddc = vddc; + pl->vddci = vddci; + si_pi->mvdd_bootup_value = mvdd; + } + + if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == + ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; + } +} + +union pplib_power_state { + struct _ATOM_PPLIB_STATE v1; + struct _ATOM_PPLIB_STATE_V2 v2; +}; + +static int si_parse_power_table(struct amdgpu_device *adev) +{ + struct amdgpu_mode_info *mode_info = &adev->mode_info; + struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; + union pplib_power_state *power_state; + int i, j, k, non_clock_array_index, clock_array_index; + union pplib_clock_info *clock_info; + struct _StateArray *state_array; + struct _ClockInfoArray *clock_info_array; + struct _NonClockInfoArray *non_clock_info_array; + union power_info *power_info; + int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); + u16 data_offset; + u8 frev, crev; + u8 *power_state_offset; + struct si_ps *ps; + + if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, + &frev, &crev, &data_offset)) + return -EINVAL; + power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); + + amdgpu_add_thermal_controller(adev); + + state_array = (struct _StateArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usStateArrayOffset)); + clock_info_array = (struct _ClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); + non_clock_info_array = (struct _NonClockInfoArray *) + (mode_info->atom_context->bios + data_offset + + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + + adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, + sizeof(struct amdgpu_ps), + GFP_KERNEL); + if (!adev->pm.dpm.ps) + return -ENOMEM; + power_state_offset = (u8 *)state_array->states; + for (i = 0; i < state_array->ucNumEntries; i++) { + u8 *idx; + power_state = (union pplib_power_state *)power_state_offset; + non_clock_array_index = power_state->v2.nonClockInfoIndex; + non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) + &non_clock_info_array->nonClockInfo[non_clock_array_index]; + ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL); + if (ps == NULL) { + kfree(adev->pm.dpm.ps); + return -ENOMEM; + } + adev->pm.dpm.ps[i].ps_priv = ps; + si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], + non_clock_info, + non_clock_info_array->ucEntrySize); + k = 0; + idx = (u8 *)&power_state->v2.clockInfoIndex[0]; + for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { + clock_array_index = idx[j]; + if (clock_array_index >= clock_info_array->ucNumEntries) + continue; + if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS) + break; + clock_info = (union pplib_clock_info *) + ((u8 *)&clock_info_array->clockInfo[0] + + (clock_array_index * clock_info_array->ucEntrySize)); + si_parse_pplib_clock_info(adev, + &adev->pm.dpm.ps[i], k, + clock_info); + k++; + } + power_state_offset += 2 + power_state->v2.ucNumDPMLevels; + } + adev->pm.dpm.num_ps = state_array->ucNumEntries; + + /* fill in the vce power states */ + for (i = 0; i < 
adev->pm.dpm.num_of_vce_states; i++) { + u32 sclk, mclk; + clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; + clock_info = (union pplib_clock_info *) + &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; + sclk = le16_to_cpu(clock_info->si.usEngineClockLow); + sclk |= clock_info->si.ucEngineClockHigh << 16; + mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); + mclk |= clock_info->si.ucMemoryClockHigh << 16; + adev->pm.dpm.vce_states[i].sclk = sclk; + adev->pm.dpm.vce_states[i].mclk = mclk; + } + + return 0; +} + +static int si_dpm_init(struct amdgpu_device *adev) +{ + struct rv7xx_power_info *pi; + struct evergreen_power_info *eg_pi; + struct ni_power_info *ni_pi; + struct si_power_info *si_pi; + struct atom_clock_dividers dividers; + int ret; + + si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); + if (si_pi == NULL) + return -ENOMEM; + adev->pm.dpm.priv = si_pi; + ni_pi = &si_pi->ni; + eg_pi = &ni_pi->eg; + pi = &eg_pi->rv7xx; + + si_pi->sys_pcie_mask = + adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK; + si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID; + si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); + + si_set_max_cu_value(adev); + + rv770_get_max_vddc(adev); + si_get_leakage_vddc(adev); + si_patch_dependency_tables_based_on_leakage(adev); + + pi->acpi_vddc = 0; + eg_pi->acpi_vddci = 0; + pi->min_vddc_in_table = 0; + pi->max_vddc_in_table = 0; + + ret = amdgpu_get_platform_caps(adev); + if (ret) + return ret; + + ret = amdgpu_parse_extended_power_table(adev); + if (ret) + return ret; + + ret = si_parse_power_table(adev); + if (ret) + return ret; + + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = + kcalloc(4, + sizeof(struct amdgpu_clock_voltage_dependency_entry), + GFP_KERNEL); + if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { + amdgpu_free_extended_power_table(adev); + return -ENOMEM; + } + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; + adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; + + if (adev->pm.dpm.voltage_response_time == 0) + adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT; + if (adev->pm.dpm.backbias_response_time == 0) + adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT; + + ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, + 0, false, &dividers); + if (ret) + pi->ref_div = dividers.ref_div + 1; + else + pi->ref_div = R600_REFERENCEDIVIDER_DFLT; + + eg_pi->smu_uvd_hs = false; + + pi->mclk_strobe_mode_threshold = 40000; + if (si_is_special_1gb_platform(adev)) + pi->mclk_stutter_mode_threshold = 0; + else + pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold; + pi->mclk_edc_enable_threshold = 40000; + eg_pi->mclk_edc_wr_enable_threshold = 40000; + + ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold; + + pi->voltage_control = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, + VOLTAGE_OBJ_GPIO_LUT); + if (!pi->voltage_control) { + 
si_pi->voltage_control_svi2 = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, + VOLTAGE_OBJ_SVID2); + if (si_pi->voltage_control_svi2) + amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, + &si_pi->svd_gpio_id, &si_pi->svc_gpio_id); + } + + pi->mvdd_control = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC, + VOLTAGE_OBJ_GPIO_LUT); + + eg_pi->vddci_control = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI, + VOLTAGE_OBJ_GPIO_LUT); + if (!eg_pi->vddci_control) + si_pi->vddci_control_svi2 = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI, + VOLTAGE_OBJ_SVID2); + + si_pi->vddc_phase_shed_control = + amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, + VOLTAGE_OBJ_PHASE_LUT); + + rv770_get_engine_memory_ss(adev); + + pi->asi = RV770_ASI_DFLT; + pi->pasi = CYPRESS_HASI_DFLT; + pi->vrc = SISLANDS_VRC_DFLT; + + pi->gfx_clock_gating = true; + + eg_pi->sclk_deep_sleep = true; + si_pi->sclk_deep_sleep_above_low = false; + + if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE) + pi->thermal_protection = true; + else + pi->thermal_protection = false; + + eg_pi->dynamic_ac_timing = true; + + eg_pi->light_sleep = true; +#if defined(CONFIG_ACPI) + eg_pi->pcie_performance_request = + amdgpu_acpi_is_pcie_performance_request_supported(adev); +#else + eg_pi->pcie_performance_request = false; +#endif + + si_pi->sram_end = SMC_RAM_END; + + adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; + adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; + adev->pm.dpm.dyn_state.vddc_vddci_delta = 200; + adev->pm.dpm.dyn_state.valid_sclk_values.count = 0; + adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; + adev->pm.dpm.dyn_state.valid_mclk_values.count = 0; + adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; + + si_initialize_powertune_defaults(adev); + + /* make sure dc limits are valid */ + if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || + (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) + adev->pm.dpm.dyn_state.max_clock_voltage_on_dc = + adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; + + si_pi->fan_ctrl_is_in_default_mode = true; + + return 0; +} + +static void si_dpm_fini(struct amdgpu_device *adev) +{ + int i; + + if (adev->pm.dpm.ps) + for (i = 0; i < adev->pm.dpm.num_ps; i++) + kfree(adev->pm.dpm.ps[i].ps_priv); + kfree(adev->pm.dpm.ps); + kfree(adev->pm.dpm.priv); + kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); + amdgpu_free_extended_power_table(adev); +} + +static void si_dpm_debugfs_print_current_performance_level(void *handle, + struct seq_file *m) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct amdgpu_ps *rps = &eg_pi->current_rps; + struct si_ps *ps = si_get_ps(rps); + struct rv7xx_pl *pl; + u32 current_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> + CURRENT_STATE_INDEX_SHIFT; + + if (current_index >= ps->performance_level_count) { + seq_printf(m, "invalid dpm profile %d\n", current_index); + } else { + pl = &ps->performance_levels[current_index]; + seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", + current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); + } +} + +static int si_dpm_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + unsigned type, + enum amdgpu_interrupt_state 
state) +{ + u32 cg_thermal_int; + + switch (type) { + case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); + cg_thermal_int |= THERM_INT_MASK_HIGH; + WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); + cg_thermal_int &= ~THERM_INT_MASK_HIGH; + WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + break; + default: + break; + } + break; + + case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: + switch (state) { + case AMDGPU_IRQ_STATE_DISABLE: + cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); + cg_thermal_int |= THERM_INT_MASK_LOW; + WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + break; + case AMDGPU_IRQ_STATE_ENABLE: + cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); + cg_thermal_int &= ~THERM_INT_MASK_LOW; + WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); + break; + default: + break; + } + break; + + default: + break; + } + return 0; +} + +static int si_dpm_process_interrupt(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + bool queue_thermal = false; + + if (entry == NULL) + return -EINVAL; + + switch (entry->src_id) { + case 230: /* thermal low to high */ + DRM_DEBUG("IH: thermal low to high\n"); + adev->pm.dpm.thermal.high_to_low = false; + queue_thermal = true; + break; + case 231: /* thermal high to low */ + DRM_DEBUG("IH: thermal high to low\n"); + adev->pm.dpm.thermal.high_to_low = true; + queue_thermal = true; + break; + default: + break; + } + + if (queue_thermal) + schedule_work(&adev->pm.dpm.thermal.work); + + return 0; +} + +static int si_dpm_late_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!adev->pm.dpm_enabled) + return 0; + + ret = si_set_temperature_range(adev); + if (ret) + return ret; +#if 0 //TODO ? + si_dpm_powergate_uvd(adev, true); +#endif + return 0; +} + +/** + * si_dpm_init_microcode - load ucode images from disk + * + * @adev: amdgpu_device pointer + * + * Use the firmware interface to load the ucode images into + * the driver (not loaded into hw). + * Returns 0 on success, error on failure. 
+ */ +static int si_dpm_init_microcode(struct amdgpu_device *adev) +{ + const char *chip_name; + char fw_name[30]; + int err; + + DRM_DEBUG("\n"); + switch (adev->asic_type) { + case CHIP_TAHITI: + chip_name = "tahiti"; + break; + case CHIP_PITCAIRN: + if ((adev->pdev->revision == 0x81) && + ((adev->pdev->device == 0x6810) || + (adev->pdev->device == 0x6811))) + chip_name = "pitcairn_k"; + else + chip_name = "pitcairn"; + break; + case CHIP_VERDE: + if (((adev->pdev->device == 0x6820) && + ((adev->pdev->revision == 0x81) || + (adev->pdev->revision == 0x83))) || + ((adev->pdev->device == 0x6821) && + ((adev->pdev->revision == 0x83) || + (adev->pdev->revision == 0x87))) || + ((adev->pdev->revision == 0x87) && + ((adev->pdev->device == 0x6823) || + (adev->pdev->device == 0x682b)))) + chip_name = "verde_k"; + else + chip_name = "verde"; + break; + case CHIP_OLAND: + if (((adev->pdev->revision == 0x81) && + ((adev->pdev->device == 0x6600) || + (adev->pdev->device == 0x6604) || + (adev->pdev->device == 0x6605) || + (adev->pdev->device == 0x6610))) || + ((adev->pdev->revision == 0x83) && + (adev->pdev->device == 0x6610))) + chip_name = "oland_k"; + else + chip_name = "oland"; + break; + case CHIP_HAINAN: + if (((adev->pdev->revision == 0x81) && + (adev->pdev->device == 0x6660)) || + ((adev->pdev->revision == 0x83) && + ((adev->pdev->device == 0x6660) || + (adev->pdev->device == 0x6663) || + (adev->pdev->device == 0x6665) || + (adev->pdev->device == 0x6667)))) + chip_name = "hainan_k"; + else if ((adev->pdev->revision == 0xc3) && + (adev->pdev->device == 0x6665)) + chip_name = "banks_k_2"; + else + chip_name = "hainan"; + break; + default: BUG(); + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); + err = request_firmware(&adev->pm.fw, fw_name, adev->dev); + if (err) + goto out; + err = amdgpu_ucode_validate(adev->pm.fw); + +out: + if (err) { + DRM_ERROR("si_smc: Failed to load firmware. 
err = %d\"%s\"\n", + err, fw_name); + release_firmware(adev->pm.fw); + adev->pm.fw = NULL; + } + return err; + +} + +static int si_dpm_sw_init(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq); + if (ret) + return ret; + + /* default to balanced state */ + adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; + adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; + adev->pm.default_sclk = adev->clock.default_sclk; + adev->pm.default_mclk = adev->clock.default_mclk; + adev->pm.current_sclk = adev->clock.default_sclk; + adev->pm.current_mclk = adev->clock.default_mclk; + adev->pm.int_thermal_type = THERMAL_TYPE_NONE; + + if (amdgpu_dpm == 0) + return 0; + + ret = si_dpm_init_microcode(adev); + if (ret) + return ret; + + INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); + mutex_lock(&adev->pm.mutex); + ret = si_dpm_init(adev); + if (ret) + goto dpm_failed; + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + if (amdgpu_dpm == 1) + amdgpu_pm_print_power_states(adev); + mutex_unlock(&adev->pm.mutex); + DRM_INFO("amdgpu: dpm initialized\n"); + + return 0; + +dpm_failed: + si_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + DRM_ERROR("amdgpu: dpm initialization failed\n"); + return ret; +} + +static int si_dpm_sw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + flush_work(&adev->pm.dpm.thermal.work); + + mutex_lock(&adev->pm.mutex); + si_dpm_fini(adev); + mutex_unlock(&adev->pm.mutex); + + return 0; +} + +static int si_dpm_hw_init(void *handle) +{ + int ret; + + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (!amdgpu_dpm) + return 0; + + mutex_lock(&adev->pm.mutex); + si_dpm_setup_asic(adev); + ret = si_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + amdgpu_legacy_dpm_compute_clocks(adev); + return ret; +} + +static int si_dpm_hw_fini(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + si_dpm_disable(adev); + mutex_unlock(&adev->pm.mutex); + } + + return 0; +} + +static int si_dpm_suspend(void *handle) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.dpm_enabled) { + mutex_lock(&adev->pm.mutex); + /* disable dpm */ + si_dpm_disable(adev); + /* reset the power state */ + adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; + mutex_unlock(&adev->pm.mutex); + } + return 0; +} + +static int si_dpm_resume(void *handle) +{ + int ret; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev->pm.dpm_enabled) { + /* asic init will reset to the boot state */ + mutex_lock(&adev->pm.mutex); + si_dpm_setup_asic(adev); + ret = si_dpm_enable(adev); + if (ret) + adev->pm.dpm_enabled = false; + else + adev->pm.dpm_enabled = true; + mutex_unlock(&adev->pm.mutex); + if (adev->pm.dpm_enabled) + amdgpu_legacy_dpm_compute_clocks(adev); + } + return 0; +} + +static bool si_dpm_is_idle(void *handle) +{ + /* XXX */ + return true; +} + +static int si_dpm_wait_for_idle(void *handle) +{ + /* XXX */ + return 0; +} + +static int si_dpm_soft_reset(void *handle) +{ + 
return 0; +} + +static int si_dpm_set_clockgating_state(void *handle, + enum amd_clockgating_state state) +{ + return 0; +} + +static int si_dpm_set_powergating_state(void *handle, + enum amd_powergating_state state) +{ + return 0; +} + +/* get temperature in millidegrees */ +static int si_dpm_get_temp(void *handle) +{ + u32 temp; + int actual_temp = 0; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> + CTF_TEMP_SHIFT; + + if (temp & 0x200) + actual_temp = 255; + else + actual_temp = temp & 0x1ff; + + actual_temp = (actual_temp * 1000); + + return actual_temp; +} + +static u32 si_dpm_get_sclk(void *handle, bool low) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); + + if (low) + return requested_state->performance_levels[0].sclk; + else + return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; +} + +static u32 si_dpm_get_mclk(void *handle, bool low) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); + + if (low) + return requested_state->performance_levels[0].mclk; + else + return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; +} + +static void si_dpm_print_power_state(void *handle, + void *current_ps) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps; + struct si_ps *ps = si_get_ps(rps); + struct rv7xx_pl *pl; + int i; + + amdgpu_dpm_print_class_info(rps->class, rps->class2); + amdgpu_dpm_print_cap_info(rps->caps); + DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); + for (i = 0; i < ps->performance_level_count; i++) { + pl = &ps->performance_levels[i]; + if (adev->asic_type >= CHIP_TAHITI) + DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", + i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); + else + DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", + i, pl->sclk, pl->mclk, pl->vddc, pl->vddci); + } + amdgpu_dpm_print_ps_status(adev, rps); +} + +static int si_dpm_early_init(void *handle) +{ + + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + adev->powerplay.pp_funcs = &si_dpm_funcs; + adev->powerplay.pp_handle = adev; + si_dpm_set_irq_funcs(adev); + return 0; +} + +static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1, + const struct rv7xx_pl *si_cpl2) +{ + return ((si_cpl1->mclk == si_cpl2->mclk) && + (si_cpl1->sclk == si_cpl2->sclk) && + (si_cpl1->pcie_gen == si_cpl2->pcie_gen) && + (si_cpl1->vddc == si_cpl2->vddc) && + (si_cpl1->vddci == si_cpl2->vddci)); +} + +static int si_check_state_equal(void *handle, + void *current_ps, + void *request_ps, + bool *equal) +{ + struct si_ps *si_cps; + struct si_ps *si_rps; + int i; + struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; + struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + + if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) + return -EINVAL; + + si_cps = si_get_ps((struct amdgpu_ps *)cps); + si_rps = si_get_ps((struct amdgpu_ps *)rps); + + if (si_cps == NULL) { + printk("si_cps is NULL\n"); + *equal = false; + return 
0; + } + + if (si_cps->performance_level_count != si_rps->performance_level_count) { + *equal = false; + return 0; + } + + for (i = 0; i < si_cps->performance_level_count; i++) { + if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]), + &(si_rps->performance_levels[i]))) { + *equal = false; + return 0; + } + } + + /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ + *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); + *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); + + return 0; +} + +static int si_dpm_read_sensor(void *handle, int idx, + void *value, int *size) +{ + struct amdgpu_device *adev = (struct amdgpu_device *)handle; + struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); + struct amdgpu_ps *rps = &eg_pi->current_rps; + struct si_ps *ps = si_get_ps(rps); + uint32_t sclk, mclk; + u32 pl_index = + (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> + CURRENT_STATE_INDEX_SHIFT; + + /* size must be at least 4 bytes for all sensors */ + if (*size < 4) + return -EINVAL; + + switch (idx) { + case AMDGPU_PP_SENSOR_GFX_SCLK: + if (pl_index < ps->performance_level_count) { + sclk = ps->performance_levels[pl_index].sclk; + *((uint32_t *)value) = sclk; + *size = 4; + return 0; + } + return -EINVAL; + case AMDGPU_PP_SENSOR_GFX_MCLK: + if (pl_index < ps->performance_level_count) { + mclk = ps->performance_levels[pl_index].mclk; + *((uint32_t *)value) = mclk; + *size = 4; + return 0; + } + return -EINVAL; + case AMDGPU_PP_SENSOR_GPU_TEMP: + *((uint32_t *)value) = si_dpm_get_temp(adev); + *size = 4; + return 0; + default: + return -EOPNOTSUPP; + } +} + +static const struct amd_ip_funcs si_dpm_ip_funcs = { + .name = "si_dpm", + .early_init = si_dpm_early_init, + .late_init = si_dpm_late_init, + .sw_init = si_dpm_sw_init, + .sw_fini = si_dpm_sw_fini, + .hw_init = si_dpm_hw_init, + .hw_fini = si_dpm_hw_fini, + .suspend = si_dpm_suspend, + .resume = si_dpm_resume, + .is_idle = si_dpm_is_idle, + .wait_for_idle = si_dpm_wait_for_idle, + .soft_reset = si_dpm_soft_reset, + .set_clockgating_state = si_dpm_set_clockgating_state, + .set_powergating_state = si_dpm_set_powergating_state, +}; + +const struct amdgpu_ip_block_version si_smu_ip_block = +{ + .type = AMD_IP_BLOCK_TYPE_SMC, + .major = 6, + .minor = 0, + .rev = 0, + .funcs = &si_dpm_ip_funcs, +}; + +static const struct amd_pm_funcs si_dpm_funcs = { + .pre_set_power_state = &si_dpm_pre_set_power_state, + .set_power_state = &si_dpm_set_power_state, + .post_set_power_state = &si_dpm_post_set_power_state, + .display_configuration_changed = &si_dpm_display_configuration_changed, + .get_sclk = &si_dpm_get_sclk, + .get_mclk = &si_dpm_get_mclk, + .print_power_state = &si_dpm_print_power_state, + .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, + .force_performance_level = &si_dpm_force_performance_level, + .set_powergating_by_smu = &si_set_powergating_by_smu, + .vblank_too_short = &si_dpm_vblank_too_short, + .set_fan_control_mode = &si_dpm_set_fan_control_mode, + .get_fan_control_mode = &si_dpm_get_fan_control_mode, + .set_fan_speed_pwm = &si_dpm_set_fan_speed_pwm, + .get_fan_speed_pwm = &si_dpm_get_fan_speed_pwm, + .check_state_equal = &si_check_state_equal, + .get_vce_clock_state = amdgpu_get_vce_clock_state, + .read_sensor = &si_dpm_read_sensor, + .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, +}; + +static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = { + .set = 
si_dpm_set_interrupt_state, + .process = si_dpm_process_interrupt, +}; + +static void si_dpm_set_irq_funcs(struct amdgpu_device *adev) +{ + adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; + adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; +} + diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h new file mode 100644 index 000000000000..11cb7874a6bb --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h @@ -0,0 +1,1022 @@ +/* + * Copyright 2012 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SI_DPM_H__ +#define __SI_DPM_H__ + +#include "amdgpu_atombios.h" +#include "sislands_smc.h" + +#define MC_CG_CONFIG 0x96f +#define MC_ARB_CG 0x9fa +#define CG_ARB_REQ(x) ((x) << 0) +#define CG_ARB_REQ_MASK (0xff << 0) + +#define MC_ARB_DRAM_TIMING_1 0x9fc +#define MC_ARB_DRAM_TIMING_2 0x9fd +#define MC_ARB_DRAM_TIMING_3 0x9fe +#define MC_ARB_DRAM_TIMING2_1 0x9ff +#define MC_ARB_DRAM_TIMING2_2 0xa00 +#define MC_ARB_DRAM_TIMING2_3 0xa01 + +#define MAX_NO_OF_MVDD_VALUES 2 +#define MAX_NO_VREG_STEPS 32 +#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 +#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32 +#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 +#define RV770_ASI_DFLT 1000 +#define CYPRESS_HASI_DFLT 400000 +#define PCIE_PERF_REQ_PECI_GEN1 2 +#define PCIE_PERF_REQ_PECI_GEN2 3 +#define PCIE_PERF_REQ_PECI_GEN3 4 +#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */ +#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */ + +#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16 + +#define RV770_SMC_TABLE_ADDRESS 0xB000 +#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3 + +#define SMC_STROBE_RATIO 0x0F +#define SMC_STROBE_ENABLE 0x10 + +#define SMC_MC_EDC_RD_FLAG 0x01 +#define SMC_MC_EDC_WR_FLAG 0x02 +#define SMC_MC_RTT_ENABLE 0x04 +#define SMC_MC_STUTTER_EN 0x08 + +#define RV770_SMC_VOLTAGEMASK_VDDC 0 +#define RV770_SMC_VOLTAGEMASK_MVDD 1 +#define RV770_SMC_VOLTAGEMASK_VDDCI 2 +#define RV770_SMC_VOLTAGEMASK_MAX 4 + +#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 +#define NISLANDS_SMC_STROBE_RATIO 0x0F +#define NISLANDS_SMC_STROBE_ENABLE 0x10 + +#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01 +#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02 +#define NISLANDS_SMC_MC_RTT_ENABLE 0x04 +#define NISLANDS_SMC_MC_STUTTER_EN 0x08 + +#define MAX_NO_VREG_STEPS 32 + +#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0 +#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1 +#define 
NISLANDS_SMC_VOLTAGEMASK_VDDCI 2 +#define NISLANDS_SMC_VOLTAGEMASK_MAX 4 + +#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0 +#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1 +#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2 +#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3 + +#define SISLANDS_LEAKAGE_INDEX0 0xff01 +#define SISLANDS_MAX_LEAKAGE_COUNT 4 + +#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5 +#define SISLANDS_INITIAL_STATE_ARB_INDEX 0 +#define SISLANDS_ACPI_STATE_ARB_INDEX 1 +#define SISLANDS_ULV_STATE_ARB_INDEX 2 +#define SISLANDS_DRIVER_STATE_ARB_INDEX 3 + +#define SISLANDS_DPM2_MAX_PULSE_SKIP 256 + +#define SISLANDS_DPM2_NEAR_TDP_DEC 10 +#define SISLANDS_DPM2_ABOVE_SAFE_INC 5 +#define SISLANDS_DPM2_BELOW_SAFE_INC 20 + +#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80 + +#define SISLANDS_DPM2_MAXPS_PERCENT_H 99 +#define SISLANDS_DPM2_MAXPS_PERCENT_M 99 + +#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF +#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12 +#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 +#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E +#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF + +#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10 + +#define SISLANDS_VRC_DFLT 0xC000B3 +#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687 +#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035 +#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550 + +#define SI_ASI_DFLT 10000 +#define SI_BSP_DFLT 0x41EB +#define SI_BSU_DFLT 0x2 +#define SI_AH_DFLT 5 +#define SI_RLP_DFLT 25 +#define SI_RMP_DFLT 65 +#define SI_LHP_DFLT 40 +#define SI_LMP_DFLT 15 +#define SI_TD_DFLT 0 +#define SI_UTC_DFLT_00 0x24 +#define SI_UTC_DFLT_01 0x22 +#define SI_UTC_DFLT_02 0x22 +#define SI_UTC_DFLT_03 0x22 +#define SI_UTC_DFLT_04 0x22 +#define SI_UTC_DFLT_05 0x22 +#define SI_UTC_DFLT_06 0x22 +#define SI_UTC_DFLT_07 0x22 +#define SI_UTC_DFLT_08 0x22 +#define SI_UTC_DFLT_09 0x22 +#define SI_UTC_DFLT_10 0x22 +#define SI_UTC_DFLT_11 0x22 +#define SI_UTC_DFLT_12 0x22 +#define SI_UTC_DFLT_13 0x22 +#define SI_UTC_DFLT_14 0x22 +#define SI_DTC_DFLT_00 0x24 +#define SI_DTC_DFLT_01 0x22 +#define SI_DTC_DFLT_02 0x22 +#define SI_DTC_DFLT_03 0x22 +#define SI_DTC_DFLT_04 0x22 +#define SI_DTC_DFLT_05 0x22 +#define SI_DTC_DFLT_06 0x22 +#define SI_DTC_DFLT_07 0x22 +#define SI_DTC_DFLT_08 0x22 +#define SI_DTC_DFLT_09 0x22 +#define SI_DTC_DFLT_10 0x22 +#define SI_DTC_DFLT_11 0x22 +#define SI_DTC_DFLT_12 0x22 +#define SI_DTC_DFLT_13 0x22 +#define SI_DTC_DFLT_14 0x22 +#define SI_VRC_DFLT 0x0000C003 +#define SI_VOLTAGERESPONSETIME_DFLT 1000 +#define SI_BACKBIASRESPONSETIME_DFLT 1000 +#define SI_VRU_DFLT 0x3 +#define SI_SPLLSTEPTIME_DFLT 0x1000 +#define SI_SPLLSTEPUNIT_DFLT 0x3 +#define SI_TPU_DFLT 0 +#define SI_TPC_DFLT 0x200 +#define SI_SSTU_DFLT 0 +#define SI_SST_DFLT 0x00C8 +#define SI_GICST_DFLT 0x200 +#define SI_FCT_DFLT 0x0400 +#define SI_FCTU_DFLT 0 +#define SI_CTXCGTT3DRPHC_DFLT 0x20 +#define SI_CTXCGTT3DRSDC_DFLT 0x40 +#define SI_VDDC3DOORPHC_DFLT 0x100 +#define SI_VDDC3DOORSDC_DFLT 0x7 +#define SI_VDDC3DOORSU_DFLT 0 +#define SI_MPLLLOCKTIME_DFLT 100 +#define SI_MPLLRESETTIME_DFLT 150 +#define SI_VCOSTEPPCT_DFLT 20 +#define SI_ENDINGVCOSTEPPCT_DFLT 5 +#define SI_REFERENCEDIVIDER_DFLT 4 + +#define SI_PM_NUMBER_OF_TC 15 +#define SI_PM_NUMBER_OF_SCLKS 20 +#define SI_PM_NUMBER_OF_MCLKS 4 +#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4 +#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3 + +/* XXX are these ok? 
*/ +#define SI_TEMP_RANGE_MIN (90 * 1000) +#define SI_TEMP_RANGE_MAX (120 * 1000) + +#define FDO_PWM_MODE_STATIC 1 +#define FDO_PWM_MODE_STATIC_RPM 5 + +enum ni_dc_cac_level +{ + NISLANDS_DCCAC_LEVEL_0 = 0, + NISLANDS_DCCAC_LEVEL_1, + NISLANDS_DCCAC_LEVEL_2, + NISLANDS_DCCAC_LEVEL_3, + NISLANDS_DCCAC_LEVEL_4, + NISLANDS_DCCAC_LEVEL_5, + NISLANDS_DCCAC_LEVEL_6, + NISLANDS_DCCAC_LEVEL_7, + NISLANDS_DCCAC_MAX_LEVELS +}; + +enum si_cac_config_reg_type +{ + SISLANDS_CACCONFIG_MMR = 0, + SISLANDS_CACCONFIG_CGIND, + SISLANDS_CACCONFIG_MAX +}; + +enum si_power_level { + SI_POWER_LEVEL_LOW = 0, + SI_POWER_LEVEL_MEDIUM = 1, + SI_POWER_LEVEL_HIGH = 2, + SI_POWER_LEVEL_CTXSW = 3, +}; + +enum si_td { + SI_TD_AUTO, + SI_TD_UP, + SI_TD_DOWN, +}; + +enum si_display_watermark { + SI_DISPLAY_WATERMARK_LOW = 0, + SI_DISPLAY_WATERMARK_HIGH = 1, +}; + +enum si_display_gap +{ + SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, + SI_PM_DISPLAY_GAP_VBLANK = 1, + SI_PM_DISPLAY_GAP_WATERMARK = 2, + SI_PM_DISPLAY_GAP_IGNORE = 3, +}; + +extern const struct amdgpu_ip_block_version si_smu_ip_block; + +struct ni_leakage_coeffients +{ + u32 at; + u32 bt; + u32 av; + u32 bv; + s32 t_slope; + s32 t_intercept; + u32 t_ref; +}; + +struct SMC_Evergreen_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress; + +struct evergreen_mc_reg_entry { + u32 mclk_max; + u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; +}; + +struct evergreen_mc_reg_table { + u8 last; + u8 num_entries; + u16 valid_flag; + struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; +}; + +struct SMC_Evergreen_MCRegisterSet +{ + uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet; + +struct SMC_Evergreen_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; + SMC_Evergreen_MCRegisterSet data[5]; +}; + +typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; + +struct SMC_NIslands_MCRegisterSet +{ + uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet; + +struct ni_mc_reg_entry { + u32 mclk_max; + u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +struct SMC_NIslands_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress; + +struct SMC_NIslands_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; + SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; +}; + +typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters; + +struct evergreen_ulv_param { + bool supported; + struct rv7xx_pl *pl; +}; + +struct evergreen_arb_registers { + u32 mc_arb_dram_timing; + u32 mc_arb_dram_timing2; + u32 mc_arb_rfsh_rate; + u32 mc_arb_burst_time; +}; + +struct at { + u32 rlp; + u32 rmp; + u32 lhp; + u32 lmp; +}; + +struct ni_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 mclk_pwrmgt_cntl; + u32 dll_cntl; + u32 mpll_ad_func_cntl; + u32 mpll_ad_func_cntl_2; + u32 mpll_dq_func_cntl; + u32 mpll_dq_func_cntl_2; + u32 mpll_ss1; + u32 
mpll_ss2; +}; + +struct RV770_SMC_SCLK_VALUE +{ + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t sclk_value; +}; + +typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE; + +struct RV770_SMC_MCLK_VALUE +{ + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL_2; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL_2; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vDLL_CNTL; + uint32_t vMPLL_SS; + uint32_t vMPLL_SS2; + uint32_t mclk_value; +}; + +typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE; + + +struct RV730_SMC_MCLK_VALUE +{ + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vDLL_CNTL; + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL2; + uint32_t vMPLL_FUNC_CNTL3; + uint32_t vMPLL_SS; + uint32_t vMPLL_SS2; + uint32_t mclk_value; +}; + +typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE; + +struct RV770_SMC_VOLTAGE_VALUE +{ + uint16_t value; + uint8_t index; + uint8_t padding; +}; + +typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE; + +union RV7XX_SMC_MCLK_VALUE +{ + RV770_SMC_MCLK_VALUE mclk770; + RV730_SMC_MCLK_VALUE mclk730; +}; + +typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE; + +struct RV770_SMC_HW_PERFORMANCE_LEVEL +{ + uint8_t arbValue; + union{ + uint8_t seqValue; + uint8_t ACIndex; + }; + uint8_t displayWatermark; + uint8_t gen2PCIE; + uint8_t gen2XSP; + uint8_t backbias; + uint8_t strobeMode; + uint8_t mcFlags; + uint32_t aT; + uint32_t bSP; + RV770_SMC_SCLK_VALUE sclk; + RV7XX_SMC_MCLK_VALUE mclk; + RV770_SMC_VOLTAGE_VALUE vddc; + RV770_SMC_VOLTAGE_VALUE mvdd; + RV770_SMC_VOLTAGE_VALUE vddci; + uint8_t reserved1; + uint8_t reserved2; + uint8_t stateFlags; + uint8_t padding; +}; + +typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL; + +struct RV770_SMC_SWSTATE +{ + uint8_t flags; + uint8_t padding1; + uint8_t padding2; + uint8_t padding3; + RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; +}; + +typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE; + +struct RV770_SMC_VOLTAGEMASKTABLE +{ + uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX]; + uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX]; +}; + +typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE; + +struct RV770_SMC_STATETABLE +{ + uint8_t thermalProtectType; + uint8_t systemFlags; + uint8_t maxVDDCIndexInPPTable; + uint8_t extraFlags; + uint8_t highSMIO[MAX_NO_VREG_STEPS]; + uint32_t lowSMIO[MAX_NO_VREG_STEPS]; + RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable; + RV770_SMC_SWSTATE initialState; + RV770_SMC_SWSTATE ACPIState; + RV770_SMC_SWSTATE driverState; + RV770_SMC_SWSTATE ULVState; +}; + +typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE; + +struct vddc_table_entry { + u16 vddc; + u8 vddc_index; + u8 high_smio; + u32 low_smio; +}; + +struct rv770_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 mpll_ad_func_cntl; + u32 mpll_ad_func_cntl_2; + u32 mpll_dq_func_cntl; + u32 mpll_dq_func_cntl_2; + u32 mclk_pwrmgt_cntl; + u32 dll_cntl; + u32 mpll_ss1; + u32 mpll_ss2; +}; + +struct rv730_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 mclk_pwrmgt_cntl; + u32 dll_cntl; + u32 mpll_func_cntl; + u32 mpll_func_cntl2; + u32 
mpll_func_cntl3; + u32 mpll_ss; + u32 mpll_ss2; +}; + +union r7xx_clock_registers { + struct rv770_clock_registers rv770; + struct rv730_clock_registers rv730; +}; + +struct rv7xx_power_info { + /* flags */ + bool mem_gddr5; + bool pcie_gen2; + bool dynamic_pcie_gen2; + bool acpi_pcie_gen2; + bool boot_in_gen2; + bool voltage_control; /* vddc */ + bool mvdd_control; + bool sclk_ss; + bool mclk_ss; + bool dynamic_ss; + bool gfx_clock_gating; + bool mg_clock_gating; + bool mgcgtssm; + bool power_gating; + bool thermal_protection; + bool display_gap; + bool dcodt; + bool ulps; + /* registers */ + union r7xx_clock_registers clk_regs; + u32 s0_vid_lower_smio_cntl; + /* voltage */ + u32 vddc_mask_low; + u32 mvdd_mask_low; + u32 mvdd_split_frequency; + u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES]; + u16 max_vddc; + u16 max_vddc_in_table; + u16 min_vddc_in_table; + struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS]; + u8 valid_vddc_entries; + /* dc odt */ + u32 mclk_odt_threshold; + u8 odt_value_0[2]; + u8 odt_value_1[2]; + /* stored values */ + u32 boot_sclk; + u16 acpi_vddc; + u32 ref_div; + u32 active_auto_throttle_sources; + u32 mclk_stutter_mode_threshold; + u32 mclk_strobe_mode_threshold; + u32 mclk_edc_enable_threshold; + u32 bsp; + u32 bsu; + u32 pbsp; + u32 pbsu; + u32 dsp; + u32 psp; + u32 asi; + u32 pasi; + u32 vrc; + u32 restricted_levels; + u32 rlp; + u32 rmp; + u32 lhp; + u32 lmp; + /* smc offsets */ + u16 state_table_start; + u16 soft_regs_start; + u16 sram_end; + /* scratch structs */ + RV770_SMC_STATETABLE smc_statetable; +}; + +enum si_pcie_gen { + SI_PCIE_GEN1 = 0, + SI_PCIE_GEN2 = 1, + SI_PCIE_GEN3 = 2, + SI_PCIE_GEN_INVALID = 0xffff +}; + +struct rv7xx_pl { + u32 sclk; + u32 mclk; + u16 vddc; + u16 vddci; /* eg+ only */ + u32 flags; + enum si_pcie_gen pcie_gen; /* si+ only */ +}; + +struct rv7xx_ps { + struct rv7xx_pl high; + struct rv7xx_pl medium; + struct rv7xx_pl low; + bool dc_compatible; +}; + +struct si_ps { + u16 performance_level_count; + bool dc_compatible; + struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; +}; + +struct ni_mc_reg_table { + u8 last; + u8 num_entries; + u16 valid_flag; + struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +struct ni_cac_data +{ + struct ni_leakage_coeffients leakage_coefficients; + u32 i_leakage; + s32 leakage_minimum_temperature; + u32 pwr_const; + u32 dc_cac_value; + u32 bif_cac_value; + u32 lkge_pwr; + u8 mc_wr_weight; + u8 mc_rd_weight; + u8 allow_ovrflw; + u8 num_win_tdp; + u8 l2num_win_tdp; + u8 lts_truncate_n; +}; + +struct evergreen_power_info { + /* must be first! 
*/ + struct rv7xx_power_info rv7xx; + /* flags */ + bool vddci_control; + bool dynamic_ac_timing; + bool abm; + bool mcls; + bool light_sleep; + bool memory_transition; + bool pcie_performance_request; + bool pcie_performance_request_registered; + bool sclk_deep_sleep; + bool dll_default_on; + bool ls_clock_gating; + bool smu_uvd_hs; + bool uvd_enabled; + /* stored values */ + u16 acpi_vddci; + u8 mvdd_high_index; + u8 mvdd_low_index; + u32 mclk_edc_wr_enable_threshold; + struct evergreen_mc_reg_table mc_reg_table; + struct atom_voltage_table vddc_voltage_table; + struct atom_voltage_table vddci_voltage_table; + struct evergreen_arb_registers bootup_arb_registers; + struct evergreen_ulv_param ulv; + struct at ats[2]; + /* smc offsets */ + u16 mc_reg_table_start; + struct amdgpu_ps current_rps; + struct rv7xx_ps current_ps; + struct amdgpu_ps requested_rps; + struct rv7xx_ps requested_ps; +}; + +struct PP_NIslands_Dpm2PerfLevel +{ + uint8_t MaxPS; + uint8_t TgtAct; + uint8_t MaxPS_StepInc; + uint8_t MaxPS_StepDec; + uint8_t PSST; + uint8_t NearTDPDec; + uint8_t AboveSafeInc; + uint8_t BelowSafeInc; + uint8_t PSDeltaLimit; + uint8_t PSDeltaWin; + uint8_t Reserved[6]; +}; + +typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel; + +struct PP_NIslands_DPM2Parameters +{ + uint32_t TDPLimit; + uint32_t NearTDPLimit; + uint32_t SafePowerLimit; + uint32_t PowerBoostLimit; +}; +typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters; + +struct NISLANDS_SMC_SCLK_VALUE +{ + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t sclk_value; +}; + +typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE; + +struct NISLANDS_SMC_MCLK_VALUE +{ + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_AD_FUNC_CNTL_2; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL_2; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vDLL_CNTL; + uint32_t vMPLL_SS; + uint32_t vMPLL_SS2; + uint32_t mclk_value; +}; + +typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE; + +struct NISLANDS_SMC_VOLTAGE_VALUE +{ + uint16_t value; + uint8_t index; + uint8_t padding; +}; + +typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE; + +struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL +{ + uint8_t arbValue; + uint8_t ACIndex; + uint8_t displayWatermark; + uint8_t gen2PCIE; + uint8_t reserved1; + uint8_t reserved2; + uint8_t strobeMode; + uint8_t mcFlags; + uint32_t aT; + uint32_t bSP; + NISLANDS_SMC_SCLK_VALUE sclk; + NISLANDS_SMC_MCLK_VALUE mclk; + NISLANDS_SMC_VOLTAGE_VALUE vddc; + NISLANDS_SMC_VOLTAGE_VALUE mvdd; + NISLANDS_SMC_VOLTAGE_VALUE vddci; + NISLANDS_SMC_VOLTAGE_VALUE std_vddc; + uint32_t powergate_en; + uint8_t hUp; + uint8_t hDown; + uint8_t stateFlags; + uint8_t arbRefreshState; + uint32_t SQPowerThrottle; + uint32_t SQPowerThrottle_2; + uint32_t reserved[2]; + PP_NIslands_Dpm2PerfLevel dpm2; +}; + +typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL; + +struct NISLANDS_SMC_SWSTATE +{ + uint8_t flags; + uint8_t levelCount; + uint8_t padding2; + uint8_t padding3; + NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[]; +}; + +typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE; + +struct NISLANDS_SMC_VOLTAGEMASKTABLE +{ + uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; + uint32_t 
lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; +}; + +typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE; + +#define NISLANDS_MAX_NO_VREG_STEPS 32 + +struct NISLANDS_SMC_STATETABLE +{ + uint8_t thermalProtectType; + uint8_t systemFlags; + uint8_t maxVDDCIndexInPPTable; + uint8_t extraFlags; + uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS]; + uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS]; + NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; + PP_NIslands_DPM2Parameters dpm2Params; + NISLANDS_SMC_SWSTATE initialState; + NISLANDS_SMC_SWSTATE ACPIState; + NISLANDS_SMC_SWSTATE ULVState; + NISLANDS_SMC_SWSTATE driverState; + NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; +}; + +typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE; + +struct ni_power_info { + /* must be first! */ + struct evergreen_power_info eg; + struct ni_clock_registers clock_registers; + struct ni_mc_reg_table mc_reg_table; + u32 mclk_rtt_mode_threshold; + /* flags */ + bool use_power_boost_limit; + bool support_cac_long_term_average; + bool cac_enabled; + bool cac_configuration_required; + bool driver_calculate_cac_leakage; + bool pc_enabled; + bool enable_power_containment; + bool enable_cac; + bool enable_sq_ramping; + /* smc offsets */ + u16 arb_table_start; + u16 fan_table_start; + u16 cac_table_start; + u16 spll_table_start; + /* CAC stuff */ + struct ni_cac_data cac_data; + u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS]; + const struct ni_cac_weights *cac_weights; + u8 lta_window_size; + u8 lts_truncate; + struct si_ps current_ps; + struct si_ps requested_ps; + /* scratch structs */ + SMC_NIslands_MCRegisters smc_mc_reg_table; + NISLANDS_SMC_STATETABLE smc_statetable; +}; + +struct si_cac_config_reg +{ + u32 offset; + u32 mask; + u32 shift; + u32 value; + enum si_cac_config_reg_type type; +}; + +struct si_powertune_data +{ + u32 cac_window; + u32 l2_lta_window_size_default; + u8 lts_truncate_default; + u8 shift_n_default; + u8 operating_temp; + struct ni_leakage_coeffients leakage_coefficients; + u32 fixed_kt; + u32 lkge_lut_v0_percent; + u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS]; + bool enable_powertune_by_default; +}; + +struct si_dyn_powertune_data +{ + u32 cac_leakage; + s32 leakage_minimum_temperature; + u32 wintime; + u32 l2_lta_window_size; + u8 lts_truncate; + u8 shift_n; + u8 dc_pwr_value; + bool disable_uvd_powertune; +}; + +struct si_dte_data +{ + u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; + u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; + u32 k; + u32 t0; + u32 max_t; + u8 window_size; + u8 temp_select; + u8 dte_mode; + u8 tdep_count; + u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + u32 t_threshold; + bool enable_dte_by_default; +}; + +struct si_clock_registers { + u32 cg_spll_func_cntl; + u32 cg_spll_func_cntl_2; + u32 cg_spll_func_cntl_3; + u32 cg_spll_func_cntl_4; + u32 cg_spll_spread_spectrum; + u32 cg_spll_spread_spectrum_2; + u32 dll_cntl; + u32 mclk_pwrmgt_cntl; + u32 mpll_ad_func_cntl; + u32 mpll_dq_func_cntl; + u32 mpll_func_cntl; + u32 mpll_func_cntl_1; + u32 mpll_func_cntl_2; + u32 mpll_ss1; + u32 mpll_ss2; +}; + +struct si_mc_reg_entry { + u32 mclk_max; + u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +struct si_mc_reg_table { + u8 last; + u8 num_entries; + u16 valid_flag; + struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; + 
SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +struct si_leakage_voltage_entry +{ + u16 voltage; + u16 leakage_index; +}; + +struct si_leakage_voltage +{ + u16 count; + struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT]; +}; + + +struct si_ulv_param { + bool supported; + u32 cg_ulv_control; + u32 cg_ulv_parameter; + u32 volt_change_delay; + struct rv7xx_pl pl; + bool one_pcie_lane_in_ulv; +}; + +struct si_power_info { + /* must be first! */ + struct ni_power_info ni; + struct si_clock_registers clock_registers; + struct si_mc_reg_table mc_reg_table; + struct atom_voltage_table mvdd_voltage_table; + struct atom_voltage_table vddc_phase_shed_table; + struct si_leakage_voltage leakage_voltage; + u16 mvdd_bootup_value; + struct si_ulv_param ulv; + u32 max_cu; + /* pcie gen */ + enum si_pcie_gen force_pcie_gen; + enum si_pcie_gen boot_pcie_gen; + enum si_pcie_gen acpi_pcie_gen; + u32 sys_pcie_mask; + /* flags */ + bool enable_dte; + bool enable_ppm; + bool vddc_phase_shed_control; + bool pspp_notify_required; + bool sclk_deep_sleep_above_low; + bool voltage_control_svi2; + bool vddci_control_svi2; + /* smc offsets */ + u32 sram_end; + u32 state_table_start; + u32 soft_regs_start; + u32 mc_reg_table_start; + u32 arb_table_start; + u32 cac_table_start; + u32 dte_table_start; + u32 spll_table_start; + u32 papm_cfg_table_start; + u32 fan_table_start; + /* CAC stuff */ + const struct si_cac_config_reg *cac_weights; + const struct si_cac_config_reg *lcac_config; + const struct si_cac_config_reg *cac_override; + const struct si_powertune_data *powertune_data; + struct si_dyn_powertune_data dyn_powertune_data; + /* DTE stuff */ + struct si_dte_data dte_data; + /* scratch structs */ + SMC_SIslands_MCRegisters smc_mc_reg_table; + SISLANDS_SMC_STATETABLE smc_statetable; + PP_SIslands_PAPMParameters papm_parm; + /* SVI2 */ + u8 svd_gpio_id; + u8 svc_gpio_id; + /* fan control */ + bool fan_ctrl_is_in_default_mode; + u32 t_min; + u32 fan_ctrl_default_mode; + bool fan_is_controlled_by_smc; +}; + +#endif diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c new file mode 100644 index 000000000000..8f994ffa9cd1 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c @@ -0,0 +1,273 @@ +/* + * Copyright 2011 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Alex Deucher + */ + +#include + +#include "amdgpu.h" +#include "sid.h" +#include "ppsmc.h" +#include "amdgpu_ucode.h" +#include "sislands_smc.h" + +static int si_set_smc_sram_address(struct amdgpu_device *adev, + u32 smc_address, u32 limit) +{ + if (smc_address & 3) + return -EINVAL; + if ((smc_address + 3) > limit) + return -EINVAL; + + WREG32(SMC_IND_INDEX_0, smc_address); + WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + + return 0; +} + +int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit) +{ + unsigned long flags; + int ret = 0; + u32 data, original_data, addr, extra_shift; + + if (smc_start_address & 3) + return -EINVAL; + if ((smc_start_address + byte_count) > limit) + return -EINVAL; + + addr = smc_start_address; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + while (byte_count >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + + ret = si_set_smc_sram_address(adev, addr, limit); + if (ret) + goto done; + + WREG32(SMC_IND_DATA_0, data); + + src += 4; + byte_count -= 4; + addr += 4; + } + + /* RMW for the final bytes */ + if (byte_count > 0) { + data = 0; + + ret = si_set_smc_sram_address(adev, addr, limit); + if (ret) + goto done; + + original_data = RREG32(SMC_IND_DATA_0); + extra_shift = 8 * (4 - byte_count); + + while (byte_count > 0) { + /* SMC address space is BE */ + data = (data << 8) + *src++; + byte_count--; + } + + data <<= extra_shift; + data |= (original_data & ~((~0UL) << extra_shift)); + + ret = si_set_smc_sram_address(adev, addr, limit); + if (ret) + goto done; + + WREG32(SMC_IND_DATA_0, data); + } + +done: + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return ret; +} + +void amdgpu_si_start_smc(struct amdgpu_device *adev) +{ + u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL); + + tmp &= ~RST_REG; + + WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); +} + +void amdgpu_si_reset_smc(struct amdgpu_device *adev) +{ + u32 tmp; + + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + RREG32(CB_CGTT_SCLK_CTRL); + + tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) | + RST_REG; + WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); +} + +int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev) +{ + static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 }; + + return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); +} + +void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable) +{ + u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + + if (enable) + tmp &= ~CK_DISABLE; + else + tmp |= CK_DISABLE; + + WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp); +} + +bool amdgpu_si_is_smc_running(struct amdgpu_device *adev) +{ + u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL); + u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + + if (!(rst & RST_REG) && !(clk & CK_DISABLE)) + return true; + + return false; +} + +PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, + PPSMC_Msg msg) +{ + u32 tmp; + int i; + + if (!amdgpu_si_is_smc_running(adev)) + return PPSMC_Result_Failed; + + WREG32(SMC_MESSAGE_0, msg); + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = RREG32(SMC_RESP_0); + if (tmp != 0) + break; + udelay(1); + } + + return (PPSMC_Result)RREG32(SMC_RESP_0); +} + +PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev) +{ + u32 tmp; + int i; + + if (!amdgpu_si_is_smc_running(adev)) + return PPSMC_Result_OK; + + for (i = 0; i < adev->usec_timeout; i++) { + tmp = 
RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); + if ((tmp & CKEN) == 0) + break; + udelay(1); + } + + return PPSMC_Result_OK; +} + +int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit) +{ + const struct smc_firmware_header_v1_0 *hdr; + unsigned long flags; + u32 ucode_start_address; + u32 ucode_size; + const u8 *src; + u32 data; + + if (!adev->pm.fw) + return -EINVAL; + + hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; + + amdgpu_ucode_print_smc_hdr(&hdr->header); + + adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); + ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); + ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); + src = (const u8 *) + (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); + if (ucode_size & 3) + return -EINVAL; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + WREG32(SMC_IND_INDEX_0, ucode_start_address); + WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); + while (ucode_size >= 4) { + /* SMC address space is BE */ + data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; + + WREG32(SMC_IND_DATA_0, data); + + src += 4; + ucode_size -= 4; + } + WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return 0; +} + +int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 *value, u32 limit) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + ret = si_set_smc_sram_address(adev, smc_address, limit); + if (ret == 0) + *value = RREG32(SMC_IND_DATA_0); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return ret; +} + +int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 value, u32 limit) +{ + unsigned long flags; + int ret; + + spin_lock_irqsave(&adev->smc_idx_lock, flags); + ret = si_set_smc_sram_address(adev, smc_address, limit); + if (ret == 0) + WREG32(SMC_IND_DATA_0, value); + spin_unlock_irqrestore(&adev->smc_idx_lock, flags); + + return ret; +} diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h new file mode 100644 index 000000000000..c7dc117a688c --- /dev/null +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h @@ -0,0 +1,431 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef PP_SISLANDS_SMC_H +#define PP_SISLANDS_SMC_H + +#include "ppsmc.h" + +#pragma pack(push, 1) + +#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 + +struct PP_SIslands_Dpm2PerfLevel +{ + uint8_t MaxPS; + uint8_t TgtAct; + uint8_t MaxPS_StepInc; + uint8_t MaxPS_StepDec; + uint8_t PSSamplingTime; + uint8_t NearTDPDec; + uint8_t AboveSafeInc; + uint8_t BelowSafeInc; + uint8_t PSDeltaLimit; + uint8_t PSDeltaWin; + uint16_t PwrEfficiencyRatio; + uint8_t Reserved[4]; +}; + +typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel; + +struct PP_SIslands_DPM2Status +{ + uint32_t dpm2Flags; + uint8_t CurrPSkip; + uint8_t CurrPSkipPowerShift; + uint8_t CurrPSkipTDP; + uint8_t CurrPSkipOCP; + uint8_t MaxSPLLIndex; + uint8_t MinSPLLIndex; + uint8_t CurrSPLLIndex; + uint8_t InfSweepMode; + uint8_t InfSweepDir; + uint8_t TDPexceeded; + uint8_t reserved; + uint8_t SwitchDownThreshold; + uint32_t SwitchDownCounter; + uint32_t SysScalingFactor; +}; + +typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status; + +struct PP_SIslands_DPM2Parameters +{ + uint32_t TDPLimit; + uint32_t NearTDPLimit; + uint32_t SafePowerLimit; + uint32_t PowerBoostLimit; + uint32_t MinLimitDelta; +}; +typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters; + +struct PP_SIslands_PAPMStatus +{ + uint32_t EstimatedDGPU_T; + uint32_t EstimatedDGPU_P; + uint32_t EstimatedAPU_T; + uint32_t EstimatedAPU_P; + uint8_t dGPU_T_Limit_Exceeded; + uint8_t reserved[3]; +}; +typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus; + +struct PP_SIslands_PAPMParameters +{ + uint32_t NearTDPLimitTherm; + uint32_t NearTDPLimitPAPM; + uint32_t PlatformPowerLimit; + uint32_t dGPU_T_Limit; + uint32_t dGPU_T_Warning; + uint32_t dGPU_T_Hysteresis; +}; +typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters; + +struct SISLANDS_SMC_SCLK_VALUE +{ + uint32_t vCG_SPLL_FUNC_CNTL; + uint32_t vCG_SPLL_FUNC_CNTL_2; + uint32_t vCG_SPLL_FUNC_CNTL_3; + uint32_t vCG_SPLL_FUNC_CNTL_4; + uint32_t vCG_SPLL_SPREAD_SPECTRUM; + uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; + uint32_t sclk_value; +}; + +typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE; + +struct SISLANDS_SMC_MCLK_VALUE +{ + uint32_t vMPLL_FUNC_CNTL; + uint32_t vMPLL_FUNC_CNTL_1; + uint32_t vMPLL_FUNC_CNTL_2; + uint32_t vMPLL_AD_FUNC_CNTL; + uint32_t vMPLL_DQ_FUNC_CNTL; + uint32_t vMCLK_PWRMGT_CNTL; + uint32_t vDLL_CNTL; + uint32_t vMPLL_SS; + uint32_t vMPLL_SS2; + uint32_t mclk_value; +}; + +typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE; + +struct SISLANDS_SMC_VOLTAGE_VALUE +{ + uint16_t value; + uint8_t index; + uint8_t phase_settings; +}; + +typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE; + +struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL +{ + uint8_t ACIndex; + uint8_t displayWatermark; + uint8_t gen2PCIE; + uint8_t UVDWatermark; + uint8_t VCEWatermark; + uint8_t strobeMode; + uint8_t mcFlags; + uint8_t padding; + uint32_t aT; + uint32_t bSP; + SISLANDS_SMC_SCLK_VALUE sclk; + SISLANDS_SMC_MCLK_VALUE mclk; + SISLANDS_SMC_VOLTAGE_VALUE vddc; + SISLANDS_SMC_VOLTAGE_VALUE mvdd; + SISLANDS_SMC_VOLTAGE_VALUE vddci; + SISLANDS_SMC_VOLTAGE_VALUE std_vddc; + uint8_t hysteresisUp; + uint8_t hysteresisDown; + uint8_t stateFlags; + uint8_t arbRefreshState; + uint32_t SQPowerThrottle; + uint32_t SQPowerThrottle_2; + uint32_t MaxPoweredUpCU; + SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc; + SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc; + uint32_t reserved[2]; + PP_SIslands_Dpm2PerfLevel dpm2; +}; + +#define 
SISLANDS_SMC_STROBE_RATIO 0x0F +#define SISLANDS_SMC_STROBE_ENABLE 0x10 + +#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01 +#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02 +#define SISLANDS_SMC_MC_RTT_ENABLE 0x04 +#define SISLANDS_SMC_MC_STUTTER_EN 0x08 +#define SISLANDS_SMC_MC_PG_EN 0x10 + +typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL; + +struct SISLANDS_SMC_SWSTATE +{ + uint8_t flags; + uint8_t levelCount; + uint8_t padding2; + uint8_t padding3; + SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[]; +}; + +typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE; + +struct SISLANDS_SMC_SWSTATE_SINGLE { + uint8_t flags; + uint8_t levelCount; + uint8_t padding2; + uint8_t padding3; + SISLANDS_SMC_HW_PERFORMANCE_LEVEL level; +}; + +#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0 +#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1 +#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2 +#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3 +#define SISLANDS_SMC_VOLTAGEMASK_MAX 4 + +struct SISLANDS_SMC_VOLTAGEMASKTABLE +{ + uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX]; +}; + +typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE; + +#define SISLANDS_MAX_NO_VREG_STEPS 32 + +struct SISLANDS_SMC_STATETABLE +{ + uint8_t thermalProtectType; + uint8_t systemFlags; + uint8_t maxVDDCIndexInPPTable; + uint8_t extraFlags; + uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS]; + SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; + SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable; + PP_SIslands_DPM2Parameters dpm2Params; + struct SISLANDS_SMC_SWSTATE_SINGLE initialState; + struct SISLANDS_SMC_SWSTATE_SINGLE ACPIState; + struct SISLANDS_SMC_SWSTATE_SINGLE ULVState; + SISLANDS_SMC_SWSTATE driverState; + SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; +}; + +typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE; + +#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0 +#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC +#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28 +#define SI_SMC_SOFT_REGISTER_seq_index 0x5C +#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60 +#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70 +#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78 +#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88 +#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C +#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98 +#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8 +#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4 +#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8 +#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC +#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4 +#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC +#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100 +#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118 +#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c +#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120 + +struct PP_SIslands_FanTable +{ + uint8_t fdo_mode; + uint8_t padding; + int16_t temp_min; + int16_t temp_med; + int16_t temp_max; + int16_t slope1; + int16_t slope2; + int16_t fdo_min; + int16_t hys_up; + int16_t hys_down; + int16_t hys_slope; + int16_t temp_resp_lim; + int16_t temp_curr; + int16_t slope_curr; + int16_t pwm_curr; + uint32_t refresh_period; + int16_t fdo_max; + uint8_t temp_src; + int8_t padding2; +}; + +typedef struct PP_SIslands_FanTable PP_SIslands_FanTable; + +#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 +#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32 + +#define 
SMC_SISLANDS_SCALE_I 7 +#define SMC_SISLANDS_SCALE_R 12 + +struct PP_SIslands_CacConfig +{ + uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES]; + uint32_t lkge_lut_V0; + uint32_t lkge_lut_Vstep; + uint32_t WinTime; + uint32_t R_LL; + uint32_t calculation_repeats; + uint32_t l2numWin_TDP; + uint32_t dc_cac; + uint8_t lts_truncate_n; + uint8_t SHIFT_N; + uint8_t log2_PG_LKG_SCALE; + uint8_t cac_temp; + uint32_t lkge_lut_T0; + uint32_t lkge_lut_Tstep; +}; + +typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig; + +#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16 +#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 + +struct SMC_SIslands_MCRegisterAddress +{ + uint16_t s0; + uint16_t s1; +}; + +typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress; + +struct SMC_SIslands_MCRegisterSet +{ + uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; +}; + +typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet; + +struct SMC_SIslands_MCRegisters +{ + uint8_t last; + uint8_t reserved[3]; + SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; + SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; +}; + +typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters; + +struct SMC_SIslands_MCArbDramTimingRegisterSet +{ + uint32_t mc_arb_dram_timing; + uint32_t mc_arb_dram_timing2; + uint8_t mc_arb_rfsh_rate; + uint8_t mc_arb_burst_time; + uint8_t padding[2]; +}; + +typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet; + +struct SMC_SIslands_MCArbDramTimingRegisters +{ + uint8_t arb_current; + uint8_t reserved[3]; + SMC_SIslands_MCArbDramTimingRegisterSet data[16]; +}; + +typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters; + +struct SMC_SISLANDS_SPLL_DIV_TABLE +{ + uint32_t freq[256]; + uint32_t ss[256]; +}; + +#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff +#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0 +#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000 +#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25 +#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff +#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0 +#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000 +#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20 + +typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE; + +#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5 + +#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16 + +struct Smc_SIslands_DTE_Configuration +{ + uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; + uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; + uint32_t K; + uint32_t T0; + uint32_t MaxT; + uint8_t WindowSize; + uint8_t Tdep_count; + uint8_t temp_select; + uint8_t DTE_mode; + uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; + uint32_t Tthreshold; +}; + +typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration; + +#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1 + +#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000 + +#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0 +#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4 +#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC +#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10 +#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 
0x14 +#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18 +#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24 +#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30 +#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38 +#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40 +#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48 + +#pragma pack(pop) + +int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, + u32 smc_start_address, + const u8 *src, u32 byte_count, u32 limit); +void amdgpu_si_start_smc(struct amdgpu_device *adev); +void amdgpu_si_reset_smc(struct amdgpu_device *adev); +int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev); +void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable); +bool amdgpu_si_is_smc_running(struct amdgpu_device *adev); +PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg); +PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev); +int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit); +int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 *value, u32 limit); +int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, + u32 value, u32 limit); + +#endif + diff --git a/drivers/gpu/drm/amd/pm/powerplay/Makefile b/drivers/gpu/drm/amd/pm/powerplay/Makefile index 614d8b6a58ad..795a3624cbbf 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/Makefile +++ b/drivers/gpu/drm/amd/pm/powerplay/Makefile @@ -28,11 +28,7 @@ AMD_POWERPLAY = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/powerplay/ include $(AMD_POWERPLAY) -POWER_MGR-y = amd_powerplay.o legacy_dpm.o - -POWER_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o - -POWER_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o +POWER_MGR-y = amd_powerplay.o AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR-y)) diff --git a/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h deleted file mode 100644 index 2fcc4b60153c..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#ifndef __CIK_DPM_H__ -#define __CIK_DPM_H__ - -extern const struct amdgpu_ip_block_version kv_smu_ip_block; - -#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h b/drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h new file mode 100644 index 000000000000..fe3665965416 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h @@ -0,0 +1,35 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _AMD_POWERPLAY_H_ +#define _AMD_POWERPLAY_H_ + +#include +#include +#include +#include "amd_shared.h" +#include "cgs_common.h" +#include "dm_pp_interface.h" +#include "kgd_pp_interface.h" +#include "amdgpu.h" + +#endif /* _AMD_POWERPLAY_H_ */ diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h new file mode 100644 index 000000000000..9b698780aed8 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h @@ -0,0 +1,186 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef CZ_PP_SMC_H +#define CZ_PP_SMC_H + +#pragma pack(push, 1) + +/* Fan control algorithm:*/ +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +enum DPM_ARRAY { + DPM_ARRAY_HARD_MAX, + DPM_ARRAY_HARD_MIN, + DPM_ARRAY_SOFT_MAX, + DPM_ARRAY_SOFT_MIN +}; + +/* + * Return codes for driver to SMC communication. + * Leave these #define-s, enums might not be exactly 8-bits on the microcontroller. + */ +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) +#define PPSMC_Result_NotNow ((uint16_t)0x03) +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + +/* + * Supported driver messages + */ +#define PPSMC_MSG_Test ((uint16_t) 0x1) +#define PPSMC_MSG_GetFeatureStatus ((uint16_t) 0x2) +#define PPSMC_MSG_EnableAllSmuFeatures ((uint16_t) 0x3) +#define PPSMC_MSG_DisableAllSmuFeatures ((uint16_t) 0x4) +#define PPSMC_MSG_OptimizeBattery ((uint16_t) 0x5) +#define PPSMC_MSG_MaximizePerf ((uint16_t) 0x6) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t) 0x7) +#define PPSMC_MSG_UVDPowerON ((uint16_t) 0x8) +#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x9) +#define PPSMC_MSG_VCEPowerON ((uint16_t) 0xA) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0xB) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0xC) +#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0xD) +#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0xE) +#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0xF) +#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x10) +#define PPSMC_MSG_SetMinDeepSleepSclk ((uint16_t) 0x11) +#define PPSMC_MSG_SetSclkSoftMin ((uint16_t) 0x12) +#define PPSMC_MSG_SetSclkSoftMax ((uint16_t) 0x13) +#define PPSMC_MSG_SetSclkHardMin ((uint16_t) 0x14) +#define PPSMC_MSG_SetSclkHardMax ((uint16_t) 0x15) +#define PPSMC_MSG_SetLclkSoftMin ((uint16_t) 0x16) +#define PPSMC_MSG_SetLclkSoftMax ((uint16_t) 0x17) +#define PPSMC_MSG_SetLclkHardMin ((uint16_t) 0x18) +#define PPSMC_MSG_SetLclkHardMax ((uint16_t) 0x19) +#define PPSMC_MSG_SetUvdSoftMin ((uint16_t) 0x1A) +#define PPSMC_MSG_SetUvdSoftMax ((uint16_t) 0x1B) +#define PPSMC_MSG_SetUvdHardMin ((uint16_t) 0x1C) +#define PPSMC_MSG_SetUvdHardMax ((uint16_t) 0x1D) +#define PPSMC_MSG_SetEclkSoftMin ((uint16_t) 0x1E) +#define PPSMC_MSG_SetEclkSoftMax ((uint16_t) 0x1F) +#define PPSMC_MSG_SetEclkHardMin ((uint16_t) 0x20) +#define PPSMC_MSG_SetEclkHardMax ((uint16_t) 0x21) +#define PPSMC_MSG_SetAclkSoftMin ((uint16_t) 0x22) +#define PPSMC_MSG_SetAclkSoftMax ((uint16_t) 0x23) +#define PPSMC_MSG_SetAclkHardMin ((uint16_t) 0x24) +#define PPSMC_MSG_SetAclkHardMax ((uint16_t) 0x25) +#define PPSMC_MSG_SetNclkSoftMin ((uint16_t) 0x26) +#define PPSMC_MSG_SetNclkSoftMax ((uint16_t) 0x27) +#define PPSMC_MSG_SetNclkHardMin ((uint16_t) 0x28) +#define PPSMC_MSG_SetNclkHardMax ((uint16_t) 0x29) +#define PPSMC_MSG_SetPstateSoftMin ((uint16_t) 0x2A) +#define PPSMC_MSG_SetPstateSoftMax ((uint16_t) 0x2B) +#define PPSMC_MSG_SetPstateHardMin ((uint16_t) 0x2C) +#define PPSMC_MSG_SetPstateHardMax ((uint16_t) 0x2D) +#define PPSMC_MSG_DisableLowMemoryPstate ((uint16_t) 0x2E) +#define PPSMC_MSG_EnableLowMemoryPstate ((uint16_t) 0x2F) +#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x30) +#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x31) +#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x32) +#define PPSMC_MSG_DriverDramAddrHi ((uint16_t) 0x33) +#define PPSMC_MSG_DriverDramAddrLo 
((uint16_t) 0x34) +#define PPSMC_MSG_CondExecDramAddrHi ((uint16_t) 0x35) +#define PPSMC_MSG_CondExecDramAddrLo ((uint16_t) 0x36) +#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x37) +#define PPSMC_MSG_DriverResetMode ((uint16_t) 0x38) +#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x39) +#define PPSMC_MSG_SetDisplayPhyConfig ((uint16_t) 0x3A) +#define PPSMC_MSG_GetMaxSclkLevel ((uint16_t) 0x3B) +#define PPSMC_MSG_GetMaxLclkLevel ((uint16_t) 0x3C) +#define PPSMC_MSG_GetMaxUvdLevel ((uint16_t) 0x3D) +#define PPSMC_MSG_GetMaxEclkLevel ((uint16_t) 0x3E) +#define PPSMC_MSG_GetMaxAclkLevel ((uint16_t) 0x3F) +#define PPSMC_MSG_GetMaxNclkLevel ((uint16_t) 0x40) +#define PPSMC_MSG_GetMaxPstate ((uint16_t) 0x41) +#define PPSMC_MSG_DramAddrHiVirtual ((uint16_t) 0x42) +#define PPSMC_MSG_DramAddrLoVirtual ((uint16_t) 0x43) +#define PPSMC_MSG_DramAddrHiPhysical ((uint16_t) 0x44) +#define PPSMC_MSG_DramAddrLoPhysical ((uint16_t) 0x45) +#define PPSMC_MSG_DramBufferSize ((uint16_t) 0x46) +#define PPSMC_MSG_SetMmPwrLogDramAddrHi ((uint16_t) 0x47) +#define PPSMC_MSG_SetMmPwrLogDramAddrLo ((uint16_t) 0x48) +#define PPSMC_MSG_SetClkTableAddrHi ((uint16_t) 0x49) +#define PPSMC_MSG_SetClkTableAddrLo ((uint16_t) 0x4A) +#define PPSMC_MSG_GetConservativePowerLimit ((uint16_t) 0x4B) + +#define PPSMC_MSG_InitJobs ((uint16_t) 0x252) +#define PPSMC_MSG_ExecuteJob ((uint16_t) 0x254) + +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) + +#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) +#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) + +#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170) +#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171) + +#define PPSMC_MSG_AllowLowSclkInterrupt ((uint16_t) 0x184) +#define PPSMC_MSG_MmPowerMonitorStart ((uint16_t) 0x18F) +#define PPSMC_MSG_MmPowerMonitorStop ((uint16_t) 0x190) +#define PPSMC_MSG_MmPowerMonitorRestart ((uint16_t) 0x191) + +#define PPSMC_MSG_SetClockGateMask ((uint16_t) 0x260) +#define PPSMC_MSG_SetFpsThresholdLo ((uint16_t) 0x264) +#define PPSMC_MSG_SetFpsThresholdHi ((uint16_t) 0x265) +#define PPSMC_MSG_SetLowSclkIntrThreshold ((uint16_t) 0x266) + +#define PPSMC_MSG_ClkTableXferToDram ((uint16_t) 0x267) +#define PPSMC_MSG_ClkTableXferToSmu ((uint16_t) 0x268) +#define PPSMC_MSG_GetAverageGraphicsActivity ((uint16_t) 0x269) +#define PPSMC_MSG_GetAverageGioActivity ((uint16_t) 0x26A) +#define PPSMC_MSG_SetLoggerBufferSize ((uint16_t) 0x26B) +#define PPSMC_MSG_SetLoggerAddressHigh ((uint16_t) 0x26C) +#define PPSMC_MSG_SetLoggerAddressLow ((uint16_t) 0x26D) +#define PPSMC_MSG_SetWatermarkFrequency ((uint16_t) 0x26E) +#define PPSMC_MSG_SetDisplaySizePowerParams ((uint16_t) 0x26F) + +/* REMOVE LATER*/ +#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) + +/* Feature Enable Masks*/ +#define NB_DPM_MASK 0x00000800 +#define VDDGFX_MASK 0x00800000 +#define VCE_DPM_MASK 0x00400000 +#define ACP_DPM_MASK 0x00040000 +#define UVD_DPM_MASK 0x00010000 +#define GFX_CU_PG_MASK 0x00004000 +#define SCLK_DPM_MASK 0x00080000 + +#if !defined(SMC_MICROCODE) +#pragma pack(pop) + +#endif + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h new file mode 100644 index 000000000000..7ae494569a60 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h @@ -0,0 +1,412 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + + +#ifndef _FIJI_PP_SMC_H_ +#define _FIJI_PP_SMC_H_ + +#pragma pack(push, 1) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 + +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 + +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 + +/* Defines for DPM 2.0 */ +#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 +#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 +#define PPSMC_DPM2FLAGS_OCP 0x04 + +/* Defines for display watermark level */ +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + +/* In the HW performance level's state flags: */ +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 +#define PPSMC_STATEFLAG_POWERSHIFT 0x08 +#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + +/* Fan control algorithm: */ +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +/* Gemini Modes*/ +#define PPSMC_GeminiModeNone 0 /*Single GPU board*/ +#define PPSMC_GeminiModeMaster 1 /*Master GPU on a Gemini board*/ +#define PPSMC_GeminiModeSlave 2 /*Slave GPU on a Gemini board*/ + + +/* Return codes for driver to SMC communication. 
*/ +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) + +#define PPSMC_Result_NotNow ((uint16_t)0x03) + +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + + +#define PPSMC_MSG_Halt ((uint16_t)0x10) +#define PPSMC_MSG_Resume ((uint16_t)0x11) +#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) +#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) +#define PPSMC_MSG_LevelUp ((uint16_t)0x18) +#define PPSMC_MSG_LevelDown ((uint16_t)0x19) +#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) +#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) + +#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) +#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) +#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) + +#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) +#define PPSMC_MSG_EnableCac ((uint16_t)0x53) +#define PPSMC_MSG_DisableCac ((uint16_t)0x54) +#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) +#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) +#define PPSMC_CACHistoryStart ((uint16_t)0x57) +#define PPSMC_CACHistoryStop ((uint16_t)0x58) +#define PPSMC_TDPClampingActive ((uint16_t)0x59) +#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) +#define PPSMC_StartFanControl ((uint16_t)0x5B) +#define PPSMC_StopFanControl ((uint16_t)0x5C) +#define PPSMC_NoDisplay ((uint16_t)0x5D) +#define PPSMC_HasDisplay ((uint16_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) +#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) +#define PPSMC_MSG_EnableULV ((uint16_t)0x62) +#define PPSMC_MSG_DisableULV ((uint16_t)0x63) +#define PPSMC_MSG_EnterULV ((uint16_t)0x64) +#define PPSMC_MSG_ExitULV ((uint16_t)0x65) +#define PPSMC_PowerShiftActive ((uint16_t)0x6A) +#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) +#define PPSMC_OCPActive ((uint16_t)0x6C) +#define PPSMC_OCPInactive ((uint16_t)0x6D) +#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) +#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) +#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) +#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) +#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) +#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) +#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) +#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) +#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) +#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) +#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) +#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) +#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) +#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) + +#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) +#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) +#define PPSMC_FlushDataCache ((uint16_t)0x80) +#define PPSMC_FlushInstrCache ((uint16_t)0x81) + +#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) 
+#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) + +#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) + +#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) +#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) +#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) +#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) + +#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) + +#define PPSMC_MSG_BREAK ((uint16_t)0xF8) + +/* Trinity Specific Messages*/ +#define PPSMC_MSG_Test ((uint16_t) 0x100) +#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101) +#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102) +#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103) +#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) +#define PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105) +#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106) +#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107) +#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108) +#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109) +#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a) +#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b) +#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e) +#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f) +#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110) +#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111) +#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112) +#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113) +#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114) +#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117) +#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118) +#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119) +#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a) +#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b) +#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c) +#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d) +#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e) +#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f) +#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120) +#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121) +#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122) +#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123) +#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124) +#define PPSMC_MSG_VddNB_Request ((uint16_t) 0x125) +#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint32_t) 0x126) +#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint32_t) 0x127) +#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128) + +#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129) +#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A) +#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B) +#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C) +#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) +#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) +#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) +#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) +#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) +#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) +#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) +#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134) +#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) +#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) +#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) +#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) +#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b) +#define PPSMC_MSG_SDMAPowerON ((uint16_t) 
0x13c) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) +#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e) +#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f) +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) +#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142) +#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143) +#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144) +#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) +#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) +#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) +#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) +#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) +#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b) + +#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c) +#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d) + +#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) +#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) +#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150) +#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151) +#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t) 0x152) +#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t) 0x153) +#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) +#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) +#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) +#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157) +#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158) +#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) +#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) +#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) +#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t) 0x15c) +#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) +#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) +#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) +#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160) +#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161) +#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) +#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163) +#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164) +#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165) +#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166) +#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) +#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168) +#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) +#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) +#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b) +#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t) 0x16c) +#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t) 0x16d) +#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t) 0x16e) +#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t) 0x16f) +#define PPSMC_MSG_PmStatusLogStart ((uint16_t) 0x170) +#define PPSMC_MSG_PmStatusLogSample ((uint16_t) 0x171) +#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172) +#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173) +#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174) +#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175) +#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176) +#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177) +#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178) +#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179) +#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a) +#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b) +#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c) 
+#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d) +#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e) +#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f) +#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182) +#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184) +#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) +#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) +#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) +#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) +#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) +#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) +#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) +#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) +#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D) +#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E) +#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) +#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) +#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) +#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192) +#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193) +#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194) +#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195) +#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207) +#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196) +#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198) +#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199) +#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) +#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B) +#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) +#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) + +#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) +#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) +#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202) +#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203) +#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) +#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206) +#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209) +#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A) + +#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240) +#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241) +#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242) +#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243) +#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244) +#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245) +#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246) + +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) +#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) +#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259) +#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A) +#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B) +#define DMCUSMC_MSG_PSREntry ((uint16_t) 
0x25C) +#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D) +#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260) +#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261) +#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262) +#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263) +#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264) +#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265) +#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266) +#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267) +#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268) +#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269) +#define PPSMC_MSG_EnableAvfs ((uint16_t) 0x26A) +#define PPSMC_MSG_DisableAvfs ((uint16_t) 0x26B) +#define PPSMC_MSG_PerformBtc ((uint16_t) 0x26C) +#define PPSMC_MSG_GetHbmCode ((uint16_t) 0x26D) +#define PPSMC_MSG_GetVrVddcTemperature ((uint16_t) 0x26E) +#define PPSMC_MSG_GetVrMvddTemperature ((uint16_t) 0x26F) +#define PPSMC_MSG_GetLiquidTemperature ((uint16_t) 0x270) +#define PPSMC_MSG_GetPlxTemperature ((uint16_t) 0x271) +#define PPSMC_MSG_RequestI2CControl ((uint16_t) 0x272) +#define PPSMC_MSG_ReleaseI2CControl ((uint16_t) 0x273) +#define PPSMC_MSG_LedConfig ((uint16_t) 0x274) +#define PPSMC_MSG_SetHbmFanCode ((uint16_t) 0x275) +#define PPSMC_MSG_SetHbmThrottleCode ((uint16_t) 0x276) + +#define PPSMC_MSG_GetEnabledPsm ((uint16_t) 0x400) +#define PPSMC_MSG_AgmStartPsm ((uint16_t) 0x401) +#define PPSMC_MSG_AgmReadPsm ((uint16_t) 0x402) +#define PPSMC_MSG_AgmResetPsm ((uint16_t) 0x403) +#define PPSMC_MSG_ReadVftCell ((uint16_t) 0x404) + +/* AVFS Only - Remove Later */ +#define PPSMC_MSG_VftTableIsValid ((uint16_t) 0x666) + +/* If the SMC firmware has an event status soft register this is what the individual bits mean.*/ +#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 +#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 +#define PPSMC_EVENT_STATUS_DC 0x00000004 + +typedef uint16_t PPSMC_Msg; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h new file mode 100644 index 000000000000..6e0be6027705 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h @@ -0,0 +1,469 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef _HARDWARE_MANAGER_H_ +#define _HARDWARE_MANAGER_H_ + + + +struct pp_hwmgr; +struct pp_hw_power_state; +struct pp_power_state; +enum amd_dpm_forced_level; +struct PP_TemperatureRange; + + +struct phm_fan_speed_info { + uint32_t min_percent; + uint32_t max_percent; + uint32_t min_rpm; + uint32_t max_rpm; + bool supports_percent_read; + bool supports_percent_write; + bool supports_rpm_read; + bool supports_rpm_write; +}; + +/* Automatic Power State Throttling */ +enum PHM_AutoThrottleSource +{ + PHM_AutoThrottleSource_Thermal, + PHM_AutoThrottleSource_External +}; + +typedef enum PHM_AutoThrottleSource PHM_AutoThrottleSource; + +enum phm_platform_caps { + PHM_PlatformCaps_AtomBiosPpV1 = 0, + PHM_PlatformCaps_PowerPlaySupport, + PHM_PlatformCaps_ACOverdriveSupport, + PHM_PlatformCaps_BacklightSupport, + PHM_PlatformCaps_ThermalController, + PHM_PlatformCaps_BiosPowerSourceControl, + PHM_PlatformCaps_DisableVoltageTransition, + PHM_PlatformCaps_DisableEngineTransition, + PHM_PlatformCaps_DisableMemoryTransition, + PHM_PlatformCaps_DynamicPowerManagement, + PHM_PlatformCaps_EnableASPML0s, + PHM_PlatformCaps_EnableASPML1, + PHM_PlatformCaps_OD5inACSupport, + PHM_PlatformCaps_OD5inDCSupport, + PHM_PlatformCaps_SoftStateOD5, + PHM_PlatformCaps_NoOD5Support, + PHM_PlatformCaps_ContinuousHardwarePerformanceRange, + PHM_PlatformCaps_ActivityReporting, + PHM_PlatformCaps_EnableBackbias, + PHM_PlatformCaps_OverdriveDisabledByPowerBudget, + PHM_PlatformCaps_ShowPowerBudgetWarning, + PHM_PlatformCaps_PowerBudgetWaiverAvailable, + PHM_PlatformCaps_GFXClockGatingSupport, + PHM_PlatformCaps_MMClockGatingSupport, + PHM_PlatformCaps_AutomaticDCTransition, + PHM_PlatformCaps_GeminiPrimary, + PHM_PlatformCaps_MemorySpreadSpectrumSupport, + PHM_PlatformCaps_EngineSpreadSpectrumSupport, + PHM_PlatformCaps_StepVddc, + PHM_PlatformCaps_DynamicPCIEGen2Support, + PHM_PlatformCaps_SMC, + PHM_PlatformCaps_FaultyInternalThermalReading, /* Internal thermal controller reports faulty temperature value when DAC2 is active */ + PHM_PlatformCaps_EnableVoltageControl, /* indicates voltage can be controlled */ + PHM_PlatformCaps_EnableSideportControl, /* indicates Sideport can be controlled */ + PHM_PlatformCaps_VideoPlaybackEEUNotification, /* indicates EEU notification of video start/stop is required */ + PHM_PlatformCaps_TurnOffPll_ASPML1, /* PCIE Turn Off PLL in ASPM L1 */ + PHM_PlatformCaps_EnableHTLinkControl, /* indicates HT Link can be controlled by ACPI or CLMC overridden/automated mode. */ + PHM_PlatformCaps_PerformanceStateOnly, /* indicates only performance power state to be used on current system. */ + PHM_PlatformCaps_ExclusiveModeAlwaysHigh, /* In Exclusive (3D) mode always stay in High state. */ + PHM_PlatformCaps_DisableMGClockGating, /* to disable Medium Grain Clock Gating or not */ + PHM_PlatformCaps_DisableMGCGTSSM, /* TO disable Medium Grain Clock Gating Shader Complex control */ + PHM_PlatformCaps_UVDAlwaysHigh, /* In UVD mode always stay in High state */ + PHM_PlatformCaps_DisablePowerGating, /* to disable power gating */ + PHM_PlatformCaps_CustomThermalPolicy, /* indicates only performance power state to be used on current system. */ + PHM_PlatformCaps_StayInBootState, /* Stay in Boot State, do not do clock/voltage or PCIe Lane and Gen switching (RV7xx and up). */ + PHM_PlatformCaps_SMCAllowSeparateSWThermalState, /* SMC use separate SW thermal state, instead of the default SMC thermal policy. */ + PHM_PlatformCaps_MultiUVDStateSupport, /* Powerplay state table supports multi UVD states. 
*/ + PHM_PlatformCaps_EnableSCLKDeepSleepForUVD, /* With HW ECOs, we don't need to disable SCLK Deep Sleep for UVD state. */ + PHM_PlatformCaps_EnableMCUHTLinkControl, /* Enable HT link control by MCU */ + PHM_PlatformCaps_ABM, /* ABM support.*/ + PHM_PlatformCaps_KongThermalPolicy, /* A thermal policy specific for Kong */ + PHM_PlatformCaps_SwitchVDDNB, /* if the users want to switch VDDNB */ + PHM_PlatformCaps_ULPS, /* support ULPS mode either through ACPI state or ULPS state */ + PHM_PlatformCaps_NativeULPS, /* hardware capable of ULPS state (other than through the ACPI state) */ + PHM_PlatformCaps_EnableMVDDControl, /* indicates that memory voltage can be controlled */ + PHM_PlatformCaps_ControlVDDCI, /* Control VDDCI separately from VDDC. */ + PHM_PlatformCaps_DisableDCODT, /* indicates if DC ODT apply or not */ + PHM_PlatformCaps_DynamicACTiming, /* if the SMC dynamically re-programs MC SEQ register values */ + PHM_PlatformCaps_EnableThermalIntByGPIO, /* enable throttle control through GPIO */ + PHM_PlatformCaps_BootStateOnAlert, /* Go to boot state on alerts, e.g. on an AC->DC transition. */ + PHM_PlatformCaps_DontWaitForVBlankOnAlert, /* Do NOT wait for VBLANK during an alert (e.g. AC->DC transition). */ + PHM_PlatformCaps_Force3DClockSupport, /* indicates if the platform supports force 3D clock. */ + PHM_PlatformCaps_MicrocodeFanControl, /* Fan is controlled by the SMC microcode. */ + PHM_PlatformCaps_AdjustUVDPriorityForSP, + PHM_PlatformCaps_DisableLightSleep, /* Light sleep for evergreen family. */ + PHM_PlatformCaps_DisableMCLS, /* MC Light sleep */ + PHM_PlatformCaps_RegulatorHot, /* Enable throttling on 'regulator hot' events. */ + PHM_PlatformCaps_BACO, /* Support Bus Alive Chip Off mode */ + PHM_PlatformCaps_DisableDPM, /* Disable DPM, supported from Llano */ + PHM_PlatformCaps_DynamicM3Arbiter, /* support dynamically change m3 arbitor parameters */ + PHM_PlatformCaps_SclkDeepSleep, /* support sclk deep sleep */ + PHM_PlatformCaps_DynamicPatchPowerState, /* this ASIC supports to patch power state dynamically */ + PHM_PlatformCaps_ThermalAutoThrottling, /* enabling auto thermal throttling, */ + PHM_PlatformCaps_SumoThermalPolicy, /* A thermal policy specific for Sumo */ + PHM_PlatformCaps_PCIEPerformanceRequest, /* support to change RC voltage */ + PHM_PlatformCaps_BLControlledByGPU, /* support varibright */ + PHM_PlatformCaps_PowerContainment, /* support DPM2 power containment (AKA TDP clamping) */ + PHM_PlatformCaps_SQRamping, /* support DPM2 SQ power throttle */ + PHM_PlatformCaps_CAC, /* support Capacitance * Activity power estimation */ + PHM_PlatformCaps_NIChipsets, /* Northern Island and beyond chipsets */ + PHM_PlatformCaps_TrinityChipsets, /* Trinity chipset */ + PHM_PlatformCaps_EvergreenChipsets, /* Evergreen family chipset */ + PHM_PlatformCaps_PowerControl, /* Cayman and beyond chipsets */ + PHM_PlatformCaps_DisableLSClockGating, /* to disable Light Sleep control for HDP memories */ + PHM_PlatformCaps_BoostState, /* this ASIC supports boost state */ + PHM_PlatformCaps_UserMaxClockForMultiDisplays, /* indicates if max memory clock is used for all status when multiple displays are connected */ + PHM_PlatformCaps_RegWriteDelay, /* indicates if back to back reg write delay is required */ + PHM_PlatformCaps_NonABMSupportInPPLib, /* ABM is not supported in PPLIB, (moved from PPLIB to DAL) */ + PHM_PlatformCaps_GFXDynamicMGPowerGating, /* Enable Dynamic MG PowerGating on Trinity */ + PHM_PlatformCaps_DisableSMUUVDHandshake, /* Disable SMU UVD Handshake */ + 
PHM_PlatformCaps_DTE, /* Support Digital Temperature Estimation */ + PHM_PlatformCaps_W5100Specifc_SmuSkipMsgDTE, /* This is for the feature requested by David B., and Tonny W.*/ + PHM_PlatformCaps_UVDPowerGating, /* enable UVD power gating, supported from Llano */ + PHM_PlatformCaps_UVDDynamicPowerGating, /* enable UVD Dynamic power gating, supported from UVD5 */ + PHM_PlatformCaps_VCEPowerGating, /* Enable VCE power gating, supported for TN and later ASICs */ + PHM_PlatformCaps_SamuPowerGating, /* Enable SAMU power gating, supported for KV and later ASICs */ + PHM_PlatformCaps_UVDDPM, /* UVD clock DPM */ + PHM_PlatformCaps_VCEDPM, /* VCE clock DPM */ + PHM_PlatformCaps_SamuDPM, /* SAMU clock DPM */ + PHM_PlatformCaps_AcpDPM, /* ACP clock DPM */ + PHM_PlatformCaps_SclkDeepSleepAboveLow, /* Enable SCLK Deep Sleep on all DPM states */ + PHM_PlatformCaps_DynamicUVDState, /* Dynamic UVD State */ + PHM_PlatformCaps_WantSAMClkWithDummyBackEnd, /* Set SAM Clk With Dummy Back End */ + PHM_PlatformCaps_WantUVDClkWithDummyBackEnd, /* Set UVD Clk With Dummy Back End */ + PHM_PlatformCaps_WantVCEClkWithDummyBackEnd, /* Set VCE Clk With Dummy Back End */ + PHM_PlatformCaps_WantACPClkWithDummyBackEnd, /* Set SAM Clk With Dummy Back End */ + PHM_PlatformCaps_OD6inACSupport, /* indicates that the ASIC/back end supports OD6 */ + PHM_PlatformCaps_OD6inDCSupport, /* indicates that the ASIC/back end supports OD6 in DC */ + PHM_PlatformCaps_EnablePlatformPowerManagement, /* indicates that Platform Power Management feature is supported */ + PHM_PlatformCaps_SurpriseRemoval, /* indicates that surprise removal feature is requested */ + PHM_PlatformCaps_NewCACVoltage, /* indicates new CAC voltage table support */ + PHM_PlatformCaps_DiDtSupport, /* for dI/dT feature */ + PHM_PlatformCaps_DBRamping, /* for dI/dT feature */ + PHM_PlatformCaps_TDRamping, /* for dI/dT feature */ + PHM_PlatformCaps_TCPRamping, /* for dI/dT feature */ + PHM_PlatformCaps_DBRRamping, /* for dI/dT feature */ + PHM_PlatformCaps_DiDtEDCEnable, /* for dI/dT feature */ + PHM_PlatformCaps_GCEDC, /* for dI/dT feature */ + PHM_PlatformCaps_PSM, /* for dI/dT feature */ + PHM_PlatformCaps_EnableSMU7ThermalManagement, /* SMC will manage thermal events */ + PHM_PlatformCaps_FPS, /* FPS support */ + PHM_PlatformCaps_ACP, /* ACP support */ + PHM_PlatformCaps_SclkThrottleLowNotification, /* SCLK Throttle Low Notification */ + PHM_PlatformCaps_XDMAEnabled, /* XDMA engine is enabled */ + PHM_PlatformCaps_UseDummyBackEnd, /* use dummy back end */ + PHM_PlatformCaps_EnableDFSBypass, /* Enable DFS bypass */ + PHM_PlatformCaps_VddNBDirectRequest, + PHM_PlatformCaps_PauseMMSessions, + PHM_PlatformCaps_UnTabledHardwareInterface, /* Tableless/direct call hardware interface for CI and newer ASICs */ + PHM_PlatformCaps_SMU7, /* indicates that vpuRecoveryBegin without SMU shutdown */ + PHM_PlatformCaps_RevertGPIO5Polarity, /* indicates revert GPIO5 plarity table support */ + PHM_PlatformCaps_Thermal2GPIO17, /* indicates thermal2GPIO17 table support */ + PHM_PlatformCaps_ThermalOutGPIO, /* indicates ThermalOutGPIO support, pin number is assigned by VBIOS */ + PHM_PlatformCaps_DisableMclkSwitchingForFrameLock, /* Disable memory clock switch during Framelock */ + PHM_PlatformCaps_ForceMclkHigh, /* Disable memory clock switching by forcing memory clock high */ + PHM_PlatformCaps_VRHotGPIOConfigurable, /* indicates VR_HOT GPIO configurable */ + PHM_PlatformCaps_TempInversion, /* enable Temp Inversion feature */ + PHM_PlatformCaps_IOIC3, + 
PHM_PlatformCaps_ConnectedStandby, + PHM_PlatformCaps_EVV, + PHM_PlatformCaps_EnableLongIdleBACOSupport, + PHM_PlatformCaps_CombinePCCWithThermalSignal, + PHM_PlatformCaps_DisableUsingActualTemperatureForPowerCalc, + PHM_PlatformCaps_StablePState, + PHM_PlatformCaps_OD6PlusinACSupport, + PHM_PlatformCaps_OD6PlusinDCSupport, + PHM_PlatformCaps_ODThermalLimitUnlock, + PHM_PlatformCaps_ReducePowerLimit, + PHM_PlatformCaps_ODFuzzyFanControlSupport, + PHM_PlatformCaps_GeminiRegulatorFanControlSupport, + PHM_PlatformCaps_ControlVDDGFX, + PHM_PlatformCaps_BBBSupported, + PHM_PlatformCaps_DisableVoltageIsland, + PHM_PlatformCaps_FanSpeedInTableIsRPM, + PHM_PlatformCaps_GFXClockGatingManagedInCAIL, + PHM_PlatformCaps_IcelandULPSSWWorkAround, + PHM_PlatformCaps_FPSEnhancement, + PHM_PlatformCaps_LoadPostProductionFirmware, + PHM_PlatformCaps_VpuRecoveryInProgress, + PHM_PlatformCaps_Falcon_QuickTransition, + PHM_PlatformCaps_AVFS, + PHM_PlatformCaps_ClockStretcher, + PHM_PlatformCaps_TablelessHardwareInterface, + PHM_PlatformCaps_EnableDriverEVV, + PHM_PlatformCaps_SPLLShutdownSupport, + PHM_PlatformCaps_VirtualBatteryState, + PHM_PlatformCaps_IgnoreForceHighClockRequestsInAPUs, + PHM_PlatformCaps_DisableMclkSwitchForVR, + PHM_PlatformCaps_SMU8, + PHM_PlatformCaps_VRHotPolarityHigh, + PHM_PlatformCaps_IPS_UlpsExclusive, + PHM_PlatformCaps_SMCtoPPLIBAcdcGpioScheme, + PHM_PlatformCaps_GeminiAsymmetricPower, + PHM_PlatformCaps_OCLPowerOptimization, + PHM_PlatformCaps_MaxPCIEBandWidth, + PHM_PlatformCaps_PerfPerWattOptimizationSupport, + PHM_PlatformCaps_UVDClientMCTuning, + PHM_PlatformCaps_ODNinACSupport, + PHM_PlatformCaps_ODNinDCSupport, + PHM_PlatformCaps_OD8inACSupport, + PHM_PlatformCaps_OD8inDCSupport, + PHM_PlatformCaps_UMDPState, + PHM_PlatformCaps_AutoWattmanSupport, + PHM_PlatformCaps_AutoWattmanEnable_CCCState, + PHM_PlatformCaps_FreeSyncActive, + PHM_PlatformCaps_EnableShadowPstate, + PHM_PlatformCaps_customThermalManagement, + PHM_PlatformCaps_staticFanControl, + PHM_PlatformCaps_Virtual_System, + PHM_PlatformCaps_LowestUclkReservedForUlv, + PHM_PlatformCaps_EnableBoostState, + PHM_PlatformCaps_AVFSSupport, + PHM_PlatformCaps_ThermalPolicyDelay, + PHM_PlatformCaps_CustomFanControlSupport, + PHM_PlatformCaps_BAMACO, + PHM_PlatformCaps_Max +}; + +#define PHM_MAX_NUM_CAPS_BITS_PER_FIELD (sizeof(uint32_t)*8) + +/* Number of uint32_t entries used by CAPS table */ +#define PHM_MAX_NUM_CAPS_ULONG_ENTRIES \ + ((PHM_PlatformCaps_Max + ((PHM_MAX_NUM_CAPS_BITS_PER_FIELD) - 1)) / (PHM_MAX_NUM_CAPS_BITS_PER_FIELD)) + +struct pp_hw_descriptor { + uint32_t hw_caps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES]; +}; + +enum PHM_PerformanceLevelDesignation { + PHM_PerformanceLevelDesignation_Activity, + PHM_PerformanceLevelDesignation_PowerContainment +}; + +typedef enum PHM_PerformanceLevelDesignation PHM_PerformanceLevelDesignation; + +struct PHM_PerformanceLevel { + uint32_t coreClock; + uint32_t memory_clock; + uint32_t vddc; + uint32_t vddci; + uint32_t nonLocalMemoryFreq; + uint32_t nonLocalMemoryWidth; +}; + +typedef struct PHM_PerformanceLevel PHM_PerformanceLevel; + +/* Function for setting a platform cap */ +static inline void phm_cap_set(uint32_t *caps, + enum phm_platform_caps c) +{ + caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] |= (1UL << + (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))); +} + +static inline void phm_cap_unset(uint32_t *caps, + enum phm_platform_caps c) +{ + caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] &= ~(1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))); +} + +static inline bool 
phm_cap_enabled(const uint32_t *caps, enum phm_platform_caps c) +{ + return (0 != (caps[c / PHM_MAX_NUM_CAPS_BITS_PER_FIELD] & + (1UL << (c & (PHM_MAX_NUM_CAPS_BITS_PER_FIELD - 1))))); +} + +#define PP_CAP(c) phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, (c)) + +#define PP_PCIEGenInvalid 0xffff +enum PP_PCIEGen { + PP_PCIEGen1 = 0, /* PCIE 1.0 - Transfer rate of 2.5 GT/s */ + PP_PCIEGen2, /*PCIE 2.0 - Transfer rate of 5.0 GT/s */ + PP_PCIEGen3 /*PCIE 3.0 - Transfer rate of 8.0 GT/s */ +}; + +typedef enum PP_PCIEGen PP_PCIEGen; + +#define PP_Min_PCIEGen PP_PCIEGen1 +#define PP_Max_PCIEGen PP_PCIEGen3 +#define PP_Min_PCIELane 1 +#define PP_Max_PCIELane 16 + +enum phm_clock_Type { + PHM_DispClock = 1, + PHM_SClock, + PHM_MemClock +}; + +#define MAX_NUM_CLOCKS 16 + +struct PP_Clocks { + uint32_t engineClock; + uint32_t memoryClock; + uint32_t BusBandwidth; + uint32_t engineClockInSR; + uint32_t dcefClock; + uint32_t dcefClockInSR; +}; + +struct pp_clock_info { + uint32_t min_mem_clk; + uint32_t max_mem_clk; + uint32_t min_eng_clk; + uint32_t max_eng_clk; + uint32_t min_bus_bandwidth; + uint32_t max_bus_bandwidth; +}; + +struct phm_platform_descriptor { + uint32_t platformCaps[PHM_MAX_NUM_CAPS_ULONG_ENTRIES]; + uint32_t vbiosInterruptId; + struct PP_Clocks overdriveLimit; + struct PP_Clocks clockStep; + uint32_t hardwareActivityPerformanceLevels; + uint32_t minimumClocksReductionPercentage; + uint32_t minOverdriveVDDC; + uint32_t maxOverdriveVDDC; + uint32_t overdriveVDDCStep; + uint32_t hardwarePerformanceLevels; + uint16_t powerBudget; + uint32_t TDPLimit; + uint32_t nearTDPLimit; + uint32_t nearTDPLimitAdjusted; + uint32_t SQRampingThreshold; + uint32_t CACLeakage; + uint16_t TDPODLimit; + uint32_t TDPAdjustment; + bool TDPAdjustmentPolarity; + uint16_t LoadLineSlope; + uint32_t VidMinLimit; + uint32_t VidMaxLimit; + uint32_t VidStep; + uint32_t VidAdjustment; + bool VidAdjustmentPolarity; +}; + +struct phm_clocks { + uint32_t num_of_entries; + uint32_t clock[MAX_NUM_CLOCKS]; +}; + +#define DPMTABLE_OD_UPDATE_SCLK 0x00000001 +#define DPMTABLE_OD_UPDATE_MCLK 0x00000002 +#define DPMTABLE_UPDATE_SCLK 0x00000004 +#define DPMTABLE_UPDATE_MCLK 0x00000008 +#define DPMTABLE_OD_UPDATE_VDDC 0x00000010 +#define DPMTABLE_UPDATE_SOCCLK 0x00000020 + +struct phm_odn_performance_level { + uint32_t clock; + uint32_t vddc; + bool enabled; +}; + +struct phm_odn_clock_levels { + uint32_t size; + uint32_t options; + uint32_t flags; + uint32_t num_of_pl; + /* variable-sized array, specify by num_of_pl. 
*/ + struct phm_odn_performance_level entries[8]; +}; + +extern int phm_disable_clock_power_gatings(struct pp_hwmgr *hwmgr); +extern int phm_powerdown_uvd(struct pp_hwmgr *hwmgr); +extern int phm_setup_asic(struct pp_hwmgr *hwmgr); +extern int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr); +extern int phm_disable_dynamic_state_management(struct pp_hwmgr *hwmgr); +extern bool phm_is_hw_access_blocked(struct pp_hwmgr *hwmgr); +extern int phm_block_hw_access(struct pp_hwmgr *hwmgr, bool block); +extern int phm_set_power_state(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *pcurrent_state, + const struct pp_hw_power_state *pnew_power_state); + +extern int phm_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, + struct pp_power_state *adjusted_ps, + const struct pp_power_state *current_ps); + +extern int phm_apply_clock_adjust_rules(struct pp_hwmgr *hwmgr); + +extern int phm_force_dpm_levels(struct pp_hwmgr *hwmgr, enum amd_dpm_forced_level level); +extern int phm_pre_display_configuration_changed(struct pp_hwmgr *hwmgr); +extern int phm_display_configuration_changed(struct pp_hwmgr *hwmgr); +extern int phm_notify_smc_display_config_after_ps_adjustment(struct pp_hwmgr *hwmgr); +extern int phm_register_irq_handlers(struct pp_hwmgr *hwmgr); +extern int phm_start_thermal_controller(struct pp_hwmgr *hwmgr); +extern int phm_stop_thermal_controller(struct pp_hwmgr *hwmgr); +extern bool phm_check_smc_update_required_for_display_configuration(struct pp_hwmgr *hwmgr); + +extern int phm_check_states_equal(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *pstate1, + const struct pp_hw_power_state *pstate2, + bool *equal); + +extern int phm_store_dal_configuration_data(struct pp_hwmgr *hwmgr, + const struct amd_pp_display_configuration *display_config); + +extern int phm_get_dal_power_level(struct pp_hwmgr *hwmgr, + struct amd_pp_simple_clock_info *info); + +extern int phm_set_cpu_power_state(struct pp_hwmgr *hwmgr); + +extern int phm_power_down_asic(struct pp_hwmgr *hwmgr); + +extern int phm_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + PHM_PerformanceLevelDesignation designation, uint32_t index, + PHM_PerformanceLevel *level); + +extern int phm_get_clock_info(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, + struct pp_clock_info *pclock_info, + PHM_PerformanceLevelDesignation designation); + +extern int phm_get_current_shallow_sleep_clocks(struct pp_hwmgr *hwmgr, const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); + +extern int phm_get_clock_by_type(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); + +extern int phm_get_clock_by_type_with_latency(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks); +extern int phm_get_clock_by_type_with_voltage(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks); +extern int phm_set_watermarks_for_clocks_ranges(struct pp_hwmgr *hwmgr, + void *clock_ranges); +extern int phm_display_clock_voltage_request(struct pp_hwmgr *hwmgr, + struct pp_display_clock_request *clock); + +extern int phm_get_max_high_clocks(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); +extern int phm_disable_smc_firmware_ctf(struct pp_hwmgr *hwmgr); + +extern int phm_set_active_display_count(struct pp_hwmgr *hwmgr, uint32_t count); + +#endif /* _HARDWARE_MANAGER_H_ */ + diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h 
b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h new file mode 100644 index 000000000000..03226baea65e --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -0,0 +1,833 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _HWMGR_H_ +#define _HWMGR_H_ + +#include +#include "amd_powerplay.h" +#include "hardwaremanager.h" +#include "hwmgr_ppt.h" +#include "ppatomctrl.h" +#include "power_state.h" +#include "smu_helper.h" + +struct pp_hwmgr; +struct phm_fan_speed_info; +struct pp_atomctrl_voltage_table; + +#define VOLTAGE_SCALE 4 +#define VOLTAGE_VID_OFFSET_SCALE1 625 +#define VOLTAGE_VID_OFFSET_SCALE2 100 + +enum DISPLAY_GAP { + DISPLAY_GAP_VBLANK_OR_WM = 0, /* Wait for vblank or MCHG watermark. */ + DISPLAY_GAP_VBLANK = 1, /* Wait for vblank. */ + DISPLAY_GAP_WATERMARK = 2, /* Wait for MCHG watermark. (Note that HW may deassert WM in VBI depending on DC_STUTTER_CNTL.) */ + DISPLAY_GAP_IGNORE = 3 /* Do not wait. 
*/ +}; +typedef enum DISPLAY_GAP DISPLAY_GAP; + +enum BACO_STATE { + BACO_STATE_OUT = 0, + BACO_STATE_IN, +}; + +struct vi_dpm_level { + bool enabled; + uint32_t value; + uint32_t param1; +}; + +struct vi_dpm_table { + uint32_t count; + struct vi_dpm_level dpm_level[]; +}; + +#define PCIE_PERF_REQ_REMOVE_REGISTRY 0 +#define PCIE_PERF_REQ_FORCE_LOWPOWER 1 +#define PCIE_PERF_REQ_GEN1 2 +#define PCIE_PERF_REQ_GEN2 3 +#define PCIE_PERF_REQ_GEN3 4 + +enum PHM_BackEnd_Magic { + PHM_Dummy_Magic = 0xAA5555AA, + PHM_RV770_Magic = 0xDCBAABCD, + PHM_Kong_Magic = 0x239478DF, + PHM_NIslands_Magic = 0x736C494E, + PHM_Sumo_Magic = 0x8339FA11, + PHM_SIslands_Magic = 0x369431AC, + PHM_Trinity_Magic = 0x96751873, + PHM_CIslands_Magic = 0x38AC78B0, + PHM_Kv_Magic = 0xDCBBABC0, + PHM_VIslands_Magic = 0x20130307, + PHM_Cz_Magic = 0x67DCBA25, + PHM_Rv_Magic = 0x20161121 +}; + +struct phm_set_power_state_input { + const struct pp_hw_power_state *pcurrent_state; + const struct pp_hw_power_state *pnew_state; +}; + +struct phm_clock_array { + uint32_t count; + uint32_t values[]; +}; + +struct phm_clock_voltage_dependency_record { + uint32_t clk; + uint32_t v; +}; + +struct phm_vceclock_voltage_dependency_record { + uint32_t ecclk; + uint32_t evclk; + uint32_t v; +}; + +struct phm_uvdclock_voltage_dependency_record { + uint32_t vclk; + uint32_t dclk; + uint32_t v; +}; + +struct phm_samuclock_voltage_dependency_record { + uint32_t samclk; + uint32_t v; +}; + +struct phm_acpclock_voltage_dependency_record { + uint32_t acpclk; + uint32_t v; +}; + +struct phm_clock_voltage_dependency_table { + uint32_t count; /* Number of entries. */ + struct phm_clock_voltage_dependency_record entries[]; /* Dynamically allocate count entries. */ +}; + +struct phm_phase_shedding_limits_record { + uint32_t Voltage; + uint32_t Sclk; + uint32_t Mclk; +}; + +struct phm_uvd_clock_voltage_dependency_record { + uint32_t vclk; + uint32_t dclk; + uint32_t v; +}; + +struct phm_uvd_clock_voltage_dependency_table { + uint8_t count; + struct phm_uvd_clock_voltage_dependency_record entries[]; +}; + +struct phm_acp_clock_voltage_dependency_record { + uint32_t acpclk; + uint32_t v; +}; + +struct phm_acp_clock_voltage_dependency_table { + uint32_t count; + struct phm_acp_clock_voltage_dependency_record entries[]; +}; + +struct phm_vce_clock_voltage_dependency_record { + uint32_t ecclk; + uint32_t evclk; + uint32_t v; +}; + +struct phm_phase_shedding_limits_table { + uint32_t count; + struct phm_phase_shedding_limits_record entries[]; +}; + +struct phm_vceclock_voltage_dependency_table { + uint8_t count; /* Number of entries. */ + struct phm_vceclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ +}; + +struct phm_uvdclock_voltage_dependency_table { + uint8_t count; /* Number of entries. */ + struct phm_uvdclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ +}; + +struct phm_samuclock_voltage_dependency_table { + uint8_t count; /* Number of entries. */ + struct phm_samuclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. */ +}; + +struct phm_acpclock_voltage_dependency_table { + uint32_t count; /* Number of entries. */ + struct phm_acpclock_voltage_dependency_record entries[1]; /* Dynamically allocate count entries. 
*/ +}; + +struct phm_vce_clock_voltage_dependency_table { + uint8_t count; + struct phm_vce_clock_voltage_dependency_record entries[]; +}; + + +enum SMU_ASIC_RESET_MODE +{ + SMU_ASIC_RESET_MODE_0, + SMU_ASIC_RESET_MODE_1, + SMU_ASIC_RESET_MODE_2, +}; + +struct pp_smumgr_func { + char *name; + int (*smu_init)(struct pp_hwmgr *hwmgr); + int (*smu_fini)(struct pp_hwmgr *hwmgr); + int (*start_smu)(struct pp_hwmgr *hwmgr); + int (*check_fw_load_finish)(struct pp_hwmgr *hwmgr, + uint32_t firmware); + int (*request_smu_load_fw)(struct pp_hwmgr *hwmgr); + int (*request_smu_load_specific_fw)(struct pp_hwmgr *hwmgr, + uint32_t firmware); + uint32_t (*get_argument)(struct pp_hwmgr *hwmgr); + int (*send_msg_to_smc)(struct pp_hwmgr *hwmgr, uint16_t msg); + int (*send_msg_to_smc_with_parameter)(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter); + int (*download_pptable_settings)(struct pp_hwmgr *hwmgr, + void **table); + int (*upload_pptable_settings)(struct pp_hwmgr *hwmgr); + int (*update_smc_table)(struct pp_hwmgr *hwmgr, uint32_t type); + int (*process_firmware_header)(struct pp_hwmgr *hwmgr); + int (*update_sclk_threshold)(struct pp_hwmgr *hwmgr); + int (*thermal_setup_fan_table)(struct pp_hwmgr *hwmgr); + int (*thermal_avfs_enable)(struct pp_hwmgr *hwmgr); + int (*init_smc_table)(struct pp_hwmgr *hwmgr); + int (*populate_all_graphic_levels)(struct pp_hwmgr *hwmgr); + int (*populate_all_memory_levels)(struct pp_hwmgr *hwmgr); + int (*initialize_mc_reg_table)(struct pp_hwmgr *hwmgr); + uint32_t (*get_offsetof)(uint32_t type, uint32_t member); + uint32_t (*get_mac_definition)(uint32_t value); + bool (*is_dpm_running)(struct pp_hwmgr *hwmgr); + bool (*is_hw_avfs_present)(struct pp_hwmgr *hwmgr); + int (*update_dpm_settings)(struct pp_hwmgr *hwmgr, void *profile_setting); + int (*smc_table_manager)(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); /*rw: true for read, false for write */ + int (*stop_smc)(struct pp_hwmgr *hwmgr); +}; + +struct pp_hwmgr_func { + int (*backend_init)(struct pp_hwmgr *hw_mgr); + int (*backend_fini)(struct pp_hwmgr *hw_mgr); + int (*asic_setup)(struct pp_hwmgr *hw_mgr); + int (*get_power_state_size)(struct pp_hwmgr *hw_mgr); + + int (*apply_state_adjust_rules)(struct pp_hwmgr *hwmgr, + struct pp_power_state *prequest_ps, + const struct pp_power_state *pcurrent_ps); + + int (*apply_clocks_adjust_rules)(struct pp_hwmgr *hwmgr); + + int (*force_dpm_level)(struct pp_hwmgr *hw_mgr, + enum amd_dpm_forced_level level); + + int (*dynamic_state_management_enable)( + struct pp_hwmgr *hw_mgr); + int (*dynamic_state_management_disable)( + struct pp_hwmgr *hw_mgr); + + int (*patch_boot_state)(struct pp_hwmgr *hwmgr, + struct pp_hw_power_state *hw_ps); + + int (*get_pp_table_entry)(struct pp_hwmgr *hwmgr, + unsigned long, struct pp_power_state *); + int (*get_num_of_pp_table_entries)(struct pp_hwmgr *hwmgr); + int (*powerdown_uvd)(struct pp_hwmgr *hwmgr); + void (*powergate_vce)(struct pp_hwmgr *hwmgr, bool bgate); + void (*powergate_uvd)(struct pp_hwmgr *hwmgr, bool bgate); + void (*powergate_acp)(struct pp_hwmgr *hwmgr, bool bgate); + uint32_t (*get_mclk)(struct pp_hwmgr *hwmgr, bool low); + uint32_t (*get_sclk)(struct pp_hwmgr *hwmgr, bool low); + int (*power_state_set)(struct pp_hwmgr *hwmgr, + const void *state); + int (*notify_smc_display_config_after_ps_adjustment)(struct pp_hwmgr *hwmgr); + int (*pre_display_config_changed)(struct pp_hwmgr *hwmgr); + int (*display_config_changed)(struct pp_hwmgr *hwmgr); + int (*disable_clock_power_gating)(struct 
pp_hwmgr *hwmgr); + int (*update_clock_gatings)(struct pp_hwmgr *hwmgr, + const uint32_t *msg_id); + int (*set_max_fan_rpm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm); + int (*set_max_fan_pwm_output)(struct pp_hwmgr *hwmgr, uint16_t us_max_fan_pwm); + int (*stop_thermal_controller)(struct pp_hwmgr *hwmgr); + int (*get_fan_speed_info)(struct pp_hwmgr *hwmgr, struct phm_fan_speed_info *fan_speed_info); + void (*set_fan_control_mode)(struct pp_hwmgr *hwmgr, uint32_t mode); + uint32_t (*get_fan_control_mode)(struct pp_hwmgr *hwmgr); + int (*set_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t speed); + int (*get_fan_speed_pwm)(struct pp_hwmgr *hwmgr, uint32_t *speed); + int (*set_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t speed); + int (*get_fan_speed_rpm)(struct pp_hwmgr *hwmgr, uint32_t *speed); + int (*reset_fan_speed_to_default)(struct pp_hwmgr *hwmgr); + int (*uninitialize_thermal_controller)(struct pp_hwmgr *hwmgr); + int (*register_irq_handlers)(struct pp_hwmgr *hwmgr); + bool (*check_smc_update_required_for_display_configuration)(struct pp_hwmgr *hwmgr); + int (*check_states_equal)(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *pstate1, + const struct pp_hw_power_state *pstate2, + bool *equal); + int (*set_cpu_power_state)(struct pp_hwmgr *hwmgr); + int (*store_cc6_data)(struct pp_hwmgr *hwmgr, uint32_t separation_time, + bool cc6_disable, bool pstate_disable, + bool pstate_switch_disable); + int (*get_dal_power_level)(struct pp_hwmgr *hwmgr, + struct amd_pp_simple_clock_info *info); + int (*get_performance_level)(struct pp_hwmgr *, const struct pp_hw_power_state *, + PHM_PerformanceLevelDesignation, uint32_t, PHM_PerformanceLevel *); + int (*get_current_shallow_sleep_clocks)(struct pp_hwmgr *hwmgr, + const struct pp_hw_power_state *state, struct pp_clock_info *clock_info); + int (*get_clock_by_type)(struct pp_hwmgr *hwmgr, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks); + int (*get_clock_by_type_with_latency)(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_latency *clocks); + int (*get_clock_by_type_with_voltage)(struct pp_hwmgr *hwmgr, + enum amd_pp_clock_type type, + struct pp_clock_levels_with_voltage *clocks); + int (*set_watermarks_for_clocks_ranges)(struct pp_hwmgr *hwmgr, void *clock_ranges); + int (*display_clock_voltage_request)(struct pp_hwmgr *hwmgr, + struct pp_display_clock_request *clock); + int (*get_max_high_clocks)(struct pp_hwmgr *hwmgr, struct amd_pp_simple_clock_info *clocks); + int (*power_off_asic)(struct pp_hwmgr *hwmgr); + int (*force_clock_level)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, uint32_t mask); + int (*print_clock_levels)(struct pp_hwmgr *hwmgr, enum pp_clock_type type, char *buf); + int (*powergate_gfx)(struct pp_hwmgr *hwmgr, bool enable); + int (*get_sclk_od)(struct pp_hwmgr *hwmgr); + int (*set_sclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); + int (*get_mclk_od)(struct pp_hwmgr *hwmgr); + int (*set_mclk_od)(struct pp_hwmgr *hwmgr, uint32_t value); + int (*read_sensor)(struct pp_hwmgr *hwmgr, int idx, void *value, int *size); + int (*avfs_control)(struct pp_hwmgr *hwmgr, bool enable); + int (*disable_smc_firmware_ctf)(struct pp_hwmgr *hwmgr); + int (*set_active_display_count)(struct pp_hwmgr *hwmgr, uint32_t count); + int (*set_min_deep_sleep_dcefclk)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*start_thermal_controller)(struct pp_hwmgr *hwmgr, struct PP_TemperatureRange *range); + int (*notify_cac_buffer_info)(struct pp_hwmgr *hwmgr, + uint32_t 
virtual_addr_low, + uint32_t virtual_addr_hi, + uint32_t mc_addr_low, + uint32_t mc_addr_hi, + uint32_t size); + int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr, + struct PP_TemperatureRange *range); + int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf); + int (*set_power_profile_mode)(struct pp_hwmgr *hwmgr, long *input, uint32_t size); + int (*odn_edit_dpm_table)(struct pp_hwmgr *hwmgr, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); + int (*set_fine_grain_clk_vol)(struct pp_hwmgr *hwmgr, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); + int (*set_power_limit)(struct pp_hwmgr *hwmgr, uint32_t n); + int (*powergate_mmhub)(struct pp_hwmgr *hwmgr); + int (*smus_notify_pwe)(struct pp_hwmgr *hwmgr); + int (*powergate_sdma)(struct pp_hwmgr *hwmgr, bool bgate); + int (*enable_mgpu_fan_boost)(struct pp_hwmgr *hwmgr); + int (*set_hard_min_dcefclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*set_hard_min_fclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*set_hard_min_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*set_soft_max_gfxclk_by_freq)(struct pp_hwmgr *hwmgr, uint32_t clock); + int (*get_asic_baco_capability)(struct pp_hwmgr *hwmgr, bool *cap); + int (*get_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE *state); + int (*set_asic_baco_state)(struct pp_hwmgr *hwmgr, enum BACO_STATE state); + int (*get_ppfeature_status)(struct pp_hwmgr *hwmgr, char *buf); + int (*set_ppfeature_status)(struct pp_hwmgr *hwmgr, uint64_t ppfeature_masks); + int (*set_mp1_state)(struct pp_hwmgr *hwmgr, enum pp_mp1_state mp1_state); + int (*asic_reset)(struct pp_hwmgr *hwmgr, enum SMU_ASIC_RESET_MODE mode); + int (*smu_i2c_bus_access)(struct pp_hwmgr *hwmgr, bool aquire); + int (*set_df_cstate)(struct pp_hwmgr *hwmgr, enum pp_df_cstate state); + int (*set_xgmi_pstate)(struct pp_hwmgr *hwmgr, uint32_t pstate); + int (*disable_power_features_for_compute_performance)(struct pp_hwmgr *hwmgr, + bool disable); + ssize_t (*get_gpu_metrics)(struct pp_hwmgr *hwmgr, void **table); + int (*gfx_state_change)(struct pp_hwmgr *hwmgr, uint32_t state); +}; + +struct pp_table_func { + int (*pptable_init)(struct pp_hwmgr *hw_mgr); + int (*pptable_fini)(struct pp_hwmgr *hw_mgr); + int (*pptable_get_number_of_vce_state_table_entries)(struct pp_hwmgr *hw_mgr); + int (*pptable_get_vce_state_table_entry)( + struct pp_hwmgr *hwmgr, + unsigned long i, + struct amd_vce_state *vce_state, + void **clock_info, + unsigned long *flag); +}; + +union phm_cac_leakage_record { + struct { + uint16_t Vddc; /* in CI, we use it for StdVoltageHiSidd */ + uint32_t Leakage; /* in CI, we use it for StdVoltageLoSidd */ + }; + struct { + uint16_t Vddc1; + uint16_t Vddc2; + uint16_t Vddc3; + }; +}; + +struct phm_cac_leakage_table { + uint32_t count; + union phm_cac_leakage_record entries[]; +}; + +struct phm_samu_clock_voltage_dependency_record { + uint32_t samclk; + uint32_t v; +}; + + +struct phm_samu_clock_voltage_dependency_table { + uint8_t count; + struct phm_samu_clock_voltage_dependency_record entries[]; +}; + +struct phm_cac_tdp_table { + uint16_t usTDP; + uint16_t usConfigurableTDP; + uint16_t usTDC; + uint16_t usBatteryPowerLimit; + uint16_t usSmallPowerLimit; + uint16_t usLowCACLeakage; + uint16_t usHighCACLeakage; + uint16_t usMaximumPowerDeliveryLimit; + uint16_t usEDCLimit; + uint16_t usOperatingTempMinLimit; + uint16_t usOperatingTempMaxLimit; + uint16_t usOperatingTempStep; + uint16_t usOperatingTempHyst; + uint16_t 
usDefaultTargetOperatingTemp; + uint16_t usTargetOperatingTemp; + uint16_t usPowerTuneDataSetID; + uint16_t usSoftwareShutdownTemp; + uint16_t usClockStretchAmount; + uint16_t usTemperatureLimitHotspot; + uint16_t usTemperatureLimitLiquid1; + uint16_t usTemperatureLimitLiquid2; + uint16_t usTemperatureLimitVrVddc; + uint16_t usTemperatureLimitVrMvdd; + uint16_t usTemperatureLimitPlx; + uint8_t ucLiquid1_I2C_address; + uint8_t ucLiquid2_I2C_address; + uint8_t ucLiquid_I2C_Line; + uint8_t ucVr_I2C_address; + uint8_t ucVr_I2C_Line; + uint8_t ucPlx_I2C_address; + uint8_t ucPlx_I2C_Line; + uint32_t usBoostPowerLimit; + uint8_t ucCKS_LDO_REFSEL; + uint8_t ucHotSpotOnly; +}; + +struct phm_tdp_table { + uint16_t usTDP; + uint16_t usConfigurableTDP; + uint16_t usTDC; + uint16_t usBatteryPowerLimit; + uint16_t usSmallPowerLimit; + uint16_t usLowCACLeakage; + uint16_t usHighCACLeakage; + uint16_t usMaximumPowerDeliveryLimit; + uint16_t usEDCLimit; + uint16_t usOperatingTempMinLimit; + uint16_t usOperatingTempMaxLimit; + uint16_t usOperatingTempStep; + uint16_t usOperatingTempHyst; + uint16_t usDefaultTargetOperatingTemp; + uint16_t usTargetOperatingTemp; + uint16_t usPowerTuneDataSetID; + uint16_t usSoftwareShutdownTemp; + uint16_t usClockStretchAmount; + uint16_t usTemperatureLimitTedge; + uint16_t usTemperatureLimitHotspot; + uint16_t usTemperatureLimitLiquid1; + uint16_t usTemperatureLimitLiquid2; + uint16_t usTemperatureLimitHBM; + uint16_t usTemperatureLimitVrVddc; + uint16_t usTemperatureLimitVrMvdd; + uint16_t usTemperatureLimitPlx; + uint8_t ucLiquid1_I2C_address; + uint8_t ucLiquid2_I2C_address; + uint8_t ucLiquid_I2C_Line; + uint8_t ucVr_I2C_address; + uint8_t ucVr_I2C_Line; + uint8_t ucPlx_I2C_address; + uint8_t ucPlx_I2C_Line; + uint8_t ucLiquid_I2C_LineSDA; + uint8_t ucVr_I2C_LineSDA; + uint8_t ucPlx_I2C_LineSDA; + uint32_t usBoostPowerLimit; + uint16_t usBoostStartTemperature; + uint16_t usBoostStopTemperature; + uint32_t ulBoostClock; +}; + +struct phm_ppm_table { + uint8_t ppm_design; + uint16_t cpu_core_number; + uint32_t platform_tdp; + uint32_t small_ac_platform_tdp; + uint32_t platform_tdc; + uint32_t small_ac_platform_tdc; + uint32_t apu_tdp; + uint32_t dgpu_tdp; + uint32_t dgpu_ulv_power; + uint32_t tj_max; +}; + +struct phm_vq_budgeting_record { + uint32_t ulCUs; + uint32_t ulSustainableSOCPowerLimitLow; + uint32_t ulSustainableSOCPowerLimitHigh; + uint32_t ulMinSclkLow; + uint32_t ulMinSclkHigh; + uint8_t ucDispConfig; + uint32_t ulDClk; + uint32_t ulEClk; + uint32_t ulSustainableSclk; + uint32_t ulSustainableCUs; +}; + +struct phm_vq_budgeting_table { + uint8_t numEntries; + struct phm_vq_budgeting_record entries[1]; +}; + +struct phm_clock_and_voltage_limits { + uint32_t sclk; + uint32_t mclk; + uint32_t gfxclk; + uint16_t vddc; + uint16_t vddci; + uint16_t vddgfx; + uint16_t vddmem; +}; + +/* Structure to hold PPTable information */ + +struct phm_ppt_v1_information { + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_mclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_dcefclk; + struct phm_clock_array *valid_sclk_values; + struct phm_clock_array *valid_mclk_values; + struct phm_clock_array *valid_socclk_values; + struct phm_clock_array *valid_dcefclk_values; + struct phm_clock_and_voltage_limits max_clock_voltage_on_dc; + struct phm_clock_and_voltage_limits max_clock_voltage_on_ac; + struct 
phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl; + struct phm_ppm_table *ppm_parameter_table; + struct phm_cac_tdp_table *cac_dtp_table; + struct phm_tdp_table *tdp_table; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_dep_table; + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddmem_lookup_table; + struct phm_ppt_v1_pcie_table *pcie_table; + struct phm_ppt_v1_gpio_table *gpio_table; + uint16_t us_ulv_voltage_offset; + uint16_t us_ulv_smnclk_did; + uint16_t us_ulv_mp1clk_did; + uint16_t us_ulv_gfxclk_bypass; + uint16_t us_gfxclk_slew_rate; + uint16_t us_min_gfxclk_freq_limit; +}; + +struct phm_ppt_v2_information { + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_sclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_mclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_socclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_dcefclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_pixclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_dispclk; + struct phm_ppt_v1_clock_voltage_dependency_table *vdd_dep_on_phyclk; + struct phm_ppt_v1_mm_clock_voltage_dependency_table *mm_dep_table; + + struct phm_clock_voltage_dependency_table *vddc_dep_on_dalpwrl; + + struct phm_clock_array *valid_sclk_values; + struct phm_clock_array *valid_mclk_values; + struct phm_clock_array *valid_socclk_values; + struct phm_clock_array *valid_dcefclk_values; + + struct phm_clock_and_voltage_limits max_clock_voltage_on_dc; + struct phm_clock_and_voltage_limits max_clock_voltage_on_ac; + + struct phm_ppm_table *ppm_parameter_table; + struct phm_cac_tdp_table *cac_dtp_table; + struct phm_tdp_table *tdp_table; + + struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddgfx_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddmem_lookup_table; + struct phm_ppt_v1_voltage_lookup_table *vddci_lookup_table; + + struct phm_ppt_v1_pcie_table *pcie_table; + + uint16_t us_ulv_voltage_offset; + uint16_t us_ulv_smnclk_did; + uint16_t us_ulv_mp1clk_did; + uint16_t us_ulv_gfxclk_bypass; + uint16_t us_gfxclk_slew_rate; + uint16_t us_min_gfxclk_freq_limit; + + uint8_t uc_gfx_dpm_voltage_mode; + uint8_t uc_soc_dpm_voltage_mode; + uint8_t uc_uclk_dpm_voltage_mode; + uint8_t uc_uvd_dpm_voltage_mode; + uint8_t uc_vce_dpm_voltage_mode; + uint8_t uc_mp0_dpm_voltage_mode; + uint8_t uc_dcef_dpm_voltage_mode; +}; + +struct phm_ppt_v3_information +{ + uint8_t uc_thermal_controller_type; + + uint16_t us_small_power_limit1; + uint16_t us_small_power_limit2; + uint16_t us_boost_power_limit; + + uint16_t us_od_turbo_power_limit; + uint16_t us_od_powersave_power_limit; + uint16_t us_software_shutdown_temp; + + uint32_t *power_saving_clock_max; + uint32_t *power_saving_clock_min; + + uint8_t *od_feature_capabilities; + uint32_t *od_settings_max; + uint32_t *od_settings_min; + + void *smc_pptable; +}; + +struct phm_dynamic_state_info { + struct phm_clock_voltage_dependency_table *vddc_dependency_on_sclk; + struct phm_clock_voltage_dependency_table *vddci_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *vddc_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *mvdd_dependency_on_mclk; + struct phm_clock_voltage_dependency_table *vddc_dep_on_dal_pwrl; + struct phm_clock_array *valid_sclk_values; + struct phm_clock_array *valid_mclk_values; + struct 
phm_clock_and_voltage_limits max_clock_voltage_on_dc; + struct phm_clock_and_voltage_limits max_clock_voltage_on_ac; + uint32_t mclk_sclk_ratio; + uint32_t sclk_mclk_delta; + uint32_t vddc_vddci_delta; + uint32_t min_vddc_for_pcie_gen2; + struct phm_cac_leakage_table *cac_leakage_table; + struct phm_phase_shedding_limits_table *vddc_phase_shed_limits_table; + + struct phm_vce_clock_voltage_dependency_table + *vce_clock_voltage_dependency_table; + struct phm_uvd_clock_voltage_dependency_table + *uvd_clock_voltage_dependency_table; + struct phm_acp_clock_voltage_dependency_table + *acp_clock_voltage_dependency_table; + struct phm_samu_clock_voltage_dependency_table + *samu_clock_voltage_dependency_table; + + struct phm_ppm_table *ppm_parameter_table; + struct phm_cac_tdp_table *cac_dtp_table; + struct phm_clock_voltage_dependency_table *vdd_gfx_dependency_on_sclk; +}; + +struct pp_fan_info { + bool bNoFan; + uint8_t ucTachometerPulsesPerRevolution; + uint32_t ulMinRPM; + uint32_t ulMaxRPM; +}; + +struct pp_advance_fan_control_parameters { + uint16_t usTMin; /* The temperature, in 0.01 centigrades, below which we just run at a minimal PWM. */ + uint16_t usTMed; /* The middle temperature where we change slopes. */ + uint16_t usTHigh; /* The high temperature for setting the second slope. */ + uint16_t usPWMMin; /* The minimum PWM value in percent (0.01% increments). */ + uint16_t usPWMMed; /* The PWM value (in percent) at TMed. */ + uint16_t usPWMHigh; /* The PWM value at THigh. */ + uint8_t ucTHyst; /* Temperature hysteresis. Integer. */ + uint32_t ulCycleDelay; /* The time between two invocations of the fan control routine in microseconds. */ + uint16_t usTMax; /* The max temperature */ + uint8_t ucFanControlMode; + uint16_t usFanPWMMinLimit; + uint16_t usFanPWMMaxLimit; + uint16_t usFanPWMStep; + uint16_t usDefaultMaxFanPWM; + uint16_t usFanOutputSensitivity; + uint16_t usDefaultFanOutputSensitivity; + uint16_t usMaxFanPWM; /* The max Fan PWM value for Fuzzy Fan Control feature */ + uint16_t usFanRPMMinLimit; /* Minimum limit range in percentage, need to calculate based on minRPM/MaxRpm */ + uint16_t usFanRPMMaxLimit; /* Maximum limit range in percentage, usually set to 100% by default */ + uint16_t usFanRPMStep; /* Step increments/decrements, in percent */ + uint16_t usDefaultMaxFanRPM; /* The max Fan RPM value for Fuzzy Fan Control feature, default from PPTable */ + uint16_t usMaxFanRPM; /* The max Fan RPM value for Fuzzy Fan Control feature, user defined */ + uint16_t usFanCurrentLow; /* Low current */ + uint16_t usFanCurrentHigh; /* High current */ + uint16_t usFanRPMLow; /* Low RPM */ + uint16_t usFanRPMHigh; /* High RPM */ + uint32_t ulMinFanSCLKAcousticLimit; /* Minimum Fan Controller SCLK Frequency Acoustic Limit. */ + uint8_t ucTargetTemperature; /* Advanced fan controller target temperature. */ + uint8_t ucMinimumPWMLimit; /* The minimum PWM that the advanced fan controller can set. This should be set to the highest PWM that will run the fan at its lowest RPM. */ + uint16_t usFanGainEdge; /* The following is added for Fiji */ + uint16_t usFanGainHotspot; + uint16_t usFanGainLiquid; + uint16_t usFanGainVrVddc; + uint16_t usFanGainVrMvdd; + uint16_t usFanGainPlx; + uint16_t usFanGainHbm; + uint8_t ucEnableZeroRPM; + uint8_t ucFanStopTemperature; + uint8_t ucFanStartTemperature; + uint32_t ulMaxFanSCLKAcousticLimit; /* Maximum Fan Controller SCLK Frequency Acoustic Limit.
*/ + uint32_t ulTargetGfxClk; + uint16_t usZeroRPMStartTemperature; + uint16_t usZeroRPMStopTemperature; + uint16_t usMGpuThrottlingRPMLimit; +}; + +struct pp_thermal_controller_info { + uint8_t ucType; + uint8_t ucI2cLine; + uint8_t ucI2cAddress; + uint8_t use_hw_fan_control; + struct pp_fan_info fanInfo; + struct pp_advance_fan_control_parameters advanceFanControlParameters; +}; + +struct phm_microcode_version_info { + uint32_t SMC; + uint32_t DMCU; + uint32_t MC; + uint32_t NB; +}; + +enum PP_TABLE_VERSION { + PP_TABLE_V0 = 0, + PP_TABLE_V1, + PP_TABLE_V2, + PP_TABLE_MAX +}; + +/** + * The main hardware manager structure. + */ +#define Workload_Policy_Max 6 + +struct pp_hwmgr { + void *adev; + uint32_t chip_family; + uint32_t chip_id; + uint32_t smu_version; + bool not_vf; + bool pm_en; + bool pp_one_vf; + struct mutex smu_lock; + struct mutex msg_lock; + + uint32_t pp_table_version; + void *device; + struct pp_smumgr *smumgr; + const void *soft_pp_table; + uint32_t soft_pp_table_size; + void *hardcode_pp_table; + bool need_pp_table_upload; + + struct amd_vce_state vce_states[AMD_MAX_VCE_LEVELS]; + uint32_t num_vce_state_tables; + + enum amd_dpm_forced_level dpm_level; + enum amd_dpm_forced_level saved_dpm_level; + enum amd_dpm_forced_level request_dpm_level; + uint32_t usec_timeout; + void *pptable; + struct phm_platform_descriptor platform_descriptor; + void *backend; + + void *smu_backend; + const struct pp_smumgr_func *smumgr_funcs; + bool is_kicker; + + enum PP_DAL_POWERLEVEL dal_power_level; + struct phm_dynamic_state_info dyn_state; + const struct pp_hwmgr_func *hwmgr_func; + const struct pp_table_func *pptable_func; + + struct pp_power_state *ps; + uint32_t num_ps; + struct pp_thermal_controller_info thermal_controller; + bool fan_ctrl_is_in_default_mode; + uint32_t fan_ctrl_default_mode; + bool fan_ctrl_enabled; + uint32_t tmin; + struct phm_microcode_version_info microcode_version_info; + uint32_t ps_size; + struct pp_power_state *current_ps; + struct pp_power_state *request_ps; + struct pp_power_state *boot_ps; + struct pp_power_state *uvd_ps; + const struct amd_pp_display_configuration *display_config; + uint32_t feature_mask; + bool avfs_supported; + /* UMD Pstate */ + bool en_umd_pstate; + uint32_t power_profile_mode; + uint32_t default_power_profile_mode; + uint32_t pstate_sclk; + uint32_t pstate_mclk; + bool od_enabled; + uint32_t power_limit; + uint32_t default_power_limit; + uint32_t workload_mask; + uint32_t workload_prority[Workload_Policy_Max]; + uint32_t workload_setting[Workload_Policy_Max]; + bool gfxoff_state_changed_by_workload; +}; + +int hwmgr_early_init(struct pp_hwmgr *hwmgr); +int hwmgr_sw_init(struct pp_hwmgr *hwmgr); +int hwmgr_sw_fini(struct pp_hwmgr *hwmgr); +int hwmgr_hw_init(struct pp_hwmgr *hwmgr); +int hwmgr_hw_fini(struct pp_hwmgr *hwmgr); +int hwmgr_suspend(struct pp_hwmgr *hwmgr); +int hwmgr_resume(struct pp_hwmgr *hwmgr); + +int hwmgr_handle_task(struct pp_hwmgr *hwmgr, + enum amd_pp_task task_id, + enum amd_pm_state_type *user_state); + + +#define PHM_ENTIRE_REGISTER_MASK 0xFFFFFFFFU + +int smu7_init_function_pointers(struct pp_hwmgr *hwmgr); +int smu8_init_function_pointers(struct pp_hwmgr *hwmgr); +int vega12_hwmgr_init(struct pp_hwmgr *hwmgr); +int vega20_hwmgr_init(struct pp_hwmgr *hwmgr); + +#endif /* _HWMGR_H_ */ diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h new file mode 100644 index 000000000000..6a53b7e74ccd --- /dev/null +++ 
b/drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h @@ -0,0 +1,1793 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef _POLARIS10_PWRVIRUS_H +#define _POLARIS10_PWRVIRUS_H + + +#define mmCP_HYP_MEC1_UCODE_ADDR 0xf81a +#define mmCP_HYP_MEC1_UCODE_DATA 0xf81b +#define mmCP_HYP_MEC2_UCODE_ADDR 0xf81c +#define mmCP_HYP_MEC2_UCODE_DATA 0xf81d + +struct PWR_Command_Table { + uint32_t data; + uint32_t reg; +}; + +typedef struct PWR_Command_Table PWR_Command_Table; + +struct PWR_DFY_Section { + uint32_t dfy_cntl; + uint32_t dfy_addr_hi, dfy_addr_lo; + uint32_t dfy_size; + uint32_t dfy_data[]; +}; + +typedef struct PWR_DFY_Section PWR_DFY_Section; + +static const PWR_Command_Table pwr_virus_table_pre[] = { + { 0x00000000, mmRLC_CNTL }, + { 0x00000002, mmRLC_SRM_CNTL }, + { 0x15000000, mmCP_ME_CNTL }, + { 0x50000000, mmCP_MEC_CNTL }, + { 0x80000004, mmCP_DFY_CNTL }, + { 0x0840800a, mmCP_RB0_CNTL }, + { 0xf30fff0f, mmTCC_CTRL }, + { 0x00000002, mmTCC_EXE_DISABLE }, + { 0x000000ff, mmTCP_ADDR_CONFIG }, + { 0x540ff000, mmCP_CPC_IC_BASE_LO }, + { 0x000000b4, mmCP_CPC_IC_BASE_HI }, + { 0x00010000, mmCP_HYP_MEC1_UCODE_ADDR }, + { 0x00041b75, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000710e8, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000910dd, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000a1081, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000b016f, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000c0e3c, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000d10ec, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000e0188, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00101b5d, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00150a6c, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00170c5e, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x001d0c8c, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x001e0cfe, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00221408, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00370d7b, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00390dcb, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x003c142f, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x003f0b27, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00400e63, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00500f62, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00460fa7, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00490fa7, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x005811d4, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00680ad6, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00760b00, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00780b0c, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00790af7, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x007d1aba, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x007e1abe, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00591260, 
mmCP_HYP_MEC1_UCODE_DATA }, + { 0x005a12fb, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00861ac7, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x008c1b01, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x008d1b34, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00a014b9, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00a1152e, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00a216fb, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00a41890, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00a31906, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00a50b14, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00621387, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x005c0b27, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00160a75, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC1_UCODE_DATA }, + { 0x00010000, mmCP_HYP_MEC2_UCODE_ADDR }, + { 0x00041b75, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000710e8, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000910dd, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000a1081, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000b016f, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000c0e3c, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000d10ec, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000e0188, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00101b5d, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00150a6c, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00170c5e, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x001d0c8c, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x001e0cfe, mmCP_HYP_MEC2_UCODE_DATA }, + { 
0x00221408, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00370d7b, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00390dcb, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x003c142f, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x003f0b27, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00400e63, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00500f62, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00460fa7, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00490fa7, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x005811d4, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00680ad6, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00760b00, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00780b0c, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00790af7, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x007d1aba, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x007e1abe, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00591260, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x005a12fb, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00861ac7, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x008c1b01, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x008d1b34, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00a014b9, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00a1152e, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00a216fb, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00a41890, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00a31906, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00a50b14, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00621387, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x005c0b27, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00160a75, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, 
mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x000f016a, mmCP_HYP_MEC2_UCODE_DATA }, + { 0x00000000, 0xFFFFFFFF }, +}; + +static const PWR_DFY_Section pwr_virus_section1 = { + .dfy_cntl = 0x80000004, + .dfy_addr_hi = 0x000000b4, + .dfy_addr_lo = 0x540fe800, + .dfy_data = { + 0x7e000200, 0x7e020201, 0x7e040204, 0x7e060205, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 
0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0x0a080102, 0x0a0a0701, 0x0a080102, 0x0a0a0701, + 0x0a080500, 0x0a0a0303, 0x0a080500, 0x0a0a0303, 0xbf810000, 0x00000000, 0x00000000, 0x00000000, + 0x00000005, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x54106f00, 0x000400b4, 0x00004000, 0x00804fac, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + }, + .dfy_size = 416 +}; + +static const PWR_DFY_Section pwr_virus_section2 = { + .dfy_cntl = 0x80000004, + .dfy_addr_hi = 0x000000b4, + .dfy_addr_lo = 0x540fef00, + .dfy_data = { + 0xc0031502, 0x00001e00, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + }, + .dfy_size = 16 +}; + +static const PWR_DFY_Section pwr_virus_section3 = { + .dfy_cntl = 0x80000004, + .dfy_addr_hi = 0x000000b4, + .dfy_addr_lo = 0x540ff000, + .dfy_data = { + 0xc424000b, 0x80000145, 0x94800001, 0x94c00001, 0x95000001, 0x95400001, 0x95800001, 0xdc810000, + 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xc4080061, 0xd8400013, 0xd8000003, 0xc40c0001, + 0x24ccffff, 0x3cd08000, 0x9500fffd, 0x1cd0ffcf, 0x7d018001, 0xc4140004, 0x050c0019, 0xd8400008, + 0x84c00000, 0x80000023, 0x80000067, 0x8000006a, 0x8000006d, 0x80000079, 0x80000084, 0x8000008f, + 0x80000099, 0x800000a0, 0x800000af, 0xd8400053, 0xc4080007, 0x388c0001, 0x08880002, 0x04100003, + 0x94c00005, 0x98800003, 0x04100004, 0x8000002d, 0x04100005, 0x8c00003f, 0x8c000043, 0x28cc0000, + 0xccc00050, 0x8c000055, 0x28080001, 0xcc000004, 0x7d808001, 0xd8400013, 0xd88130b8, 0xcd400008, + 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000, 0xcc800005, 0xdc080000, 0x80000168, 0xc40c000e, + 0x28cc0008, 0xccc00013, 0x90000000, 0xcd013278, 0xc4113278, 0x95000001, 0x24cc0700, 0xd8400029, + 0xc4113255, 0xcd01324f, 0xc4113254, 0x1d10ffdf, 0xcd013254, 0x10cc0014, 0x1d10c017, 0x7d0d000a, + 0xd8400013, 0xd8400008, 0xcd0130b7, 0x14cc0010, 0x90000000, 0xd9c00036, 0x8000005d, 0xd8400013, + 0xc00c4000, 0xccc130b5, 0xc40c000e, 0x28cc0008, 0xccc00013, 
0xc40c0021, 0x14d00011, 0x9500fffe, + 0xdc030000, 0xd800000c, 0xd800000d, 0xc40c005e, 0x94c01b10, 0xd8400013, 0x90000000, 0xc00e0080, + 0xccc130b5, 0x8000013b, 0xc00e0800, 0xccc130b5, 0x8000013b, 0xd8400053, 0x04100006, 0x8c00003f, + 0x8c000043, 0x28cc0000, 0xccc00050, 0x8c000055, 0x280c0008, 0xccc00052, 0xd8000021, 0x28180039, + 0x80000034, 0xd8400053, 0x04100007, 0x8c00003f, 0x8c000043, 0x28cc0001, 0xccc00050, 0x8c000055, + 0x280c0010, 0xccc00052, 0x28180039, 0x80000034, 0xd8400053, 0x04100008, 0x8c00003f, 0x8c000043, + 0x28cc0003, 0xccc00050, 0x8c000055, 0x280c0020, 0xccc00052, 0x28180039, 0x80000034, 0xdc030000, + 0xd8000069, 0x28080001, 0xc428000d, 0x7ca88004, 0xcc800079, 0x04280001, 0xcc00006f, 0x8000013b, + 0x80000034, 0x04100010, 0x8c00003f, 0x8c000043, 0xccc00078, 0x8c000055, 0x28180080, 0x80000034, + 0x04100001, 0xc40c000e, 0x28cc0008, 0xccc00013, 0xcd013278, 0xc4113278, 0x95000001, 0xc00c4000, + 0xc4113254, 0x1d10c017, 0xd8400013, 0xd8400008, 0xccc130b5, 0xcd0130b7, 0x8000013b, 0x95c00001, + 0x96000001, 0x96400001, 0x96800001, 0x96c00001, 0x97000001, 0x97400001, 0x97800001, 0x97c00001, + 0xdc810000, 0xc40c000c, 0xcd4c0380, 0xcdcc0388, 0x55dc0020, 0xcdcc038c, 0xce0c0390, 0x56200020, + 0xce0c0394, 0xce4c0398, 0x56640020, 0xce4c039c, 0xce8c03a0, 0x56a80020, 0xce8c03a4, 0xcecc03a8, + 0x56ec0020, 0xcecc03ac, 0xcf0c03b0, 0x57300020, 0xcf0c03b4, 0xcf4c03b8, 0x57740020, 0xcf4c03bc, + 0xcf8c03c0, 0x57b80020, 0xcf8c03c4, 0xcfcc03c8, 0x57fc0020, 0xcfcc03cc, 0xd9000033, 0xc41c0009, + 0x25dc0010, 0x95c0fffe, 0xd8400013, 0xc41c000c, 0x05dc002f, 0xcdc12009, 0xc41d200a, 0xd8400013, + 0xcc012009, 0xd9000034, 0x25e01c00, 0x12200013, 0x25e40300, 0x12640008, 0x25e800c0, 0x12a80002, + 0x25ec003f, 0x7e25c00a, 0x7eae400a, 0x7de5c00a, 0xddc10000, 0xc02ee000, 0xcec1c200, 0xc40c005f, + 0xccc00037, 0x24d000ff, 0x31100006, 0x9500007b, 0x8c000190, 0xdc1c0000, 0xd8400013, 0xcdc1c200, + 0xc40c000c, 0xc4df0388, 0xc4d7038c, 0x51540020, 0x7d5dc01a, 0xc4e30390, 0xc4d70394, 0x51540020, + 0x7d62001a, 0xc4e70398, 0xc4d7039c, 0x51540020, 0x7d66401a, 0xc4eb03a0, 0xc4d703a4, 0x51540020, + 0x7d6a801a, 0xc4ef03a8, 0xc4d703ac, 0x51540020, 0x7d6ec01a, 0xc4f303b0, 0xc4d703b4, 0x51540020, + 0x7d73001a, 0xc4f703b8, 0xc4d703bc, 0x51540020, 0x7d77401a, 0xc4fb03c0, 0xc4d703c4, 0x51540020, + 0x7d7b801a, 0xc4ff03c8, 0xc4d703cc, 0x51540020, 0x7d7fc01a, 0xdc080000, 0xcc800013, 0xc4d70380, + 0xc4080001, 0x1c88001c, 0xcd400008, 0xc40c0083, 0x94c00010, 0xdc0e0000, 0x94c0000e, 0xc40c0082, + 0x24d00001, 0x9900000b, 0x18cc01e3, 0x3cd00004, 0x95000008, 0xc40c0085, 0x18cc006a, 0x98c00005, + 0xc40c0082, 0x18cc01e3, 0x3cd00004, 0x9900fffa, 0xdc180000, 0xdc140000, 0xdc100000, 0xdc0c0000, + 0xcc800004, 0xdc080000, 0x90000000, 0xc4080001, 0x1c88001c, 0xcd400008, 0xdc180000, 0xdc140000, + 0xdc100000, 0xdc0c0000, 0xcc800004, 0xdc080000, 0x90000000, 0xd8400051, 0xc428000c, 0x04180018, + 0x32640002, 0x9a80001f, 0x9a40001e, 0xcd800013, 0xc4293265, 0x040c0000, 0x1aac0027, 0x2aa80080, + 0xce813265, 0x9ac00017, 0xd80002f1, 0x04080002, 0x08880001, 0xd8080250, 0xd8080258, 0xd8080230, + 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270, 0xd8080278, 0xd8080280, 0xd8080228, + 0xd8000367, 0x9880fff3, 0x04080010, 0x08880001, 0xd80c0309, 0xd80c0319, 0x04cc0001, 0x9880fffc, + 0x7c408001, 0x88000000, 0xc00e0100, 0xd8400013, 0xd8400008, 0xccc130b5, 0x8000016e, 0xc4180032, + 0x29980008, 0xcd800013, 0x95800001, 0x7c40c001, 0x18d0003f, 0x24d4001f, 0x24d80001, 0x155c0001, + 0x05e80180, 0x9900000b, 0x202c003d, 0xcd800010, 0xcec1325b, 0xc42d325b, 0x96c00001, 
0x86800000, + 0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0xc4200007, 0x0a200001, 0xce000010, 0x80001b70, + 0x7c40c001, 0x8c000190, 0xc410001b, 0xd8000032, 0xd8000031, 0x9900091a, 0x7c408001, 0x88000000, + 0x24d000ff, 0x05280196, 0x18d4fe04, 0x29540008, 0xcd400013, 0x86800000, 0x800001b4, 0x8000032b, + 0x80000350, 0x80000352, 0x8000035f, 0x80000701, 0x8000047c, 0x8000019f, 0x80000800, 0xc419325b, + 0x1d98001f, 0xcd81325b, 0x8c00003f, 0xc4140004, 0xd8400008, 0x04100002, 0x8c000043, 0x28cc0002, + 0xccc00050, 0xc43c0044, 0x27fc0003, 0x9bc00002, 0x97c00006, 0xc00c4000, 0xccc130b5, 0x8c000055, + 0xd8400013, 0xd88130b8, 0xcd400008, 0x90000000, 0xd8400008, 0xcd400013, 0x7d40c001, 0xd8400028, + 0xd8400029, 0xd9400036, 0xc4193256, 0xc41d3254, 0x15540008, 0xcd400009, 0xcd40005b, 0xcd40005e, + 0xcd40005d, 0xd840006d, 0xc421325a, 0xc42d3249, 0x11540015, 0x19a4003c, 0x1998003f, 0x1af0007d, + 0x11dc000b, 0x1264001f, 0x15dc000d, 0x7d65400a, 0x13300018, 0x1a38003f, 0x7dd5c00a, 0x7df1c00a, + 0xcd800045, 0xcdc00100, 0xc411326a, 0xc415326b, 0xc419326c, 0xc41d326d, 0xc425326e, 0xc4293279, + 0xce800077, 0xcd000056, 0xcd400057, 0xcd800058, 0xcdc00059, 0xc4193265, 0x259c8000, 0x99c00004, + 0xce40005a, 0x29988000, 0xcd813265, 0xc4113248, 0x2510000f, 0xcd000073, 0xc418000d, 0xc411326f, + 0x17300019, 0x97000009, 0x25140fff, 0x95400007, 0xd800003a, 0x8c001b6d, 0xc4153279, 0xcd400077, + 0xcd00005f, 0xd8000075, 0x26f00001, 0x15100010, 0x7d190004, 0xcd000035, 0x97000035, 0x1af07fe8, + 0xd8800013, 0xd8400010, 0xd8400008, 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001, + 0x04300010, 0xdf430000, 0x7c434001, 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078, + 0xdf030000, 0xd4412e40, 0xd8400013, 0xcc41c030, 0xcc41c031, 0xc43dc031, 0xccc00013, 0x04343000, + 0xc4113246, 0xc41d3245, 0xcf413267, 0x51100020, 0x7dd1c01a, 0xc4353267, 0x45dc0160, 0xc810001f, + 0x1b4c0057, 0x1b700213, 0x1b740199, 0x7f4f400a, 0x7f73400a, 0x55180020, 0x2198003f, 0xd1c00025, + 0xcf400024, 0xcd000026, 0xcd800026, 0xd8400027, 0x9bc00001, 0x248dfffe, 0xd8800013, 0xccc12e00, + 0x7c434001, 0x7c434001, 0x8c00142b, 0xc43c000e, 0x1af4007d, 0x2bfc0008, 0x33740003, 0x26d80001, + 0xcfc00013, 0x1ae8003e, 0x9680000c, 0xc4253277, 0x26680001, 0x96800009, 0x2a640002, 0xce413277, + 0xd8400013, 0xc4253348, 0xce413348, 0xc4253348, 0x96400001, 0xcfc00013, 0x9b400003, 0x958000d8, + 0x80000315, 0xc4253277, 0x04303000, 0x26680001, 0xcf013267, 0xc4193246, 0xc41d3245, 0xc4313267, + 0x96800041, 0x51980020, 0x1b342010, 0x7d9d801a, 0x1714000c, 0x25540800, 0x1b30c012, 0x459801b0, + 0x7d77400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0xd180001e, 0xd8400021, 0x04240010, 0x199c01e2, + 0x7e5e4002, 0x3e5c0004, 0x3e540002, 0xc428000f, 0x9a80ffff, 0x95c00006, 0xc80c0011, 0xc8140011, + 0x54d00020, 0x55580020, 0x80000282, 0x95400015, 0xc80c0011, 0x0a640002, 0x041c0001, 0x45980008, + 0x54d00020, 0x96400004, 0xc8140011, 0x45980004, 0x041c0000, 0xcf00001c, 0xd180001e, 0xd8400021, + 0xc428000f, 0x9a80ffff, 0x99c00003, 0xc8180011, 0x80000282, 0xc8140011, 0x55580020, 0x80000282, + 0x45980004, 0xc80c0011, 0xcf00001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8100011, + 0xc8140011, 0x55580020, 0xd8400013, 0xccc1334e, 0xcd01334f, 0xcd413350, 0xcd813351, 0xd881334d, + 0xcfc00013, 0xc4193273, 0xc41d3275, 0xc40d3271, 0xc4113270, 0xc4153274, 0x50cc0020, 0x7cd0c01a, + 0x7cdcc011, 0x05900008, 0xcd00006a, 0xcdc0006b, 0xc41d3272, 0x7d594002, 0x54d00020, 0xd8800013, + 0xccc12e23, 0xcd012e24, 0xcdc12e25, 0xcfc00013, 0xc4193246, 0xc41d3245, 0xc4313267, 0x15540002, + 0x51980020, 
0x7d9d801a, 0xc81c001f, 0x1b340057, 0x1b280213, 0x1b300199, 0x45980198, 0x7f37000a, + 0x7f2b000a, 0x55e40020, 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0xcd40000d, + 0xcd40000a, 0xc40d3249, 0x20cc003c, 0xccc13249, 0xc4113274, 0xdd430000, 0xc01e0001, 0x29dc0002, + 0x04280000, 0xd8000036, 0xcc400078, 0xcc400078, 0x2d540002, 0x95400022, 0x078c0000, 0x07d40000, + 0x8c00120d, 0x8c001239, 0x8c001232, 0x04f80000, 0x057c0000, 0xcdc00013, 0xc414000d, 0xc41c0019, + 0x7dd5c005, 0x25dc0001, 0xd840007c, 0xd8400074, 0xd8400069, 0xc40c005e, 0x94c018a6, 0xd4412e22, + 0xd800007c, 0xc40c005e, 0x94c018a2, 0x95c00007, 0xc40c0019, 0x7cd4c005, 0x24cc0001, 0x94c00008, + 0x9680fffc, 0x800002e3, 0xc40c0057, 0x7cd0c002, 0x94c00003, 0x9680fffd, 0x800002e3, 0xd8000069, + 0xcfc00013, 0xcd013273, 0xcd013275, 0xd8000074, 0xc414005e, 0x9540188f, 0xcfc00013, 0xc40d3249, + 0xc013cfff, 0x7cd0c009, 0xccc13249, 0x9680000b, 0xc40c0077, 0x38d00001, 0x99000006, 0x04cc0002, + 0xdcc30000, 0xc40c005e, 0x94c01882, 0xd4400078, 0xd800000d, 0x80000304, 0x7c41c001, 0x7c41c001, + 0xd840002f, 0xc41c0015, 0x95c0ffff, 0xd8400030, 0xc41c0016, 0x95c0ffff, 0xd8000030, 0xc41c0016, + 0x99c0ffff, 0xd800002f, 0xc41c0015, 0x99c0ffff, 0xc81c001f, 0x49980198, 0x55e40020, 0x459801a0, + 0xcf000024, 0xd1800025, 0xcdc00026, 0xce400026, 0xd8400027, 0x04302000, 0xcfc00013, 0xcf013267, + 0xc4313267, 0x96800004, 0x97000001, 0xd8000036, 0x80000329, 0xd8800013, 0xcc812e00, 0x04302000, + 0xcfc00013, 0xcf013267, 0xc4313267, 0x97000001, 0xc4193256, 0xc42d3249, 0x16ec001f, 0xd8000028, + 0xd800002b, 0x1998003e, 0xcec00031, 0xd8000036, 0xd8000010, 0x97800004, 0xd8400010, 0xce00000a, + 0x1a18003e, 0xcd800008, 0x90000000, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000, + 0x7d43c001, 0xcd400013, 0xc4093249, 0x1888003e, 0x94800015, 0xd8400074, 0x8c000671, 0xcd400013, + 0x9a400006, 0xc419324c, 0x259c0001, 0x1598001f, 0x95c0000d, 0x9580000c, 0x99000003, 0xd8400036, + 0x04100001, 0xc40c0021, 0x14d80011, 0x24dc00ff, 0x31e00002, 0x31dc0003, 0x9580fff0, 0x9a000003, + 0x99c00002, 0xd9c00036, 0x94800004, 0xd8000074, 0xc418005e, 0x95801827, 0xcf800008, 0x90000000, + 0xd8800036, 0x90000000, 0xd8c00036, 0xc424000b, 0x32640002, 0x9a400004, 0xc4180014, 0x9580ffff, + 0xd840002f, 0xc40c0021, 0x14dc0011, 0x95c0fffe, 0xccc00037, 0x8c000190, 0x90000000, 0xd8400008, + 0xd800006d, 0xc41d3246, 0xc4193245, 0x51dc0020, 0x7d9d801a, 0xd8400028, 0xd8400029, 0xc420000b, + 0x32200002, 0x9a0000ad, 0x04200032, 0xd9000010, 0xde030000, 0xd8400033, 0x04080000, 0xc43c0009, + 0x27fc0002, 0x97c0fffe, 0xc42c0015, 0x96c0ffff, 0xd800002e, 0xc42d3249, 0x1af4003e, 0x9740004d, + 0xc428000d, 0xc4080060, 0x7ca88005, 0x24880001, 0x7f4b4009, 0x97400046, 0xc4313274, 0xc4100057, + 0x7d33400c, 0x97400009, 0x28240100, 0x7e6a4004, 0xce400079, 0x1eecffdd, 0xcec13249, 0xcf013273, + 0xcf013275, 0x800003c3, 0xc429326f, 0x1aa80030, 0x96800006, 0x28240001, 0xc428000d, 0x06a80008, + 0x7e6a8004, 0xce800035, 0xc41d3272, 0x25cc0001, 0x10cc0004, 0x19e80042, 0x25dc0006, 0x11dc0001, + 0x7e8e800a, 0x7de9c00a, 0xc40d3271, 0xc4293270, 0x50cc0020, 0x7ce8c01a, 0x7cd30011, 0x11e80007, + 0x2aa80000, 0xce80001c, 0xd300001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc4300011, 0x1b30003f, + 0x33300000, 0xc4240059, 0x1660001f, 0x7e320009, 0xc0328000, 0x7e72400a, 0x0430000c, 0x9a000002, + 0x04300008, 0xc02ac000, 0x7d310002, 0x17300002, 0x2aa87600, 0x7cd0c011, 0xcdc00024, 0xd0c00025, + 0xce800026, 0x04280222, 0xce800026, 0x96000002, 0xce400026, 0xd8400027, 0xc4280058, 0x22ec003d, + 0xcec13249, 0xcd013273, 0xce813275, 
0xd800007b, 0xc8380018, 0x57b00020, 0x04343108, 0xc429325d, + 0x040c3000, 0x13740008, 0x2374007e, 0x32a80003, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213, + 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0x94800003, 0xd4400078, 0x800003e7, 0x04200022, 0xde030000, + 0xccc00024, 0xd1800025, 0xcf400026, 0xd4400026, 0xd8400027, 0x04200010, 0xde030000, 0xccc00024, + 0x45980104, 0xd1800025, 0xd4400026, 0xcf800026, 0xcf000026, 0xd8400027, 0x49980104, 0x9a80000a, + 0xc81c001f, 0x45980168, 0x55e00020, 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027, + 0x800003f2, 0x8c000448, 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xc40d3249, + 0x18cc003e, 0xd8400030, 0xc42c0016, 0x96c0ffff, 0xd8000030, 0xc42c0016, 0x9ac0ffff, 0xd800002f, + 0xc42c0015, 0x9ac0ffff, 0xd8400034, 0xc4300025, 0xc4340024, 0xc4380081, 0xcf813279, 0xcf41326e, + 0xcf01326d, 0x94c0000d, 0x254c0700, 0xc424001e, 0x10cc0010, 0x1a641fe8, 0x28cc0726, 0x2a640200, + 0xd8400013, 0xccc1237b, 0x2264003f, 0xcd400013, 0xd8813260, 0xce41325b, 0xc4240033, 0xc4280034, + 0xd9000036, 0xd8000010, 0x8c001427, 0x96400006, 0xde430000, 0xce40000c, 0xc40c005e, 0x94c01755, + 0xd4400078, 0x9680000a, 0xce80000a, 0x06a80002, 0xd8400010, 0xde830000, 0xce80000d, 0xc40c005e, + 0x94c0174c, 0xd4400078, 0xd8000010, 0x8c00142b, 0xc4393265, 0x2bb80040, 0xd8400032, 0xcf813265, + 0xc4200012, 0x9a00ffff, 0xc4100044, 0x19180024, 0xc8100072, 0x551c003f, 0x99c00003, 0x95800010, + 0x8000043d, 0xc00c8000, 0xd840006c, 0x28200000, 0x8000043f, 0xc00c4000, 0x282000f0, 0xcd400013, + 0xd8400008, 0xc4113255, 0xcd01324f, 0xd8400013, 0xd88130b8, 0xccc130b5, 0xce000053, 0x90000000, + 0x195c00e8, 0xc4100004, 0x2555fff0, 0xc0360001, 0x042c0000, 0x29540001, 0xd8400008, 0x04240000, + 0x04280004, 0xc420000b, 0x32200002, 0x9a000009, 0xcd400013, 0xcec1c200, 0xc5e124dc, 0x0aa80001, + 0x7ef6c001, 0x7e624001, 0x96000001, 0x9a80fff9, 0xc02ee000, 0xcd400013, 0x2555fff0, 0xcec1c200, + 0x29540008, 0xc81c001f, 0xcd400013, 0x55e00020, 0xc42d3255, 0xc4353259, 0xd8013260, 0x45980158, + 0xccc00024, 0xd1800025, 0xcdc00026, 0xce000026, 0xd8400027, 0x49980158, 0x45980170, 0xc4200012, + 0x16200010, 0x9a00fffe, 0xccc00024, 0xd1800025, 0xc429324f, 0xce400026, 0xce800026, 0xcec00026, + 0xcf400026, 0xd8400027, 0xcd000008, 0x90000000, 0xc40d325b, 0x7d43c001, 0x195400e8, 0x1154000a, + 0x18dc00e8, 0x05e80488, 0x18d0006c, 0x18f807f0, 0x18e40077, 0x18ec0199, 0x7e6e400a, 0x86800000, + 0x8000048e, 0x80000494, 0x800004de, 0x80000685, 0x80000686, 0x800006ac, 0x1ccc001f, 0xccc1325b, + 0xc411325d, 0x251001ef, 0xcd01325d, 0x90000000, 0xc4293254, 0x1264000a, 0xc4300004, 0x7d79400a, + 0x7e7a400a, 0x52a8001e, 0x15180001, 0x7d69401a, 0x202c007d, 0xcec1325b, 0x95000008, 0x95800028, + 0xc42d3267, 0xc4193246, 0xc41d3245, 0x1aec0028, 0xc40d325c, 0x800004cc, 0xc42d3256, 0xc419324e, + 0x26e8003f, 0x1aec003e, 0x12f4000e, 0xc41d324d, 0xc40d324f, 0x7d75401a, 0x04100002, 0x7d290004, + 0x7f8f4001, 0x7f52800f, 0x51980020, 0x7d9d801a, 0x50e00002, 0x51980008, 0x9a800002, 0x800004d1, + 0x7d0dc002, 0x6665fc00, 0x7e5e401a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000, + 0xce400002, 0x7f534002, 0x6665fc00, 0x7e76401a, 0xd1800002, 0xce400002, 0x800004d7, 0xc42d325a, + 0xc4193258, 0x1aec003e, 0xc41d3257, 0xc4213259, 0x12f4000e, 0x7d75401a, 0x51980020, 0x52200002, + 0x7d9d801a, 0xcec00008, 0x7da1c011, 0xd140000b, 0xd1c00002, 0x2a644000, 0xce400002, 0x202c003d, + 0xcf000008, 0xcfc00013, 0xcec1325b, 0xc42d325b, 0x96c00001, 0x90000000, 0xc4193260, 0x259c0007, + 0x15980004, 0x05e804e3, 0x86800000, 0x800004e7, 0x800004f0, 
0x80000505, 0x8000016a, 0xc4380004, + 0xcfc00013, 0xd8400008, 0xc435325d, 0xd801325b, 0x277401ef, 0xcf41325d, 0xcf800008, 0x90000000, + 0xc4380004, 0xd8400008, 0x8c000671, 0x9640fff4, 0x17e00008, 0xc418000d, 0xce000009, 0xd84131db, + 0xcf800008, 0xcd800009, 0xc430001e, 0xcfc00013, 0xc42d325b, 0x1b301ff8, 0x2b300400, 0x2330003f, + 0x26edf000, 0x7ef2c00a, 0xd8413260, 0xcec1325b, 0x90000000, 0x05a80507, 0x86800000, 0x8000050c, + 0x80000528, 0x8000057d, 0x800005c2, 0x800005f3, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013, + 0x9a400012, 0x1bd400e8, 0xc42c004a, 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000c, 0xc4100019, + 0x7d150005, 0x25100001, 0x99000008, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277, + 0xd801326f, 0x80000624, 0x04240012, 0x1be00fe4, 0xce413260, 0xce000066, 0xcf800008, 0x90000000, + 0xd8400068, 0xc4380004, 0xd8400008, 0x8c000671, 0xcfc00013, 0x9a400013, 0x1bd400e8, 0xc42c004a, + 0xcd40005e, 0xc41c004d, 0xcec0005e, 0x99c0000d, 0xc4100019, 0x7d150005, 0x25100001, 0x99000009, + 0xd8400067, 0x8c00063b, 0xcfc00013, 0xc4113277, 0x2511fffd, 0xcd013277, 0xd801326f, 0x80000624, + 0x1bd400e8, 0xc42c0060, 0x7ed6c005, 0x26ec0001, 0xc4113271, 0xc4153270, 0xc4193272, 0xc41d3273, + 0x04280022, 0x51100020, 0x7d51401a, 0xc4113274, 0xc4213275, 0xc4253276, 0xc4313248, 0xd1400061, + 0x2730000f, 0x13300010, 0x7db1800a, 0xcd800060, 0x96c00002, 0x05dc0008, 0xcdc00062, 0x042c3000, + 0xcd000063, 0xce000064, 0xce400065, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xce813260, + 0x52ec0020, 0x7ef2c01a, 0xc820001f, 0x1b700057, 0x1b680213, 0x1b740199, 0x46ec0188, 0x7f73400a, + 0x7f6b400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, + 0xc418000d, 0x17e00008, 0xce000009, 0xcec13267, 0xc42d3267, 0x26e01000, 0x9a00fffe, 0xd8400013, + 0xd9c131fc, 0xcd800009, 0xcf800008, 0x96c00001, 0x90000000, 0xc4380004, 0xd8400008, 0xc4113277, + 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0x29dc0001, 0x25140001, 0x191807e4, + 0x192007ec, 0x95400004, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x9580000e, 0x09980001, 0x041c0001, + 0x95800005, 0x09980001, 0x51dc0001, 0x69dc0001, 0x9980fffd, 0x7de20014, 0x561c0020, 0xd8400013, + 0xce013344, 0xcdc13345, 0xcfc00013, 0x95400022, 0x042c3000, 0xcec13267, 0xc42d3246, 0xc4313245, + 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe, 0xc419334e, 0xc41d334f, 0xc4213350, + 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213, 0x1b740199, 0x46ec01b0, 0x7f6b400a, + 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026, 0xcdc00026, 0xce000026, 0xce400026, + 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001, 0x04280032, 0xce813260, 0xd8800068, + 0xcf800008, 0x90000000, 0xc4380004, 0xd8400008, 0x2010007d, 0xcd01325b, 0xc411325b, 0x1910003e, + 0x9500fffe, 0x04100040, 0xcd00001b, 0xd8400021, 0xc410000f, 0x9900ffff, 0x04100060, 0xcd00001b, + 0xd8400021, 0xc410000f, 0x9900ffff, 0xcfc00013, 0x2010003d, 0xcd01325b, 0xc4113277, 0x25140001, + 0x191807e4, 0x9540000b, 0x2511fffd, 0xcd013277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, + 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x95800005, 0xd8400013, 0xd8013344, 0xd8013345, + 0xcfc00013, 0xc4180050, 0xc41c0052, 0x04280042, 0xcd813273, 0xcdc13275, 0xce813260, 0xd9000068, + 0xd8400067, 0xcf800008, 0x90000000, 0x07d40000, 0x8c00120d, 0x8c00124f, 0x8c001232, 0x057c0000, + 0x042c3000, 0xc4380004, 0xcfc00013, 0xd8400008, 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, + 0x52ec0020, 0x7ef2c01a, 0x1b680057, 0x1b700213, 0x1b740199, 0xc820001f, 0x46ec0190, 
0x7f6b400a, + 0x7f73400a, 0x56240020, 0xcf400024, 0xd2c00025, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, + 0xcfc00013, 0xcec13267, 0xc4153249, 0x2154003d, 0xc41c0019, 0x1bd800e8, 0x7dd9c005, 0x25dc0001, + 0xc42c004a, 0xcd80005e, 0xc420004d, 0xcec0005e, 0x11dc0010, 0x7e1e000a, 0xcd413249, 0xce01326f, + 0x28340001, 0x05980008, 0x7f598004, 0xcd800035, 0x1be800e8, 0xc42c004a, 0xce80005e, 0xd801327a, + 0xd800005f, 0xd8000075, 0xd800007f, 0xc424004c, 0xce41326e, 0xcec0005e, 0x28240100, 0x7e6a4004, + 0xce400079, 0xc435325d, 0x277401ef, 0x04240020, 0xce41325e, 0xd801325b, 0xd8013260, 0xcf41325d, + 0xda000068, 0xcf800008, 0x90000000, 0xc4113277, 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, + 0x11dc0008, 0x29dc0001, 0x25140001, 0x9540002d, 0xd8400013, 0xcdc1334a, 0xcfc00013, 0x042c3000, + 0xcec13267, 0xc42d3246, 0xc4313245, 0xc4353267, 0xd8400013, 0xc425334d, 0x26640001, 0x9640fffe, + 0xc419334e, 0xc41d334f, 0xc4213350, 0xc4253351, 0x52ec0020, 0x1b680057, 0x7ef2c01a, 0x1b700213, + 0x1b740199, 0x46ec01b0, 0x7f6b400a, 0x7f73400a, 0xcfc00013, 0xcf400024, 0xd2c00025, 0xcd800026, + 0xcdc00026, 0xce000026, 0xce400026, 0x042c2000, 0xd8400027, 0xcec13267, 0xc42d3267, 0x96c00001, + 0xc41c000b, 0xc420000c, 0x11dc0002, 0x7de1c001, 0x11dc0008, 0xd8400013, 0xcdc1334a, 0xcfc00013, + 0x90000000, 0xc430000b, 0x33300002, 0x04240000, 0x9b000010, 0x1be000e8, 0x042c0000, 0xc0360001, + 0x04280004, 0xd8400013, 0xcec1c200, 0xc63124dc, 0x0aa80001, 0x7ef6c001, 0x7e724001, 0x97000001, + 0x9a80fff9, 0xc02ee000, 0xd8400013, 0xcec1c200, 0x90000000, 0x90000000, 0xc4253260, 0x7fc14001, + 0xc40d3249, 0x18cc003e, 0x98c00005, 0x194c1c03, 0xccc0003b, 0xc40c002d, 0x80000697, 0xc420004a, + 0x194c00e8, 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x98c00003, + 0x8c0007e0, 0x95c00008, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013, 0xcf01325b, + 0x90000000, 0xcd400013, 0xd801325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x25100007, 0x31100005, + 0x9900008e, 0xc40c0007, 0xd9000010, 0x8000075e, 0x202c007d, 0xcec1325b, 0xc4293265, 0xc4353254, + 0x26a9feff, 0xc4380004, 0xd8400008, 0x1374000b, 0xc40c000d, 0xd8000009, 0x1774000d, 0xd8400013, + 0xc41d30b8, 0xcfc00013, 0x95c00008, 0xc411325d, 0xd801325b, 0xccc00009, 0xcf800008, 0x251001ef, + 0xcd01325d, 0x90000000, 0xce813265, 0xcf400100, 0xc00ac006, 0xc00e0000, 0x28880700, 0x28cc0014, + 0x8c0006de, 0x14cc0010, 0x30d4000f, 0x04cc0001, 0x10cc0010, 0x28cc0014, 0x99400009, 0xd8400013, + 0xc41530b8, 0xcfc00013, 0xc4193265, 0x19980028, 0x99400003, 0x99800002, 0x800006c8, 0xcfc00013, + 0xc411325d, 0xd801325b, 0xcf800008, 0x251001ef, 0xcd01325d, 0x90000000, 0x15600008, 0xce000009, + 0xc8380023, 0xc4180081, 0x11a00002, 0x7fa38011, 0xc4100026, 0x05980008, 0x7d1a0002, 0x282c2002, + 0x3e280008, 0xcec00013, 0xc4300027, 0x042c0008, 0xd3800025, 0xcf000024, 0x202400d0, 0x7ca48001, + 0xcc800026, 0xccc00026, 0x28240006, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800004, 0x32280000, + 0x9a800002, 0x9a000000, 0xd8400027, 0x24d8003f, 0xd840003c, 0xcec0003a, 0xd8800013, 0xcd81a2a4, + 0x90000000, 0xc41d325d, 0x25dc0007, 0xc40d3249, 0x18cc003e, 0x94c0000a, 0xc420004a, 0x194c00e8, + 0xccc0005e, 0xc40c004c, 0xc431326d, 0x27301fff, 0xce00005e, 0x7cf0c00d, 0x80000712, 0x194c1c03, + 0xccc0003b, 0xc40c002d, 0x05e80714, 0x86800000, 0x8000071c, 0x80000720, 0x80000747, 0x8000071d, + 0x800007c4, 0x80000732, 0x80000745, 0x80000744, 0x90000000, 0x98c00006, 0x8000072e, 0x90000000, + 0x98c00003, 0x8c0007e0, 0x95c0000c, 0xcd400013, 0xc4253265, 0x2a64008c, 0xce413265, 0xc430001e, + 0x1b301fe8, 
0x2b300400, 0x2330003f, 0xd8013260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010, + 0x04240000, 0x8000075e, 0x98c0fff1, 0x8c0007e0, 0x95c00002, 0x80000723, 0xcd400013, 0xc41f02f1, + 0x95c00004, 0xd8013247, 0xd801325d, 0x80000743, 0xd8813247, 0xd801325d, 0xc4100004, 0xd8400008, + 0xd8400013, 0xd88130b8, 0xcd000008, 0x90000000, 0x04100001, 0x98c0ffde, 0x8000072e, 0x98c00003, + 0x8c0007e0, 0x95c00012, 0xc4340004, 0xd8400008, 0x15600008, 0xc418000d, 0xce000009, 0xd8400013, + 0xd84131db, 0xcf400008, 0xcd800009, 0xc430001e, 0x1b301ff8, 0x2b300400, 0x2330003f, 0xcd400013, + 0xd8413260, 0xcf01325b, 0x90000000, 0xc40c0007, 0xd9000010, 0x04240000, 0xcd400013, 0x041c3000, + 0xcdc13267, 0xc41d3267, 0xc41d3265, 0x25dc8000, 0x95c00007, 0xc41c004a, 0x195800e8, 0xcd80005e, + 0xc418004c, 0xcd81326e, 0xcdc0005e, 0xc41d3265, 0x25dd7fff, 0xcdc13265, 0xc41d3246, 0xc4193245, + 0xc42d3267, 0x51e00020, 0x7e1a001a, 0x46200200, 0x04283247, 0x04300033, 0x1af80057, 0x1af40213, + 0x042c000c, 0x7f7b400a, 0x7f6f400a, 0xcf400024, 0xd2000025, 0xcd800026, 0xcdc00026, 0xc6990000, + 0x329c325d, 0x99c00008, 0x329c3269, 0x99c00006, 0x329c3267, 0x95c00005, 0xc01defff, 0x7d9d8009, + 0x8000078a, 0x25980000, 0x0b300001, 0x06a80001, 0xcd800026, 0x9b00fff2, 0xd8400027, 0xc43c0012, + 0x9bc0ffff, 0xcd400013, 0xd801325b, 0xc431325a, 0xc03e7ff0, 0x7f3f0009, 0xcf01325a, 0xc4313249, + 0x1f30001f, 0xcf013249, 0xc03e4000, 0xcfc13254, 0xcd400013, 0xd8013254, 0xc431325d, 0xd801324f, + 0xd8013255, 0xd8013247, 0xd801325d, 0x1b300028, 0x8c00120d, 0x8c001219, 0x8c001232, 0xc4380004, + 0xd8400008, 0xd8400013, 0x9900000d, 0xd88130b8, 0x9700000b, 0xc43d30b5, 0x1bf0003a, 0x9b000b80, + 0x203c003a, 0xc430000e, 0x27300700, 0x13300014, 0x2b300001, 0xcf0130b7, 0xcfc130b5, 0x46200008, + 0xcf400024, 0xd2000025, 0xd8000026, 0xd8400027, 0x043c2000, 0xcd400013, 0xcfc13267, 0xc43d3267, + 0x9bc00001, 0xccc00010, 0xcf800008, 0x90000000, 0xc4080007, 0xd9000010, 0xc4193260, 0x259c0003, + 0x31dc0003, 0x95c00014, 0x040c3000, 0xd8400008, 0xccc13267, 0xc40d3267, 0x18ec0057, 0x18e40213, + 0x18cc0199, 0x7cecc00a, 0x7ce4c00a, 0xc4193246, 0xc41d3245, 0x51980020, 0x7d9d801a, 0x8c000448, + 0xcd400013, 0x040c2000, 0xccc13267, 0xc40d3267, 0x94c00001, 0xcc800010, 0xd801325d, 0x90000000, + 0xc418000b, 0x31980002, 0x041c0000, 0x9980001c, 0x19580066, 0x15600008, 0x040c0000, 0xc0120001, + 0x11980003, 0x04240004, 0x7da18001, 0xc4200007, 0xc4340004, 0xd9000010, 0xd8400008, 0xd8400013, + 0xccc1c200, 0xc41d24db, 0x7cd0c001, 0x0a640001, 0x7dd9c005, 0x25dc0001, 0x99c00002, 0x9a40fff8, + 0xc418005e, 0x9580137b, 0xc00ee000, 0xd8400013, 0xccc1c200, 0xce000010, 0xcf400008, 0x90000000, + 0xd840004f, 0xc4113269, 0x19080070, 0x190c00e8, 0x2510003f, 0x2518000f, 0xcd813268, 0x05a80809, + 0x86800000, 0x8000080e, 0x8000080f, 0x80000898, 0x80000946, 0x800009e1, 0x80000a5a, 0x04a80811, + 0x86800000, 0x80000815, 0x80000834, 0x8000085e, 0x8000085e, 0x04341001, 0xcf400013, 0xc4380004, + 0xd8400008, 0xc42d3045, 0xcec1c091, 0x31300021, 0x9700000b, 0xd84002f1, 0xd8400013, 0xc43130b8, + 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a, 0xcf800008, 0x9b000241, 0x8000084a, 0xcf400013, + 0xd8400008, 0xc43130b6, 0x9b000003, 0xc02f0001, 0xcec130b6, 0xc4252087, 0x5668001a, 0x26a80005, + 0x9a80fffd, 0xcf400013, 0xd80130b6, 0x8000084a, 0xc4380004, 0xd8400008, 0x04341001, 0xcf400013, + 0xc431ecaa, 0x27300080, 0x9b000010, 0xc02e0001, 0xcec130b6, 0xcf400013, 0xd80130b6, 0x31300021, + 0x9700000a, 0xd84002f1, 0xd8400013, 0xc43130b8, 0x27300001, 0xc4293059, 0x56a8001f, 0x7f2b000a, + 0xcf800008, 0x9b00021d, 0xdd410000, 
0x040c0005, 0xd84802e9, 0x8c001a41, 0xc43b02f1, 0x9b800006, + 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0xcf800008, 0xcec80278, 0x56f00020, 0xcf080280, + 0x8c001608, 0xdc140000, 0xcd400013, 0xd8813247, 0xd80802e9, 0x8000085e, 0xcd400013, 0x31100011, + 0x950001fa, 0xc02e0001, 0x2aec0008, 0xc01c0020, 0xc0180001, 0xc00c0007, 0x11a40006, 0x7de6000a, + 0x10e40008, 0x7e26000a, 0x7e2e000a, 0xce000013, 0xc4113254, 0x1d10ffdf, 0x2110003e, 0xcd013254, + 0xd801324f, 0xd8013255, 0x1d10ff9e, 0xcd013254, 0xd8013247, 0xd801325d, 0xd801325e, 0xc0245301, + 0xce413249, 0xd801325f, 0xc425326c, 0xc0121fff, 0x29108eff, 0x7e524009, 0xce41326c, 0xc425325a, + 0xc0127ff0, 0x7e524009, 0xce41325a, 0xc425325b, 0xc0131fff, 0x7e524009, 0xce41325b, 0xd801326d, + 0xd801326e, 0xd8013279, 0x94c00003, 0x08cc0001, 0x80000866, 0xc00c0007, 0x95800003, 0x09980001, + 0x80000866, 0xc0100010, 0x7dd2400c, 0x9a400004, 0xc0180003, 0x7dd1c002, 0x80000866, 0x80000a5a, + 0x04a8089a, 0x86800000, 0x8000089e, 0x800008fa, 0x80000945, 0x80000945, 0x31300022, 0x97000007, + 0xc4380004, 0xd8400008, 0xd8400013, 0xc43130b8, 0x27300001, 0xcf800008, 0xcd400013, 0x04183000, + 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f, + 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, + 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000036, 0x45980008, 0xd180001e, + 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002f, 0xc43c0004, 0xd8400008, 0xd8400013, + 0x13b80001, 0xc79d3300, 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e, + 0x964012a4, 0x7c028009, 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x800008d2, + 0xc4180006, 0x9980ffff, 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001, + 0x9980fffd, 0xc02620c0, 0xce41c078, 0xce81c080, 0xcc01c081, 0xcf01c082, 0x57240020, 0xce41c083, + 0xc0260400, 0x7e6e400a, 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x800008d2, 0xc4180006, 0x9980ffff, + 0xcdf93300, 0xce393301, 0xcfc00008, 0xcd400013, 0xc43c0004, 0xd8400008, 0x04182000, 0xcd813267, + 0xcfc00008, 0x80000903, 0x31240022, 0x96400008, 0x04100001, 0xc4380004, 0xd8400008, 0xd8400013, + 0xc43130b8, 0x27300001, 0xcf800008, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x7ec30011, + 0x32f80000, 0x9b800011, 0x043c0020, 0x04280000, 0x67180001, 0x0bfc0001, 0x57300001, 0x95800006, + 0x8c001628, 0x9a400003, 0xd981325d, 0x80000915, 0xd9c1325d, 0x06a80001, 0x9bc0fff6, 0x7f818001, + 0x8c001606, 0x7d838001, 0x94800010, 0xcd400013, 0xc41d3259, 0xc421325a, 0x16240014, 0x12640014, + 0x1a2801f0, 0x12a80010, 0x2620ffff, 0x7e2a000a, 0x7de1c001, 0x7e5e400a, 0x9b800002, 0x2264003f, + 0xce41325a, 0xd8013259, 0xc40c0007, 0xd9000010, 0x8c00075e, 0xc4af0228, 0x043c0000, 0x66d80001, + 0x95800010, 0x04300002, 0x1330000d, 0x13f40014, 0x7f73400a, 0xcf400013, 0x04380040, 0xcf80001b, + 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, + 0x07fc0001, 0x56ec0001, 0x33e80010, 0x9680ffec, 0x80000a5a, 0x80000a5a, 0x04a80948, 0x86800000, + 0x8000094c, 0x8000099b, 0x800009e0, 0x800009e0, 0xc43c0004, 0xd8400008, 0xcd400013, 0x04183000, + 0xcd813267, 0xc4113246, 0xc4193245, 0x51100020, 0x7d91801a, 0x459801e0, 0xc4313267, 0x2738000f, + 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, + 0xd180001e, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8300011, 0x97000033, 0x45980008, 0xd180001e, + 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc8340011, 0x9740002c, 
0xd8400013, 0x13b80001, 0xc79d3300, + 0xc7a13301, 0x96000001, 0xd8393300, 0xc0260001, 0xce793301, 0xc424005e, 0x964011fe, 0x7c028009, + 0x9740001c, 0x27580001, 0x99800004, 0x57740001, 0x06a80400, 0x80000978, 0xc4180006, 0x9980ffff, + 0x29640001, 0xce40001a, 0x242c0000, 0x06ec0400, 0x57740001, 0x27580001, 0x9980fffd, 0xc0260010, + 0xce41c078, 0xcf01c080, 0x57240020, 0xce41c081, 0xce81c082, 0xcc01c083, 0xc0260800, 0x7e6e400a, + 0xce41c084, 0x7eae8001, 0x7f2f0011, 0x80000978, 0xc4180006, 0x9980ffff, 0xcdf93300, 0xce393301, + 0x04182000, 0xcd813267, 0xcfc00008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020, 0x7dda801a, + 0x7d41c001, 0x7e838011, 0xd84802e9, 0x8c001802, 0x469c0390, 0xc4313267, 0x04183000, 0xcd813267, + 0x1b342010, 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, + 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011, 0x45dc0004, 0xd1c0001e, + 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4240011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, + 0x9980ffff, 0xc4280011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc42c0011, + 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4300011, 0x45dc0004, 0xd1c0001e, + 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4340011, 0x45dc0004, 0xd1c0001e, 0xd8400021, 0xc418000f, + 0x9980ffff, 0xc4380011, 0xcd400013, 0x04182000, 0xcd813267, 0x043c0001, 0x8c0014df, 0x80000a5a, + 0x80000a5a, 0x31280014, 0xce8802ef, 0x9a800062, 0x31280034, 0x9a800060, 0x04a809e8, 0x86800000, + 0x800009ec, 0x80000a45, 0x80000a59, 0x80000a59, 0xcd400013, 0xc4113246, 0xc4193245, 0x51100020, + 0x7d91801a, 0x45980400, 0xc4b30258, 0xc4a70250, 0x53300020, 0x7e72401a, 0xc4313267, 0x1b342010, + 0x172c000c, 0x26ec0800, 0x1b30c012, 0x7ef7400a, 0x7f37000a, 0x2b300000, 0xcf00001c, 0x042c0020, + 0x66740001, 0x97400041, 0xcd400013, 0x04383000, 0xcf813267, 0xc4393267, 0x9b800001, 0xd180001e, + 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4300011, 0x1b38007e, 0x33b40003, 0x9b400003, 0x4598001c, + 0x9740002f, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc40c0011, 0x45980004, + 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x45980004, 0xd180001e, 0xd8400021, + 0xc438000f, 0x9b80ffff, 0xc4340011, 0xcf4002eb, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, + 0x9b80ffff, 0xc4340011, 0xcf4002ec, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, + 0xc4340011, 0xcf4002ed, 0x45980004, 0xd180001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4340011, + 0xcf4002ee, 0x45980004, 0xcd400013, 0x04382000, 0xcf813267, 0xd84802e9, 0x8c001715, 0xcd400013, + 0x04382000, 0xcf813267, 0x56640001, 0x0aec0001, 0x9ac0ffbc, 0xc4380004, 0xd8400008, 0x04341001, + 0xcf400013, 0x94800005, 0xc431ecaa, 0x27300080, 0x97000002, 0x80000a55, 0xc43130b6, 0x233c0032, + 0xcfc130b6, 0xcf400013, 0xcf0130b6, 0xc49302ef, 0x99000003, 0xcd400013, 0xd8413247, 0xcf800008, + 0x80000a5a, 0x80000a5a, 0xcd400013, 0x04180001, 0x5198001f, 0xcd813268, 0xc4193269, 0x2598000f, + 0x9980fffe, 0xd80002f1, 0xcd400013, 0xd8013268, 0xd800004f, 0x90000000, 0xcd400013, 0x04380001, + 0x53b8001f, 0x7db9801a, 0xcd813268, 0x80000a5e, 0xd8400029, 0xc40c005e, 0x94c01106, 0xd8800013, + 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xd8400029, 0xc40c005e, 0x94c010fd, + 0x7c40c001, 0x50640020, 0x7ce4c01a, 0xd0c00072, 0xc80c0072, 0x58e801fc, 0x12a80009, 0x2aa80000, + 0xd0c0001e, 0xce80001c, 0xd8400021, 0xc424000f, 0x9a40ffff, 0x04240010, 0x18dc01e2, 0x7e5e4002, + 0x3e5c0003, 0x3e540002, 0x95c00006, 0xc8180011, 0xc8100011, 0xc8100011, 0x55140020, 
0x80000aa2, + 0x9540000a, 0xc8180011, 0x44cc0008, 0x55900020, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff, + 0xc4140011, 0x80000aa2, 0x44cc0004, 0xc4180011, 0xd0c0001e, 0xd8400021, 0xc424000f, 0x9a40ffff, + 0xc8100011, 0x55140020, 0xd8800013, 0xcd812e01, 0xcd012e02, 0xcd412e03, 0xcc412e00, 0xc428000e, + 0x2aa80008, 0xce800013, 0xc4253249, 0x2264003f, 0xce413249, 0xce800013, 0xc4253249, 0x96400001, + 0xd800002a, 0xc410001a, 0xc40c0021, 0xc4140028, 0x95000005, 0x1e64001f, 0xce800013, 0xce413249, + 0x80001b70, 0x14d00010, 0xc4180030, 0xc41c0007, 0x99000004, 0x99400009, 0x9980000c, 0x80000ab1, + 0xccc00037, 0x8c000190, 0xc420001c, 0xd8000032, 0x9a0010ac, 0x80000aa7, 0xd880003f, 0x95c00002, + 0xd8c0003f, 0x80001082, 0xd8800040, 0x95c00002, 0xd8c00040, 0x800010de, 0xc010ffff, 0x18d403f7, + 0x7d0cc009, 0xc41b0367, 0x7d958004, 0x7d85800a, 0xdc1e0000, 0x90000000, 0xc424000b, 0x32640002, + 0x7c40c001, 0x18d001fc, 0x05280adc, 0x86800000, 0x80000af1, 0x80000adf, 0x80000ae7, 0x8c000ace, + 0xd8c00013, 0x96400002, 0xd8400013, 0xcd8d2000, 0x99c00010, 0x7c408001, 0x88000000, 0x18d803f7, + 0xc010ffff, 0x7d0cc009, 0x04140000, 0x11940014, 0x29544001, 0x9a400002, 0x29544003, 0xcd400013, + 0x80000af4, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44d2000, 0x7c408001, 0x88000000, 0xc424000b, + 0x32640002, 0x7c40c001, 0xd8c00013, 0x96400002, 0xd8400013, 0xd44dc000, 0x7c408001, 0x88000000, + 0x7c40c001, 0x18d0003c, 0x95000006, 0x8c000ace, 0xd8800013, 0xcd8d2c00, 0x99c00003, 0x80000b0a, + 0xd8800013, 0xd44d2c00, 0x7c408001, 0x88000000, 0x7c40c001, 0x28148004, 0x24d800ff, 0xccc00019, + 0xcd400013, 0xd4593240, 0x7c408001, 0x88000000, 0xd8400029, 0xc40c005e, 0x94c0105e, 0x7c410001, + 0x50540020, 0x7c418001, 0x2198003f, 0x199c0034, 0xc40c0007, 0x95c00028, 0xc428000e, 0x2aa80008, + 0xce800013, 0xc42d324f, 0xc4313255, 0x7ef3400c, 0x9b400021, 0xd800002a, 0x80001b70, 0xc40c0007, + 0x14e80001, 0x9a8000af, 0xd9000010, 0x041c0002, 0x042c01c8, 0x8c000d61, 0xccc00010, 0xd8400029, + 0xc40c005e, 0x94c01043, 0x7c410001, 0x50540020, 0x7c418001, 0x18a01fe8, 0x3620005c, 0x9a00000e, + 0x2464003f, 0xd8400013, 0xc6290ce7, 0x16ac001f, 0x96c00004, 0x26ac003f, 0x7ee6c00d, 0x96c00005, + 0x06200001, 0x2620000f, 0x9a00fff8, 0x8000016a, 0xce000367, 0xc424005e, 0x9640102e, 0xc428000e, + 0x199c0037, 0x19a00035, 0x2aa80008, 0xce800013, 0x95c0005d, 0xd800002a, 0xc42d3256, 0xc431325a, + 0x2330003f, 0x16f8001f, 0x9780000d, 0xc4253248, 0xc035f0ff, 0x7e764009, 0x19b401f8, 0x13740008, + 0x7e76400a, 0xce800013, 0xce413248, 0xcf01325a, 0xce800013, 0xc431325a, 0x97000001, 0x7d15001a, + 0xd1000072, 0xc8100072, 0x55140020, 0x199c0034, 0xd8400010, 0xd8400029, 0x9b800004, 0x1ae4003e, + 0xce400008, 0x80000b7c, 0xc4353254, 0x16a80008, 0x1aec003c, 0x19a4003f, 0x12a80015, 0x12ec001f, + 0x1374000b, 0x7eae800a, 0xc02e4000, 0x1774000d, 0x7eae800a, 0xce400008, 0x7f6b400a, 0x95c00005, + 0xc43d3248, 0x1bfc01e8, 0x13fc0018, 0x7dbd800a, 0x1d98ff15, 0x592c00fc, 0xcd80000a, 0x12e00016, + 0x7da1800a, 0x592c007e, 0x12e00015, 0x7da1800a, 0xd1000001, 0xcd800001, 0x11a0000c, 0x1264001e, + 0x1620000c, 0x7e26000a, 0x7e32000a, 0x12e4001b, 0x7e26000a, 0x5924007e, 0x12640017, 0x7e26000a, + 0x19a4003c, 0x12640018, 0x7e26000a, 0xd800002a, 0xce01325a, 0xcd013257, 0xcd413258, 0xc429325a, + 0xc40c005e, 0x94c00fdb, 0x96800001, 0x95c00003, 0x7c40c001, 0x7c410001, 0x9780f5ca, 0xcf400100, + 0xc40c0007, 0xd9000010, 0x8c00120d, 0x8c001219, 0x8c001232, 0xccc00010, 0x8c001b6d, 0x7c408001, + 0x88000000, 0xc42d324e, 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x52ec0008, + 0x07740003, 
0x04240002, 0x269c003f, 0x7e5e4004, 0x7f67000f, 0x97000003, 0x7f674002, 0x0b740001, + 0x53740002, 0x7ef6c011, 0x1ab42010, 0x1ab8c006, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f7b400a, + 0x7f6b400a, 0xcf40001c, 0xd2c0001e, 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4180011, 0x9a000003, + 0x8c000bec, 0x80000b47, 0xc42c001d, 0xc4313256, 0x1b34060b, 0x1b300077, 0x7f370009, 0x13300017, + 0x04340100, 0x26ec00ff, 0xc03a8004, 0x7ef6c00a, 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16, + 0xc40c0032, 0xc410001d, 0x28cc0008, 0xccc00013, 0xc415325b, 0x7c418001, 0x7c418001, 0x18580037, + 0x251000ff, 0xc421325d, 0x262001ef, 0xce01325d, 0x99800004, 0x7d15400a, 0xcd41325b, 0x80000168, + 0x1d54001f, 0xcd41325b, 0x7c408001, 0x88000000, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004, + 0x7eae800a, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0xcd280200, 0xcd680208, + 0xcda80210, 0x9b00000c, 0x9b400014, 0x9b800017, 0xc428000b, 0xc42c000c, 0x12a80001, 0x26a80004, + 0x7eae800a, 0xc6930200, 0xc6970208, 0xc69b0210, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037, + 0x8c000190, 0xd8000032, 0x90000000, 0xd8000028, 0xd800002b, 0x80000168, 0xd900003f, 0x97c00002, + 0xd940003f, 0x80001082, 0xd9000040, 0x97c00002, 0xd9400040, 0x800010de, 0xc40c0021, 0x14fc0011, + 0x24f800ff, 0x33b80001, 0x97c0fffc, 0x9b800007, 0xccc00037, 0x8c000190, 0xd8000032, 0xd8000028, + 0xd800002b, 0x80001b70, 0xc4380004, 0xd8400008, 0xd8400013, 0xd88130b8, 0x04100000, 0x04140000, + 0xc418000e, 0x29980008, 0x7d83c001, 0xcd800013, 0xc4093249, 0x1888003e, 0x94800020, 0xd8400074, + 0x8c000671, 0x9a400009, 0xc418000e, 0x29980008, 0xcd800013, 0xc419324c, 0x259c0001, 0x1598001f, + 0x95c00016, 0x95800015, 0x99000003, 0xd8400036, 0x04100001, 0xc40c0021, 0x14d80011, 0x24e000ff, + 0x321c0002, 0x32200001, 0x9580ffee, 0x99c00014, 0x96000004, 0xccc00037, 0x04140001, 0x80000c30, + 0x9480000a, 0xd8000074, 0xc418005e, 0x95800f29, 0xcf800008, 0x80000c16, 0x94800004, 0xd8000074, + 0xc418005e, 0x95800f23, 0xd9c00036, 0x99400002, 0xccc00037, 0xcf800008, 0x80000c16, 0x94800004, + 0xd8000074, 0xc418005e, 0x95800f1a, 0xccc00037, 0xd8800036, 0x80001b70, 0x041c0003, 0x042c01c8, + 0x8c000d61, 0xc4200007, 0xc40c0077, 0x94c00001, 0x7c418001, 0xc428000e, 0x9600f502, 0x0a200001, + 0x98c0f500, 0x2aa80008, 0xce000010, 0x9a000f05, 0xce800013, 0xc431325a, 0xc42d3256, 0x1f30001f, + 0x16e4001f, 0xcf01325a, 0xc431325a, 0x97000001, 0x9640f4f4, 0xc434000b, 0x33740002, 0x9b40f4f1, + 0xc4353254, 0x16a80008, 0x1aec003c, 0x12a80015, 0x12ec001f, 0x1374000b, 0x7eae800a, 0xc02e4000, + 0x1774000d, 0x7eae800a, 0x7f6b400a, 0xcf400100, 0x12780001, 0x2bb80001, 0xc00ac005, 0xc00e0002, + 0x28cc8000, 0x28884900, 0x28cc0014, 0x80000ff3, 0xc43c0007, 0x7c40c001, 0x17fc0001, 0xd8400013, + 0x9bc00004, 0xd8400029, 0xc424005e, 0x96400ee1, 0xcc41c40a, 0xcc41c40c, 0xcc41c40d, 0x7c414001, + 0x24d0007f, 0x15580010, 0x255400ff, 0xcd01c411, 0xcd81c40f, 0xcd41c40e, 0xcc41c410, 0x7c414001, + 0x7c418001, 0x04200000, 0x18e80033, 0x18ec0034, 0xcc41c414, 0xcc41c415, 0xcd81c413, 0xcd41c412, + 0x18dc0032, 0x7c030011, 0x7c038011, 0x95c00027, 0x96c00002, 0xc431c417, 0xc435c416, 0x96800004, + 0x96c00002, 0xc439c419, 0xc43dc418, 0xc41c000e, 0x29dc0008, 0xcdc00013, 0xcf413261, 0x96c00002, + 0xcf013262, 0x96800004, 0xcfc13263, 0x96c00002, 0xcf813264, 0x18dc0030, 0xc43c0007, 0x95c00017, + 0x17fc0001, 0x9ac00005, 0x7d77000c, 0x9bc00015, 0x9700000a, 0x80000cd6, 0x51b80020, 0x53300020, + 0x7f97801a, 0x7f37001a, 0x7f3b000c, 0x9bc0000d, 0x97800002, 0x80000cd6, 0x9a000018, 0xd8400013, + 0x28200001, 0x80000ca7, 0x18dc0031, 
0x95c00003, 0xc435c40b, 0x9740fffd, 0xd800002a, 0x80001b70, + 0xc4280032, 0x2aa80008, 0xce800013, 0xc40d325b, 0x97000002, 0x800012c2, 0xc438001d, 0x1bb81ff0, + 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0xc428000e, 0xc43c0007, + 0x2aa80008, 0xc438001d, 0xce800013, 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077, + 0x7ff3c00a, 0x80000cf4, 0xc43d325a, 0x1bfc0677, 0x13fc0017, 0x04300100, 0x1bb81fe8, 0x7f73400a, + 0xc032800b, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b, 0x80000c16, 0xc43c0007, 0x7c40c001, + 0x18d42011, 0x17fc0001, 0x18d001e8, 0x24cc007f, 0x7cd4c00a, 0x9bc00004, 0xd8400029, 0xc428005e, + 0x96800e6c, 0x7c414001, 0x50580020, 0x7d59401a, 0xd1400072, 0xc8140072, 0x596001fc, 0x12200009, + 0x7ce0c00a, 0x7c418001, 0x505c0020, 0x7d9d801a, 0x7c41c001, 0x50600020, 0x7de1c01a, 0x7c420001, + 0xccc0001b, 0xd140001d, 0xd180001f, 0xd1c00020, 0xd8400021, 0x95000010, 0x04300000, 0xc428000f, + 0x9a80ffff, 0xc8240010, 0x7e5e800c, 0x9bc00015, 0x9a80000c, 0x9b000024, 0x28300001, 0x122c0004, + 0x06ec0001, 0x0aec0001, 0x9ac0ffff, 0xd8400021, 0x80000d1f, 0xc428000f, 0x9a80ffff, 0xc8240010, + 0x566c0020, 0xc428000e, 0x2aa80008, 0xce800013, 0xce413261, 0xcec13262, 0xd800002a, 0x80001b70, + 0xc4340032, 0x2b740008, 0xcf400013, 0xc40d325b, 0x96800005, 0x566c0020, 0xce413261, 0xcec13262, + 0x800012c2, 0xc438001d, 0x1bb81fe8, 0x7f8cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, 0xcd01325d, + 0x80001b70, 0xc43c0007, 0xc438001d, 0xc428000e, 0x2aa80008, 0xce800013, 0x13f4000c, 0x9bc00006, + 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x80000d57, 0xc43d325a, 0x1bfc0677, 0x13fc0017, + 0x04300100, 0x1bb81fe8, 0x7f73400a, 0xc0328009, 0x7fb7800a, 0x7ff3c00a, 0x7ffbc00a, 0xcfc1325b, + 0x80000c16, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0xc4253246, 0xc4113245, 0x04143000, 0xcd413267, + 0x52640020, 0x7e51001a, 0xc4153267, 0x7d2d0011, 0x19640057, 0x19580213, 0x19600199, 0x7da6400a, + 0x7e26400a, 0xd1000025, 0xce400024, 0xcdc00026, 0xd8400027, 0x04142000, 0xcfc00013, 0xcd413267, + 0xc4153267, 0x99400001, 0x90000000, 0x7c40c001, 0x18d001e8, 0x18d40030, 0x18d80034, 0x05280d83, + 0x7c420001, 0x7c424001, 0x86800000, 0x80000d8a, 0x8000016a, 0x80000d95, 0x80000db1, 0x8000016a, + 0x80000d95, 0x80000dbc, 0x11540010, 0x7e010001, 0x8c00187c, 0x7d75400a, 0xcd400013, 0xd4610000, + 0x9580f3d8, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0xd8000016, 0x526c0020, 0x18e80058, + 0x7e2ec01a, 0xd2c00072, 0xc82c0072, 0x5ae0073a, 0x7ea2800a, 0x9940000a, 0xce800024, 0xd2c00025, + 0xd4400026, 0xd8400027, 0x9580f3c6, 0xc4380012, 0x9b80ffff, 0x7c408001, 0x88000000, 0xdc3a0000, + 0x0bb80001, 0xce800024, 0xd2c00025, 0xcc400026, 0xd8400027, 0x9b80fffb, 0x9980fff5, 0x7c408001, + 0x88000000, 0xc02a0001, 0x2aa80001, 0x16200002, 0xce800013, 0xce01c405, 0xd441c406, 0x9580f3b1, + 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b, 0x32640002, 0x9a40000b, 0x11540010, + 0x29540002, 0xcd400013, 0xd4610000, 0x9580f3a5, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001, + 0x88000000, 0xd4400078, 0x80000168, 0xd8400029, 0xc40c005e, 0x94c00da7, 0x7c40c001, 0x50500020, + 0x7cd0c01a, 0xd0c00072, 0xc8280072, 0x5aac007e, 0x12d80017, 0x7c41c001, 0x7d9d800a, 0x56a00020, + 0x2620ffff, 0x7da1800a, 0x51980020, 0x7e82400a, 0x7e58c01a, 0x19d4003d, 0x28182002, 0x99400030, + 0x8c00104f, 0xc430000d, 0xc4340035, 0xd800002a, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005, + 0xc011000f, 0xc4240004, 0x11a00002, 0x7c908009, 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, + 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008, 0x20880188, 
0x54ec0020, 0x7cb4800a, 0xc4300027, + 0x04380008, 0xd1400025, 0xcf000024, 0x20240090, 0x7ca48001, 0xcc800026, 0xccc00026, 0xcec00026, + 0xcec00026, 0x28240004, 0xcc000026, 0x0a640001, 0x9a40fffe, 0x9a800005, 0x32280000, 0x9a800002, + 0x9a000000, 0x7c018001, 0xd8400027, 0xd8000016, 0xcf80003a, 0xd901a2a4, 0x80001037, 0xc418000e, + 0x29980008, 0xcd800013, 0xc421326c, 0x1624001f, 0x9a40fffe, 0xd841325f, 0xd8800033, 0xc43c0009, + 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xd8800034, 0xc429325f, + 0x26ac0001, 0x9ac0fffe, 0x26ac0002, 0x96c00003, 0xd800002a, 0x80001b70, 0xc43c0007, 0xc430001e, + 0xd8800033, 0x13f4000c, 0x1b301ff0, 0x2b300300, 0x2330003f, 0x7f37000a, 0x9680000b, 0xc43c0009, + 0x27fc0004, 0x97c0fffe, 0xd8400039, 0xd0c00038, 0xc43c0022, 0x9bc0ffff, 0xcf01325b, 0xd8800034, + 0x80000c16, 0xd8800034, 0x8c0001a2, 0x80001b70, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a, + 0x18ac0024, 0x2b304000, 0x7c40c001, 0xcec00008, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, + 0x29980008, 0xcd800013, 0xc4113249, 0x1910003e, 0x99000002, 0xd840003d, 0x7c410001, 0xd4400078, + 0x51100020, 0xcf01326c, 0x7cd0c01a, 0xc421326c, 0x12a80014, 0x2220003f, 0x7e2a000a, 0xcd800013, + 0xce01326c, 0xd8800033, 0xc43c0009, 0x27fc0004, 0x97c0fffe, 0xd8000039, 0xd0c00038, 0xc43c0022, + 0x9bc0ffff, 0xd8800034, 0x80001190, 0x7c40c001, 0x18dc003d, 0x95c00004, 0x041c0001, 0x042c01c8, + 0x8c000d61, 0x18d40030, 0x18d001e8, 0x18fc0034, 0x24e8000f, 0x06a80e71, 0x7c418001, 0x7c41c001, + 0x86800000, 0x80000edd, 0x80000e91, 0x80000e91, 0x80000ea1, 0x80000eaa, 0x80000e7c, 0x80000e7f, + 0x80000e7f, 0x80000e87, 0x80000e8f, 0x8000016a, 0x51dc0020, 0x7d9e001a, 0x80000ee6, 0xc420000e, + 0x2a200008, 0xce000013, 0xc4213262, 0xc4253261, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc420000e, + 0x2a200008, 0xce000013, 0xc4213264, 0xc4253263, 0x52200020, 0x7e26001a, 0x80000ee6, 0xc820001f, + 0x80000ee6, 0x18e82005, 0x51e00020, 0x2aa80000, 0x7da1801a, 0xd1800072, 0xc8180072, 0x59a001fc, + 0x12200009, 0x7ea2800a, 0xce80001c, 0xd180001e, 0xd8400021, 0xc428000f, 0x9a80ffff, 0xc8200011, + 0x80000ee6, 0x15980002, 0xd8400013, 0xcd81c400, 0xc421c401, 0x95400041, 0xc425c401, 0x52640020, + 0x7e26001a, 0x80000ee6, 0x31ac2580, 0x9ac00011, 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d, + 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009, 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005, + 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004, 0xc4340004, 0xd8400008, 0x80000ede, 0x39ac7c06, + 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002, + 0x80000ebc, 0x39acc335, 0x3db0c336, 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9002, 0x3db09001, + 0x9ac00003, 0x97000002, 0x80000ebc, 0x39ac9012, 0x3db09011, 0x9ac00003, 0x97000002, 0x80000ebc, + 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000ebc, 0xc4340004, 0xd8400013, 0xc5a10000, + 0x95400005, 0x05980001, 0xc5a50000, 0x52640020, 0x7e26001a, 0xcf400008, 0x05280eea, 0x7c418001, + 0x7c41c001, 0x86800000, 0x80000ef1, 0x8000016a, 0x80000efe, 0x80000f11, 0x80000f2e, 0x80000efe, + 0x80000f1f, 0xc4340004, 0xd8400013, 0xce190000, 0x95400005, 0x05980001, 0x56200020, 0xce190000, + 0xcf400008, 0x97c0f26f, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x51ec0020, 0x18e80058, + 0x7daec01a, 0xd2c00072, 0xc82c0072, 0x5af8073a, 0x7eba800a, 0xd2c00025, 0xce800024, 0xce000026, + 0x95400003, 0x56240020, 0xce400026, 0xd8400027, 0x97c0f25c, 0xc4380012, 0x9b80ffff, 0x7c408001, + 0x88000000, 0xc02a0001, 0x2aa80001, 0x15980002, 0xce800013, 0xcd81c405, 0xce01c406, 
0x95400003, + 0x56240020, 0xce41c406, 0x97c0f24e, 0xc439c409, 0x97800001, 0x7c408001, 0x88000000, 0xc424000b, + 0x32640002, 0x9a40f247, 0xd8800013, 0xce190000, 0x95400004, 0x05980001, 0x56200020, 0xce190000, + 0x97c0f240, 0xd8400013, 0xc439c040, 0x97800001, 0x7c408001, 0x88000000, 0x31ac2580, 0x9ac00011, + 0x31ac260c, 0x9ac0000f, 0x31ac0800, 0x9ac0000d, 0x31ac0828, 0x9ac0000b, 0x31ac2440, 0x9ac00009, + 0x31ac2390, 0x9ac00007, 0x31ac0093, 0x9ac00005, 0x31ac31dc, 0x9ac00003, 0x31ac31e6, 0x96c00004, + 0xc4340004, 0xd8400008, 0x80000ef2, 0x39ac7c06, 0x3db07c00, 0x9ac00003, 0x97000002, 0x80000f40, + 0x39acc337, 0x3db0c330, 0x9ac00003, 0x97000002, 0x80000f40, 0x39acc335, 0x3db0c336, 0x9ac00003, + 0x97000002, 0x80000f40, 0x39acec70, 0x3db0ec6f, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9002, + 0x3db09002, 0x9ac00003, 0x97000002, 0x80000f40, 0x39ac9012, 0x3db09012, 0x9ac00003, 0x97000002, + 0x80000f40, 0x80000ef1, 0xc40c0006, 0x98c0ffff, 0x7c40c001, 0x7c410001, 0x7c414001, 0x7c418001, + 0x7c41c001, 0x7c43c001, 0x95c00001, 0xc434000e, 0x2b740008, 0x2b780001, 0xcf400013, 0xd8c1325e, + 0xcf80001a, 0xd8400013, 0x7c034001, 0x7c038001, 0x18e0007d, 0x32240003, 0x9a400006, 0x32240000, + 0x9a400004, 0xcd01c080, 0xcd41c081, 0x80000f88, 0x51640020, 0x7e52401a, 0xd2400072, 0xc8280072, + 0xce81c080, 0x56ac0020, 0x26f0ffff, 0xcf01c081, 0x1af000fc, 0x1334000a, 0x24e02000, 0x7f63400a, + 0x18e00074, 0x32240003, 0x9a400006, 0x32240000, 0x9a400004, 0xcd81c082, 0xcdc1c083, 0x80000f9d, + 0x51e40020, 0x7e5a401a, 0xd2400072, 0xc8280072, 0xce81c082, 0x56ac0020, 0x26f0ffff, 0xcf01c083, + 0x1af000fc, 0x13380016, 0x18e00039, 0x12200019, 0x7fa3800a, 0x7fb7800a, 0x18e0007d, 0x1220001d, + 0x7fa3800a, 0x18e00074, 0x12200014, 0x7fa3800a, 0xcf81c078, 0xcfc1c084, 0x80000c16, 0x7c40c001, + 0x18dc003d, 0x95c00004, 0x041c0000, 0x042c01c8, 0x8c000d61, 0x18d001e8, 0x31140005, 0x99400003, + 0x31140006, 0x95400002, 0x8c00104f, 0x05280fb7, 0x28140002, 0xcd400013, 0x86800000, 0x80000fbe, + 0x80000fbe, 0x80000fc2, 0x80000fbe, 0x80000fd1, 0x80000ff2, 0x80000ff2, 0x24cc003f, 0xccc1a2a4, + 0x7c408001, 0x88000000, 0x7c414001, 0x18e80039, 0x52a8003b, 0x50580020, 0x24cc003f, 0x7d59401a, + 0xd1400072, 0xc8140072, 0x7d69401a, 0xc41c0017, 0x99c0ffff, 0xd140004b, 0xccc1a2a4, 0x7c408001, + 0x88000000, 0xc414000d, 0x04180001, 0x24cc003f, 0x7d958004, 0xcd800035, 0xccc1a2a4, 0xc43c000e, + 0x2bfc0008, 0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x97c00002, 0xd8400074, 0xc4100019, 0x7d150005, + 0x25100001, 0x9500000b, 0x97c0fffc, 0xc4180021, 0x159c0011, 0x259800ff, 0x31a00003, 0x31a40001, + 0x7e25800a, 0x95c0fff5, 0x9580fff4, 0x80000fef, 0xc411326f, 0x1d100010, 0xcd01326f, 0x97c00002, + 0xd8000074, 0x80001b70, 0x04380000, 0xc430000d, 0xc8140023, 0xc4180081, 0x13300005, 0xc011000f, + 0xc4240004, 0x33b40003, 0x97400003, 0xc0340008, 0x80000ffe, 0xc4340035, 0x11a00002, 0x7c908009, + 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x282c2002, + 0x208801a8, 0x3e280008, 0x7cb4800a, 0xcec00013, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024, + 0x20240030, 0x7ca48001, 0xcc800026, 0xccc00026, 0x9b800013, 0xcc400026, 0x7c414001, 0x28340000, + 0xcf400013, 0x507c0020, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013, + 0xcd400026, 0xcfc00026, 0xd4400026, 0x9a80000e, 0x32280000, 0x9a80000b, 0x8000102f, 0xcc000026, + 0xcc000026, 0xcc000026, 0xcc000026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000, + 0x7c018001, 0xcc000026, 0xd8400027, 0x1cccfe08, 0xd8800013, 0xcec0003a, 0xccc1a2a4, 0xc43c000e, + 0x2bfc0008, 
0xcfc00013, 0xc43d3249, 0x1bfc003e, 0x9bc00007, 0xc428000e, 0x16a80008, 0xce800009, + 0xc42c005e, 0x96c00b33, 0xd840003c, 0xc4200025, 0x7da2400f, 0x7da28002, 0x7e1ac002, 0x0aec0001, + 0x96400002, 0x7d2ac002, 0x3ef40010, 0x9b40f11d, 0x04380030, 0xcf81325e, 0x80000c16, 0xde410000, + 0xdcc10000, 0xdd010000, 0xdd410000, 0xdd810000, 0xddc10000, 0xde010000, 0xc40c000e, 0x7c024001, + 0x28cc0008, 0xccc00013, 0xc8100086, 0x5510003f, 0xc40d3249, 0x18cc003e, 0x98c00003, 0x99000011, + 0x80001075, 0x9900000c, 0xc40c0026, 0xc4100081, 0xc4140025, 0x7d15800f, 0x7d15c002, 0x7d520002, + 0x0a200001, 0x95800002, 0x7cde0002, 0x3e20001a, 0x9a000009, 0x040c0030, 0xccc1325e, 0x80001071, + 0xd9c00036, 0xd8400029, 0xc40c005e, 0x94c00b01, 0x04240001, 0xdc200000, 0xdc1c0000, 0xdc180000, + 0xdc140000, 0xdc100000, 0xdc0c0000, 0x96400004, 0xdc240000, 0xdc0c0000, 0x80000c16, 0xdc240000, + 0x90000000, 0xcc40003f, 0xd8c00010, 0xc4080029, 0xcc80003b, 0xc418000e, 0x18a800e5, 0x1d980008, + 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0x18a400e5, 0x12500009, 0x248c0008, 0x94c00006, + 0x200c006d, 0x7cd0c00a, 0xccc1326c, 0xc421326c, 0x96000001, 0xcd800013, 0x200c0228, 0x7cd0c00a, + 0xccc1326c, 0xc421326c, 0x96000001, 0xc40c002a, 0xc410002b, 0x18881fe8, 0x18d4072c, 0x18cc00d1, + 0x7cd4c00a, 0x3094000d, 0x38d80000, 0x311c0003, 0x99400006, 0x30940007, 0x1620001f, 0x9940001d, + 0x9a000023, 0x800010c4, 0x9580001a, 0x99c00019, 0xccc00041, 0x25140001, 0xc418002c, 0x9940000d, + 0x259c007f, 0x95c00013, 0x19a00030, 0xcdc0001b, 0xd8400021, 0xd8400022, 0xc430000f, 0x17300001, + 0x9b00fffe, 0x9a000012, 0xd8400023, 0x800010cb, 0x199c0fe8, 0xcdc0001b, 0xd8400021, 0xd8400023, + 0xc430000f, 0x17300001, 0x9b00fffe, 0x800010cb, 0xd8c00010, 0xd8000022, 0xd8000023, 0xc430005e, + 0x97000aac, 0x7c408001, 0x88000000, 0xc43c000e, 0xc434002e, 0x2bfc0008, 0x2020002c, 0xcfc00013, + 0xce01326c, 0x17780001, 0x27740001, 0x07a810d8, 0xcf400010, 0xc421326c, 0x96000001, 0x86800000, + 0x80000168, 0x80000aa7, 0x80000bfc, 0x800012e9, 0x8000104c, 0xcc400040, 0xd8800010, 0xc4180032, + 0x29980008, 0xcd800013, 0x200c007d, 0xccc1325b, 0xc411325b, 0x95000001, 0x7c408001, 0x88000000, + 0x28240007, 0xde430000, 0xd4400078, 0x80001190, 0xcc80003b, 0x24b00008, 0xc418000e, 0x1330000a, + 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, 0xc40d3249, 0x18cc003e, + 0x98c00002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x7c40c001, 0x7c410001, 0x7c414001, + 0x192400fd, 0x50580020, 0x7d59401a, 0x7c41c001, 0x06681110, 0x7c420001, 0xcc400078, 0x18ac0024, + 0x19180070, 0x19100078, 0xcec00008, 0x18f40058, 0x5978073a, 0x7f7b400a, 0x97000001, 0x86800000, + 0x80001117, 0x80001118, 0x80001122, 0x8000112d, 0x80001130, 0x80001133, 0x8000016a, 0x8000117b, + 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025, 0xcf400024, 0xcdc00026, + 0xd8400027, 0x8000117b, 0x24ec0f00, 0x32ec0600, 0x96c00003, 0xc4300006, 0x9b00ffff, 0xd1400025, + 0xcf400024, 0xcdc00026, 0xce000026, 0xd8400027, 0x8000117b, 0xc81c001f, 0x55e00020, 0x80001122, + 0xc81c0020, 0x55e00020, 0x80001122, 0x8c00116b, 0xd8400013, 0xc02a0200, 0x7e8e8009, 0x22a8003d, + 0x22a80074, 0x2774001c, 0x13740014, 0x7eb6800a, 0x25ecffff, 0x55700020, 0x15f40010, 0x13740002, + 0x275c001f, 0x95c00027, 0x7c018001, 0x7f41c001, 0x15dc0002, 0x39e00008, 0x25dc0007, 0x7dc1c01e, + 0x05dc0001, 0x96000004, 0x05e40008, 0x8c00116e, 0x80001168, 0x7dc2001e, 0x06200001, 0x05e40008, + 0x7e62000e, 0x9a000004, 0x7da58001, 0x8c00116e, 0x80001165, 0x7dc2001e, 0x06200001, 0x7e1a0001, + 0x05cc0008, 0x7e0d000e, 0x95000007, 
0x7e02401e, 0x06640001, 0x06640008, 0x05d80008, 0x8c00116e, + 0x80001168, 0x7dc2401e, 0x06640001, 0x7da58001, 0x8c00116e, 0x05e00008, 0x7da2000c, 0x9600ffe6, + 0x17640002, 0x8c00116e, 0x80001190, 0xc4200006, 0x9a00ffff, 0x90000000, 0x8c00116b, 0xc420000e, + 0x2a200001, 0xce00001a, 0xce81c078, 0xcec1c080, 0xcc01c081, 0xcd41c082, 0xcf01c083, 0x12640002, + 0x22640435, 0xce41c084, 0x90000000, 0x0528117e, 0x312c0003, 0x86800000, 0x80001190, 0x80001185, + 0x80001182, 0x80001182, 0xc4300012, 0x9b00ffff, 0x9ac0000c, 0xc03a0400, 0xc4340004, 0xd8400013, + 0xd8400008, 0xc418000e, 0x15980008, 0x1198001c, 0x7d81c00a, 0xcdc130b7, 0xcf8130b5, 0xcf400008, + 0x04240008, 0xc418000e, 0xc41c0049, 0x19a000e8, 0x29a80008, 0x7de2c00c, 0xce800013, 0xc421325e, + 0x26200010, 0xc415326d, 0x9a000006, 0xc420007d, 0x96000004, 0x96c00003, 0xce40003e, 0x800011a3, + 0x7d654001, 0xcd41326d, 0x7c020001, 0x96000005, 0xc4100026, 0xc4240081, 0xc4140025, 0x800011b6, + 0xc4253279, 0xc415326d, 0xc431326c, 0x2730003f, 0x3b380006, 0x97800004, 0x3f38000b, 0x9b800004, + 0x800011b4, 0x04300006, 0x800011b4, 0x0430000b, 0x04380002, 0x7fb10004, 0x7e57000f, 0x7e578002, + 0x7d67c002, 0x0be40001, 0x97000002, 0x7d3a4002, 0x202c002c, 0xc421325e, 0x04280020, 0xcec1326c, + 0x26200010, 0x3e640010, 0x96000003, 0x96400002, 0xce81325e, 0xc4300028, 0xc434002e, 0x17780001, + 0x27740001, 0x07a811cf, 0x9b00feb8, 0xcf400010, 0xc414005e, 0x954009a7, 0x86800000, 0x80000168, + 0x80000aa7, 0x80000bfc, 0x800012e9, 0x80000168, 0x8c00120d, 0x7c40c001, 0xccc1c07c, 0xcc41c07d, + 0xcc41c08c, 0x7c410001, 0xcc41c079, 0xcd01c07e, 0x7c414001, 0x18f0012f, 0x18f40612, 0x18cc00c1, + 0x7f73400a, 0x7cf7400a, 0x39600004, 0x9a000002, 0xc0140004, 0x11600001, 0x18fc003e, 0x9740001c, + 0xcf400041, 0xc425c07f, 0x97c00003, 0x166c001f, 0x800011ee, 0x1a6c003e, 0x96c00006, 0x04200002, + 0x0a200001, 0x9a00ffff, 0xd8400013, 0x800011e8, 0xc428002c, 0x96800010, 0x26ac007f, 0xcec0001b, + 0xd8400021, 0x1ab00030, 0x1aac0fe8, 0xc434000f, 0x9b40ffff, 0x97000008, 0xcec0001b, 0xd8400021, + 0xc434000f, 0x9b40ffff, 0x80001205, 0x0a200001, 0x9a00ffff, 0xd8400013, 0xc425c07f, 0x166c001f, + 0x11600001, 0x9ac0fffa, 0x8c001232, 0x7c408001, 0x88000000, 0xd8000033, 0xc438000b, 0xc43c0009, + 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078, 0x7ffbc00c, 0x97c0fffd, + 0x90000000, 0xc03a2800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380040, + 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380060, 0xcf80001b, 0xd8400021, 0xc438000f, + 0x9b80ffff, 0x04380002, 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, + 0x9bc0fffa, 0x90000000, 0xd8400013, 0xd801c07f, 0xd8400013, 0xc43dc07f, 0xcfc00078, 0xd8000034, + 0x90000000, 0xc03ae000, 0xcf81c200, 0xc03a0800, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, + 0xcc01c07e, 0x04380040, 0xcf80001b, 0xd8400021, 0xc438000f, 0x9b80ffff, 0x04380002, 0x0bb80001, + 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000, 0xc03ae000, + 0xcf81c200, 0xc03a4000, 0xcf81c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04380002, + 0x0bb80001, 0x9b80ffff, 0xd8400013, 0xc43dc07f, 0x17fc001f, 0x04380010, 0x9bc0fffa, 0x90000000, + 0xc40c0007, 0x30d00002, 0x99000052, 0xd8400029, 0xc424005e, 0x9640090f, 0x7c410001, 0xc428000e, + 0x1514001f, 0x19180038, 0x2aa80008, 0x99400030, 0x30dc0001, 0xce800013, 0x99c0000a, 0xc42d324e, + 0xc431324d, 0x52ec0020, 0x7ef2c01a, 0xc435324f, 0xc4293256, 0x1ab0c006, 0x52ec0008, 0x8000127f, + 0xc42d3258, 0xc4313257, 0x52ec0020, 0x7ef2c01a, 0xc4353259, 
0xc429325a, 0x1ab0c012, 0x07740001, + 0x04240002, 0x26a0003f, 0x7e624004, 0x7f67800f, 0x97800002, 0x04340000, 0x53740002, 0x7ef6c011, + 0x1ab42010, 0x16a8000c, 0x26a80800, 0x2b740000, 0x7f73400a, 0x7f6b400a, 0xcf40001c, 0xd2c0001e, + 0xd8400021, 0xc438000f, 0x9b80ffff, 0xc4100011, 0x1514001f, 0x99400006, 0x9980000a, 0x8c0012e1, + 0xc40c0007, 0x04100000, 0x80001267, 0xd800002a, 0xc424005e, 0x964008d7, 0xd9800036, 0x80000c16, + 0xc42c001d, 0x95c00005, 0xc431325a, 0x1b300677, 0x11dc000c, 0x800012aa, 0xc4313256, 0x1b34060b, + 0x1b300077, 0x7f37000a, 0x13300017, 0x04340100, 0x26ec00ff, 0xc03a8002, 0x7ef6c00a, 0x7edec00a, + 0x7f3b000a, 0x7ef2c00a, 0xcec1325b, 0x80000c16, 0xc4140032, 0xc410001d, 0x29540008, 0xcd400013, + 0xc40d325b, 0x1858003f, 0x251000ff, 0x99800007, 0x7d0cc00a, 0xccc1325b, 0xc411325d, 0x251001ef, + 0xcd01325d, 0x80000168, 0x18d0006c, 0x18d407f0, 0x9900000e, 0x04100002, 0xc4193256, 0xc41d324f, + 0x2598003f, 0x7d190004, 0x7d5d4001, 0x7d52000f, 0x9a000003, 0xcd41324f, 0x800012d8, 0x7d514002, + 0xcd41324f, 0x800012d8, 0xc4193259, 0xc41d325a, 0x7d958001, 0x7dd5c002, 0xcd813259, 0xcdc1325a, + 0xc411325d, 0x251001ef, 0xcd01325d, 0x1ccc001e, 0xccc1325b, 0xc40d325b, 0x94c00001, 0x7c408001, + 0x88000000, 0xc40c0021, 0xc4340028, 0x14f00010, 0xc4380030, 0xc43c0007, 0x9b000004, 0x9b40000c, + 0x9b80000f, 0x90000000, 0x17300001, 0x9b000005, 0xccc00037, 0x8c000190, 0xd8000032, 0x90000000, + 0xd8000028, 0xd800002b, 0x80000168, 0xd980003f, 0x97c00002, 0xd9c0003f, 0x80001082, 0xd9800040, + 0x97c00002, 0xd9c00040, 0x800010de, 0xc43c0007, 0x33f80003, 0x97800051, 0xcc80003b, 0x24b00008, + 0xc418000e, 0x1330000a, 0x18a800e5, 0x1d980008, 0x12a80008, 0x7da9800a, 0x29980008, 0xcd800013, + 0xc4353249, 0x1b74003e, 0x9b400002, 0xd840003d, 0x2b304000, 0xcf01326c, 0xc431326c, 0x97000001, + 0x7c434001, 0x1b4c00f8, 0x7c410001, 0x7c414001, 0x50700020, 0x04e81324, 0x18ac0024, 0x7c41c001, + 0x50600020, 0xcc400078, 0x30e40004, 0x9a400007, 0x7d71401a, 0x596401fc, 0x12640009, 0x1b74008d, + 0x7e76400a, 0x2a640000, 0xcec00008, 0x86800000, 0x8000016a, 0x8000016a, 0x8000016a, 0x8000016a, + 0x8000132c, 0x8000133b, 0x80001344, 0x8000016a, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42530b5, + 0x1a68003a, 0x9a80fffe, 0x2024003a, 0xc418000e, 0x25980700, 0x11980014, 0x7d19000a, 0xcd0130b7, + 0xce4130b5, 0xcf400008, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f, 0x9a80ffff, + 0xc4240011, 0x7de6800f, 0x9a80ffea, 0x80001190, 0xce40001c, 0xd140001e, 0xd8400021, 0xc428000f, + 0x9a80ffff, 0xc8240011, 0x7de1c01a, 0x7de6800f, 0x9a80ffe0, 0x80001190, 0x8c00104f, 0x28182002, + 0xc430000d, 0xc4340035, 0xcd800013, 0xc8140023, 0xc4180081, 0x13300005, 0xc4240004, 0x11a00002, + 0x12640004, 0x7d614011, 0xc4100026, 0x05980008, 0x7ca4800a, 0x7d1a0002, 0x7cb0800a, 0x3e280008, + 0x7cb4800a, 0xc4300027, 0x042c0008, 0xd1400025, 0xcf000024, 0x20240030, 0x7ca48001, 0xcc800026, + 0x7c434001, 0x1b4c00f8, 0xcf400026, 0xcc400026, 0x28340000, 0xcf400013, 0x7c414001, 0x507c0020, + 0x30e40004, 0x9a400005, 0x7d7d401a, 0xd1400072, 0xc8140072, 0x557c0020, 0x28342002, 0xcf400013, + 0xcd400026, 0xcfc00026, 0xd4400026, 0xcc000026, 0x9a800005, 0x32280000, 0x9a800002, 0x9a000000, + 0x7c018001, 0xd8400027, 0xd8800013, 0x04380028, 0xcec0003a, 0xcf81a2a4, 0x80001037, 0xd8400029, + 0xc40c005e, 0x94c007eb, 0x7c40c001, 0x50500020, 0x7d0d001a, 0xd1000072, 0xc8100072, 0x591c01fc, + 0x11dc0009, 0x45140210, 0x595801fc, 0x11980009, 0x29dc0000, 0xcdc0001c, 0xd140001e, 0xd8400021, + 0xc418000f, 0x9980ffff, 0xc4200011, 0x1624001f, 0x96400069, 0xc40c000e, 0x28cc0008, 
0xccc00013, + 0xce013249, 0x1a307fe8, 0xcf00000a, 0x23304076, 0xd1000001, 0xcf000001, 0xc41d3254, 0xc4253256, + 0x18cc00e8, 0x10cc0015, 0x4514020c, 0xd140001e, 0xd8400021, 0xc418000f, 0x9980ffff, 0xc4200011, + 0xce013248, 0x1a2001e8, 0x12200014, 0x2a204001, 0xce000013, 0x1a64003c, 0x1264001f, 0x11dc0009, + 0x15dc000b, 0x7dcdc00a, 0x7e5dc00a, 0xcdc00100, 0xd8800013, 0xd8400010, 0xd800002a, 0xd8400008, + 0xcf00000d, 0xcf00000a, 0x8c001427, 0x04340022, 0x07740001, 0x04300010, 0xdf430000, 0x7c434001, + 0x7c408001, 0xd4412e01, 0x0434001e, 0xdf430000, 0xd4400078, 0xdf030000, 0xd4412e40, 0xd8400013, + 0xcc41c030, 0xcc41c031, 0x248dfffe, 0xccc12e00, 0xd8800013, 0xcc812e00, 0x7c434001, 0x7c434001, + 0x8c00142b, 0xd8000010, 0xc40c000e, 0x28cc0008, 0xccc00013, 0x45140248, 0xd140001e, 0xd8400021, + 0xc418000f, 0x9980ffff, 0xc8200011, 0xce013257, 0x56200020, 0xce013258, 0x0434000c, 0xdb000024, + 0xd1400025, 0xd8000026, 0xd8000026, 0xd8400027, 0x45540008, 0xd140001e, 0xd8400021, 0xc418000f, + 0x9980ffff, 0xc8200011, 0xce013259, 0x56200020, 0xc0337fff, 0x7f220009, 0xce01325a, 0x55300020, + 0x7d01c001, 0x042c01d0, 0x8c000d61, 0x06ec0004, 0x7f01c001, 0x8c000d61, 0x041c0002, 0x042c01c8, + 0x8c000d61, 0xc4380012, 0x9b80ffff, 0xd800002a, 0x80000aa7, 0xd800002a, 0x7c408001, 0x88000000, + 0xd8400029, 0x7c40c001, 0x50500020, 0x8c001427, 0x7cd0c01a, 0xc4200007, 0xd0c00072, 0xc8240072, + 0xd240001e, 0x7c414001, 0x19682011, 0x5a6c01fc, 0x12ec0009, 0x7eeac00a, 0x2aec0000, 0xcec0001c, + 0xd8400021, 0xc430000f, 0x9b00ffff, 0xc4180011, 0x7c438001, 0x99800007, 0xdf830000, 0xcfa0000c, + 0x8c00142b, 0xd4400078, 0xd800002a, 0x80001b70, 0x8c00142b, 0xd800002a, 0x80001b70, 0xd8000012, + 0xc43c0008, 0x9bc0ffff, 0x90000000, 0xd8400012, 0xc43c0008, 0x97c0ffff, 0x90000000, 0xc4380007, + 0x7c40c001, 0x17b80001, 0x18d40038, 0x7c410001, 0x9b800004, 0xd8400029, 0xc414005e, 0x9540073d, + 0x18c80066, 0x7c414001, 0x30880001, 0x7c418001, 0x94800008, 0x8c00187c, 0xcf400013, 0xc42c0004, + 0xd8400008, 0xcd910000, 0xcec00008, 0x7d410001, 0x043c0000, 0x7c41c001, 0x7c420001, 0x04240001, + 0x06200001, 0x4220000c, 0x0a640001, 0xcc000078, 0x9a40fffe, 0x24e80007, 0x24ec0010, 0xd8400013, + 0x9ac00006, 0xc42c0004, 0xd8400008, 0xc5310000, 0xcec00008, 0x80001465, 0x51540020, 0x7d15001a, + 0xd1000072, 0xc82c0072, 0xd2c0001e, 0x18f02011, 0x5aec01fc, 0x12ec0009, 0x7ef2c00a, 0x2aec0000, + 0xcec0001c, 0xd8400021, 0xc42c000f, 0x9ac0ffff, 0xc4300011, 0x96800012, 0x12a80001, 0x0aa80001, + 0x06a8146a, 0x7f1f0009, 0x86800000, 0x7f1b400f, 0x80001478, 0x7f1b400e, 0x80001478, 0x7f1b400c, + 0x8000147a, 0x7f1b400d, 0x8000147a, 0x7f1b400f, 0x8000147a, 0x7f1b400e, 0x8000147a, 0x7f334002, + 0x97400014, 0x8000147b, 0x9b400012, 0x9b800005, 0x9bc0001f, 0x7e024001, 0x043c0001, 0x8000144a, + 0xc40c0032, 0xc438001d, 0x28cc0008, 0xccc00013, 0xc43d325b, 0x1bb81ff0, 0x7fbfc00a, 0xcfc1325b, + 0xc411325d, 0x251001ef, 0xcd01325d, 0x80001b70, 0x94800007, 0x8c00187c, 0xcf400013, 0xc42c0004, + 0xd8400008, 0xcd910000, 0xcec00008, 0x9b800003, 0xd800002a, 0x80001b70, 0xc40c0032, 0x28cc0008, + 0xccc00013, 0xc40d325b, 0x800012c2, 0xc40c000e, 0xc43c0007, 0xc438001d, 0x28cc0008, 0xccc00013, + 0x13f4000c, 0x9bc00006, 0xc43d3256, 0x1bf0060b, 0x1bfc0077, 0x7ff3c00a, 0x800014a9, 0xc43d325a, + 0x1bfc0677, 0x04300100, 0x1bb81ff0, 0x7f73400a, 0xc0328007, 0x7fb7800a, 0x13fc0017, 0x7ff3c00a, + 0x7ffbc00a, 0xcfc1325b, 0xc03a0002, 0xc4340004, 0xd8400013, 0xd8400008, 0xcf8130b5, 0xcf400008, + 0x80000c16, 0x043c0000, 0xc414000e, 0x29540008, 0xcd400013, 0xc4193246, 0xc41d3245, 0x51980020, + 0x7dd9c01a, 
0x45dc0390, 0xc4313267, 0x04183000, 0xcd813267, 0x1b380057, 0x1b340213, 0x1b300199, + 0x7f7b400a, 0x7f73400a, 0xcf400024, 0xd1c00025, 0xcc800026, 0x7c420001, 0xce000026, 0x7c424001, + 0xce400026, 0x7c428001, 0xce800026, 0x7c42c001, 0xcec00026, 0x7c430001, 0xcf000026, 0x7c434001, + 0xcf400026, 0x7c438001, 0xcf800026, 0xd8400027, 0xcd400013, 0x04182000, 0xcd813267, 0xd840004f, + 0x1a0800fd, 0x109c000a, 0xc4193265, 0x7dd9c00a, 0xcdc13265, 0x2620ffff, 0xce080228, 0x9880000e, + 0xce480250, 0xce880258, 0xd8080230, 0xd8080238, 0xd8080240, 0xd8080248, 0xd8080268, 0xd8080270, + 0xd8080278, 0xd8080280, 0xd800004f, 0x97c0ec75, 0x90000000, 0x040c0000, 0x041c0010, 0x26180001, + 0x09dc0001, 0x16200001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80230, 0xd8080238, 0xd8080240, + 0xd8080248, 0x040c0000, 0xce480250, 0xce880258, 0x52a80020, 0x7e6a401a, 0x041c0020, 0x66580001, + 0x09dc0001, 0x56640001, 0x95800002, 0x04cc0001, 0x99c0fffb, 0xccc80260, 0xd8080268, 0xd8080270, + 0xd8080278, 0xd8080280, 0x040c0000, 0xcec80288, 0xcf080290, 0xcec80298, 0xcf0802a0, 0x040c0000, + 0x041c0010, 0xcf4802a8, 0x27580001, 0x09dc0001, 0x17740001, 0x95800002, 0x04cc0001, 0x99c0fffb, + 0xccc802b0, 0xd80802b8, 0x178c000b, 0x27b8003f, 0x7cf8c001, 0xcf8802c0, 0xccc802c8, 0xcf8802d0, + 0xcf8802d8, 0xd800004f, 0x97c00002, 0x90000000, 0x7c408001, 0x88000000, 0xc40c000e, 0x28cc0008, + 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c418001, 0x25b8ffff, 0xc4930240, 0xc48f0238, 0x04cc0001, + 0x24cc000f, 0x7cd2800c, 0x9a80000b, 0xc5230309, 0x2620ffff, 0x7e3a400c, 0x9a400004, 0x05100001, + 0x2510000f, 0x80001539, 0xcd08034b, 0xd4400078, 0x80000168, 0xc48f0230, 0xc4930240, 0x98c00004, + 0xcd880353, 0x8c00163f, 0xc49b0353, 0xc4930238, 0xc48f0228, 0x05100001, 0x2510000f, 0x7cd14005, + 0x25540001, 0x99400004, 0x05100001, 0x2510000f, 0x8000154f, 0xc48f0230, 0x7c41c001, 0xcd080238, + 0xcd08034b, 0x08cc0001, 0x2598ffff, 0x3d200008, 0xccc80230, 0xcd900309, 0xd8100319, 0x04340801, + 0x2198003f, 0xcf400013, 0xcd910ce7, 0xc4190ce6, 0x7d918005, 0x25980001, 0x9580fffd, 0x7d918004, + 0xcd810ce6, 0x9a000003, 0xcdd1054f, 0x8000156e, 0x090c0008, 0xcdcd050e, 0x040c0000, 0x110c0014, + 0x28cc4001, 0xccc00013, 0xcc41230a, 0xcc41230b, 0xcc41230c, 0xcc41230d, 0xcc480329, 0xcc48032a, + 0xcc4802e0, 0xd8000055, 0xc48f02e0, 0x24d8003f, 0x09940001, 0x44100001, 0x9580002c, 0x95400005, + 0x09540001, 0x51100001, 0x69100001, 0x8000157f, 0x24cc003f, 0xc4970290, 0xc49b0288, 0x51540020, + 0x7d59401a, 0xc49b02a0, 0xc49f0298, 0x51980020, 0x7d9d801a, 0x041c0040, 0x04200000, 0x7dcdc002, + 0x7d924019, 0x7d26400c, 0x09dc0001, 0x9a400008, 0x51100001, 0x06200001, 0x99c0fffa, 0xc48f0230, + 0xc4930240, 0x8c00163f, 0x80001579, 0x7d010021, 0x7d914019, 0xc4930238, 0x55580020, 0xcd480298, + 0xcd8802a0, 0x10d40010, 0x12180016, 0xc51f0309, 0x7d95800a, 0x7d62000a, 0x7dd9c00a, 0xd8400013, + 0xcdd00309, 0xce113320, 0xc48f02e0, 0xc49b02b0, 0x18dc01e8, 0x7dd9400e, 0xc48f0230, 0xc4930240, + 0x95c0001d, 0x95400003, 0x8c00163f, 0x800015aa, 0xc48f0238, 0xc4a302b8, 0x12240004, 0x7e5e400a, + 0xc4ab02a8, 0x04100000, 0xce4c0319, 0x7d9d8002, 0x7ea14005, 0x25540001, 0x99400004, 0x06200001, + 0x2620000f, 0x800015bc, 0x09dc0001, 0x04240001, 0x7e624004, 0x06200001, 0x7d25000a, 0x2620000f, + 0x99c0fff4, 0xd8400013, 0xcd0d3330, 0xce0802b8, 0xcd8802b0, 0xc4ab02e0, 0x1aa807f0, 0xc48f02d0, + 0xc49702d8, 0xc49b02c8, 0xc49f02c0, 0x96800028, 0x7d4e000f, 0x9600000b, 0x7d964002, 0x7e6a000f, + 0x96000003, 0x7d694001, 0x800015e9, 0x7cde4002, 0x7e6a000f, 0x96000008, 0x7de94001, 0x800015e9, + 0x7cd64002, 0x7e6a000e, 0x96000003, 
0x7d694001, 0x800015e9, 0xc48f0230, 0xc4930240, 0x8c00163f, + 0x800015cd, 0xc4930238, 0x7d698002, 0xcd4802d8, 0x129c0008, 0xc50f0319, 0x11a0000e, 0x11140001, + 0xc4340004, 0xd8400008, 0xd8400013, 0x7e1e000a, 0x1198000a, 0xcd953300, 0x7e0e000a, 0x12a8000a, + 0xce953301, 0xce100319, 0xcf400008, 0xc4b70280, 0xc4b30278, 0x7f73800a, 0x536c0020, 0x7ef2c01a, + 0x9780eb68, 0x8c001608, 0xd8080278, 0xd8080280, 0x7c408001, 0x88000000, 0x043c0003, 0x80001609, + 0x043c0001, 0x30b40000, 0x9b400011, 0xc4b70258, 0xc4b30250, 0x53780020, 0x7fb3801a, 0x7faf8019, + 0x04300020, 0x04280000, 0x67b40001, 0x0b300001, 0x57b80001, 0x97400002, 0x06a80001, 0x9b00fffb, + 0xc4bb0260, 0x7fab8001, 0xcf880260, 0x04300020, 0x04280000, 0x66f40001, 0x0b300001, 0x56ec0001, + 0x97400005, 0x8c001628, 0xc4353247, 0x7f7f4009, 0x9b40fffe, 0x06a80001, 0x9b00fff7, 0x90000000, + 0x269c0007, 0x11dc0008, 0x29dc0008, 0x26a00018, 0x12200003, 0x7de1c00a, 0x26a00060, 0x06200020, + 0x16200001, 0x7de1c00a, 0xcdc00013, 0x90000000, 0x269c0018, 0x26a00007, 0x26a40060, 0x11dc0006, + 0x12200006, 0x16640001, 0x29dc0008, 0x7de1c00a, 0x7de5c00a, 0xcdc00013, 0x90000000, 0xc4b70228, + 0x05100001, 0x04cc0001, 0x2510000f, 0xccc80230, 0x7f514005, 0x25540001, 0x99400004, 0x05100001, + 0x2510000f, 0x80001644, 0xc4b30248, 0xcd080240, 0x7f130005, 0x27300001, 0x9b000002, 0x8c001688, + 0x8c00120d, 0x8c001219, 0x8c001232, 0x04300001, 0x04340801, 0x7f130004, 0xcf400013, 0xcf01051e, + 0xc42d051f, 0x7ed2c005, 0x26ec0001, 0x96c0fffd, 0xcf01051f, 0xd8000055, 0xc5170309, 0x195c07f0, + 0x196007f6, 0x04340000, 0x95c00008, 0x09dc0001, 0x04340001, 0x95c00005, 0x09dc0001, 0x53740001, + 0x6b740001, 0x80001665, 0xc4a702a0, 0xc4ab0298, 0x52640020, 0x7e6a401a, 0x7f634014, 0x7e76401a, + 0xc4300004, 0xd8400008, 0xd8400013, 0x56680020, 0xd8113320, 0xce480298, 0xce8802a0, 0xc5170319, + 0xc4b702b0, 0x255c000f, 0x7f5f4001, 0xd8113330, 0xcf4802b0, 0x11340001, 0x195c07e8, 0x196007ee, + 0xd8353300, 0x7e1e4001, 0xd8353301, 0xce4802d0, 0xd8100309, 0xd8100319, 0xcf000008, 0x90000000, + 0xc4970258, 0xc48f0250, 0x51540020, 0x7cd4c01a, 0xc4af0280, 0xc4b30278, 0x52ec0020, 0x7ef2c01a, + 0x04140020, 0x04280000, 0x64d80001, 0x09540001, 0x54cc0001, 0x95800060, 0x8c001628, 0xc4193247, + 0x25980001, 0x9580005c, 0x7dc24001, 0xc41d3248, 0x25dc000f, 0x7dd2000c, 0x96000057, 0xc41d3255, + 0xc435324f, 0x7df5c00c, 0x99c00004, 0xc4193265, 0x25980040, 0x9580fffe, 0xc439325b, 0x1bb0003f, + 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260, 0x1bb000e4, + 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x800016f1, 0xce400013, 0xc033ffff, + 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe, 0xd8c00033, + 0xc4300009, 0x27300008, 0x9700fffe, 0x1a7003e6, 0x27380003, 0x13b80004, 0x27300003, 0x13300003, + 0x7fb38001, 0x1a7000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013, 0x1a700064, + 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, + 0x0b300003, 0x800016df, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005, + 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xce400013, 0xc431325d, + 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xd841325d, 0x2030007b, 0xcf01325b, + 0x800016f2, 0xd841325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9c, 0x8c001608, + 0xd8080278, 0xd8080280, 0x90000000, 0xd840004f, 0xc414000e, 0x29540008, 0xcd400013, 0xc43d3265, + 0x1bc800ea, 0xd80802e9, 0x7c40c001, 0x18fc0064, 0x9bc00042, 
0xc4193246, 0xc41d3245, 0x51980020, + 0x7dd9801a, 0x45980400, 0xc4313267, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x9bc00001, 0x1b380057, + 0x1b340213, 0x1b300199, 0x7f7b400a, 0x7f73400a, 0xcf400024, 0x14f4001d, 0xc4bf02e9, 0x9bc0001c, + 0x7c410001, 0x192807fa, 0xc4bf0258, 0xc4a70250, 0x53fc0020, 0x7e7e401a, 0x042c0000, 0x04300000, + 0x667c0001, 0x56640001, 0x06ec0001, 0x97c0fffd, 0x07300001, 0x0aec0001, 0x7eebc00c, 0x06ec0001, + 0x97c0fff8, 0x0b300001, 0x43300007, 0x53300002, 0x7db30011, 0xd3000025, 0xc03ec005, 0x2bfca200, + 0xcfc00026, 0xccc00026, 0xcd000026, 0x192807fa, 0xc01f007f, 0x7d1d0009, 0x2110007d, 0x8c001628, + 0x203c003f, 0xcfc13256, 0x8c0017f5, 0xcd013254, 0x18fc01e8, 0xcfc13248, 0x8c00185b, 0xd8413247, + 0x0b740001, 0x9b40ffd5, 0xd800004f, 0xc4bf02e9, 0x97c0ea24, 0x90000000, 0x14d4001d, 0xc4930260, + 0x7d52400e, 0xc49f0258, 0xc4a30250, 0x51dc0020, 0x7de1801a, 0x96400017, 0x7d534002, 0xc4af0270, + 0x7dae4005, 0x26640001, 0x32e0001f, 0x9a400006, 0x06ec0001, 0x96000002, 0x042c0000, 0xcec80270, + 0x8000174f, 0x0b740001, 0x8c00178a, 0x05100001, 0x9b40fff3, 0xc4af0280, 0xc4b30278, 0x52ec0020, + 0x7ef2c01a, 0x8c001608, 0xd8080278, 0xd8080280, 0xc4ab0268, 0x7daa4005, 0x26640001, 0x32a0001f, + 0x9a400005, 0x06a80001, 0x96000002, 0x24280000, 0x80001765, 0x7c410001, 0xc01f007f, 0x09540001, + 0x7d1d0009, 0x2110007d, 0x8c001628, 0xd8013256, 0x8c0017f2, 0xcd013254, 0xc4113248, 0x15100004, + 0x11100004, 0xc4b3034b, 0x7f13000a, 0xcf013248, 0xc4930260, 0x8c001855, 0x32a4001f, 0xd8413247, + 0xd800004f, 0x09100001, 0x06a80001, 0x96400002, 0x24280000, 0xcd080260, 0xce880268, 0x9940ffc0, + 0x7c408001, 0x88000000, 0x7ec28001, 0x8c001628, 0x32e0001f, 0xc4253247, 0x26640001, 0x9640005e, + 0xc4293265, 0xc4253255, 0xc431324f, 0x7e72400c, 0x26a80040, 0x9a400002, 0x9680fff7, 0xc429325b, + 0x1aa4003f, 0x96400049, 0x1aa400e8, 0x32680003, 0x9a800046, 0x32640002, 0x9640000a, 0xc4293260, + 0x1aa400e4, 0x32640004, 0x96400040, 0xc425325d, 0x26640010, 0x9a40fffe, 0x800017e2, 0xcdc00013, + 0xc027ffff, 0x2e6400ff, 0xc429325b, 0x7e6a4009, 0xce41325b, 0xc429325b, 0x26a800ff, 0x9a80fffe, + 0xd8c00033, 0xc4240009, 0x26640008, 0x9640fffe, 0x19e403e6, 0x26680003, 0x12a80004, 0x26640003, + 0x12640003, 0x7ea68001, 0x19e400e8, 0x7ea68001, 0x12640001, 0x7ea68001, 0x06a80002, 0xd8400013, + 0x19e40064, 0x32640002, 0x96400009, 0x16a40005, 0x06640003, 0xce412082, 0xcc01203f, 0xd8400013, + 0xcc01203f, 0x0a640003, 0x800017d0, 0x16a40005, 0xce412082, 0xcc01203f, 0xd8400013, 0xcc01203f, + 0x12640005, 0x7ea64002, 0xc4292083, 0x7ea68005, 0x26a80001, 0x9a80ffdf, 0xd8c00034, 0xcdc00013, + 0xc425325d, 0x26640010, 0x9a40fffe, 0xc429325b, 0x26a400ff, 0x9a40ffca, 0xd841325d, 0x2024007b, + 0xce41325b, 0x800017e3, 0xd841325d, 0xc4a70280, 0xc4ab0278, 0x52640020, 0x7e6a401a, 0x04280001, + 0x7eae8014, 0x7e6a401a, 0x56680020, 0xce480278, 0xce880280, 0x06ec0001, 0x96000002, 0x042c0000, + 0xcec80270, 0x90000000, 0x7c438001, 0x7c420001, 0x800017fe, 0xc4bf02e9, 0x9bc00006, 0x7c438001, + 0x7c420001, 0xcf800026, 0xce000026, 0x800017fe, 0xc43b02eb, 0xc42302ec, 0xcf813245, 0xce013246, + 0x52200020, 0x7fa3801a, 0x47b8020c, 0x15e00008, 0x1220000a, 0x2a206032, 0x513c001e, 0x7e3e001a, + 0xc4bf02e9, 0x9bc00005, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x8000180f, 0xcd400013, 0xc4313267, + 0x1b3c0077, 0x1b300199, 0x7ff3000a, 0x1330000a, 0x2b300032, 0x043c3000, 0xcfc13267, 0xc43d3267, + 0xd200000b, 0xc4200007, 0xd3800002, 0xcf000002, 0xd8000040, 0x96000002, 0xd8400040, 0xd8400018, + 0x043c2000, 0xcfc13267, 0xd8000018, 0xd8800010, 0xcdc00013, 0x7dc30001, 0xdc1e0000, 
0x04380032, + 0xcf80000e, 0x8c001427, 0xcc413248, 0xc43d3269, 0x27fc000f, 0x33fc0003, 0x97c00011, 0x043c001f, + 0xdfc30000, 0xd4413249, 0x7c43c001, 0x7c43c001, 0x043c0024, 0x0bfc0021, 0xdfc30000, 0xd441326a, + 0x173c0008, 0x1b300303, 0x7f3f0001, 0x043c0001, 0x7ff3c004, 0xcfc13084, 0x80001842, 0x043c0024, + 0xdfc30000, 0xd4413249, 0x7c43c001, 0x23fc003f, 0xcfc1326d, 0x0bb80026, 0xdf830000, 0xd441326e, + 0x7c438001, 0x7c438001, 0xc4393265, 0x1fb8ffc6, 0xddc30000, 0xcf813265, 0x9a000003, 0xcdc0000c, + 0x80001852, 0xcdc0000d, 0xce000010, 0x8c00142b, 0x90000000, 0x7c41c001, 0x7c420001, 0xcdc13252, + 0xce013253, 0x8c001628, 0x80001878, 0xc49f02e9, 0x99c00018, 0x7c41c001, 0x7c420001, 0xcdc13252, + 0xce013253, 0xc43c000e, 0x2bfc0008, 0xcfc00013, 0x043c3000, 0xcfc13267, 0xc43d3267, 0x97c0ffff, + 0xcdc00026, 0xce000026, 0xd8400027, 0xc41c0012, 0x99c0ffff, 0xc43c000e, 0x2bfc0008, 0xcfc00013, + 0x043c2000, 0xcfc13267, 0x8c001628, 0x80001878, 0xc41f02ed, 0xc42302ee, 0xcdc13252, 0xce013253, + 0x04200001, 0x7e2a0004, 0xce013084, 0x90000000, 0x28340001, 0x313c0bcc, 0x9bc00010, 0x393c051f, + 0x9bc00004, 0x3d3c050e, 0x9bc0000c, 0x97c0000c, 0x393c0560, 0x9bc00004, 0x3d3c054f, 0x9bc00007, + 0x97c00007, 0x393c1538, 0x9bc00005, 0x3d3c1537, 0x9bc00002, 0x97c00002, 0x2b740800, 0x90000000, + 0xc40c000e, 0x28cc0008, 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e8007c, 0x7c42c001, + 0x06a8189a, 0x86800000, 0x8000189e, 0x800018c5, 0x800018f2, 0x8000016a, 0x7c414001, 0x18d0007e, + 0x50580020, 0x09200001, 0x7d59401a, 0xd1400072, 0xc8140072, 0x09240002, 0x7c418001, 0x7c41c001, + 0x99000011, 0xc4340004, 0xd8400013, 0xd8400008, 0xc42130b5, 0x1a24002c, 0x9a40fffe, 0x2020002c, + 0xc418000d, 0x1198001c, 0x10cc0004, 0x14cc0004, 0x7cd8c00a, 0xccc130b7, 0xce0130b5, 0xcf400008, + 0x80000168, 0xd1400025, 0x5978073a, 0x2bb80002, 0xcf800024, 0xcd800026, 0xcdc00026, 0xd8400027, + 0x9600e8a8, 0xc4300012, 0x9b00ffff, 0x9640e8a5, 0x800018a9, 0x04140000, 0xc55b0309, 0x3d5c0010, + 0x05540001, 0x2598ffff, 0x09780001, 0x7dad800c, 0x99c0ffd2, 0x9580fff9, 0xc4970258, 0xc4930250, + 0x51540020, 0x7d15001a, 0x04140020, 0x04280000, 0x442c0000, 0x65180001, 0x09540001, 0x55100001, + 0x9580000b, 0x8c001628, 0xc41d3248, 0x04300001, 0x7f2b0014, 0x25dc000f, 0x7df9c00c, 0x95c00004, + 0x7ef2c01a, 0xd8c13260, 0xd901325d, 0x06a80001, 0x9940fff1, 0x04140020, 0x04280000, 0x66d80001, + 0x09540001, 0x56ec0001, 0x95800005, 0x8c001628, 0xc421325d, 0x26240007, 0x9a40fffe, 0x06a80001, + 0x9940fff7, 0x8000189e, 0x04140020, 0x04280000, 0x09540001, 0x8c001628, 0xc41d3254, 0xc023007f, + 0x19e4003e, 0x7de1c009, 0x7dee000c, 0x96400008, 0x96000007, 0xd8c13260, 0xd901325d, 0xc421325d, + 0x261c0007, 0x99c0fffe, 0x8000189e, 0x06a80001, 0x9940fff0, 0x8000189e, 0xc40c000e, 0x28cc0008, + 0xccc00013, 0xc43d3265, 0x1bc800ea, 0x7c40c001, 0x18e00064, 0x06281911, 0x14f4001d, 0x24cc0003, + 0x86800000, 0x80001915, 0x800019af, 0x80001a2b, 0x8000016a, 0xcc48032b, 0xcc480333, 0xcc48033b, + 0xcc480343, 0x98800011, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267, + 0x04203000, 0xce013267, 0xc4213267, 0x9a000001, 0x1b3c0057, 0x1b200213, 0x1b300199, 0x7e3e000a, + 0x7e32000a, 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278, + 0x52ec0020, 0x7ef2c01a, 0x04180000, 0x04140020, 0x04280000, 0x7f438001, 0x8c001628, 0xc41d3247, + 0x25dc0001, 0x95c00068, 0xc4213254, 0x1a1c003e, 0x95c00065, 0xc01f007f, 0x7e1e0009, 0x97800062, + 0x0bb80001, 0x43bc0008, 0x7fcbc001, 0xc7df032b, 0x7e1fc00c, 0x97c0fffa, 0x043c0101, 0x94c00002, + 0x043c0102, 
0xc439325b, 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, + 0x97000009, 0xc4393260, 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, + 0x80001994, 0x8c001628, 0xc033ffff, 0x2f3000ff, 0xc439325b, 0x7f3b0009, 0xcf01325b, 0xc439325b, + 0x27b800ff, 0x9b80fffe, 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003, + 0x13b80004, 0x27300003, 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001, + 0x07b80002, 0xd8400013, 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, + 0xcc01203f, 0xd8400013, 0xcc01203f, 0x0b300003, 0x80001982, 0x17b00005, 0xcf012082, 0xcc01203f, + 0xd8400013, 0xcc01203f, 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, + 0xd8c00034, 0xcdc00013, 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffcb, + 0xcfc1325d, 0x2030007b, 0xcf01325b, 0x80001995, 0xcfc1325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, + 0x98800009, 0x41bc0007, 0x53fc0002, 0x7e7fc011, 0xd3c00025, 0xd8000026, 0xd8400027, 0xc43c0012, + 0x9bc0ffff, 0x653c0001, 0x7dbd8001, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff8f, 0xc43c000e, + 0x2bfc0008, 0xcfc00013, 0x043c2000, 0xcfc13267, 0xd8080278, 0xd8080280, 0x80000168, 0x7c410001, + 0x04140000, 0xc55b0309, 0x3d5c0010, 0x2598ffff, 0x05540001, 0x7d91800c, 0x95c00003, 0xd4400078, + 0x80000168, 0x9580fff8, 0x09780001, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, + 0xc4b30278, 0x52ec0020, 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x09540001, 0x55100001, + 0x9580005d, 0x8c001628, 0xc4253247, 0x26640001, 0x04200101, 0x96400058, 0x7dc24001, 0xc41d3248, + 0x25dc000f, 0x7df9c00c, 0x95c00053, 0x94c00002, 0x04200102, 0x7e41c001, 0xc425325b, 0x1a70003f, + 0x97000049, 0x1a7000e8, 0x33240003, 0x9a400046, 0x33300002, 0x9700000a, 0xc4253260, 0x1a7000e4, + 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001a21, 0xcdc00013, 0xc033ffff, + 0x2f3000ff, 0xc425325b, 0x7f270009, 0xcf01325b, 0xc425325b, 0x266400ff, 0x9a40fffe, 0xd8c00033, + 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27240003, 0x12640004, 0x27300003, 0x13300003, + 0x7e724001, 0x19f000e8, 0x7e724001, 0x13300001, 0x7e724001, 0x06640002, 0xd8400013, 0x19f00064, + 0x33300002, 0x97000009, 0x16700005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, + 0x0b300003, 0x80001a0f, 0x16700005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, 0x13300005, + 0x7e730002, 0xc4252083, 0x7e724005, 0x26640001, 0x9a40ffdf, 0xd8c00034, 0xcdc00013, 0xc431325d, + 0x27300010, 0x9b00fffe, 0xc425325b, 0x267000ff, 0x9b00ffca, 0xce01325d, 0x2030007b, 0xcf01325b, + 0x80001a22, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0x06a80001, 0x9940ff9f, 0xd4400078, + 0xd8080278, 0xd8080280, 0x80000168, 0x8c001a31, 0xd4400078, 0xd8080278, 0xd8080280, 0x7c408001, + 0x88000000, 0xc4213246, 0xc4253245, 0x52200020, 0x7e26401a, 0x46640400, 0xc4313267, 0x04203000, + 0xce013267, 0xc4213267, 0x9a000001, 0x1b180057, 0x1b200213, 0x1b300199, 0x7e1a000a, 0x7e32000a, + 0xce000024, 0xc4970258, 0xc4930250, 0x51540020, 0x7d15001a, 0xc4af0280, 0xc4b30278, 0x52ec0020, + 0x7ef2c01a, 0x04140020, 0x04280000, 0x65180001, 0x95800060, 0x8c001628, 0xc4193247, 0x25980001, + 0x04200101, 0x94c00005, 0x30f00005, 0x04200005, 0x9b000002, 0x04200102, 0x95800056, 0xc439325b, + 0x1bb0003f, 0x97000049, 0x1bb000e8, 0x33380003, 0x9b800046, 0x33300002, 0x9700000a, 0xc4393260, + 0x1bb000e4, 0x33300004, 0x97000040, 0xc431325d, 0x27300010, 0x9b00fffe, 0x80001aa2, 0xcdc00013, + 0xc033ffff, 0x2f3000ff, 0xc439325b, 
0x7f3b0009, 0xcf01325b, 0xc439325b, 0x27b800ff, 0x9b80fffe, + 0xd8c00033, 0xc4300009, 0x27300008, 0x9700fffe, 0x19f003e6, 0x27380003, 0x13b80004, 0x27300003, + 0x13300003, 0x7fb38001, 0x19f000e8, 0x7fb38001, 0x13300001, 0x7fb38001, 0x07b80002, 0xd8400013, + 0x19f00064, 0x33300002, 0x97000009, 0x17b00005, 0x07300003, 0xcf012082, 0xcc01203f, 0xd8400013, + 0xcc01203f, 0x0b300003, 0x80001a90, 0x17b00005, 0xcf012082, 0xcc01203f, 0xd8400013, 0xcc01203f, + 0x13300005, 0x7fb30002, 0xc4392083, 0x7fb38005, 0x27b80001, 0x9b80ffdf, 0xd8c00034, 0xcdc00013, + 0xc431325d, 0x27300010, 0x9b00fffe, 0xc439325b, 0x27b000ff, 0x9b00ffca, 0xce01325d, 0x2030007b, + 0xcf00325b, 0x80001aa3, 0xce01325d, 0x04300001, 0x7f2b0014, 0x7ef2c01a, 0xc49b02e9, 0x99800005, + 0xd2400025, 0x4664001c, 0xd8000026, 0xd8400027, 0x06a80001, 0x09540001, 0x55100001, 0x9940ff9c, + 0xc49b02e9, 0x99800008, 0xc430000e, 0x2b300008, 0xcf000013, 0x04302000, 0xcf013267, 0xc4313267, + 0x97000001, 0x90000000, 0x244c00ff, 0xcc4c0200, 0x7c408001, 0x88000000, 0xc44f0200, 0xc410000b, + 0xc414000c, 0x7d158010, 0x059cc000, 0xd8400013, 0xccdd0000, 0x7c408001, 0x88000000, 0xc40c0037, + 0x94c0ffff, 0xcc000049, 0xc40c003a, 0x94c0ffff, 0x7c40c001, 0x24d00001, 0x9500e69a, 0x18d0003b, + 0x18d40021, 0x99400006, 0xd840004a, 0xc40c003c, 0x94c0ffff, 0x14cc0001, 0x94c00028, 0xd8000033, + 0xc438000b, 0xc43c0009, 0x27fc0001, 0x97c0fffe, 0xd8400013, 0xd841c07f, 0xc43dc07f, 0x1bfc0078, + 0x7ffbc00c, 0x97c0fffd, 0x99000004, 0xc0120840, 0x282c0040, 0x80001ae8, 0xc0121841, 0x282c001a, + 0xcd01c07c, 0xcc01c07d, 0xcc01c08c, 0xcc01c079, 0xcc01c07e, 0x04200004, 0xcec0001b, 0xd8400021, + 0x0a200001, 0x9a00ffff, 0xc425c07f, 0x166c001f, 0x04200004, 0x9ac0fffb, 0xc434000f, 0x9b40ffff, + 0xd801c07f, 0xd8400013, 0xc425c07f, 0xce400078, 0xd8000034, 0x9940e66b, 0xd800004a, 0x7c408001, + 0x88000000, 0xc40c0036, 0x24d00001, 0x9900fffe, 0x18cc0021, 0xccc00047, 0xcc000046, 0xc40c0039, + 0x94c0ffff, 0xc40c003d, 0x98c0ffff, 0x7c40c001, 0x24d003ff, 0x18d47fea, 0x18d87ff4, 0xcd00004c, + 0xcd40004e, 0xcd80004d, 0xd8400013, 0xcd41c405, 0xc02a0001, 0x2aa80001, 0xce800013, 0xcd01c406, + 0xcc01c406, 0xcc01c406, 0xc40c0006, 0x98c0ffff, 0xc414000e, 0x29540008, 0x295c0001, 0xcd400013, + 0xd8c1325e, 0xcdc0001a, 0x11980002, 0x4110000c, 0xc0160800, 0x7d15000a, 0xc0164010, 0xd8400013, + 0xcd41c078, 0xcc01c080, 0xcc01c081, 0xcd81c082, 0xcc01c083, 0xcd01c084, 0xc40c0006, 0x98c0ffff, + 0xd8400048, 0xc40c003b, 0x94c0ffff, 0x80000c16, 0xd8400013, 0xd801c40a, 0xd901c40d, 0xd801c410, + 0xd801c40e, 0xd801c40f, 0xc40c0040, 0x04140001, 0x09540001, 0x9940ffff, 0x04140096, 0xd8400013, + 0xccc1c400, 0xc411c401, 0x9500fffa, 0xc424003e, 0x04d00001, 0x11100002, 0xcd01c40c, 0xc0180034, + 0xcd81c411, 0xd841c414, 0x0a540001, 0xcd41c412, 0x2468000f, 0xc419c416, 0x41980003, 0xc41c003f, + 0x7dda0001, 0x12200002, 0x10cc0002, 0xccc1c40c, 0xd901c411, 0xce41c412, 0xd8800013, 0xce292e40, + 0xcc412e01, 0xcc412e02, 0xcc412e03, 0xcc412e00, 0x80000aa7, 0xc43c0007, 0xdc120000, 0x31144000, + 0x95400005, 0xdc030000, 0xd800002a, 0xcc3c000c, 0x80001b70, 0x33f80003, 0xd4400078, 0x9780e601, + 0x188cfff0, 0x04e40002, 0x80001190, 0x7c408001, 0x88000000, 0xc424005e, 0x96400006, 0x90000000, + 0xc424005e, 0x96400003, 0x7c408001, 0x88000000, 0x80001b74, 0x80000168, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 
0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 
0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0x92100004, 0x92110501, 0x92120206, 0x92130703, 0x92100400, 0x92110105, 0x92120602, 0x92130307, + 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + }, + .dfy_size = 7440 +}; + +static const PWR_DFY_Section pwr_virus_section4 = { + .dfy_cntl = 0x80000004, + .dfy_addr_hi = 0x000000b4, + .dfy_addr_lo = 0x54106500, + .dfy_data = { + 0x7e000200, 0x7e020204, 0xc00a0505, 0x00000000, 0xbf8c007f, 0xb8900904, 0xb8911a04, 0xb8920304, + 0xb8930b44, 0x921c0d0c, 0x921c1c13, 0x921d0c12, 0x811c1d1c, 0x811c111c, 0x921cff1c, 0x00000400, + 0x921dff10, 0x00000100, 0x81181d1c, 0x7e040218, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0701000, 0x80050002, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0701000, 0x80050102, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050002, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 
0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0701000, 0x80050102, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, 0xe0501000, 0x80050302, + 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + }, + .dfy_size = 240 +}; + +static const PWR_DFY_Section pwr_virus_section5 = { + .dfy_cntl = 0x80000004, + .dfy_addr_hi = 0x000000b4, + .dfy_addr_lo = 0x54106900, + .dfy_data = { + 0x7e080200, 0x7e100204, 0xbefc00ff, 0x00010000, 0x24200087, 0x262200ff, 0x000001f0, 0x20222282, + 0x28182111, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, + 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, + 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, + 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 
0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, + 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, 0x0000040c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd81a0000, + 0x0000080c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, 0x1100000c, 0xd86c0000, + 0x1100000c, 0xbf810000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + }, + .dfy_size = 384 +}; + +static const PWR_DFY_Section pwr_virus_section6 = { + .dfy_cntl = 0x80000004, + .dfy_addr_hi = 0x000000b4, + .dfy_addr_lo = 0x54116f00, + .dfy_data = { + 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4540fe8, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000041, 0x0000000c, 0x00000000, 0x07808000, 0xffffffff, + 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x54116f00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000, + 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000, + 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb454105e, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x000000c0, 0x00000010, 0x00000000, 0x07808000, 0xffffffff, + 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x54117300, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000, + 0xb4540fef, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000, + 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541065, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000500, 0x0000001c, 0x00000000, 0x07808000, 0xffffffff, + 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x54117700, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000, + 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000, + 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0xc0310800, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000040, 0x00000001, 0x00000001, 0x00000001, 0x00000000, 0xb4541069, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000444, 0x0000008a, 0x00000000, 0x07808000, 0xffffffff, + 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000002, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0xaaaaaaaa, 0x55555555, 0x55555555, 0x55555555, + 0x55555555, 0x00000000, 0x00000000, 0x540fee40, 0x000000b4, 0x00000010, 0x00000001, 0x00000004, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x54117b00, 0x000000b4, 0x00000000, 0x00000000, 0x00005301, 0x00000000, 0x00000000, 0x00000000, + 0xb4540fef, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x540fee20, 0x000000b4, 0x00000000, + 0x00000000, 0x08000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, + }, + .dfy_size = 1024 +}; + +static const PWR_Command_Table pwr_virus_table_post[] = { + { 0x00000000, mmCP_MEC_CNTL }, + { 0x00000000, mmCP_MEC_CNTL }, + { 0x00000004, mmSRBM_GFX_CNTL }, + { 0x54116f00, mmCP_MQD_BASE_ADDR }, + { 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, + { 0xb4540fef, mmCP_HQD_PQ_BASE }, + { 0x00000000, mmCP_HQD_PQ_BASE_HI }, + { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, + { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, + { 0x00005301, mmCP_HQD_PERSISTENT_STATE }, + { 0x00010000, mmCP_HQD_VMID }, + { 0xc8318509, mmCP_HQD_PQ_CONTROL }, + { 0x00000005, mmSRBM_GFX_CNTL }, + { 0x54117300, mmCP_MQD_BASE_ADDR }, + { 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, + { 0xb4540fef, mmCP_HQD_PQ_BASE }, + { 0x00000000, mmCP_HQD_PQ_BASE_HI }, + { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, + { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, + { 0x00005301, mmCP_HQD_PERSISTENT_STATE }, + { 0x00010000, mmCP_HQD_VMID }, + { 0xc8318509, mmCP_HQD_PQ_CONTROL }, + { 0x00000006, mmSRBM_GFX_CNTL }, + { 0x54117700, mmCP_MQD_BASE_ADDR }, + { 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, + { 0xb4540fef, mmCP_HQD_PQ_BASE }, + { 0x00000000, mmCP_HQD_PQ_BASE_HI }, + { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, + { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, + { 0x00005301, mmCP_HQD_PERSISTENT_STATE }, + { 0x00010000, mmCP_HQD_VMID }, + { 0xc8318509, mmCP_HQD_PQ_CONTROL }, + { 0x00000007, mmSRBM_GFX_CNTL }, + { 0x54117b00, mmCP_MQD_BASE_ADDR }, + { 0x000000b4, mmCP_MQD_BASE_ADDR_HI }, + { 0xb4540fef, mmCP_HQD_PQ_BASE }, + { 0x00000000, mmCP_HQD_PQ_BASE_HI }, + { 0x540fee20, mmCP_HQD_PQ_WPTR_POLL_ADDR }, + { 0x000000b4, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI }, + { 0x00005301, mmCP_HQD_PERSISTENT_STATE }, + { 0x00010000, mmCP_HQD_VMID }, + { 0xc8318509, mmCP_HQD_PQ_CONTROL }, + { 0x00000004, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000104, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000204, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000304, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000404, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000504, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000604, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, 
mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000704, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000005, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000105, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000205, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000305, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000405, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000505, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000605, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000705, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000006, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000106, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000206, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000306, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000406, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000506, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000606, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000706, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000007, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000107, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000207, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, 
mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000307, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000407, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000507, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000607, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000707, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000008, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000108, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000208, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000308, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000408, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000508, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000608, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000708, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000009, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000109, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000209, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000309, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000409, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000509, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000609, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, 
mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000709, mmSRBM_GFX_CNTL }, + { 0x00000000, mmCP_HQD_ACTIVE }, + { 0x00000000, mmCP_HQD_PQ_RPTR }, + { 0x00000000, mmCP_HQD_PQ_WPTR }, + { 0x00000001, mmCP_HQD_ACTIVE }, + { 0x00000004, mmSRBM_GFX_CNTL }, + { 0x01010101, mmCP_PQ_WPTR_POLL_CNTL1 }, + { 0x00000000, mmGRBM_STATUS }, + { 0x00000000, mmGRBM_STATUS }, + { 0x00000000, mmGRBM_STATUS }, + { 0x00000000, 0xFFFFFFFF }, +}; + + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h new file mode 100644 index 000000000000..a5f2227a3971 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h @@ -0,0 +1,196 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef PP_POWERSTATE_H +#define PP_POWERSTATE_H + +struct pp_hw_power_state { + unsigned int magic; +}; + +struct pp_power_state; + + +#define PP_INVALID_POWER_STATE_ID (0) + + +/* + * An item of a list containing Power States. 
+ */ + +struct PP_StateLinkedList { + struct pp_power_state *next; + struct pp_power_state *prev; +}; + + +enum PP_StateUILabel { + PP_StateUILabel_None, + PP_StateUILabel_Battery, + PP_StateUILabel_MiddleLow, + PP_StateUILabel_Balanced, + PP_StateUILabel_MiddleHigh, + PP_StateUILabel_Performance, + PP_StateUILabel_BACO +}; + +enum PP_StateClassificationFlag { + PP_StateClassificationFlag_Boot = 0x0001, + PP_StateClassificationFlag_Thermal = 0x0002, + PP_StateClassificationFlag_LimitedPowerSource = 0x0004, + PP_StateClassificationFlag_Rest = 0x0008, + PP_StateClassificationFlag_Forced = 0x0010, + PP_StateClassificationFlag_User3DPerformance = 0x0020, + PP_StateClassificationFlag_User2DPerformance = 0x0040, + PP_StateClassificationFlag_3DPerformance = 0x0080, + PP_StateClassificationFlag_ACOverdriveTemplate = 0x0100, + PP_StateClassificationFlag_Uvd = 0x0200, + PP_StateClassificationFlag_3DPerformanceLow = 0x0400, + PP_StateClassificationFlag_ACPI = 0x0800, + PP_StateClassificationFlag_HD2 = 0x1000, + PP_StateClassificationFlag_UvdHD = 0x2000, + PP_StateClassificationFlag_UvdSD = 0x4000, + PP_StateClassificationFlag_UserDCPerformance = 0x8000, + PP_StateClassificationFlag_DCOverdriveTemplate = 0x10000, + PP_StateClassificationFlag_BACO = 0x20000, + PP_StateClassificationFlag_LimitedPowerSource_2 = 0x40000, + PP_StateClassificationFlag_ULV = 0x80000, + PP_StateClassificationFlag_UvdMVC = 0x100000, +}; + +typedef unsigned int PP_StateClassificationFlags; + +struct PP_StateClassificationBlock { + enum PP_StateUILabel ui_label; + enum PP_StateClassificationFlag flags; + int bios_index; + bool temporary_state; + bool to_be_deleted; +}; + +struct PP_StatePcieBlock { + unsigned int lanes; +}; + +enum PP_RefreshrateSource { + PP_RefreshrateSource_EDID, + PP_RefreshrateSource_Explicit +}; + +struct PP_StateDisplayBlock { + bool disableFrameModulation; + bool limitRefreshrate; + enum PP_RefreshrateSource refreshrateSource; + int explicitRefreshrate; + int edidRefreshrateIndex; + bool enableVariBright; +}; + +struct PP_StateMemroyBlock { + bool dllOff; + uint8_t m3arb; + uint8_t unused[3]; +}; + +struct PP_StateSoftwareAlgorithmBlock { + bool disableLoadBalancing; + bool enableSleepForTimestamps; +}; + +#define PP_TEMPERATURE_UNITS_PER_CENTIGRADES 1000 + +/** + * Type to hold a temperature range. + */ +struct PP_TemperatureRange { + int min; + int max; + int edge_emergency_max; + int hotspot_min; + int hotspot_crit_max; + int hotspot_emergency_max; + int mem_min; + int mem_crit_max; + int mem_emergency_max; +}; + +struct PP_StateValidationBlock { + bool singleDisplayOnly; + bool disallowOnDC; + uint8_t supportedPowerLevels; +}; + +struct PP_UVD_CLOCKS { + uint32_t VCLK; + uint32_t DCLK; +}; + +/** +* Structure to hold a PowerPlay Power State. 
+*/ +struct pp_power_state { + uint32_t id; + struct PP_StateLinkedList orderedList; + struct PP_StateLinkedList allStatesList; + + struct PP_StateClassificationBlock classification; + struct PP_StateValidationBlock validation; + struct PP_StatePcieBlock pcie; + struct PP_StateDisplayBlock display; + struct PP_StateMemroyBlock memory; + struct PP_TemperatureRange temperatures; + struct PP_StateSoftwareAlgorithmBlock software; + struct PP_UVD_CLOCKS uvd_clocks; + struct pp_hw_power_state hardware; +}; + +enum PP_MMProfilingState { + PP_MMProfilingState_NA = 0, + PP_MMProfilingState_Started, + PP_MMProfilingState_Stopped +}; + +struct pp_clock_engine_request { + unsigned long client_type; + unsigned long ctx_id; + uint64_t context_handle; + unsigned long sclk; + unsigned long sclk_hard_min; + unsigned long mclk; + unsigned long iclk; + unsigned long evclk; + unsigned long ecclk; + unsigned long ecclk_hard_min; + unsigned long vclk; + unsigned long dclk; + unsigned long sclk_over_drive; + unsigned long mclk_over_drive; + unsigned long sclk_threshold; + unsigned long flag; + unsigned long vclk_ceiling; + unsigned long dclk_ceiling; + unsigned long num_cus; + unsigned long pm_flag; + enum PP_MMProfilingState mm_profiling_state; +}; + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h new file mode 100644 index 000000000000..cea65093b6ad --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h @@ -0,0 +1,62 @@ + +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef PP_DEBUG_H +#define PP_DEBUG_H + +#ifdef pr_fmt +#undef pr_fmt +#endif + +#define pr_fmt(fmt) "amdgpu: [powerplay] " fmt + +#include +#include +#include + +#define PP_ASSERT_WITH_CODE(cond, msg, code) \ + do { \ + if (!(cond)) { \ + pr_warn_ratelimited("%s\n", msg); \ + code; \ + } \ + } while (0) + +#define PP_ASSERT(cond, msg) \ + do { \ + if (!(cond)) { \ + pr_warn_ratelimited("%s\n", msg); \ + } \ + } while (0) + +#define PP_DBG_LOG(fmt, ...) 
\ + do { \ + pr_debug(fmt, ##__VA_ARGS__); \ + } while (0) + + +#define GET_FLEXIBLE_ARRAY_MEMBER_ADDR(type, member, ptr, n) \ + (type *)((char *)&(ptr)->member + (sizeof(type) * (n))) + +#endif /* PP_DEBUG_H */ + diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h new file mode 100644 index 000000000000..f49d1963fe85 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h @@ -0,0 +1,38 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _PP_ENDIAN_H_ +#define _PP_ENDIAN_H_ + +#define PP_HOST_TO_SMC_UL(X) cpu_to_be32(X) +#define PP_SMC_TO_HOST_UL(X) be32_to_cpu(X) + +#define PP_HOST_TO_SMC_US(X) cpu_to_be16(X) +#define PP_SMC_TO_HOST_US(X) be16_to_cpu(X) + +#define CONVERT_FROM_HOST_TO_SMC_UL(X) ((X) = PP_HOST_TO_SMC_UL(X)) +#define CONVERT_FROM_SMC_TO_HOST_UL(X) ((X) = PP_SMC_TO_HOST_UL(X)) + +#define CONVERT_FROM_HOST_TO_SMC_US(X) ((X) = PP_HOST_TO_SMC_US(X)) + +#endif /* _PP_ENDIAN_H_ */ diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h new file mode 100644 index 000000000000..f7c41185097e --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h @@ -0,0 +1,44 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef PP_THERMAL_H +#define PP_THERMAL_H + +#include "power_state.h" + +static const struct PP_TemperatureRange __maybe_unused SMU7ThermalWithDelayPolicy[] = +{ + {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, + { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, +}; + +static const struct PP_TemperatureRange __maybe_unused SMU7ThermalPolicy[] = +{ + {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, + { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, +}; + +#define CTF_OFFSET_EDGE 5 +#define CTF_OFFSET_HOTSPOT 5 +#define CTF_OFFSET_HBM 5 + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h b/drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h new file mode 100644 index 000000000000..c067e0925b6b --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h @@ -0,0 +1,46 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef _PP_INTERRUPT_H_ +#define _PP_INTERRUPT_H_ + +enum amd_thermal_irq { + AMD_THERMAL_IRQ_LOW_TO_HIGH = 0, + AMD_THERMAL_IRQ_HIGH_TO_LOW, + + AMD_THERMAL_IRQ_LAST +}; + +/* The type of the interrupt callback functions in PowerPlay */ +typedef int (*irq_handler_func_t)(void *private_data, + unsigned src_id, const uint32_t *iv_entry); + +/* Event Manager action chain list information */ +struct pp_interrupt_registration_info { + irq_handler_func_t call_back; /* Pointer to callback function */ + void *context; /* Pointer to callback function context */ + uint32_t src_id; /* Registered interrupt id */ + const uint32_t *iv_entry; +}; + +#endif /* _PP_INTERRUPT_H_ */ diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h new file mode 100644 index 000000000000..171f12b82716 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h @@ -0,0 +1,96 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef RAVEN_PP_SMC_H +#define RAVEN_PP_SMC_H + +#pragma pack(push, 1) + +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_PowerUpGfx 0x6 +#define PPSMC_MSG_EnableGfxOff 0x7 +#define PPSMC_MSG_DisableGfxOff 0x8 +#define PPSMC_MSG_PowerDownIspByTile 0x9 +#define PPSMC_MSG_PowerUpIspByTile 0xA +#define PPSMC_MSG_PowerDownVcn 0xB +#define PPSMC_MSG_PowerUpVcn 0xC +#define PPSMC_MSG_PowerDownSdma 0xD +#define PPSMC_MSG_PowerUpSdma 0xE +#define PPSMC_MSG_SetHardMinIspclkByFreq 0xF +#define PPSMC_MSG_SetHardMinVcn 0x10 +#define PPSMC_MSG_SetMinDisplayClock 0x11 +#define PPSMC_MSG_SetHardMinFclkByFreq 0x12 +#define PPSMC_MSG_SetAllowFclkSwitch 0x13 +#define PPSMC_MSG_SetMinVideoGfxclkFreq 0x14 +#define PPSMC_MSG_ActiveProcessNotify 0x15 +#define PPSMC_MSG_SetCustomPolicy 0x16 +#define PPSMC_MSG_SetVideoFps 0x17 +#define PPSMC_MSG_SetDisplayCount 0x18 +#define PPSMC_MSG_QueryPowerLimit 0x19 +#define PPSMC_MSG_SetDriverDramAddrHigh 0x1A +#define PPSMC_MSG_SetDriverDramAddrLow 0x1B +#define PPSMC_MSG_TransferTableSmu2Dram 0x1C +#define PPSMC_MSG_TransferTableDram2Smu 0x1D +#define PPSMC_MSG_DeviceDriverReset 0x1E +#define PPSMC_MSG_SetGfxclkOverdriveByFreqVid 0x1F +#define PPSMC_MSG_SetHardMinDcefclkByFreq 0x20 +#define PPSMC_MSG_SetHardMinSocclkByFreq 0x21 +#define PPSMC_MSG_SetMinVddcrSocVoltage 0x22 +#define PPSMC_MSG_SetMinVideoFclkFreq 0x23 +#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x24 +#define PPSMC_MSG_ForcePowerDownGfx 0x25 +#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26 +#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27 +#define PPSMC_MSG_SetSoftMinVcn 0x28 +#define PPSMC_MSG_GetGfxclkFrequency 0x2A +#define PPSMC_MSG_GetFclkFrequency 0x2B +#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C +#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D +#define PPSMC_MSG_SoftReset 0x2E +#define PPSMC_MSG_SetGfxCGPG 0x2F +#define PPSMC_MSG_SetSoftMaxGfxClk 0x30 +#define PPSMC_MSG_SetHardMinGfxClk 0x31 +#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32 +#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33 +#define PPSMC_MSG_SetSoftMaxVcn 0x34 +#define PPSMC_MSG_PowerGateMmHub 0x35 +#define PPSMC_MSG_SetRccPfcPmeRestoreRegister 0x36 +#define PPSMC_MSG_GpuChangeState 0x37 
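The PPSMC_Result_* codes and PPSMC_MSG_* identifiers in this header describe the mailbox protocol between the driver and the Raven (SMU10) firmware: the driver posts a message ID, optionally with an argument, then polls a response register until the firmware reports one of the result codes. A minimal sketch of how such a response word might be mapped to a negative errno value, as kernel code conventionally does; this helper is illustrative only and is not part of the header:

static int rv_check_smc_response(uint32_t resp)
{
	switch (resp) {
	case PPSMC_Result_OK:
		return 0;
	case PPSMC_Result_CmdRejectedBusy:
	case PPSMC_Result_CmdRejectedPrereq:
		/* Firmware is not ready for this message yet; retry later. */
		return -EBUSY;
	case PPSMC_Result_UnknownCmd:
		/* Message ID not understood by this firmware build. */
		return -EINVAL;
	case PPSMC_Result_Failed:
	default:
		return -EIO;
	}
}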
+#define PPSMC_MSG_GetGfxBusy 0x3D +#define PPSMC_Message_Count 0x42 + +typedef uint16_t PPSMC_Result; +typedef int PPSMC_Msg; + + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h new file mode 100644 index 000000000000..9e837a5014c5 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h @@ -0,0 +1,188 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU10_H +#define SMU10_H + +#pragma pack(push, 1) + +#define ENABLE_DEBUG_FEATURES + +/* Feature Control Defines */ +#define FEATURE_CCLK_CONTROLLER_BIT 0 +#define FEATURE_FAN_CONTROLLER_BIT 1 +#define FEATURE_DATA_CALCULATION_BIT 2 +#define FEATURE_PPT_BIT 3 +#define FEATURE_TDC_BIT 4 +#define FEATURE_THERMAL_BIT 5 +#define FEATURE_FIT_BIT 6 +#define FEATURE_EDC_BIT 7 +#define FEATURE_PLL_POWER_DOWN_BIT 8 +#define FEATURE_ULV_BIT 9 +#define FEATURE_VDDOFF_BIT 10 +#define FEATURE_VCN_DPM_BIT 11 +#define FEATURE_ACP_DPM_BIT 12 +#define FEATURE_ISP_DPM_BIT 13 +#define FEATURE_FCLK_DPM_BIT 14 +#define FEATURE_SOCCLK_DPM_BIT 15 +#define FEATURE_MP0CLK_DPM_BIT 16 +#define FEATURE_LCLK_DPM_BIT 17 +#define FEATURE_SHUBCLK_DPM_BIT 18 +#define FEATURE_DCEFCLK_DPM_BIT 19 +#define FEATURE_GFX_DPM_BIT 20 +#define FEATURE_DS_GFXCLK_BIT 21 +#define FEATURE_DS_SOCCLK_BIT 22 +#define FEATURE_DS_LCLK_BIT 23 +#define FEATURE_DS_DCEFCLK_BIT 24 +#define FEATURE_DS_SHUBCLK_BIT 25 +#define FEATURE_RM_BIT 26 +#define FEATURE_S0i2_BIT 27 +#define FEATURE_WHISPER_MODE_BIT 28 +#define FEATURE_DS_FCLK_BIT 29 +#define FEATURE_DS_SMNCLK_BIT 30 +#define FEATURE_DS_MP1CLK_BIT 31 +#define FEATURE_DS_MP0CLK_BIT 32 +#define FEATURE_MGCG_BIT 33 +#define FEATURE_DS_FUSE_SRAM_BIT 34 +#define FEATURE_GFX_CKS 35 +#define FEATURE_PSI0_BIT 36 +#define FEATURE_PROCHOT_BIT 37 +#define FEATURE_CPUOFF_BIT 38 +#define FEATURE_STAPM_BIT 39 +#define FEATURE_CORE_CSTATES_BIT 40 +#define FEATURE_SPARE_41_BIT 41 +#define FEATURE_SPARE_42_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 
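The FEATURE_*_BIT values being defined here are bit indices into a 64-bit feature word (NUM_FEATURES below is 64), and the firmware status structure later in this header reports them as FeatureStatus[NUM_FEATURES / 32], i.e. two 32-bit words. A small sketch, assuming the caller has already copied those words out of the firmware status, of testing a single bit index without the integer-width pitfall that a plain (1 << bit) mask runs into for indices of 32 and above:

#include <stdbool.h>
#include <stdint.h>

/* Test one FEATURE_*_BIT index against a FeatureStatus-style word array. */
static bool smu10_feature_enabled(const uint32_t *feature_status,
				  unsigned int bit)
{
	return (feature_status[bit / 32] >> (bit % 32)) & 1;
}

For example, smu10_feature_enabled(status.FeatureStatus, FEATURE_GFX_DPM_BIT) would report whether graphics DPM is active, assuming "status" is a FwStatus_t that has been read back from the firmware.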
+#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 + +#define NUM_FEATURES 64 + +#define FEATURE_CCLK_CONTROLLER_MASK (1 << FEATURE_CCLK_CONTROLLER_BIT) +#define FEATURE_FAN_CONTROLLER_MASK (1 << FEATURE_FAN_CONTROLLER_BIT) +#define FEATURE_DATA_CALCULATION_MASK (1 << FEATURE_DATA_CALCULATION_BIT) +#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT) +#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT) +#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT) +#define FEATURE_FIT_MASK (1 << FEATURE_FIT_BIT) +#define FEATURE_EDC_MASK (1 << FEATURE_EDC_BIT) +#define FEATURE_PLL_POWER_DOWN_MASK (1 << FEATURE_PLL_POWER_DOWN_BIT) +#define FEATURE_ULV_MASK (1 << FEATURE_ULV_BIT) +#define FEATURE_VDDOFF_MASK (1 << FEATURE_VDDOFF_BIT) +#define FEATURE_VCN_DPM_MASK (1 << FEATURE_VCN_DPM_BIT) +#define FEATURE_ACP_DPM_MASK (1 << FEATURE_ACP_DPM_BIT) +#define FEATURE_ISP_DPM_MASK (1 << FEATURE_ISP_DPM_BIT) +#define FEATURE_FCLK_DPM_MASK (1 << FEATURE_FCLK_DPM_BIT) +#define FEATURE_SOCCLK_DPM_MASK (1 << FEATURE_SOCCLK_DPM_BIT) +#define FEATURE_MP0CLK_DPM_MASK (1 << FEATURE_MP0CLK_DPM_BIT) +#define FEATURE_LCLK_DPM_MASK (1 << FEATURE_LCLK_DPM_BIT) +#define FEATURE_SHUBCLK_DPM_MASK (1 << FEATURE_SHUBCLK_DPM_BIT) +#define FEATURE_DCEFCLK_DPM_MASK (1 << FEATURE_DCEFCLK_DPM_BIT) +#define FEATURE_GFX_DPM_MASK (1 << FEATURE_GFX_DPM_BIT) +#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT) +#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT) +#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT) +#define FEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT) +#define FEATURE_DS_SHUBCLK_MASK (1 << FEATURE_DS_SHUBCLK_BIT) +#define FEATURE_RM_MASK (1 << FEATURE_RM_BIT) +#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT) +#define FEATURE_DS_SMNCLK_MASK (1 << FEATURE_DS_SMNCLK_BIT) +#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT) +#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT) +#define FEATURE_MGCG_MASK (1 << FEATURE_MGCG_BIT) +#define FEATURE_DS_FUSE_SRAM_MASK (1 << FEATURE_DS_FUSE_SRAM_BIT) +#define FEATURE_PSI0_MASK (1 << FEATURE_PSI0_BIT) +#define FEATURE_STAPM_MASK (1 << FEATURE_STAPM_BIT) +#define FEATURE_PROCHOT_MASK (1 << FEATURE_PROCHOT_BIT) +#define FEATURE_CPUOFF_MASK (1 << FEATURE_CPUOFF_BIT) +#define FEATURE_CORE_CSTATES_MASK (1 << FEATURE_CORE_CSTATES_BIT) + +/* Workload bits */ +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 +#define WORKLOAD_PPLIB_VIDEO_BIT 2 +#define WORKLOAD_PPLIB_VR_BIT 3 +#define WORKLOAD_PPLIB_COMPUTE_BIT 4 +#define WORKLOAD_PPLIB_CUSTOM_BIT 5 +#define WORKLOAD_PPLIB_COUNT 6 + +typedef struct { + /* MP1_EXT_SCRATCH0 */ + uint32_t CurrLevel_ACP : 4; + uint32_t CurrLevel_ISP : 4; + uint32_t CurrLevel_VCN : 4; + uint32_t CurrLevel_LCLK : 4; + uint32_t CurrLevel_MP0CLK : 4; + uint32_t CurrLevel_FCLK : 4; + uint32_t CurrLevel_SOCCLK : 4; + uint32_t CurrLevel_DCEFCLK : 4; + /* MP1_EXT_SCRATCH1 */ + uint32_t TargLevel_ACP : 4; + uint32_t TargLevel_ISP : 4; + uint32_t TargLevel_VCN : 4; + uint32_t TargLevel_LCLK : 4; + uint32_t TargLevel_MP0CLK : 4; + uint32_t TargLevel_FCLK : 4; + uint32_t TargLevel_SOCCLK : 4; + uint32_t TargLevel_DCEFCLK : 4; + /* MP1_EXT_SCRATCH2 */ + uint32_t CurrLevel_SHUBCLK : 4; + uint32_t TargLevel_SHUBCLK : 4; + uint32_t InUlv : 1; + uint32_t InS0i2 : 1; + uint32_t InWhisperMode : 1; + uint32_t Reserved : 21; + /* MP1_EXT_SCRATCH3-4 */ + uint32_t 
Reserved2[2]; + /* MP1_EXT_SCRATCH5 */ + uint32_t FeatureStatus[NUM_FEATURES / 32]; +} FwStatus_t; + +#define TABLE_BIOS_IF 0 /* Called by BIOS */ +#define TABLE_WATERMARKS 1 /* Called by Driver */ +#define TABLE_CUSTOM_DPM 2 /* Called by Driver */ +#define TABLE_PMSTATUSLOG 3 /* Called by Tools for Agm logging */ +#define TABLE_DPMCLOCKS 4 /* Called by Driver */ +#define TABLE_MOMENTARY_PM 5 /* Called by Tools */ +#define TABLE_COUNT 6 + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h new file mode 100644 index 000000000000..c498158771cc --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h @@ -0,0 +1,117 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef SMU10_DRIVER_IF_H +#define SMU10_DRIVER_IF_H + +#define SMU10_DRIVER_IF_VERSION 0x6 + +#define NUM_DSPCLK_LEVELS 8 + +typedef struct { + int32_t value; + uint32_t numFractionalBits; +} FloatInIntFormat_t; + +typedef enum { + DSPCLK_DCEFCLK = 0, + DSPCLK_DISPCLK, + DSPCLK_PIXCLK, + DSPCLK_PHYCLK, + DSPCLK_COUNT, +} DSPCLK_e; + +typedef struct { + uint16_t Freq; + uint16_t Vid; +} DisplayClockTable_t; + + +typedef struct { + uint16_t MinClock; /* This is either DCFCLK or SOCCLK (in MHz) */ + uint16_t MaxClock; /* This is either DCFCLK or SOCCLK (in MHz) */ + uint16_t MinMclk; + uint16_t MaxMclk; + + uint8_t WmSetting; + uint8_t WmType; + uint8_t Padding[2]; +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WM_SOCCLK = 0, + WM_DCFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef struct { + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; + uint32_t MmHubPadding[7]; +} Watermarks_t; + +typedef enum { + CUSTOM_DPM_SETTING_GFXCLK, + CUSTOM_DPM_SETTING_CCLK, + CUSTOM_DPM_SETTING_FCLK_CCX, + CUSTOM_DPM_SETTING_FCLK_GFX, + CUSTOM_DPM_SETTING_FCLK_STALLS, + CUSTOM_DPM_SETTING_LCLK, + CUSTOM_DPM_SETTING_COUNT, +} CUSTOM_DPM_SETTING_e; + +typedef struct { + uint8_t ActiveHystLimit; + uint8_t IdleHystLimit; + uint8_t FPS; + uint8_t MinActiveFreqType; + FloatInIntFormat_t MinActiveFreq; + FloatInIntFormat_t PD_Data_limit; + FloatInIntFormat_t PD_Data_time_constant; + FloatInIntFormat_t PD_Data_error_coeff; + FloatInIntFormat_t PD_Data_error_rate_coeff; +} DpmActivityMonitorCoeffExt_t; + +typedef struct { + DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT]; +} CustomDpmSettings_t; + +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_DCEFCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 4 +#define NUM_MEMCLK_DPM_LEVELS 4 + +typedef struct { + uint32_t Freq; /* In MHz */ + uint32_t Vol; /* Millivolts with 2 fractional bits */ +} DpmClock_t; + +typedef struct { + DpmClock_t DcefClocks[NUM_DCEFCLK_DPM_LEVELS]; + DpmClock_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + DpmClock_t FClocks[NUM_FCLK_DPM_LEVELS]; + DpmClock_t MemClocks[NUM_MEMCLK_DPM_LEVELS]; +} DpmClocks_t; + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h new file mode 100644 index 000000000000..fdc6b7a57bc9 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h @@ -0,0 +1,893 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU11_DRIVER_IF_H +#define SMU11_DRIVER_IF_H + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +// Be aware of that the version should be updated in +// smu_v11_0.h, rename is also needed. +// #define SMU11_DRIVER_IF_VERSION 0x13 + +#define PPTABLE_V20_SMU_VERSION 3 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_ECLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_DCEFCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_PIXCLK_DPM_LEVELS 8 +#define NUM_PHYCLK_DPM_LEVELS 8 +#define NUM_LINK_LEVELS 2 +#define NUM_XGMI_LEVELS 2 + +#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) +#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) +#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) +#define MAX_ECLK_DPM_LEVEL (NUM_ECLK_DPM_LEVELS - 1) +#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) +#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) +#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) +#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1) +#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) +#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) +#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) +#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) +#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) +#define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1) + +#define PPSMC_GeminiModeNone 0 +#define PPSMC_GeminiModeMaster 1 +#define PPSMC_GeminiModeSlave 2 + + +#define FEATURE_DPM_PREFETCHER_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_UCLK_BIT 2 +#define FEATURE_DPM_SOCCLK_BIT 3 +#define FEATURE_DPM_UVD_BIT 4 +#define FEATURE_DPM_VCE_BIT 5 +#define FEATURE_ULV_BIT 6 +#define FEATURE_DPM_MP0CLK_BIT 7 +#define FEATURE_DPM_LINK_BIT 8 +#define FEATURE_DPM_DCEFCLK_BIT 9 +#define FEATURE_DS_GFXCLK_BIT 10 +#define FEATURE_DS_SOCCLK_BIT 11 +#define FEATURE_DS_LCLK_BIT 12 +#define FEATURE_PPT_BIT 13 +#define FEATURE_TDC_BIT 14 +#define FEATURE_THERMAL_BIT 15 +#define FEATURE_GFX_PER_CU_CG_BIT 16 +#define FEATURE_RM_BIT 17 +#define FEATURE_DS_DCEFCLK_BIT 18 +#define FEATURE_ACDC_BIT 19 +#define FEATURE_VR0HOT_BIT 20 +#define FEATURE_VR1HOT_BIT 21 +#define FEATURE_FW_CTF_BIT 22 +#define FEATURE_LED_DISPLAY_BIT 23 +#define FEATURE_FAN_CONTROL_BIT 24 +#define FEATURE_GFX_EDC_BIT 25 +#define FEATURE_GFXOFF_BIT 26 +#define FEATURE_CG_BIT 27 +#define FEATURE_DPM_FCLK_BIT 28 +#define FEATURE_DS_FCLK_BIT 29 +#define FEATURE_DS_MP1CLK_BIT 30 +#define FEATURE_DS_MP0CLK_BIT 31 +#define FEATURE_XGMI_BIT 32 +#define FEATURE_ECC_BIT 33 +#define FEATURE_SPARE_34_BIT 34 +#define FEATURE_SPARE_35_BIT 35 +#define FEATURE_SPARE_36_BIT 36 +#define FEATURE_SPARE_37_BIT 37 +#define FEATURE_SPARE_38_BIT 38 +#define FEATURE_SPARE_39_BIT 39 +#define FEATURE_SPARE_40_BIT 40 +#define FEATURE_SPARE_41_BIT 41 +#define FEATURE_SPARE_42_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 
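Each clock domain above advertises a fixed number of DPM levels, and the matching MAX_*_DPM_LEVEL constant is simply that count minus one; the PPTable_t frequency tables later in this header (FreqTableGfx and friends) are dimensioned by the same NUM_*_DPM_LEVELS values. A short sketch of clamping a requested level before indexing such a table; the table pointer is just an illustrative parameter, not an API defined here:

#include <stdint.h>

/* Look up a GFXCLK frequency, clamping the level to the table size. */
static uint16_t smu11_gfxclk_for_level(const uint16_t *freq_table_gfx,
				       unsigned int level)
{
	if (level > MAX_GFXCLK_DPM_LEVEL)	/* NUM_GFXCLK_DPM_LEVELS - 1 */
		level = MAX_GFXCLK_DPM_LEVEL;

	return freq_table_gfx[level];
}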
+#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 + +#define NUM_FEATURES 64 + +#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT ) +#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT ) +#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT ) +#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT ) +#define FEATURE_DPM_UVD_MASK (1 << FEATURE_DPM_UVD_BIT ) +#define FEATURE_DPM_VCE_MASK (1 << FEATURE_DPM_VCE_BIT ) +#define FEATURE_ULV_MASK (1 << FEATURE_ULV_BIT ) +#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT ) +#define FEATURE_DPM_LINK_MASK (1 << FEATURE_DPM_LINK_BIT ) +#define FEATURE_DPM_DCEFCLK_MASK (1 << FEATURE_DPM_DCEFCLK_BIT ) +#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT ) +#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) +#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) +#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) +#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) +#define FEATURE_GFX_PER_CU_CG_MASK (1 << FEATURE_GFX_PER_CU_CG_BIT ) +#define FEATURE_RM_MASK (1 << FEATURE_RM_BIT ) +#define FEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT ) +#define FEATURE_ACDC_MASK (1 << FEATURE_ACDC_BIT ) +#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) +#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) +#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) +#define FEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT ) +#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) +#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT ) +#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT ) +#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT ) +#define FEATURE_DPM_FCLK_MASK (1 << FEATURE_DPM_FCLK_BIT ) +#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) +#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT ) +#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT ) +#define FEATURE_XGMI_MASK (1ULL << FEATURE_XGMI_BIT ) +#define FEATURE_ECC_MASK (1ULL << FEATURE_ECC_BIT ) + +#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 +#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_SOCCLK 0x00000004 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_UCLK 0x00000008 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000010 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_UCLK 0x00000020 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000040 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_UCLK 0x00000080 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_SOCCLK 0x00000100 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_UCLK 0x00000200 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_SOCCLK 0x00000400 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_UCLK 0x00000800 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00001000 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00002000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_GFXCLK_SWITCH 0x00004000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_SOCCLK_SWITCH 0x00008000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000 +#define 
DPM_OVERRIDE_ENABLE_GFXOFF_FCLK_SWITCH 0x00020000 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + + +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + + +#define THROTTLER_STATUS_PADDING_BIT 0 +#define THROTTLER_STATUS_TEMP_EDGE_BIT 1 +#define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2 +#define THROTTLER_STATUS_TEMP_HBM_BIT 3 +#define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4 +#define THROTTLER_STATUS_TEMP_VR_SOC_BIT 5 +#define THROTTLER_STATUS_TEMP_VR_MEM0_BIT 6 +#define THROTTLER_STATUS_TEMP_VR_MEM1_BIT 7 +#define THROTTLER_STATUS_TEMP_LIQUID_BIT 8 +#define THROTTLER_STATUS_TEMP_PLX_BIT 9 +#define THROTTLER_STATUS_TEMP_SKIN_BIT 10 +#define THROTTLER_STATUS_TDC_GFX_BIT 11 +#define THROTTLER_STATUS_TDC_SOC_BIT 12 +#define THROTTLER_STATUS_PPT_BIT 13 +#define THROTTLER_STATUS_FIT_BIT 14 +#define THROTTLER_STATUS_PPM_BIT 15 + + +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF + + +#define WORKLOAD_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_COUNT 7 + + +#define XGMI_STATE_D0 1 +#define XGMI_STATE_D3 0 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, + I2C_CONTROLLER_PORT_1 = 1, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VDDCI, + I2C_CONTROLLER_NAME_VR_HBM, + I2C_CONTROLLER_NAME_LIQUID_0, + I2C_CONTROLLER_NAME_LIQUID_1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_VDDCI, + I2C_CONTROLLER_THROTTLER_VR_HBM, + I2C_CONTROLLER_THROTTLER_LIQUID_0, + I2C_CONTROLLER_THROTTLER_LIQUID_1, + I2C_CONTROLLER_THROTTLER_PLX, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, + I2C_CONTROLLER_PROTOCOL_VR_IR35217, + I2C_CONTROLLER_PROTOCOL_TMP_TMP102A, + I2C_CONTROLLER_PROTOCOL_SPARE_0, + I2C_CONTROLLER_PROTOCOL_SPARE_1, + I2C_CONTROLLER_PROTOCOL_SPARE_2, +} I2cControllerProtocol_e; + +typedef enum { + I2C_CONTROLLER_SPEED_SLOW = 0, + I2C_CONTROLLER_SPEED_FAST = 1, +} I2cControllerSpeed_e; + +typedef struct { + uint32_t Enabled; + uint32_t SlaveAddress; + uint32_t ControllerPort; + uint32_t ControllerName; + + uint32_t ThermalThrottler; + uint32_t I2cProtocol; + uint32_t I2cSpeed; +} I2cControllerConfig_t; + +typedef struct { + uint32_t a; + uint32_t b; + uint32_t c; +} QuadraticInt_t; + +typedef struct { + uint32_t m; + uint32_t b; +} LinearInt_t; + +typedef struct { + uint32_t a; + uint32_t b; + uint32_t c; +} DroopInt_t; + +typedef enum { + PPCLK_GFXCLK, + PPCLK_VCLK, + PPCLK_DCLK, + PPCLK_ECLK, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_DCEFCLK, + PPCLK_DISPCLK, + PPCLK_PIXCLK, + PPCLK_PHYCLK, + PPCLK_FCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + 
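The THROTTLER_STATUS_*_BIT positions defined above are how the firmware reports active throttling sources; SmuMetrics_t later in this header carries them packed into its 32-bit ThrottlerStatus field. A minimal sketch of decoding that word into a readable summary; only a few of the sources are listed here as an example:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Print which throttlers are currently asserted in a ThrottlerStatus word. */
static void smu11_print_throttlers(uint32_t throttler_status)
{
	static const struct {
		unsigned int bit;
		const char *name;
	} sources[] = {
		{ THROTTLER_STATUS_TEMP_EDGE_BIT,    "edge temperature" },
		{ THROTTLER_STATUS_TEMP_HOTSPOT_BIT, "hotspot temperature" },
		{ THROTTLER_STATUS_TEMP_HBM_BIT,     "HBM temperature" },
		{ THROTTLER_STATUS_TDC_GFX_BIT,      "GFX TDC" },
		{ THROTTLER_STATUS_PPT_BIT,          "package power (PPT)" },
	};
	size_t i;

	for (i = 0; i < sizeof(sources) / sizeof(sources[0]); i++) {
		if (throttler_status & (1u << sources[i].bit))
			printf("throttled by %s\n", sources[i].name);
	}
}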
+typedef enum { + VOLTAGE_MODE_AVFS = 0, + VOLTAGE_MODE_AVFS_SS, + VOLTAGE_MODE_SS, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + + +typedef struct { + uint8_t VoltageMode; + uint8_t SnapToDiscrete; + uint8_t NumDiscreteLevels; + uint8_t padding; + LinearInt_t ConversionToAvfsClk; + QuadraticInt_t SsCurve; +} DpmDescriptor_t; + +typedef struct { + uint32_t Version; + + + uint32_t FeaturesToRun[2]; + + + uint16_t SocketPowerLimitAc0; + uint16_t SocketPowerLimitAc0Tau; + uint16_t SocketPowerLimitAc1; + uint16_t SocketPowerLimitAc1Tau; + uint16_t SocketPowerLimitAc2; + uint16_t SocketPowerLimitAc2Tau; + uint16_t SocketPowerLimitAc3; + uint16_t SocketPowerLimitAc3Tau; + uint16_t SocketPowerLimitDc; + uint16_t SocketPowerLimitDcTau; + uint16_t TdcLimitSoc; + uint16_t TdcLimitSocTau; + uint16_t TdcLimitGfx; + uint16_t TdcLimitGfxTau; + + uint16_t TedgeLimit; + uint16_t ThotspotLimit; + uint16_t ThbmLimit; + uint16_t Tvr_gfxLimit; + uint16_t Tvr_memLimit; + uint16_t Tliquid1Limit; + uint16_t Tliquid2Limit; + uint16_t TplxLimit; + uint32_t FitLimit; + + uint16_t PpmPowerLimit; + uint16_t PpmTemperatureThreshold; + + uint8_t MemoryOnPackage; + uint8_t padding8_limits; + uint16_t Tvr_SocLimit; + + uint16_t UlvVoltageOffsetSoc; + uint16_t UlvVoltageOffsetGfx; + + uint8_t UlvSmnclkDid; + uint8_t UlvMp1clkDid; + uint8_t UlvGfxclkBypass; + uint8_t Padding234; + + + uint16_t MinVoltageGfx; + uint16_t MinVoltageSoc; + uint16_t MaxVoltageGfx; + uint16_t MaxVoltageSoc; + + uint16_t LoadLineResistanceGfx; + uint16_t LoadLineResistanceSoc; + + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; + uint16_t FreqTableEclk [NUM_ECLK_DPM_LEVELS ]; + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; + uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; + uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; + uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; + + uint16_t DcModeMaxFreq [PPCLK_COUNT ]; + uint16_t Padding8_Clks; + + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; + + + uint16_t GfxclkFidle; + uint16_t GfxclkSlewRate; + uint16_t CksEnableFreq; + uint16_t Padding789; + QuadraticInt_t CksVoltageOffset; + uint8_t Padding567[4]; + uint16_t GfxclkDsMaxFreq; + uint8_t GfxclkSource; + uint8_t Padding456; + + uint8_t LowestUclkReservedForUlv; + uint8_t Padding8_Uclk[3]; + + + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + + uint16_t EnableTdpm; + uint16_t TdpmHighHystTemperature; + uint16_t TdpmLowHystTemperature; + uint16_t GfxclkFreqHighTempLimit; + + + uint16_t FanStopTemp; + uint16_t FanStartTemp; + + uint16_t FanGainEdge; + uint16_t FanGainHotspot; + uint16_t FanGainLiquid; + uint16_t FanGainVrGfx; + uint16_t FanGainVrSoc; + uint16_t FanGainPlx; + uint16_t FanGainHbm; + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + + + + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t 
FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_Avfs[2]; + + QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; + DroopInt_t dBtcGbGfxCksOn; + DroopInt_t dBtcGbGfxCksOff; + DroopInt_t dBtcGbGfxAfll; + DroopInt_t dBtcGbSoc; + LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; + + QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; + + uint16_t DcTol[AVFS_VOLTAGE_COUNT]; + + uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_GfxBtc[2]; + + int16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; + uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; + + + uint8_t XgmiLinkSpeed [NUM_XGMI_LEVELS]; + uint8_t XgmiLinkWidth [NUM_XGMI_LEVELS]; + uint16_t XgmiFclkFreq [NUM_XGMI_LEVELS]; + uint16_t XgmiUclkFreq [NUM_XGMI_LEVELS]; + uint16_t XgmiSocclkFreq [NUM_XGMI_LEVELS]; + uint16_t XgmiSocVoltage [NUM_XGMI_LEVELS]; + + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; + + uint16_t MGpuFanBoostLimitRpm; + uint16_t padding16_Fan; + + uint16_t FanGainVrMem0; + uint16_t FanGainVrMem1; + + uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; + + uint32_t Reserved[11]; + + uint32_t Padding32[3]; + + uint16_t MaxVoltageStepGfx; + uint16_t MaxVoltageStepSoc; + + uint8_t VddGfxVrMapping; + uint8_t VddSocVrMapping; + uint8_t VddMem0VrMapping; + uint8_t VddMem1VrMapping; + + uint8_t GfxUlvPhaseSheddingMask; + uint8_t SocUlvPhaseSheddingMask; + uint8_t ExternalSensorPresent; + uint8_t Padding8_V; + + + uint16_t GfxMaxCurrent; + int8_t GfxOffset; + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; + int8_t SocOffset; + uint8_t Padding_TelemetrySoc; + + uint16_t Mem0MaxCurrent; + int8_t Mem0Offset; + uint8_t Padding_TelemetryMem0; + + uint16_t Mem1MaxCurrent; + int8_t Mem1Offset; + uint8_t Padding_TelemetryMem1; + + + uint8_t AcDcGpio; + uint8_t AcDcPolarity; + uint8_t VR0HotGpio; + uint8_t VR0HotPolarity; + + uint8_t VR1HotGpio; + uint8_t VR1HotPolarity; + uint8_t Padding1; + uint8_t Padding2; + + + + uint8_t LedPin0; + uint8_t LedPin1; + uint8_t LedPin2; + uint8_t padding8_4; + + + uint8_t PllGfxclkSpreadEnabled; + uint8_t PllGfxclkSpreadPercent; + uint16_t PllGfxclkSpreadFreq; + + uint8_t UclkSpreadEnabled; + uint8_t UclkSpreadPercent; + uint16_t UclkSpreadFreq; + + uint8_t FclkSpreadEnabled; + uint8_t FclkSpreadPercent; + uint16_t FclkSpreadFreq; + + uint8_t FllGfxclkSpreadEnabled; + uint8_t FllGfxclkSpreadPercent; + uint16_t FllGfxclkSpreadFreq; + + I2cControllerConfig_t I2cControllers[I2C_CONTROLLER_NAME_COUNT]; + + uint32_t BoardReserved[10]; + + + uint32_t MmHubPadding[8]; + +} PPTable_t; + +typedef struct { + + uint16_t GfxclkAverageLpfTau; + uint16_t SocclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + uint16_t SocketPowerLpfTau; + + + uint32_t MmHubPadding[8]; +} DriverSmuConfig_t; + +typedef struct { + + uint16_t GfxclkFmin; + uint16_t GfxclkFmax; + uint16_t GfxclkFreq1; + uint16_t GfxclkVolt1; + uint16_t GfxclkFreq2; + uint16_t GfxclkVolt2; + uint16_t GfxclkFreq3; + uint16_t GfxclkVolt3; + uint16_t UclkFmax; + int16_t OverDrivePct; + uint16_t FanMaximumRpm; + uint16_t FanMinimumPwm; + uint16_t FanTargetTemperature; + uint16_t MaxOpTemp; + uint16_t FanZeroRpmEnable; + uint16_t Padding; + +} OverDriveTable_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; 
+ uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t CurrSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureHBM ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureLiquid ; + uint16_t TemperaturePlx ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint16_t AverageSocketPower; + uint8_t Padding; + + + uint32_t MmHubPadding[7]; +} SmuMetrics_t; + +typedef struct { + uint16_t MinClock; + uint16_t MaxClock; + uint16_t MinUclk; + uint16_t MaxUclk; + + uint8_t WmSetting; + uint8_t Padding[3]; +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WM_SOCCLK = 0, + WM_DCEFCLK, + WM_COUNT_PP, +} WM_CLOCK_e; + +typedef struct { + + WatermarkRowGeneric_t WatermarkRow[WM_COUNT_PP][NUM_WM_RANGES]; + + uint32_t MmHubPadding[7]; +} Watermarks_t; + +typedef struct { + uint16_t avgPsmCount[45]; + uint16_t minPsmCount[45]; + float avgPsmVoltage[45]; + float minPsmVoltage[45]; + + uint16_t avgScsPsmCount; + uint16_t minScsPsmCount; + float avgScsPsmVoltage; + float minScsPsmVoltage; + + + uint32_t MmHubPadding[6]; +} AvfsDebugTable_t; + +typedef struct { + uint8_t AvfsVersion; + uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; + + int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; + int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; + int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; + + int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; + int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; + int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; + + int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; + int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; + int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; + + int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; + int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; + int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; + + int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; + int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; + int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; + + uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; + + uint32_t VInversion[AVFS_VOLTAGE_COUNT]; + + + int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; + int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; + int32_t P2V_b[AVFS_VOLTAGE_COUNT]; + + uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; + + uint32_t EnabledAvfsModules; + + uint32_t MmHubPadding[7]; +} AvfsFuseOverride_t; + +typedef struct { + + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t Gfx_UseRlcBusy; + uint16_t Gfx_MinActiveFreq; + uint16_t Gfx_BoosterFreq; + uint16_t Gfx_PD_Data_time_constant; + uint32_t Gfx_PD_Data_limit_a; + uint32_t Gfx_PD_Data_limit_b; + uint32_t Gfx_PD_Data_limit_c; + uint32_t Gfx_PD_Data_error_coeff; + uint32_t Gfx_PD_Data_error_rate_coeff; + + uint8_t Soc_ActiveHystLimit; + uint8_t Soc_IdleHystLimit; + uint8_t Soc_FPS; + uint8_t Soc_MinActiveFreqType; + uint8_t Soc_BoosterFreqType; + uint8_t Soc_UseRlcBusy; + uint16_t Soc_MinActiveFreq; + uint16_t Soc_BoosterFreq; + uint16_t Soc_PD_Data_time_constant; + uint32_t Soc_PD_Data_limit_a; + uint32_t 
Soc_PD_Data_limit_b; + uint32_t Soc_PD_Data_limit_c; + uint32_t Soc_PD_Data_error_coeff; + uint32_t Soc_PD_Data_error_rate_coeff; + + uint8_t Mem_ActiveHystLimit; + uint8_t Mem_IdleHystLimit; + uint8_t Mem_FPS; + uint8_t Mem_MinActiveFreqType; + uint8_t Mem_BoosterFreqType; + uint8_t Mem_UseRlcBusy; + uint16_t Mem_MinActiveFreq; + uint16_t Mem_BoosterFreq; + uint16_t Mem_PD_Data_time_constant; + uint32_t Mem_PD_Data_limit_a; + uint32_t Mem_PD_Data_limit_b; + uint32_t Mem_PD_Data_limit_c; + uint32_t Mem_PD_Data_error_coeff; + uint32_t Mem_PD_Data_error_rate_coeff; + + uint8_t Fclk_ActiveHystLimit; + uint8_t Fclk_IdleHystLimit; + uint8_t Fclk_FPS; + uint8_t Fclk_MinActiveFreqType; + uint8_t Fclk_BoosterFreqType; + uint8_t Fclk_UseRlcBusy; + uint16_t Fclk_MinActiveFreq; + uint16_t Fclk_BoosterFreq; + uint16_t Fclk_PD_Data_time_constant; + uint32_t Fclk_PD_Data_limit_a; + uint32_t Fclk_PD_Data_limit_b; + uint32_t Fclk_PD_Data_limit_c; + uint32_t Fclk_PD_Data_error_coeff; + uint32_t Fclk_PD_Data_error_rate_coeff; + +} DpmActivityMonitorCoeffInt_t; + +#define TABLE_PPTABLE 0 +#define TABLE_WATERMARKS 1 +#define TABLE_AVFS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_AVFS_FUSE_OVERRIDE 4 +#define TABLE_PMSTATUSLOG 5 +#define TABLE_SMU_METRICS 6 +#define TABLE_DRIVER_SMU_CONFIG 7 +#define TABLE_ACTIVITY_MONITOR_COEFF 8 +#define TABLE_OVERDRIVE 9 +#define TABLE_COUNT 10 + + +#define UCLK_SWITCH_SLOW 0 +#define UCLK_SWITCH_FAST 1 + + +#define SQ_Enable_MASK 0x1 +#define SQ_IR_MASK 0x2 +#define SQ_PCC_MASK 0x4 +#define SQ_EDC_MASK 0x8 + +#define TCP_Enable_MASK 0x100 +#define TCP_IR_MASK 0x200 +#define TCP_PCC_MASK 0x400 +#define TCP_EDC_MASK 0x800 + +#define TD_Enable_MASK 0x10000 +#define TD_IR_MASK 0x20000 +#define TD_PCC_MASK 0x40000 +#define TD_EDC_MASK 0x80000 + +#define DB_Enable_MASK 0x1000000 +#define DB_IR_MASK 0x2000000 +#define DB_PCC_MASK 0x4000000 +#define DB_EDC_MASK 0x8000000 + +#define SQ_Enable_SHIFT 0 +#define SQ_IR_SHIFT 1 +#define SQ_PCC_SHIFT 2 +#define SQ_EDC_SHIFT 3 + +#define TCP_Enable_SHIFT 8 +#define TCP_IR_SHIFT 9 +#define TCP_PCC_SHIFT 10 +#define TCP_EDC_SHIFT 11 + +#define TD_Enable_SHIFT 16 +#define TD_IR_SHIFT 17 +#define TD_PCC_SHIFT 18 +#define TD_EDC_SHIFT 19 + +#define DB_Enable_SHIFT 24 +#define DB_IR_SHIFT 25 +#define DB_PCC_SHIFT 26 +#define DB_EDC_SHIFT 27 + +#define REMOVE_FMAX_MARGIN_BIT 0x0 +#define REMOVE_DCTOL_MARGIN_BIT 0x1 +#define REMOVE_PLATFORM_MARGIN_BIT 0x2 + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h new file mode 100644 index 000000000000..e14072d45918 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h @@ -0,0 +1,189 @@ +/* + * Copyright 2013 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU7_H +#define SMU7_H + +#pragma pack(push, 1) + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + + +#define SMU7_CONTEXT_ID_SMC 1 +#define SMU7_CONTEXT_ID_VBIOS 2 + +#define SMU7_MAX_LEVELS_VDDC 8 +#define SMU7_MAX_LEVELS_VDDCI 4 +#define SMU7_MAX_LEVELS_MVDD 4 +#define SMU7_MAX_LEVELS_VDDNB 8 + +#define SMU7_MAX_LEVELS_GRAPHICS SMU__NUM_SCLK_DPM_STATE // SCLK + SQ DPM + ULV +#define SMU7_MAX_LEVELS_MEMORY SMU__NUM_MCLK_DPM_LEVELS // MCLK Levels DPM +#define SMU7_MAX_LEVELS_GIO SMU__NUM_LCLK_DPM_LEVELS // LCLK Levels +#define SMU7_MAX_LEVELS_LINK SMU__NUM_PCIE_DPM_LEVELS // PCIe speed and number of lanes. +#define SMU7_MAX_LEVELS_UVD 8 // VCLK/DCLK levels for UVD. +#define SMU7_MAX_LEVELS_VCE 8 // ECLK levels for VCE. +#define SMU7_MAX_LEVELS_ACP 8 // ACLK levels for ACP. +#define SMU7_MAX_LEVELS_SAMU 8 // SAMCLK levels for SAMU. +#define SMU7_MAX_ENTRIES_SMIO 32 // Number of entries in SMIO table. + +#define DPM_NO_LIMIT 0 +#define DPM_NO_UP 1 +#define DPM_GO_DOWN 2 +#define DPM_GO_UP 3 + +#define SMU7_FIRST_DPM_GRAPHICS_LEVEL 0 +#define SMU7_FIRST_DPM_MEMORY_LEVEL 0 + +#define GPIO_CLAMP_MODE_VRHOT 1 +#define GPIO_CLAMP_MODE_THERM 2 +#define GPIO_CLAMP_MODE_DC 4 + +#define SCRATCH_B_TARG_PCIE_INDEX_SHIFT 0 +#define SCRATCH_B_TARG_PCIE_INDEX_MASK (0x7< +#include "amd_powerplay.h" +#include "hwmgr.h" + +enum SMU_TABLE { + SMU_UVD_TABLE = 0, + SMU_VCE_TABLE, + SMU_BIF_TABLE, +}; + +enum SMU_TYPE { + SMU_SoftRegisters = 0, + SMU_Discrete_DpmTable, +}; + +enum SMU_MEMBER { + HandshakeDisables = 0, + VoltageChangeTimeout, + AverageGraphicsActivity, + AverageMemoryActivity, + PreVBlankGap, + VBlankTimeout, + UcodeLoadStatus, + UvdBootLevel, + VceBootLevel, + LowSclkInterruptThreshold, + DRAM_LOG_ADDR_H, + DRAM_LOG_ADDR_L, + DRAM_LOG_PHY_ADDR_H, + DRAM_LOG_PHY_ADDR_L, + DRAM_LOG_BUFF_SIZE, +}; + + +enum SMU_MAC_DEFINITION { + SMU_MAX_LEVELS_GRAPHICS = 0, + SMU_MAX_LEVELS_MEMORY, + SMU_MAX_LEVELS_LINK, + SMU_MAX_ENTRIES_SMIO, + SMU_MAX_LEVELS_VDDC, + SMU_MAX_LEVELS_VDDGFX, + SMU_MAX_LEVELS_VDDCI, + SMU_MAX_LEVELS_MVDD, + SMU_UVD_MCLK_HANDSHAKE_DISABLE, +}; + +enum SMU9_TABLE_ID { + PPTABLE = 0, + WMTABLE, + AVFSTABLE, + TOOLSTABLE, + AVFSFUSETABLE +}; + +enum SMU10_TABLE_ID { + SMU10_WMTABLE = 0, + SMU10_CLOCKTABLE, +}; + +extern int smum_download_powerplay_table(struct pp_hwmgr *hwmgr, void **table); + +extern int smum_upload_powerplay_table(struct pp_hwmgr *hwmgr); + +extern int smum_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg, uint32_t *resp); + +extern int smum_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, + uint16_t msg, uint32_t parameter, + uint32_t *resp); + +extern int smum_update_sclk_threshold(struct pp_hwmgr *hwmgr); + +extern int smum_update_smc_table(struct pp_hwmgr *hwmgr, uint32_t type); +extern int smum_process_firmware_header(struct pp_hwmgr *hwmgr); +extern int smum_thermal_avfs_enable(struct pp_hwmgr *hwmgr); +extern int smum_thermal_setup_fan_table(struct pp_hwmgr *hwmgr); +extern int smum_init_smc_table(struct pp_hwmgr *hwmgr); 
+extern int smum_populate_all_graphic_levels(struct pp_hwmgr *hwmgr); +extern int smum_populate_all_memory_levels(struct pp_hwmgr *hwmgr); +extern int smum_initialize_mc_reg_table(struct pp_hwmgr *hwmgr); +extern uint32_t smum_get_offsetof(struct pp_hwmgr *hwmgr, + uint32_t type, uint32_t member); +extern uint32_t smum_get_mac_definition(struct pp_hwmgr *hwmgr, uint32_t value); + +extern bool smum_is_dpm_running(struct pp_hwmgr *hwmgr); + +extern bool smum_is_hw_avfs_present(struct pp_hwmgr *hwmgr); + +extern int smum_update_dpm_settings(struct pp_hwmgr *hwmgr, void *profile_setting); + +extern int smum_smc_table_manager(struct pp_hwmgr *hwmgr, uint8_t *table, uint16_t table_id, bool rw); + +extern int smum_stop_smc(struct pp_hwmgr *hwmgr); + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h new file mode 100644 index 000000000000..63631296d751 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h @@ -0,0 +1,420 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef TONGA_PP_SMC_H +#define TONGA_PP_SMC_H + +#pragma pack(push, 1) + +#define PPSMC_SWSTATE_FLAG_DC 0x01 +#define PPSMC_SWSTATE_FLAG_UVD 0x02 +#define PPSMC_SWSTATE_FLAG_VCE 0x04 +#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08 + +#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 +#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 +#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff + +#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 +#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 +#define PPSMC_SYSTEMFLAG_GDDR5 0x04 + +#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 + +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 +#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 +#define PPSMC_SYSTEMFLAG_12CHANNEL 0x40 + + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 +#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 + +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 +#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 + +#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x10 +#define PPSMC_EXTRAFLAGS_DRIVER_TO_GPIO17 0x20 +#define PPSMC_EXTRAFLAGS_PCC_TO_GPIO17 0x40 + +/* Defines for DPM 2.0 */ +#define PPSMC_DPM2FLAGS_TDPCLMP 0x01 +#define PPSMC_DPM2FLAGS_PWRSHFT 0x02 +#define PPSMC_DPM2FLAGS_OCP 0x04 + +/* Defines for display watermark level */ + +#define PPSMC_DISPLAY_WATERMARK_LOW 0 +#define PPSMC_DISPLAY_WATERMARK_HIGH 1 + +/* In the HW performance level's state flags:*/ +#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 +#define PPSMC_STATEFLAG_POWERBOOST 0x02 +#define PPSMC_STATEFLAG_PSKIP_ON_TDP_FAULT 0x04 +#define PPSMC_STATEFLAG_POWERSHIFT 0x08 +#define PPSMC_STATEFLAG_SLOW_READ_MARGIN 0x10 +#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 +#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 + +/* Fan control algorithm:*/ +#define FDO_MODE_HARDWARE 0 +#define FDO_MODE_PIECE_WISE_LINEAR 1 + +enum FAN_CONTROL { + FAN_CONTROL_FUZZY, + FAN_CONTROL_TABLE +}; + +/* Return codes for driver to SMC communication.*/ + +#define PPSMC_Result_OK ((uint16_t)0x01) +#define PPSMC_Result_NoMore ((uint16_t)0x02) +#define PPSMC_Result_NotNow ((uint16_t)0x03) + +#define PPSMC_Result_Failed ((uint16_t)0xFF) +#define PPSMC_Result_UnknownCmd ((uint16_t)0xFE) +#define PPSMC_Result_UnknownVT ((uint16_t)0xFD) + +typedef uint16_t PPSMC_Result; + +#define PPSMC_isERROR(x) ((uint16_t)0x80 & (x)) + + +#define PPSMC_MSG_Halt ((uint16_t)0x10) +#define PPSMC_MSG_Resume ((uint16_t)0x11) +#define PPSMC_MSG_EnableDPMLevel ((uint16_t)0x12) +#define PPSMC_MSG_ZeroLevelsDisabled ((uint16_t)0x13) +#define PPSMC_MSG_OneLevelsDisabled ((uint16_t)0x14) +#define PPSMC_MSG_TwoLevelsDisabled ((uint16_t)0x15) +#define PPSMC_MSG_EnableThermalInterrupt ((uint16_t)0x16) +#define PPSMC_MSG_RunningOnAC ((uint16_t)0x17) +#define PPSMC_MSG_LevelUp ((uint16_t)0x18) +#define PPSMC_MSG_LevelDown ((uint16_t)0x19) +#define PPSMC_MSG_ResetDPMCounters ((uint16_t)0x1a) +#define PPSMC_MSG_SwitchToSwState ((uint16_t)0x20) + +#define PPSMC_MSG_SwitchToSwStateLast ((uint16_t)0x3f) +#define PPSMC_MSG_SwitchToInitialState ((uint16_t)0x40) +#define PPSMC_MSG_NoForcedLevel ((uint16_t)0x41) +#define PPSMC_MSG_ForceHigh ((uint16_t)0x42) +#define PPSMC_MSG_ForceMediumOrHigh ((uint16_t)0x43) + +#define PPSMC_MSG_SwitchToMinimumPower ((uint16_t)0x51) +#define PPSMC_MSG_ResumeFromMinimumPower ((uint16_t)0x52) +#define PPSMC_MSG_EnableCac ((uint16_t)0x53) +#define PPSMC_MSG_DisableCac ((uint16_t)0x54) +#define PPSMC_DPMStateHistoryStart ((uint16_t)0x55) +#define PPSMC_DPMStateHistoryStop ((uint16_t)0x56) +#define PPSMC_CACHistoryStart ((uint16_t)0x57) +#define 
PPSMC_CACHistoryStop ((uint16_t)0x58) +#define PPSMC_TDPClampingActive ((uint16_t)0x59) +#define PPSMC_TDPClampingInactive ((uint16_t)0x5A) +#define PPSMC_StartFanControl ((uint16_t)0x5B) +#define PPSMC_StopFanControl ((uint16_t)0x5C) +#define PPSMC_NoDisplay ((uint16_t)0x5D) +#define PPSMC_HasDisplay ((uint16_t)0x5E) +#define PPSMC_MSG_UVDPowerOFF ((uint16_t)0x60) +#define PPSMC_MSG_UVDPowerON ((uint16_t)0x61) +#define PPSMC_MSG_EnableULV ((uint16_t)0x62) +#define PPSMC_MSG_DisableULV ((uint16_t)0x63) +#define PPSMC_MSG_EnterULV ((uint16_t)0x64) +#define PPSMC_MSG_ExitULV ((uint16_t)0x65) +#define PPSMC_PowerShiftActive ((uint16_t)0x6A) +#define PPSMC_PowerShiftInactive ((uint16_t)0x6B) +#define PPSMC_OCPActive ((uint16_t)0x6C) +#define PPSMC_OCPInactive ((uint16_t)0x6D) +#define PPSMC_CACLongTermAvgEnable ((uint16_t)0x6E) +#define PPSMC_CACLongTermAvgDisable ((uint16_t)0x6F) +#define PPSMC_MSG_InferredStateSweep_Start ((uint16_t)0x70) +#define PPSMC_MSG_InferredStateSweep_Stop ((uint16_t)0x71) +#define PPSMC_MSG_SwitchToLowestInfState ((uint16_t)0x72) +#define PPSMC_MSG_SwitchToNonInfState ((uint16_t)0x73) +#define PPSMC_MSG_AllStateSweep_Start ((uint16_t)0x74) +#define PPSMC_MSG_AllStateSweep_Stop ((uint16_t)0x75) +#define PPSMC_MSG_SwitchNextLowerInfState ((uint16_t)0x76) +#define PPSMC_MSG_SwitchNextHigherInfState ((uint16_t)0x77) +#define PPSMC_MSG_MclkRetrainingTest ((uint16_t)0x78) +#define PPSMC_MSG_ForceTDPClamping ((uint16_t)0x79) +#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint16_t)0x7A) +#define PPSMC_MSG_CollectCAC_WeightCalib ((uint16_t)0x7B) +#define PPSMC_MSG_CollectCAC_SQonly ((uint16_t)0x7C) +#define PPSMC_MSG_CollectCAC_TemperaturePwr ((uint16_t)0x7D) + +#define PPSMC_MSG_ExtremitiesTest_Start ((uint16_t)0x7E) +#define PPSMC_MSG_ExtremitiesTest_Stop ((uint16_t)0x7F) +#define PPSMC_FlushDataCache ((uint16_t)0x80) +#define PPSMC_FlushInstrCache ((uint16_t)0x81) + +#define PPSMC_MSG_SetEnabledLevels ((uint16_t)0x82) +#define PPSMC_MSG_SetForcedLevels ((uint16_t)0x83) + +#define PPSMC_MSG_ResetToDefaults ((uint16_t)0x84) + +#define PPSMC_MSG_SetForcedLevelsAndJump ((uint16_t)0x85) +#define PPSMC_MSG_SetCACHistoryMode ((uint16_t)0x86) +#define PPSMC_MSG_EnableDTE ((uint16_t)0x87) +#define PPSMC_MSG_DisableDTE ((uint16_t)0x88) + +#define PPSMC_MSG_SmcSpaceSetAddress ((uint16_t)0x89) +#define PPSMC_MSG_ChangeNearTDPLimit ((uint16_t)0x90) +#define PPSMC_MSG_ChangeSafePowerLimit ((uint16_t)0x91) + +#define PPSMC_MSG_DPMStateSweepStart ((uint16_t)0x92) +#define PPSMC_MSG_DPMStateSweepStop ((uint16_t)0x93) + +#define PPSMC_MSG_OVRDDisableSCLKDS ((uint16_t)0x94) +#define PPSMC_MSG_CancelDisableOVRDSCLKDS ((uint16_t)0x95) +#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint16_t)0x96) +#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint16_t)0x97) +#define PPSMC_MSG_GPIO17 ((uint16_t)0x98) + +#define PPSMC_MSG_API_SetSvi2Volt_Vddc ((uint16_t)0x99) +#define PPSMC_MSG_API_SetSvi2Volt_Vddci ((uint16_t)0x9A) +#define PPSMC_MSG_API_SetSvi2Volt_Mvdd ((uint16_t)0x9B) +#define PPSMC_MSG_API_GetSvi2Volt_Vddc ((uint16_t)0x9C) +#define PPSMC_MSG_API_GetSvi2Volt_Vddci ((uint16_t)0x9D) +#define PPSMC_MSG_API_GetSvi2Volt_Mvdd ((uint16_t)0x9E) + +#define PPSMC_MSG_BREAK ((uint16_t)0xF8) + +/* Trinity Specific Messages*/ +#define PPSMC_MSG_Test ((uint16_t) 0x100) +#define PPSMC_MSG_DPM_Voltage_Pwrmgt ((uint16_t) 0x101) +#define PPSMC_MSG_DPM_Config ((uint16_t) 0x102) +#define PPSMC_MSG_PM_Controller_Start ((uint16_t) 0x103) +#define PPSMC_MSG_DPM_ForceState ((uint16_t) 0x104) +#define 
PPSMC_MSG_PG_PowerDownSIMD ((uint16_t) 0x105) +#define PPSMC_MSG_PG_PowerUpSIMD ((uint16_t) 0x106) +#define PPSMC_MSG_PM_Controller_Stop ((uint16_t) 0x107) +#define PPSMC_MSG_PG_SIMD_Config ((uint16_t) 0x108) +#define PPSMC_MSG_Voltage_Cntl_Enable ((uint16_t) 0x109) +#define PPSMC_MSG_Thermal_Cntl_Enable ((uint16_t) 0x10a) +#define PPSMC_MSG_Reset_Service ((uint16_t) 0x10b) +#define PPSMC_MSG_VCEPowerOFF ((uint16_t) 0x10e) +#define PPSMC_MSG_VCEPowerON ((uint16_t) 0x10f) +#define PPSMC_MSG_DPM_Disable_VCE_HS ((uint16_t) 0x110) +#define PPSMC_MSG_DPM_Enable_VCE_HS ((uint16_t) 0x111) +#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint16_t) 0x112) +#define PPSMC_MSG_DCEPowerOFF ((uint16_t) 0x113) +#define PPSMC_MSG_DCEPowerON ((uint16_t) 0x114) +#define PPSMC_MSG_PCIE_DDIPowerDown ((uint16_t) 0x117) +#define PPSMC_MSG_PCIE_DDIPowerUp ((uint16_t) 0x118) +#define PPSMC_MSG_PCIE_CascadePLLPowerDown ((uint16_t) 0x119) +#define PPSMC_MSG_PCIE_CascadePLLPowerUp ((uint16_t) 0x11a) +#define PPSMC_MSG_SYSPLLPowerOff ((uint16_t) 0x11b) +#define PPSMC_MSG_SYSPLLPowerOn ((uint16_t) 0x11c) +#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint16_t) 0x11d) +#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint16_t) 0x11e) +#define PPSMC_MSG_DISPLAYPHYStatusNotify ((uint16_t) 0x11f) +#define PPSMC_MSG_EnableBAPM ((uint16_t) 0x120) +#define PPSMC_MSG_DisableBAPM ((uint16_t) 0x121) +#define PPSMC_MSG_PCIE_PHYPowerDown ((uint16_t) 0x122) +#define PPSMC_MSG_PCIE_PHYPowerUp ((uint16_t) 0x123) +#define PPSMC_MSG_UVD_DPM_Config ((uint16_t) 0x124) +#define PPSMC_MSG_Spmi_Enable ((uint16_t) 0x122) +#define PPSMC_MSG_Spmi_Timer ((uint16_t) 0x123) +#define PPSMC_MSG_LCLK_DPM_Config ((uint16_t) 0x124) +#define PPSMC_MSG_NBDPM_Config ((uint16_t) 0x125) +#define PPSMC_MSG_PCIE_DDIPhyPowerDown ((uint16_t) 0x126) +#define PPSMC_MSG_PCIE_DDIPhyPowerUp ((uint16_t) 0x127) +#define PPSMC_MSG_MCLKDPM_Config ((uint16_t) 0x128) + +#define PPSMC_MSG_UVDDPM_Config ((uint16_t) 0x129) +#define PPSMC_MSG_VCEDPM_Config ((uint16_t) 0x12A) +#define PPSMC_MSG_ACPDPM_Config ((uint16_t) 0x12B) +#define PPSMC_MSG_SAMUDPM_Config ((uint16_t) 0x12C) +#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) +#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) +#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) +#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) +#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) +#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) +#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) +#define PPSMC_MSG_SetTDPLimit ((uint16_t) 0x134) +#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) +#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) +#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) +#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) +#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) +#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) +#define PPSMC_MSG_SDMAPowerOFF ((uint16_t) 0x13b) +#define PPSMC_MSG_SDMAPowerON ((uint16_t) 0x13c) +#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) +#define PPSMC_MSG_IOMMUPowerOFF ((uint16_t) 0x13e) +#define PPSMC_MSG_IOMMUPowerON ((uint16_t) 0x13f) +#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) +#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) +#define PPSMC_MSG_NBDPM_ForceNominal ((uint16_t) 0x142) +#define PPSMC_MSG_NBDPM_ForcePerformance ((uint16_t) 0x143) +#define PPSMC_MSG_NBDPM_UnForce ((uint16_t) 0x144) +#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) +#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) 
+#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) +#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) +#define PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) +#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) +#define PPSMC_MSG_SwitchToAC ((uint16_t) 0x14b) + +#define PPSMC_MSG_XDMAPowerOFF ((uint16_t) 0x14c) +#define PPSMC_MSG_XDMAPowerON ((uint16_t) 0x14d) + +#define PPSMC_MSG_DPM_Enable ((uint16_t)0x14e) +#define PPSMC_MSG_DPM_Disable ((uint16_t)0x14f) +#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t)0x150) +#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t)0x151) +#define PPSMC_MSG_LCLKDPM_Enable ((uint16_t)0x152) +#define PPSMC_MSG_LCLKDPM_Disable ((uint16_t)0x153) +#define PPSMC_MSG_UVDDPM_Enable ((uint16_t)0x154) +#define PPSMC_MSG_UVDDPM_Disable ((uint16_t)0x155) +#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t)0x156) +#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t)0x157) +#define PPSMC_MSG_ACPDPM_Enable ((uint16_t)0x158) +#define PPSMC_MSG_ACPDPM_Disable ((uint16_t)0x159) +#define PPSMC_MSG_VCEDPM_Enable ((uint16_t)0x15a) +#define PPSMC_MSG_VCEDPM_Disable ((uint16_t)0x15b) +#define PPSMC_MSG_LCLKDPM_SetEnabledMask ((uint16_t)0x15c) + +#define PPSMC_MSG_DPM_FPS_Mode ((uint16_t) 0x15d) +#define PPSMC_MSG_DPM_Activity_Mode ((uint16_t) 0x15e) +#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) +#define PPSMC_MSG_MCLKDPM_GetEnabledMask ((uint16_t) 0x160) +#define PPSMC_MSG_LCLKDPM_GetEnabledMask ((uint16_t) 0x161) +#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) +#define PPSMC_MSG_UVDDPM_GetEnabledMask ((uint16_t) 0x163) +#define PPSMC_MSG_SAMUDPM_GetEnabledMask ((uint16_t) 0x164) +#define PPSMC_MSG_ACPDPM_GetEnabledMask ((uint16_t) 0x165) +#define PPSMC_MSG_VCEDPM_GetEnabledMask ((uint16_t) 0x166) +#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) +#define PPSMC_MSG_PCIeDPM_GetEnabledMask ((uint16_t) 0x168) +#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) +#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) +#define PPSMC_MSG_DPM_AutoRotate_Mode ((uint16_t) 0x16b) +#define PPSMC_MSG_DISPCLK_FROM_FCH ((uint16_t)0x16c) +#define PPSMC_MSG_DISPCLK_FROM_DFS ((uint16_t)0x16d) +#define PPSMC_MSG_DPREFCLK_FROM_FCH ((uint16_t)0x16e) +#define PPSMC_MSG_DPREFCLK_FROM_DFS ((uint16_t)0x16f) +#define PPSMC_MSG_PmStatusLogStart ((uint16_t)0x170) +#define PPSMC_MSG_PmStatusLogSample ((uint16_t)0x171) +#define PPSMC_MSG_SCLK_AutoDPM_ON ((uint16_t) 0x172) +#define PPSMC_MSG_MCLK_AutoDPM_ON ((uint16_t) 0x173) +#define PPSMC_MSG_LCLK_AutoDPM_ON ((uint16_t) 0x174) +#define PPSMC_MSG_UVD_AutoDPM_ON ((uint16_t) 0x175) +#define PPSMC_MSG_SAMU_AutoDPM_ON ((uint16_t) 0x176) +#define PPSMC_MSG_ACP_AutoDPM_ON ((uint16_t) 0x177) +#define PPSMC_MSG_VCE_AutoDPM_ON ((uint16_t) 0x178) +#define PPSMC_MSG_PCIe_AutoDPM_ON ((uint16_t) 0x179) +#define PPSMC_MSG_MASTER_AutoDPM_ON ((uint16_t) 0x17a) +#define PPSMC_MSG_MASTER_AutoDPM_OFF ((uint16_t) 0x17b) +#define PPSMC_MSG_DYNAMICDISPPHYPOWER ((uint16_t) 0x17c) +#define PPSMC_MSG_CAC_COLLECTION_ON ((uint16_t) 0x17d) +#define PPSMC_MSG_CAC_COLLECTION_OFF ((uint16_t) 0x17e) +#define PPSMC_MSG_CAC_CORRELATION_ON ((uint16_t) 0x17f) +#define PPSMC_MSG_CAC_CORRELATION_OFF ((uint16_t) 0x180) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_ON ((uint16_t) 0x181) +#define PPSMC_MSG_PM_STATUS_TO_DRAM_OFF ((uint16_t) 0x182) +#define PPSMC_MSG_UVD_HANDSHAKE_OFF ((uint16_t) 0x183) +#define PPSMC_MSG_ALLOW_LOWSCLK_INTERRUPT ((uint16_t) 0x184) +#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) +#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) 
+#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) +#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) +#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) +#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) +#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) +#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) +#define PPSMC_MSG_START_DRAM_LOGGING ((uint16_t) 0x18D) +#define PPSMC_MSG_STOP_DRAM_LOGGING ((uint16_t) 0x18E) +#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) +#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) +#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) +#define PPSMC_MSG_DisableACDCGPIOInterrupt ((uint16_t) 0x192) +#define PPSMC_MSG_OverrideVoltageControl_SetVddc ((uint16_t) 0x193) +#define PPSMC_MSG_OverrideVoltageControl_SetVddci ((uint16_t) 0x194) +#define PPSMC_MSG_SetVidOffset_1 ((uint16_t) 0x195) +#define PPSMC_MSG_SetVidOffset_2 ((uint16_t) 0x207) +#define PPSMC_MSG_GetVidOffset_1 ((uint16_t) 0x196) +#define PPSMC_MSG_GetVidOffset_2 ((uint16_t) 0x208) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Enable ((uint16_t) 0x197) +#define PPSMC_MSG_THERMAL_OVERDRIVE_Disable ((uint16_t) 0x198) +#define PPSMC_MSG_SetTjMax ((uint16_t) 0x199) +#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) + +#define PPSMC_MSG_WaitForMclkSwitchFinish ((uint16_t) 0x19B) +#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) +#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) +#define PPSMC_MSG_Enable_PCC ((uint16_t) 0x19E) +#define PPSMC_MSG_Disable_PCC ((uint16_t) 0x19F) + +#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) +#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) +#define PPSMC_MSG_API_GetSclkBusy ((uint16_t) 0x202) +#define PPSMC_MSG_API_GetMclkBusy ((uint16_t) 0x203) +#define PPSMC_MSG_API_GetAsicPower ((uint16_t) 0x204) +#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) +#define PPSMC_MSG_SetFanSclkTarget ((uint16_t) 0x206) +#define PPSMC_MSG_SetFanMinPwm ((uint16_t) 0x209) +#define PPSMC_MSG_SetFanTemperatureTarget ((uint16_t) 0x20A) + +#define PPSMC_MSG_BACO_StartMonitor ((uint16_t) 0x240) +#define PPSMC_MSG_BACO_Cancel ((uint16_t) 0x241) +#define PPSMC_MSG_EnableVddGfx ((uint16_t) 0x242) +#define PPSMC_MSG_DisableVddGfx ((uint16_t) 0x243) +#define PPSMC_MSG_UcodeAddressLow ((uint16_t) 0x244) +#define PPSMC_MSG_UcodeAddressHigh ((uint16_t) 0x245) +#define PPSMC_MSG_UcodeLoadStatus ((uint16_t) 0x246) + +#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) +#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) +#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) +#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) +#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) +#define PPSMC_MSG_PowerStateNotify ((uint16_t) 0x255) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_HI ((uint16_t) 0x256) +#define PPSMC_MSG_COND_EXEC_DRAM_ADDR_LO ((uint16_t) 0x257) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_HI ((uint16_t) 0x258) +#define PPSMC_MSG_VBIOS_DRAM_ADDR_LO ((uint16_t) 0x259) +#define PPSMC_MSG_LoadVBios ((uint16_t) 0x25A) +#define PPSMC_MSG_GetUcodeVersion ((uint16_t) 0x25B) +#define DMCUSMC_MSG_PSREntry ((uint16_t) 0x25C) +#define DMCUSMC_MSG_PSRExit ((uint16_t) 0x25D) +#define PPSMC_MSG_EnableClockGatingFeature ((uint16_t) 0x260) +#define PPSMC_MSG_DisableClockGatingFeature ((uint16_t) 0x261) +#define PPSMC_MSG_IsDeviceRunning ((uint16_t) 0x262) +#define PPSMC_MSG_LoadMetaData ((uint16_t) 0x263) +#define PPSMC_MSG_TMON_AutoCaliberate_Enable ((uint16_t) 0x264) +#define PPSMC_MSG_TMON_AutoCaliberate_Disable ((uint16_t) 0x265) 
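[The IDs in this block are driver-to-SMC mailbox commands sent through the smum_* wrappers declared earlier in this series; the list continues below with the telemetry queries. Per the result codes near the top of this header, a failing command reads back with bit 7 set, which is what PPSMC_isERROR() tests (e.g. PPSMC_Result_Failed, 0xFF). A hedged sketch for one of them, assuming PkgPwrSetLimit carries the new package power limit as its single argument:

#include "hwmgr.h"
#include "smumgr.h"
#include "tonga_ppsmc.h"	/* this header */

static int example_set_pkg_power_limit(struct pp_hwmgr *hwmgr, uint32_t limit)
{
	/*
	 * Send the new limit as the single message parameter; the reply
	 * value is not needed, so a NULL response pointer is passed.
	 */
	return smum_send_msg_to_smc_with_parameter(hwmgr,
			PPSMC_MSG_PkgPwrSetLimit, limit, NULL);
}
]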
+#define PPSMC_MSG_GetTelemetry1Slope ((uint16_t) 0x266) +#define PPSMC_MSG_GetTelemetry1Offset ((uint16_t) 0x267) +#define PPSMC_MSG_GetTelemetry2Slope ((uint16_t) 0x268) +#define PPSMC_MSG_GetTelemetry2Offset ((uint16_t) 0x269) + +typedef uint16_t PPSMC_Msg; + +/* If the SMC firmware has an event status soft register this is what the individual bits mean.*/ +#define PPSMC_EVENT_STATUS_THERMAL 0x00000001 +#define PPSMC_EVENT_STATUS_REGULATORHOT 0x00000002 +#define PPSMC_EVENT_STATUS_DC 0x00000004 +#define PPSMC_EVENT_STATUS_GPIO17 0x00000008 + + +#pragma pack(pop) +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h new file mode 100644 index 000000000000..715b5a168831 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h @@ -0,0 +1,144 @@ +/* + * Copyright 2016 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef PP_SMC_H +#define PP_SMC_H + +#pragma pack(push, 1) + +#define SMU_UCODE_VERSION 0x001c0800 + +/* SMU Response Codes: */ +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +typedef uint16_t PPSMC_Result; + +/* Message Definitions */ +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_EnableSmuFeatures 0x4 +#define PPSMC_MSG_DisableSmuFeatures 0x5 +#define PPSMC_MSG_GetEnabledSmuFeatures 0x6 +#define PPSMC_MSG_SetWorkloadMask 0x7 +#define PPSMC_MSG_SetPptLimit 0x8 +#define PPSMC_MSG_SetDriverDramAddrHigh 0x9 +#define PPSMC_MSG_SetDriverDramAddrLow 0xA +#define PPSMC_MSG_SetToolsDramAddrHigh 0xB +#define PPSMC_MSG_SetToolsDramAddrLow 0xC +#define PPSMC_MSG_TransferTableSmu2Dram 0xD +#define PPSMC_MSG_TransferTableDram2Smu 0xE +#define PPSMC_MSG_UseDefaultPPTable 0xF +#define PPSMC_MSG_UseBackupPPTable 0x10 +#define PPSMC_MSG_RunBtc 0x11 +#define PPSMC_MSG_RequestI2CBus 0x12 +#define PPSMC_MSG_ReleaseI2CBus 0x13 +#define PPSMC_MSG_ConfigureTelemetry 0x14 +#define PPSMC_MSG_SetUlvIpMask 0x15 +#define PPSMC_MSG_SetSocVidOffset 0x16 +#define PPSMC_MSG_SetMemVidOffset 0x17 +#define PPSMC_MSG_GetSocVidOffset 0x18 +#define PPSMC_MSG_GetMemVidOffset 0x19 +#define PPSMC_MSG_SetFloorSocVoltage 0x1A +#define PPSMC_MSG_SoftReset 0x1B +#define PPSMC_MSG_StartBacoMonitor 0x1C +#define PPSMC_MSG_CancelBacoMonitor 0x1D +#define PPSMC_MSG_EnterBaco 0x1E +#define PPSMC_MSG_AllowLowGfxclkInterrupt 0x1F +#define PPSMC_MSG_SetLowGfxclkInterruptThreshold 0x20 +#define PPSMC_MSG_SetSoftMinGfxclkByIndex 0x21 +#define PPSMC_MSG_SetSoftMaxGfxclkByIndex 0x22 +#define PPSMC_MSG_GetCurrentGfxclkIndex 0x23 +#define PPSMC_MSG_SetSoftMinUclkByIndex 0x24 +#define PPSMC_MSG_SetSoftMaxUclkByIndex 0x25 +#define PPSMC_MSG_GetCurrentUclkIndex 0x26 +#define PPSMC_MSG_SetSoftMinUvdByIndex 0x27 +#define PPSMC_MSG_SetSoftMaxUvdByIndex 0x28 +#define PPSMC_MSG_GetCurrentUvdIndex 0x29 +#define PPSMC_MSG_SetSoftMinVceByIndex 0x2A +#define PPSMC_MSG_SetSoftMaxVceByIndex 0x2B +#define PPSMC_MSG_SetHardMinVceByIndex 0x2C +#define PPSMC_MSG_GetCurrentVceIndex 0x2D +#define PPSMC_MSG_SetSoftMinSocclkByIndex 0x2E +#define PPSMC_MSG_SetHardMinSocclkByIndex 0x2F +#define PPSMC_MSG_SetSoftMaxSocclkByIndex 0x30 +#define PPSMC_MSG_GetCurrentSocclkIndex 0x31 +#define PPSMC_MSG_SetMinLinkDpmByIndex 0x32 +#define PPSMC_MSG_GetCurrentLinkIndex 0x33 +#define PPSMC_MSG_GetAverageGfxclkFrequency 0x34 +#define PPSMC_MSG_GetAverageSocclkFrequency 0x35 +#define PPSMC_MSG_GetAverageUclkFrequency 0x36 +#define PPSMC_MSG_GetAverageGfxActivity 0x37 +#define PPSMC_MSG_GetTemperatureEdge 0x38 +#define PPSMC_MSG_GetTemperatureHotspot 0x39 +#define PPSMC_MSG_GetTemperatureHBM 0x3A +#define PPSMC_MSG_GetTemperatureVrSoc 0x3B +#define PPSMC_MSG_GetTemperatureVrMem 0x3C +#define PPSMC_MSG_GetTemperatureLiquid 0x3D +#define PPSMC_MSG_GetTemperaturePlx 0x3E +#define PPSMC_MSG_OverDriveSetPercentage 0x3F +#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x40 +#define PPSMC_MSG_SwitchToAC 0x41 +#define PPSMC_MSG_SetUclkFastSwitch 0x42 +#define PPSMC_MSG_SetUclkDownHyst 0x43 +#define PPSMC_MSG_RemoveDCClamp 0x44 +#define PPSMC_MSG_GfxDeviceDriverReset 0x45 +#define PPSMC_MSG_GetCurrentRpm 0x46 +#define PPSMC_MSG_SetVideoFps 0x47 +#define PPSMC_MSG_SetCustomGfxDpmParameters 0x48 +#define PPSMC_MSG_SetTjMax 0x49 +#define 
PPSMC_MSG_SetFanTemperatureTarget 0x4A +#define PPSMC_MSG_PrepareMp1ForUnload 0x4B +#define PPSMC_MSG_RequestDisplayClockByFreq 0x4C +#define PPSMC_MSG_GetClockFreqMHz 0x4D +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x4E +#define PPSMC_MSG_DramLogSetDramAddrLow 0x4F +#define PPSMC_MSG_DramLogSetDramSize 0x50 +#define PPSMC_MSG_SetFanMaxRpm 0x51 +#define PPSMC_MSG_SetFanMinPwm 0x52 +#define PPSMC_MSG_ConfigureGfxDidt 0x55 +#define PPSMC_MSG_NumOfDisplays 0x56 +#define PPSMC_MSG_ReadSerialNumTop32 0x58 +#define PPSMC_MSG_ReadSerialNumBottom32 0x59 +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x5A +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x5B +#define PPSMC_MSG_RunAcgBtc 0x5C +#define PPSMC_MSG_RunAcgInClosedLoop 0x5D +#define PPSMC_MSG_RunAcgInOpenLoop 0x5E +#define PPSMC_MSG_InitializeAcg 0x5F +#define PPSMC_MSG_GetCurrPkgPwr 0x61 +#define PPSMC_MSG_GetAverageGfxclkActualFrequency 0x63 +#define PPSMC_MSG_SetPccThrottleLevel 0x67 +#define PPSMC_MSG_UpdatePkgPwrPidAlpha 0x68 +#define PPSMC_Message_Count 0x69 + + +typedef int PPSMC_Msg; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h new file mode 100644 index 000000000000..b6ffd08784e7 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h @@ -0,0 +1,767 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef VEGA12_SMU9_DRIVER_IF_H +#define VEGA12_SMU9_DRIVER_IF_H + +/**** IMPORTANT *** + * SMU TEAM: Always increment the interface version if + * any structure is changed in this file + */ +#define SMU9_DRIVER_IF_VERSION 0x10 + +#define PPTABLE_V12_SMU_VERSION 1 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_ECLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_DCEFCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_PIXCLK_DPM_LEVELS 8 +#define NUM_PHYCLK_DPM_LEVELS 8 +#define NUM_LINK_LEVELS 2 + +#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) +#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) +#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) +#define MAX_ECLK_DPM_LEVEL (NUM_ECLK_DPM_LEVELS - 1) +#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) +#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) +#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) +#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) +#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) +#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) +#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) +#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) + + +#define PPSMC_GeminiModeNone 0 +#define PPSMC_GeminiModeMaster 1 +#define PPSMC_GeminiModeSlave 2 + + +#define FEATURE_DPM_PREFETCHER_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_UCLK_BIT 2 +#define FEATURE_DPM_SOCCLK_BIT 3 +#define FEATURE_DPM_UVD_BIT 4 +#define FEATURE_DPM_VCE_BIT 5 +#define FEATURE_ULV_BIT 6 +#define FEATURE_DPM_MP0CLK_BIT 7 +#define FEATURE_DPM_LINK_BIT 8 +#define FEATURE_DPM_DCEFCLK_BIT 9 +#define FEATURE_DS_GFXCLK_BIT 10 +#define FEATURE_DS_SOCCLK_BIT 11 +#define FEATURE_DS_LCLK_BIT 12 +#define FEATURE_PPT_BIT 13 +#define FEATURE_TDC_BIT 14 +#define FEATURE_THERMAL_BIT 15 +#define FEATURE_GFX_PER_CU_CG_BIT 16 +#define FEATURE_RM_BIT 17 +#define FEATURE_DS_DCEFCLK_BIT 18 +#define FEATURE_ACDC_BIT 19 +#define FEATURE_VR0HOT_BIT 20 +#define FEATURE_VR1HOT_BIT 21 +#define FEATURE_FW_CTF_BIT 22 +#define FEATURE_LED_DISPLAY_BIT 23 +#define FEATURE_FAN_CONTROL_BIT 24 +#define FEATURE_GFX_EDC_BIT 25 +#define FEATURE_GFXOFF_BIT 26 +#define FEATURE_CG_BIT 27 +#define FEATURE_ACG_BIT 28 +#define FEATURE_SPARE_29_BIT 29 +#define FEATURE_SPARE_30_BIT 30 +#define FEATURE_SPARE_31_BIT 31 + +#define NUM_FEATURES 32 + +#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT ) +#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT ) +#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT ) +#define FEATURE_DPM_SOCCLK_MASK (1 << FEATURE_DPM_SOCCLK_BIT ) +#define FEATURE_DPM_UVD_MASK (1 << FEATURE_DPM_UVD_BIT ) +#define FEATURE_DPM_VCE_MASK (1 << FEATURE_DPM_VCE_BIT ) +#define FEATURE_ULV_MASK (1 << FEATURE_ULV_BIT ) +#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT ) +#define FEATURE_DPM_LINK_MASK (1 << FEATURE_DPM_LINK_BIT ) +#define FEATURE_DPM_DCEFCLK_MASK (1 << FEATURE_DPM_DCEFCLK_BIT ) +#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT ) +#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) +#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) +#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) +#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) +#define FEATURE_GFX_PER_CU_CG_MASK (1 << FEATURE_GFX_PER_CU_CG_BIT ) +#define FEATURE_RM_MASK (1 
<< FEATURE_RM_BIT ) +#define FEATURE_DS_DCEFCLK_MASK (1 << FEATURE_DS_DCEFCLK_BIT ) +#define FEATURE_ACDC_MASK (1 << FEATURE_ACDC_BIT ) +#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) +#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) +#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) +#define FEATURE_LED_DISPLAY_MASK (1 << FEATURE_LED_DISPLAY_BIT ) +#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) +#define FEATURE_GFX_EDC_MASK (1 << FEATURE_GFX_EDC_BIT ) +#define FEATURE_GFXOFF_MASK (1 << FEATURE_GFXOFF_BIT ) +#define FEATURE_CG_MASK (1 << FEATURE_CG_BIT ) +#define FEATURE_ACG_MASK (1 << FEATURE_ACG_BIT) +#define FEATURE_SPARE_29_MASK (1 << FEATURE_SPARE_29_BIT ) +#define FEATURE_SPARE_30_MASK (1 << FEATURE_SPARE_30_BIT ) +#define FEATURE_SPARE_31_MASK (1 << FEATURE_SPARE_31_BIT ) + + +#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 +#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_SOCCLK 0x00000004 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_UVD_UCLK 0x00000008 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000010 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_UCLK 0x00000020 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000040 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_UCLK 0x00000080 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_SOCCLK 0x00000100 +#define DPM_OVERRIDE_ENABLE_VOLT_LINK_VCE_UCLK 0x00000200 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_SOCCLK 0x00000400 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_ECLK_UCLK 0x00000800 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00001000 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00002000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_GFXCLK_SWITCH 0x00004000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_SOCCLK_SWITCH 0x00008000 +#define DPM_OVERRIDE_ENABLE_GFXOFF_UCLK_SWITCH 0x00010000 + + +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + + +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + + +#define THROTTLER_STATUS_PADDING_BIT 0 +#define THROTTLER_STATUS_TEMP_EDGE_BIT 1 +#define THROTTLER_STATUS_TEMP_HOTSPOT_BIT 2 +#define THROTTLER_STATUS_TEMP_HBM_BIT 3 +#define THROTTLER_STATUS_TEMP_VR_GFX_BIT 4 +#define THROTTLER_STATUS_TEMP_VR_MEM_BIT 5 +#define THROTTLER_STATUS_TEMP_LIQUID_BIT 6 +#define THROTTLER_STATUS_TEMP_PLX_BIT 7 +#define THROTTLER_STATUS_TEMP_SKIN_BIT 8 +#define THROTTLER_STATUS_TDC_GFX_BIT 9 +#define THROTTLER_STATUS_TDC_SOC_BIT 10 +#define THROTTLER_STATUS_PPT_BIT 11 +#define THROTTLER_STATUS_FIT_BIT 12 +#define THROTTLER_STATUS_PPM_BIT 13 + + +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF + + +#define WORKLOAD_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_COUNT 7 + +typedef struct { + uint32_t a; + uint32_t b; + uint32_t c; +} QuadraticInt_t; + +typedef struct { + uint32_t m; + uint32_t b; +} LinearInt_t; + +typedef struct { + uint32_t a; + uint32_t b; + uint32_t c; +} DroopInt_t; + +typedef enum { + PPCLK_GFXCLK, + PPCLK_VCLK, + 
PPCLK_DCLK, + PPCLK_ECLK, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_DCEFCLK, + PPCLK_DISPCLK, + PPCLK_PIXCLK, + PPCLK_PHYCLK, + PPCLK_COUNT, +} PPCLK_e; + +enum { + VOLTAGE_MODE_AVFS, + VOLTAGE_MODE_AVFS_SS, + VOLTAGE_MODE_SS, + VOLTAGE_MODE_COUNT, +}; + +typedef struct { + uint8_t VoltageMode; + uint8_t SnapToDiscrete; + uint8_t NumDiscreteLevels; + uint8_t padding; + LinearInt_t ConversionToAvfsClk; + QuadraticInt_t SsCurve; +} DpmDescriptor_t; + +typedef struct { + uint32_t Version; + + + uint32_t FeaturesToRun[2]; + + + uint16_t SocketPowerLimitAc0; + uint16_t SocketPowerLimitAc0Tau; + uint16_t SocketPowerLimitAc1; + uint16_t SocketPowerLimitAc1Tau; + uint16_t SocketPowerLimitAc2; + uint16_t SocketPowerLimitAc2Tau; + uint16_t SocketPowerLimitAc3; + uint16_t SocketPowerLimitAc3Tau; + uint16_t SocketPowerLimitDc; + uint16_t SocketPowerLimitDcTau; + uint16_t TdcLimitSoc; + uint16_t TdcLimitSocTau; + uint16_t TdcLimitGfx; + uint16_t TdcLimitGfxTau; + + uint16_t TedgeLimit; + uint16_t ThotspotLimit; + uint16_t ThbmLimit; + uint16_t Tvr_gfxLimit; + uint16_t Tvr_memLimit; + uint16_t Tliquid1Limit; + uint16_t Tliquid2Limit; + uint16_t TplxLimit; + uint32_t FitLimit; + + uint16_t PpmPowerLimit; + uint16_t PpmTemperatureThreshold; + + uint8_t MemoryOnPackage; + uint8_t padding8_limits[3]; + + + uint16_t UlvVoltageOffsetSoc; + uint16_t UlvVoltageOffsetGfx; + + uint8_t UlvSmnclkDid; + uint8_t UlvMp1clkDid; + uint8_t UlvGfxclkBypass; + uint8_t Padding234; + + + uint16_t MinVoltageGfx; + uint16_t MinVoltageSoc; + uint16_t MaxVoltageGfx; + uint16_t MaxVoltageSoc; + + uint16_t LoadLineResistance; + uint16_t LoadLine_padding; + + + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; + uint16_t FreqTableEclk [NUM_ECLK_DPM_LEVELS ]; + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; + uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; + uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; + uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; + + uint16_t DcModeMaxFreq [PPCLK_COUNT ]; + + + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; + + + uint16_t GfxclkFidle; + uint16_t GfxclkSlewRate; + uint16_t CksEnableFreq; + uint16_t Padding789; + QuadraticInt_t CksVoltageOffset; + uint16_t AcgThresholdFreqHigh; + uint16_t AcgThresholdFreqLow; + uint16_t GfxclkDsMaxFreq; + uint8_t Padding456[2]; + + + uint8_t LowestUclkReservedForUlv; + uint8_t Padding8_Uclk[3]; + + + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + + uint16_t EnableTdpm; + uint16_t TdpmHighHystTemperature; + uint16_t TdpmLowHystTemperature; + uint16_t GfxclkFreqHighTempLimit; + + + uint16_t FanStopTemp; + uint16_t FanStartTemp; + + uint16_t FanGainEdge; + uint16_t FanGainHotspot; + uint16_t FanGainLiquid; + uint16_t FanGainVrVddc; + uint16_t FanGainVrMvdd; + uint16_t FanGainPlx; + uint16_t FanGainHbm; + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + + + + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + + + + uint8_t 
OverrideAvfsGb; + uint8_t Padding8_Avfs[3]; + + QuadraticInt_t qAvfsGb; + DroopInt_t dBtcGbGfxCksOn; + DroopInt_t dBtcGbGfxCksOff; + DroopInt_t dBtcGbGfxAcg; + DroopInt_t dBtcGbSoc; + LinearInt_t qAgingGbGfx; + LinearInt_t qAgingGbSoc; + + QuadraticInt_t qStaticVoltageOffsetGfx; + QuadraticInt_t qStaticVoltageOffsetSoc; + + uint16_t DcTolGfx; + uint16_t DcTolSoc; + + uint8_t DcBtcGfxEnabled; + uint8_t DcBtcSocEnabled; + uint8_t Padding8_GfxBtc[2]; + + uint16_t DcBtcGfxMin; + uint16_t DcBtcGfxMax; + + uint16_t DcBtcSocMin; + uint16_t DcBtcSocMax; + + + + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + uint16_t MinVoltageUlvGfx; + uint16_t MinVoltageUlvSoc; + + uint32_t Reserved[14]; + + + + uint8_t Liquid1_I2C_address; + uint8_t Liquid2_I2C_address; + uint8_t Vr_I2C_address; + uint8_t Plx_I2C_address; + + uint8_t Liquid_I2C_LineSCL; + uint8_t Liquid_I2C_LineSDA; + uint8_t Vr_I2C_LineSCL; + uint8_t Vr_I2C_LineSDA; + + uint8_t Plx_I2C_LineSCL; + uint8_t Plx_I2C_LineSDA; + uint8_t VrSensorPresent; + uint8_t LiquidSensorPresent; + + uint16_t MaxVoltageStepGfx; + uint16_t MaxVoltageStepSoc; + + uint8_t VddGfxVrMapping; + uint8_t VddSocVrMapping; + uint8_t VddMem0VrMapping; + uint8_t VddMem1VrMapping; + + uint8_t GfxUlvPhaseSheddingMask; + uint8_t SocUlvPhaseSheddingMask; + uint8_t ExternalSensorPresent; + uint8_t Padding8_V; + + + uint16_t GfxMaxCurrent; + int8_t GfxOffset; + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; + int8_t SocOffset; + uint8_t Padding_TelemetrySoc; + + uint16_t Mem0MaxCurrent; + int8_t Mem0Offset; + uint8_t Padding_TelemetryMem0; + + uint16_t Mem1MaxCurrent; + int8_t Mem1Offset; + uint8_t Padding_TelemetryMem1; + + + uint8_t AcDcGpio; + uint8_t AcDcPolarity; + uint8_t VR0HotGpio; + uint8_t VR0HotPolarity; + + uint8_t VR1HotGpio; + uint8_t VR1HotPolarity; + uint8_t Padding1; + uint8_t Padding2; + + + + uint8_t LedPin0; + uint8_t LedPin1; + uint8_t LedPin2; + uint8_t padding8_4; + + + uint8_t PllGfxclkSpreadEnabled; + uint8_t PllGfxclkSpreadPercent; + uint16_t PllGfxclkSpreadFreq; + + uint8_t UclkSpreadEnabled; + uint8_t UclkSpreadPercent; + uint16_t UclkSpreadFreq; + + uint8_t SocclkSpreadEnabled; + uint8_t SocclkSpreadPercent; + uint16_t SocclkSpreadFreq; + + uint8_t AcgGfxclkSpreadEnabled; + uint8_t AcgGfxclkSpreadPercent; + uint16_t AcgGfxclkSpreadFreq; + + uint8_t Vr2_I2C_address; + uint8_t padding_vr2[3]; + + uint32_t BoardReserved[9]; + + + uint32_t MmHubPadding[7]; + +} PPTable_t; + +typedef struct { + + uint16_t GfxclkAverageLpfTau; + uint16_t SocclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + + + uint32_t MmHubPadding[7]; +} DriverSmuConfig_t; + +typedef struct { + + uint16_t GfxclkFmin; + uint16_t GfxclkFmax; + uint16_t GfxclkFreq1; + uint16_t GfxclkOffsetVolt1; + uint16_t GfxclkFreq2; + uint16_t GfxclkOffsetVolt2; + uint16_t GfxclkFreq3; + uint16_t GfxclkOffsetVolt3; + uint16_t UclkFmax; + int16_t OverDrivePct; + uint16_t FanMaximumRpm; + uint16_t FanMinimumPwm; + uint16_t FanTargetTemperature; + uint16_t MaxOpTemp; + +} OverDriveTable_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t 
Padding8 ; + uint16_t CurrSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureHBM ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem ; + uint16_t TemperatureLiquid ; + uint16_t TemperaturePlx ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding[3]; + + + uint32_t MmHubPadding[7]; +} SmuMetrics_t; + +typedef struct { + uint16_t MinClock; + uint16_t MaxClock; + uint16_t MinUclk; + uint16_t MaxUclk; + + uint8_t WmSetting; + uint8_t Padding[3]; +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WM_SOCCLK = 0, + WM_DCEFCLK, + WM_COUNT_PP, +} WM_CLOCK_e; + +typedef struct { + + WatermarkRowGeneric_t WatermarkRow[WM_COUNT_PP][NUM_WM_RANGES]; + + uint32_t MmHubPadding[7]; +} Watermarks_t; + +typedef struct { + uint16_t avgPsmCount[30]; + uint16_t minPsmCount[30]; + float avgPsmVoltage[30]; + float minPsmVoltage[30]; + + uint32_t MmHubPadding[7]; +} AvfsDebugTable_t; + +typedef struct { + uint8_t AvfsEn; + uint8_t AvfsVersion; + uint8_t OverrideVFT; + uint8_t OverrideAvfsGb; + + uint8_t OverrideTemperatures; + uint8_t OverrideVInversion; + uint8_t OverrideP2V; + uint8_t OverrideP2VCharzFreq; + + int32_t VFT0_m1; + int32_t VFT0_m2; + int32_t VFT0_b; + + int32_t VFT1_m1; + int32_t VFT1_m2; + int32_t VFT1_b; + + int32_t VFT2_m1; + int32_t VFT2_m2; + int32_t VFT2_b; + + int32_t AvfsGb0_m1; + int32_t AvfsGb0_m2; + int32_t AvfsGb0_b; + + int32_t AcBtcGb_m1; + int32_t AcBtcGb_m2; + int32_t AcBtcGb_b; + + uint32_t AvfsTempCold; + uint32_t AvfsTempMid; + uint32_t AvfsTempHot; + + uint32_t GfxVInversion; + uint32_t SocVInversion; + + int32_t P2V_m1; + int32_t P2V_m2; + int32_t P2V_b; + + uint32_t P2VCharzFreq; + + uint32_t EnabledAvfsModules; + + uint32_t MmHubPadding[7]; +} AvfsFuseOverride_t; + +typedef struct { + + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t Gfx_UseRlcBusy; + uint16_t Gfx_MinActiveFreq; + uint16_t Gfx_BoosterFreq; + uint16_t Gfx_PD_Data_time_constant; + uint32_t Gfx_PD_Data_limit_a; + uint32_t Gfx_PD_Data_limit_b; + uint32_t Gfx_PD_Data_limit_c; + uint32_t Gfx_PD_Data_error_coeff; + uint32_t Gfx_PD_Data_error_rate_coeff; + + uint8_t Soc_ActiveHystLimit; + uint8_t Soc_IdleHystLimit; + uint8_t Soc_FPS; + uint8_t Soc_MinActiveFreqType; + uint8_t Soc_BoosterFreqType; + uint8_t Soc_UseRlcBusy; + uint16_t Soc_MinActiveFreq; + uint16_t Soc_BoosterFreq; + uint16_t Soc_PD_Data_time_constant; + uint32_t Soc_PD_Data_limit_a; + uint32_t Soc_PD_Data_limit_b; + uint32_t Soc_PD_Data_limit_c; + uint32_t Soc_PD_Data_error_coeff; + uint32_t Soc_PD_Data_error_rate_coeff; + + uint8_t Mem_ActiveHystLimit; + uint8_t Mem_IdleHystLimit; + uint8_t Mem_FPS; + uint8_t Mem_MinActiveFreqType; + uint8_t Mem_BoosterFreqType; + uint8_t Mem_UseRlcBusy; + uint16_t Mem_MinActiveFreq; + uint16_t Mem_BoosterFreq; + uint16_t Mem_PD_Data_time_constant; + uint32_t Mem_PD_Data_limit_a; + uint32_t Mem_PD_Data_limit_b; + uint32_t Mem_PD_Data_limit_c; + uint32_t Mem_PD_Data_error_coeff; + uint32_t Mem_PD_Data_error_rate_coeff; + +} DpmActivityMonitorCoeffInt_t; + + + + +#define TABLE_PPTABLE 0 +#define TABLE_WATERMARKS 1 +#define TABLE_AVFS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_AVFS_FUSE_OVERRIDE 4 +#define TABLE_PMSTATUSLOG 5 +#define TABLE_SMU_METRICS 6 +#define TABLE_DRIVER_SMU_CONFIG 7 +#define TABLE_ACTIVITY_MONITOR_COEFF 8 +#define TABLE_OVERDRIVE 9 +#define TABLE_COUNT 10 + + +#define UCLK_SWITCH_SLOW 0 +#define 
UCLK_SWITCH_FAST 1 + + +#define SQ_Enable_MASK 0x1 +#define SQ_IR_MASK 0x2 +#define SQ_PCC_MASK 0x4 +#define SQ_EDC_MASK 0x8 + +#define TCP_Enable_MASK 0x100 +#define TCP_IR_MASK 0x200 +#define TCP_PCC_MASK 0x400 +#define TCP_EDC_MASK 0x800 + +#define TD_Enable_MASK 0x10000 +#define TD_IR_MASK 0x20000 +#define TD_PCC_MASK 0x40000 +#define TD_EDC_MASK 0x80000 + +#define DB_Enable_MASK 0x1000000 +#define DB_IR_MASK 0x2000000 +#define DB_PCC_MASK 0x4000000 +#define DB_EDC_MASK 0x8000000 + +#define SQ_Enable_SHIFT 0 +#define SQ_IR_SHIFT 1 +#define SQ_PCC_SHIFT 2 +#define SQ_EDC_SHIFT 3 + +#define TCP_Enable_SHIFT 8 +#define TCP_IR_SHIFT 9 +#define TCP_PCC_SHIFT 10 +#define TCP_EDC_SHIFT 11 + +#define TD_Enable_SHIFT 16 +#define TD_IR_SHIFT 17 +#define TD_PCC_SHIFT 18 +#define TD_EDC_SHIFT 19 + +#define DB_Enable_SHIFT 24 +#define DB_IR_SHIFT 25 +#define DB_PCC_SHIFT 26 +#define DB_EDC_SHIFT 27 + +#define REMOVE_FMAX_MARGIN_BIT 0x0 +#define REMOVE_DCTOL_MARGIN_BIT 0x1 +#define REMOVE_PLATFORM_MARGIN_BIT 0x2 + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h new file mode 100644 index 000000000000..f985c78d746a --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h @@ -0,0 +1,123 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef VEGA12_PP_SMC_H +#define VEGA12_PP_SMC_H + +#pragma pack(push, 1) + +#define SMU_UCODE_VERSION 0x00270a00 + +/* SMU Response Codes: */ +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetWorkloadMask 0xE +#define PPSMC_MSG_SetPptLimit 0xF +#define PPSMC_MSG_SetDriverDramAddrHigh 0x10 +#define PPSMC_MSG_SetDriverDramAddrLow 0x11 +#define PPSMC_MSG_SetToolsDramAddrHigh 0x12 +#define PPSMC_MSG_SetToolsDramAddrLow 0x13 +#define PPSMC_MSG_TransferTableSmu2Dram 0x14 +#define PPSMC_MSG_TransferTableDram2Smu 0x15 +#define PPSMC_MSG_UseDefaultPPTable 0x16 +#define PPSMC_MSG_UseBackupPPTable 0x17 +#define PPSMC_MSG_RunBtc 0x18 +#define PPSMC_MSG_RequestI2CBus 0x19 +#define PPSMC_MSG_ReleaseI2CBus 0x1A +#define PPSMC_MSG_SetFloorSocVoltage 0x21 +#define PPSMC_MSG_SoftReset 0x22 +#define PPSMC_MSG_StartBacoMonitor 0x23 +#define PPSMC_MSG_CancelBacoMonitor 0x24 +#define PPSMC_MSG_EnterBaco 0x25 +#define PPSMC_MSG_SetSoftMinByFreq 0x26 +#define PPSMC_MSG_SetSoftMaxByFreq 0x27 +#define PPSMC_MSG_SetHardMinByFreq 0x28 +#define PPSMC_MSG_SetHardMaxByFreq 0x29 +#define PPSMC_MSG_GetMinDpmFreq 0x2A +#define PPSMC_MSG_GetMaxDpmFreq 0x2B +#define PPSMC_MSG_GetDpmFreqByIndex 0x2C +#define PPSMC_MSG_GetDpmClockFreq 0x2D +#define PPSMC_MSG_GetSsVoltageByDpm 0x2E +#define PPSMC_MSG_SetMemoryChannelConfig 0x2F +#define PPSMC_MSG_SetGeminiMode 0x30 +#define PPSMC_MSG_SetGeminiApertureHigh 0x31 +#define PPSMC_MSG_SetGeminiApertureLow 0x32 +#define PPSMC_MSG_SetMinLinkDpmByIndex 0x33 +#define PPSMC_MSG_OverridePcieParameters 0x34 +#define PPSMC_MSG_OverDriveSetPercentage 0x35 +#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x36 +#define PPSMC_MSG_ReenableAcDcInterrupt 0x37 +#define PPSMC_MSG_NotifyPowerSource 0x38 +#define PPSMC_MSG_SetUclkFastSwitch 0x39 +#define PPSMC_MSG_SetUclkDownHyst 0x3A +#define PPSMC_MSG_GfxDeviceDriverReset 0x3B +#define PPSMC_MSG_GetCurrentRpm 0x3C +#define PPSMC_MSG_SetVideoFps 0x3D +#define PPSMC_MSG_SetTjMax 0x3E +#define PPSMC_MSG_SetFanTemperatureTarget 0x3F +#define PPSMC_MSG_PrepareMp1ForUnload 0x40 +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x41 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x42 +#define PPSMC_MSG_DramLogSetDramSize 0x43 +#define PPSMC_MSG_SetFanMaxRpm 0x44 +#define PPSMC_MSG_SetFanMinPwm 0x45 +#define PPSMC_MSG_ConfigureGfxDidt 0x46 +#define PPSMC_MSG_NumOfDisplays 0x47 +#define PPSMC_MSG_RemoveMargins 0x48 +#define PPSMC_MSG_ReadSerialNumTop32 0x49 +#define PPSMC_MSG_ReadSerialNumBottom32 0x4A +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C +#define PPSMC_MSG_RunAcgBtc 0x4D +#define PPSMC_MSG_InitializeAcg 0x4E +#define PPSMC_MSG_EnableAcgBtcTestMode 0x4F +#define PPSMC_MSG_EnableAcgSpreadSpectrum 0x50 +#define PPSMC_MSG_AllowGfxOff 0x51 +#define PPSMC_MSG_DisallowGfxOff 
0x52 +#define PPSMC_MSG_GetPptLimit 0x53 +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x54 +#define PPSMC_Message_Count 0x56 + +typedef uint16_t PPSMC_Result; +typedef int PPSMC_Msg; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h new file mode 100644 index 000000000000..0c66f0fe1aaf --- /dev/null +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h @@ -0,0 +1,131 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef VEGA20_PP_SMC_H +#define VEGA20_PP_SMC_H + +#pragma pack(push, 1) + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetWorkloadMask 0xE +#define PPSMC_MSG_SetPptLimit 0xF +#define PPSMC_MSG_SetDriverDramAddrHigh 0x10 +#define PPSMC_MSG_SetDriverDramAddrLow 0x11 +#define PPSMC_MSG_SetToolsDramAddrHigh 0x12 +#define PPSMC_MSG_SetToolsDramAddrLow 0x13 +#define PPSMC_MSG_TransferTableSmu2Dram 0x14 +#define PPSMC_MSG_TransferTableDram2Smu 0x15 +#define PPSMC_MSG_UseDefaultPPTable 0x16 +#define PPSMC_MSG_UseBackupPPTable 0x17 +#define PPSMC_MSG_RunBtc 0x18 +#define PPSMC_MSG_RequestI2CBus 0x19 +#define PPSMC_MSG_ReleaseI2CBus 0x1A +#define PPSMC_MSG_SetFloorSocVoltage 0x21 +#define PPSMC_MSG_SoftReset 0x22 +#define PPSMC_MSG_StartBacoMonitor 0x23 +#define PPSMC_MSG_CancelBacoMonitor 0x24 +#define PPSMC_MSG_EnterBaco 0x25 +#define PPSMC_MSG_SetSoftMinByFreq 0x26 +#define PPSMC_MSG_SetSoftMaxByFreq 0x27 +#define PPSMC_MSG_SetHardMinByFreq 0x28 +#define PPSMC_MSG_SetHardMaxByFreq 0x29 +#define PPSMC_MSG_GetMinDpmFreq 0x2A +#define PPSMC_MSG_GetMaxDpmFreq 0x2B +#define PPSMC_MSG_GetDpmFreqByIndex 0x2C +#define 
PPSMC_MSG_GetDpmClockFreq 0x2D +#define PPSMC_MSG_GetSsVoltageByDpm 0x2E +#define PPSMC_MSG_SetMemoryChannelConfig 0x2F +#define PPSMC_MSG_SetGeminiMode 0x30 +#define PPSMC_MSG_SetGeminiApertureHigh 0x31 +#define PPSMC_MSG_SetGeminiApertureLow 0x32 +#define PPSMC_MSG_SetMinLinkDpmByIndex 0x33 +#define PPSMC_MSG_OverridePcieParameters 0x34 +#define PPSMC_MSG_OverDriveSetPercentage 0x35 +#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x36 +#define PPSMC_MSG_ReenableAcDcInterrupt 0x37 +#define PPSMC_MSG_NotifyPowerSource 0x38 +#define PPSMC_MSG_SetUclkFastSwitch 0x39 +#define PPSMC_MSG_SetUclkDownHyst 0x3A +//#define PPSMC_MSG_GfxDeviceDriverReset 0x3B +#define PPSMC_MSG_GetCurrentRpm 0x3C +#define PPSMC_MSG_SetVideoFps 0x3D +#define PPSMC_MSG_SetTjMax 0x3E +#define PPSMC_MSG_SetFanTemperatureTarget 0x3F +#define PPSMC_MSG_PrepareMp1ForUnload 0x40 +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x41 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x42 +#define PPSMC_MSG_DramLogSetDramSize 0x43 +#define PPSMC_MSG_SetFanMaxRpm 0x44 +#define PPSMC_MSG_SetFanMinPwm 0x45 +#define PPSMC_MSG_ConfigureGfxDidt 0x46 +#define PPSMC_MSG_NumOfDisplays 0x47 +#define PPSMC_MSG_RemoveMargins 0x48 +#define PPSMC_MSG_ReadSerialNumTop32 0x49 +#define PPSMC_MSG_ReadSerialNumBottom32 0x4A +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x4B +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x4C +#define PPSMC_MSG_WaflTest 0x4D +#define PPSMC_MSG_SetFclkGfxClkRatio 0x4E +// Unused ID 0x4F to 0x50 +#define PPSMC_MSG_AllowGfxOff 0x51 +#define PPSMC_MSG_DisallowGfxOff 0x52 +#define PPSMC_MSG_GetPptLimit 0x53 +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x54 +#define PPSMC_MSG_GetDebugData 0x55 +#define PPSMC_MSG_SetXgmiMode 0x56 +#define PPSMC_MSG_RunAfllBtc 0x57 +#define PPSMC_MSG_ExitBaco 0x58 +#define PPSMC_MSG_PrepareMp1ForReset 0x59 +#define PPSMC_MSG_PrepareMp1ForShutdown 0x5A +#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x5D +#define PPSMC_MSG_GetAVFSVoltageByDpm 0x5F +#define PPSMC_MSG_BacoWorkAroundFlushVDCI 0x60 +#define PPSMC_MSG_DFCstateControl 0x63 +#define PPSMC_Message_Count 0x64 + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_Msg; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c deleted file mode 100644 index 72824ef61edd..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c +++ /dev/null @@ -1,3405 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ - -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "cikd.h" -#include "atom.h" -#include "amdgpu_atombios.h" -#include "amdgpu_dpm.h" -#include "kv_dpm.h" -#include "gfx_v7_0.h" -#include - -#include "smu/smu_7_0_0_d.h" -#include "smu/smu_7_0_0_sh_mask.h" - -#include "gca/gfx_7_2_d.h" -#include "gca/gfx_7_2_sh_mask.h" -#include "legacy_dpm.h" - -#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5 -#define KV_MINIMUM_ENGINE_CLOCK 800 -#define SMC_RAM_END 0x40000 - -static const struct amd_pm_funcs kv_dpm_funcs; - -static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev); -static int kv_enable_nb_dpm(struct amdgpu_device *adev, - bool enable); -static void kv_init_graphics_levels(struct amdgpu_device *adev); -static int kv_calculate_ds_divider(struct amdgpu_device *adev); -static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev); -static int kv_calculate_dpm_settings(struct amdgpu_device *adev); -static void kv_enable_new_levels(struct amdgpu_device *adev); -static void kv_program_nbps_index_settings(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps); -static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level); -static int kv_set_enabled_levels(struct amdgpu_device *adev); -static int kv_force_dpm_highest(struct amdgpu_device *adev); -static int kv_force_dpm_lowest(struct amdgpu_device *adev); -static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps, - struct amdgpu_ps *old_rps); -static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, - int min_temp, int max_temp); -static int kv_init_fps_limits(struct amdgpu_device *adev); - -static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate); -static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate); - - -static u32 kv_convert_vid2_to_vid7(struct amdgpu_device *adev, - struct sumo_vid_mapping_table *vid_mapping_table, - u32 vid_2bit) -{ - struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - u32 i; - - if (vddc_sclk_table && vddc_sclk_table->count) { - if (vid_2bit < vddc_sclk_table->count) - return vddc_sclk_table->entries[vid_2bit].v; - else - return vddc_sclk_table->entries[vddc_sclk_table->count - 1].v; - } else { - for (i = 0; i < vid_mapping_table->num_entries; i++) { - if (vid_mapping_table->entries[i].vid_2bit == vid_2bit) - return vid_mapping_table->entries[i].vid_7bit; - } - return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_7bit; - } -} - -static u32 kv_convert_vid7_to_vid2(struct amdgpu_device *adev, - struct sumo_vid_mapping_table *vid_mapping_table, - u32 vid_7bit) -{ - struct amdgpu_clock_voltage_dependency_table *vddc_sclk_table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - u32 i; - - if (vddc_sclk_table && vddc_sclk_table->count) { - for (i = 0; i < vddc_sclk_table->count; i++) { - if (vddc_sclk_table->entries[i].v == vid_7bit) - return i; - } - return vddc_sclk_table->count - 1; - } else { - for (i = 0; i < vid_mapping_table->num_entries; i++) { - if (vid_mapping_table->entries[i].vid_7bit == vid_7bit) - return vid_mapping_table->entries[i].vid_2bit; - } - - return vid_mapping_table->entries[vid_mapping_table->num_entries - 1].vid_2bit; - } -} - -static void sumo_take_smu_control(struct amdgpu_device *adev, bool enable) -{ -/* This bit selects who handles display phy powergating. - * Clear the bit to let atom handle it. - * Set it to let the driver handle it. - * For now we just let atom handle it. 
- */ -#if 0 - u32 v = RREG32(mmDOUT_SCRATCH3); - - if (enable) - v |= 0x4; - else - v &= 0xFFFFFFFB; - - WREG32(mmDOUT_SCRATCH3, v); -#endif -} - -static void sumo_construct_sclk_voltage_mapping_table(struct amdgpu_device *adev, - struct sumo_sclk_voltage_mapping_table *sclk_voltage_mapping_table, - ATOM_AVAILABLE_SCLK_LIST *table) -{ - u32 i; - u32 n = 0; - u32 prev_sclk = 0; - - for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { - if (table[i].ulSupportedSCLK > prev_sclk) { - sclk_voltage_mapping_table->entries[n].sclk_frequency = - table[i].ulSupportedSCLK; - sclk_voltage_mapping_table->entries[n].vid_2bit = - table[i].usVoltageIndex; - prev_sclk = table[i].ulSupportedSCLK; - n++; - } - } - - sclk_voltage_mapping_table->num_max_dpm_entries = n; -} - -static void sumo_construct_vid_mapping_table(struct amdgpu_device *adev, - struct sumo_vid_mapping_table *vid_mapping_table, - ATOM_AVAILABLE_SCLK_LIST *table) -{ - u32 i, j; - - for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) { - if (table[i].ulSupportedSCLK != 0) { - vid_mapping_table->entries[table[i].usVoltageIndex].vid_7bit = - table[i].usVoltageID; - vid_mapping_table->entries[table[i].usVoltageIndex].vid_2bit = - table[i].usVoltageIndex; - } - } - - for (i = 0; i < SUMO_MAX_NUMBER_VOLTAGES; i++) { - if (vid_mapping_table->entries[i].vid_7bit == 0) { - for (j = i + 1; j < SUMO_MAX_NUMBER_VOLTAGES; j++) { - if (vid_mapping_table->entries[j].vid_7bit != 0) { - vid_mapping_table->entries[i] = - vid_mapping_table->entries[j]; - vid_mapping_table->entries[j].vid_7bit = 0; - break; - } - } - - if (j == SUMO_MAX_NUMBER_VOLTAGES) - break; - } - } - - vid_mapping_table->num_entries = i; -} - -#if 0 -static const struct kv_lcac_config_values sx_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 1, 4, 1 }, - { 2, 5, 1 }, - { 3, 4, 2 }, - { 4, 1, 1 }, - { 5, 5, 2 }, - { 6, 6, 1 }, - { 7, 9, 2 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_values mc0_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_values mc1_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_values mc2_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_values mc3_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_values cpl_local_cac_cfg_kv[] = -{ - { 0, 4, 1 }, - { 1, 4, 1 }, - { 2, 5, 1 }, - { 3, 4, 1 }, - { 4, 1, 1 }, - { 5, 5, 1 }, - { 6, 6, 1 }, - { 7, 9, 1 }, - { 8, 4, 1 }, - { 9, 2, 1 }, - { 10, 3, 1 }, - { 11, 6, 1 }, - { 12, 8, 2 }, - { 13, 1, 1 }, - { 14, 2, 1 }, - { 15, 3, 1 }, - { 16, 1, 1 }, - { 17, 4, 1 }, - { 18, 3, 1 }, - { 19, 1, 1 }, - { 20, 8, 1 }, - { 21, 5, 1 }, - { 22, 1, 1 }, - { 23, 1, 1 }, - { 24, 4, 1 }, - { 27, 6, 1 }, - { 28, 1, 1 }, - { 0xffffffff } -}; - -static const struct kv_lcac_config_reg sx0_cac_config_reg[] = -{ - { 0xc0400d00, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg mc0_cac_config_reg[] = -{ - { 0xc0400d30, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg mc1_cac_config_reg[] = -{ - { 0xc0400d3c, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg mc2_cac_config_reg[] = -{ - { 0xc0400d48, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg mc3_cac_config_reg[] = -{ - { 0xc0400d54, 0x003e0000, 17, 
0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; - -static const struct kv_lcac_config_reg cpl_cac_config_reg[] = -{ - { 0xc0400d80, 0x003e0000, 17, 0x3fc00000, 22, 0x0001fffe, 1, 0x00000001, 0 } -}; -#endif - -static const struct kv_pt_config_reg didt_config_kv[] = -{ - { 0x10, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x10, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x10, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x10, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x11, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x11, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x11, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x11, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x12, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x12, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x12, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x12, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x2, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, - { 0x2, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, - { 0x2, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, - { 0x1, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x1, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x0, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x30, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x30, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x30, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x30, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x31, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x31, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x31, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x31, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x32, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x32, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x32, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x32, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x22, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, - { 0x22, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, - { 0x22, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, - { 0x21, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x21, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x20, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x50, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x50, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x50, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x50, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x51, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x51, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x51, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x51, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x52, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x52, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x52, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x52, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x42, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, - { 0x42, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, - { 0x42, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, - { 0x41, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x41, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x40, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x70, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x70, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x70, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x70, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x71, 0x000000ff, 0, 0x0, 
KV_CONFIGREG_DIDT_IND }, - { 0x71, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x71, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x71, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x72, 0x000000ff, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x72, 0x0000ff00, 8, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x72, 0x00ff0000, 16, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x72, 0xff000000, 24, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0x62, 0x00003fff, 0, 0x4, KV_CONFIGREG_DIDT_IND }, - { 0x62, 0x03ff0000, 16, 0x80, KV_CONFIGREG_DIDT_IND }, - { 0x62, 0x78000000, 27, 0x3, KV_CONFIGREG_DIDT_IND }, - { 0x61, 0x0000ffff, 0, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x61, 0xffff0000, 16, 0x3FFF, KV_CONFIGREG_DIDT_IND }, - { 0x60, 0x00000001, 0, 0x0, KV_CONFIGREG_DIDT_IND }, - { 0xFFFFFFFF } -}; - -static struct kv_ps *kv_get_ps(struct amdgpu_ps *rps) -{ - struct kv_ps *ps = rps->ps_priv; - - return ps; -} - -static struct kv_power_info *kv_get_pi(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = adev->pm.dpm.priv; - - return pi; -} - -#if 0 -static void kv_program_local_cac_table(struct amdgpu_device *adev, - const struct kv_lcac_config_values *local_cac_table, - const struct kv_lcac_config_reg *local_cac_reg) -{ - u32 i, count, data; - const struct kv_lcac_config_values *values = local_cac_table; - - while (values->block_id != 0xffffffff) { - count = values->signal_id; - for (i = 0; i < count; i++) { - data = ((values->block_id << local_cac_reg->block_shift) & - local_cac_reg->block_mask); - data |= ((i << local_cac_reg->signal_shift) & - local_cac_reg->signal_mask); - data |= ((values->t << local_cac_reg->t_shift) & - local_cac_reg->t_mask); - data |= ((1 << local_cac_reg->enable_shift) & - local_cac_reg->enable_mask); - WREG32_SMC(local_cac_reg->cntl, data); - } - values++; - } -} -#endif - -static int kv_program_pt_config_registers(struct amdgpu_device *adev, - const struct kv_pt_config_reg *cac_config_regs) -{ - const struct kv_pt_config_reg *config_regs = cac_config_regs; - u32 data; - u32 cache = 0; - - if (config_regs == NULL) - return -EINVAL; - - while (config_regs->offset != 0xFFFFFFFF) { - if (config_regs->type == KV_CONFIGREG_CACHE) { - cache |= ((config_regs->value << config_regs->shift) & config_regs->mask); - } else { - switch (config_regs->type) { - case KV_CONFIGREG_SMC_IND: - data = RREG32_SMC(config_regs->offset); - break; - case KV_CONFIGREG_DIDT_IND: - data = RREG32_DIDT(config_regs->offset); - break; - default: - data = RREG32(config_regs->offset); - break; - } - - data &= ~config_regs->mask; - data |= ((config_regs->value << config_regs->shift) & config_regs->mask); - data |= cache; - cache = 0; - - switch (config_regs->type) { - case KV_CONFIGREG_SMC_IND: - WREG32_SMC(config_regs->offset, data); - break; - case KV_CONFIGREG_DIDT_IND: - WREG32_DIDT(config_regs->offset, data); - break; - default: - WREG32(config_regs->offset, data); - break; - } - } - config_regs++; - } - - return 0; -} - -static void kv_do_enable_didt(struct amdgpu_device *adev, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 data; - - if (pi->caps_sq_ramping) { - data = RREG32_DIDT(ixDIDT_SQ_CTRL0); - if (enable) - data |= DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_SQ_CTRL0, data); - } - - if (pi->caps_db_ramping) { - data = RREG32_DIDT(ixDIDT_DB_CTRL0); - if (enable) - data |= DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_DB_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_DB_CTRL0, data); - } - - if (pi->caps_td_ramping) { - 
data = RREG32_DIDT(ixDIDT_TD_CTRL0); - if (enable) - data |= DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_TD_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_TD_CTRL0, data); - } - - if (pi->caps_tcp_ramping) { - data = RREG32_DIDT(ixDIDT_TCP_CTRL0); - if (enable) - data |= DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - else - data &= ~DIDT_TCP_CTRL0__DIDT_CTRL_EN_MASK; - WREG32_DIDT(ixDIDT_TCP_CTRL0, data); - } -} - -static int kv_enable_didt(struct amdgpu_device *adev, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - if (pi->caps_sq_ramping || - pi->caps_db_ramping || - pi->caps_td_ramping || - pi->caps_tcp_ramping) { - amdgpu_gfx_rlc_enter_safe_mode(adev); - - if (enable) { - ret = kv_program_pt_config_registers(adev, didt_config_kv); - if (ret) { - amdgpu_gfx_rlc_exit_safe_mode(adev); - return ret; - } - } - - kv_do_enable_didt(adev, enable); - - amdgpu_gfx_rlc_exit_safe_mode(adev); - } - - return 0; -} - -#if 0 -static void kv_initialize_hardware_cac_manager(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->caps_cac) { - WREG32_SMC(ixLCAC_SX0_OVR_SEL, 0); - WREG32_SMC(ixLCAC_SX0_OVR_VAL, 0); - kv_program_local_cac_table(adev, sx_local_cac_cfg_kv, sx0_cac_config_reg); - - WREG32_SMC(ixLCAC_MC0_OVR_SEL, 0); - WREG32_SMC(ixLCAC_MC0_OVR_VAL, 0); - kv_program_local_cac_table(adev, mc0_local_cac_cfg_kv, mc0_cac_config_reg); - - WREG32_SMC(ixLCAC_MC1_OVR_SEL, 0); - WREG32_SMC(ixLCAC_MC1_OVR_VAL, 0); - kv_program_local_cac_table(adev, mc1_local_cac_cfg_kv, mc1_cac_config_reg); - - WREG32_SMC(ixLCAC_MC2_OVR_SEL, 0); - WREG32_SMC(ixLCAC_MC2_OVR_VAL, 0); - kv_program_local_cac_table(adev, mc2_local_cac_cfg_kv, mc2_cac_config_reg); - - WREG32_SMC(ixLCAC_MC3_OVR_SEL, 0); - WREG32_SMC(ixLCAC_MC3_OVR_VAL, 0); - kv_program_local_cac_table(adev, mc3_local_cac_cfg_kv, mc3_cac_config_reg); - - WREG32_SMC(ixLCAC_CPL_OVR_SEL, 0); - WREG32_SMC(ixLCAC_CPL_OVR_VAL, 0); - kv_program_local_cac_table(adev, cpl_local_cac_cfg_kv, cpl_cac_config_reg); - } -} -#endif - -static int kv_enable_smc_cac(struct amdgpu_device *adev, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret = 0; - - if (pi->caps_cac) { - if (enable) { - ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableCac); - if (ret) - pi->cac_enabled = false; - else - pi->cac_enabled = true; - } else if (pi->cac_enabled) { - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableCac); - pi->cac_enabled = false; - } - } - - return ret; -} - -static int kv_process_firmware_header(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 tmp; - int ret; - - ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, DpmTable), - &tmp, pi->sram_end); - - if (ret == 0) - pi->dpm_table_start = tmp; - - ret = amdgpu_kv_read_smc_sram_dword(adev, SMU7_FIRMWARE_HEADER_LOCATION + - offsetof(SMU7_Firmware_Header, SoftRegisters), - &tmp, pi->sram_end); - - if (ret == 0) - pi->soft_regs_start = tmp; - - return ret; -} - -static int kv_enable_dpm_voltage_scaling(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - pi->graphics_voltage_change_enable = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsVoltageChangeEnable), - &pi->graphics_voltage_change_enable, - sizeof(u8), pi->sram_end); - - return ret; -} - -static int kv_set_dpm_interval(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = 
kv_get_pi(adev); - int ret; - - pi->graphics_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsInterval), - &pi->graphics_interval, - sizeof(u8), pi->sram_end); - - return ret; -} - -static int kv_set_dpm_boot_state(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsBootLevel), - &pi->graphics_boot_level, - sizeof(u8), pi->sram_end); - - return ret; -} - -static void kv_program_vc(struct amdgpu_device *adev) -{ - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0x3FFFC100); -} - -static void kv_clear_vc(struct amdgpu_device *adev) -{ - WREG32_SMC(ixCG_FREQ_TRAN_VOTING_0, 0); -} - -static int kv_set_divider_value(struct amdgpu_device *adev, - u32 index, u32 sclk) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct atom_clock_dividers dividers; - int ret; - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - sclk, false, ÷rs); - if (ret) - return ret; - - pi->graphics_level[index].SclkDid = (u8)dividers.post_div; - pi->graphics_level[index].SclkFrequency = cpu_to_be32(sclk); - - return 0; -} - -static u16 kv_convert_8bit_index_to_voltage(struct amdgpu_device *adev, - u16 voltage) -{ - return 6200 - (voltage * 25); -} - -static u16 kv_convert_2bit_index_to_voltage(struct amdgpu_device *adev, - u32 vid_2bit) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 vid_8bit = kv_convert_vid2_to_vid7(adev, - &pi->sys_info.vid_mapping_table, - vid_2bit); - - return kv_convert_8bit_index_to_voltage(adev, (u16)vid_8bit); -} - - -static int kv_set_vid(struct amdgpu_device *adev, u32 index, u32 vid) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->graphics_level[index].VoltageDownH = (u8)pi->voltage_drop_t; - pi->graphics_level[index].MinVddNb = - cpu_to_be32(kv_convert_2bit_index_to_voltage(adev, vid)); - - return 0; -} - -static int kv_set_at(struct amdgpu_device *adev, u32 index, u32 at) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->graphics_level[index].AT = cpu_to_be16((u16)at); - - return 0; -} - -static void kv_dpm_power_level_enable(struct amdgpu_device *adev, - u32 index, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->graphics_level[index].EnabledForActivity = enable ? 1 : 0; -} - -static void kv_start_dpm(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(ixGENERAL_PWRMGT); - - tmp |= GENERAL_PWRMGT__GLOBAL_PWRMGT_EN_MASK; - WREG32_SMC(ixGENERAL_PWRMGT, tmp); - - amdgpu_kv_smc_dpm_enable(adev, true); -} - -static void kv_stop_dpm(struct amdgpu_device *adev) -{ - amdgpu_kv_smc_dpm_enable(adev, false); -} - -static void kv_start_am(struct amdgpu_device *adev) -{ - u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); - - sclk_pwrmgt_cntl &= ~(SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | - SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); - sclk_pwrmgt_cntl |= SCLK_PWRMGT_CNTL__DYNAMIC_PM_EN_MASK; - - WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); -} - -static void kv_reset_am(struct amdgpu_device *adev) -{ - u32 sclk_pwrmgt_cntl = RREG32_SMC(ixSCLK_PWRMGT_CNTL); - - sclk_pwrmgt_cntl |= (SCLK_PWRMGT_CNTL__RESET_SCLK_CNT_MASK | - SCLK_PWRMGT_CNTL__RESET_BUSY_CNT_MASK); - - WREG32_SMC(ixSCLK_PWRMGT_CNTL, sclk_pwrmgt_cntl); -} - -static int kv_freeze_sclk_dpm(struct amdgpu_device *adev, bool freeze) -{ - return amdgpu_kv_notify_message_to_smu(adev, freeze ? 
- PPSMC_MSG_SCLKDPM_FreezeLevel : PPSMC_MSG_SCLKDPM_UnfreezeLevel); -} - -static int kv_force_lowest_valid(struct amdgpu_device *adev) -{ - return kv_force_dpm_lowest(adev); -} - -static int kv_unforce_levels(struct amdgpu_device *adev) -{ - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NoForcedLevel); - else - return kv_set_enabled_levels(adev); -} - -static int kv_update_sclk_t(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 low_sclk_interrupt_t = 0; - int ret = 0; - - if (pi->caps_sclk_throttle_low_notification) { - low_sclk_interrupt_t = cpu_to_be32(pi->low_sclk_interrupt_t); - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, LowSclkInterruptT), - (u8 *)&low_sclk_interrupt_t, - sizeof(u32), pi->sram_end); - } - return ret; -} - -static int kv_program_bootup_state(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (table && table->count) { - for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { - if (table->entries[i].clk == pi->boot_pl.sclk) - break; - } - - pi->graphics_boot_level = (u8)i; - kv_dpm_power_level_enable(adev, i, true); - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - if (table->num_max_dpm_entries == 0) - return -EINVAL; - - for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { - if (table->entries[i].sclk_frequency == pi->boot_pl.sclk) - break; - } - - pi->graphics_boot_level = (u8)i; - kv_dpm_power_level_enable(adev, i, true); - } - return 0; -} - -static int kv_enable_auto_thermal_throttling(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - pi->graphics_therm_throttle_enable = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsThermThrottleEnable), - &pi->graphics_therm_throttle_enable, - sizeof(u8), pi->sram_end); - - return ret; -} - -static int kv_upload_dpm_settings(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsLevel), - (u8 *)&pi->graphics_level, - sizeof(SMU7_Fusion_GraphicsLevel) * SMU7_MAX_LEVELS_GRAPHICS, - pi->sram_end); - - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsDpmLevelCount), - &pi->graphics_dpm_level_count, - sizeof(u8), pi->sram_end); - - return ret; -} - -static u32 kv_get_clock_difference(u32 a, u32 b) -{ - return (a >= b) ? 
a - b : b - a; -} - -static u32 kv_get_clk_bypass(struct amdgpu_device *adev, u32 clk) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 value; - - if (pi->caps_enable_dfs_bypass) { - if (kv_get_clock_difference(clk, 40000) < 200) - value = 3; - else if (kv_get_clock_difference(clk, 30000) < 200) - value = 2; - else if (kv_get_clock_difference(clk, 20000) < 200) - value = 7; - else if (kv_get_clock_difference(clk, 15000) < 200) - value = 6; - else if (kv_get_clock_difference(clk, 10000) < 200) - value = 8; - else - value = 0; - } else { - value = 0; - } - - return value; -} - -static int kv_populate_uvd_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_uvd_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct atom_clock_dividers dividers; - int ret; - u32 i; - - if (table == NULL || table->count == 0) - return 0; - - pi->uvd_level_count = 0; - for (i = 0; i < table->count; i++) { - if (pi->high_voltage_t && - (pi->high_voltage_t < table->entries[i].v)) - break; - - pi->uvd_level[i].VclkFrequency = cpu_to_be32(table->entries[i].vclk); - pi->uvd_level[i].DclkFrequency = cpu_to_be32(table->entries[i].dclk); - pi->uvd_level[i].MinVddNb = cpu_to_be16(table->entries[i].v); - - pi->uvd_level[i].VClkBypassCntl = - (u8)kv_get_clk_bypass(adev, table->entries[i].vclk); - pi->uvd_level[i].DClkBypassCntl = - (u8)kv_get_clk_bypass(adev, table->entries[i].dclk); - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].vclk, false, ÷rs); - if (ret) - return ret; - pi->uvd_level[i].VclkDivider = (u8)dividers.post_div; - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].dclk, false, ÷rs); - if (ret) - return ret; - pi->uvd_level[i].DclkDivider = (u8)dividers.post_div; - - pi->uvd_level_count++; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, UvdLevelCount), - (u8 *)&pi->uvd_level_count, - sizeof(u8), pi->sram_end); - if (ret) - return ret; - - pi->uvd_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, UVDInterval), - &pi->uvd_interval, - sizeof(u8), pi->sram_end); - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, UvdLevel), - (u8 *)&pi->uvd_level, - sizeof(SMU7_Fusion_UvdLevel) * SMU7_MAX_LEVELS_UVD, - pi->sram_end); - - return ret; - -} - -static int kv_populate_vce_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - u32 i; - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - struct atom_clock_dividers dividers; - - if (table == NULL || table->count == 0) - return 0; - - pi->vce_level_count = 0; - for (i = 0; i < table->count; i++) { - if (pi->high_voltage_t && - pi->high_voltage_t < table->entries[i].v) - break; - - pi->vce_level[i].Frequency = cpu_to_be32(table->entries[i].evclk); - pi->vce_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); - - pi->vce_level[i].ClkBypassCntl = - (u8)kv_get_clk_bypass(adev, table->entries[i].evclk); - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].evclk, false, ÷rs); - if (ret) - return ret; - pi->vce_level[i].Divider = (u8)dividers.post_div; - - pi->vce_level_count++; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - 
pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, VceLevelCount), - (u8 *)&pi->vce_level_count, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - pi->vce_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, VCEInterval), - (u8 *)&pi->vce_interval, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, VceLevel), - (u8 *)&pi->vce_level, - sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_VCE, - pi->sram_end); - - return ret; -} - -static int kv_populate_samu_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; - struct atom_clock_dividers dividers; - int ret; - u32 i; - - if (table == NULL || table->count == 0) - return 0; - - pi->samu_level_count = 0; - for (i = 0; i < table->count; i++) { - if (pi->high_voltage_t && - pi->high_voltage_t < table->entries[i].v) - break; - - pi->samu_level[i].Frequency = cpu_to_be32(table->entries[i].clk); - pi->samu_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); - - pi->samu_level[i].ClkBypassCntl = - (u8)kv_get_clk_bypass(adev, table->entries[i].clk); - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].clk, false, ÷rs); - if (ret) - return ret; - pi->samu_level[i].Divider = (u8)dividers.post_div; - - pi->samu_level_count++; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, SamuLevelCount), - (u8 *)&pi->samu_level_count, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - pi->samu_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, SAMUInterval), - (u8 *)&pi->samu_interval, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, SamuLevel), - (u8 *)&pi->samu_level, - sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_SAMU, - pi->sram_end); - if (ret) - return ret; - - return ret; -} - - -static int kv_populate_acp_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - struct atom_clock_dividers dividers; - int ret; - u32 i; - - if (table == NULL || table->count == 0) - return 0; - - pi->acp_level_count = 0; - for (i = 0; i < table->count; i++) { - pi->acp_level[i].Frequency = cpu_to_be32(table->entries[i].clk); - pi->acp_level[i].MinVoltage = cpu_to_be16(table->entries[i].v); - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - table->entries[i].clk, false, ÷rs); - if (ret) - return ret; - pi->acp_level[i].Divider = (u8)dividers.post_div; - - pi->acp_level_count++; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, AcpLevelCount), - (u8 *)&pi->acp_level_count, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - pi->acp_interval = 1; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, ACPInterval), - (u8 *)&pi->acp_interval, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, 
AcpLevel), - (u8 *)&pi->acp_level, - sizeof(SMU7_Fusion_ExtClkLevel) * SMU7_MAX_LEVELS_ACP, - pi->sram_end); - if (ret) - return ret; - - return ret; -} - -static void kv_calculate_dfs_bypass_settings(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (table && table->count) { - for (i = 0; i < pi->graphics_dpm_level_count; i++) { - if (pi->caps_enable_dfs_bypass) { - if (kv_get_clock_difference(table->entries[i].clk, 40000) < 200) - pi->graphics_level[i].ClkBypassCntl = 3; - else if (kv_get_clock_difference(table->entries[i].clk, 30000) < 200) - pi->graphics_level[i].ClkBypassCntl = 2; - else if (kv_get_clock_difference(table->entries[i].clk, 26600) < 200) - pi->graphics_level[i].ClkBypassCntl = 7; - else if (kv_get_clock_difference(table->entries[i].clk , 20000) < 200) - pi->graphics_level[i].ClkBypassCntl = 6; - else if (kv_get_clock_difference(table->entries[i].clk , 10000) < 200) - pi->graphics_level[i].ClkBypassCntl = 8; - else - pi->graphics_level[i].ClkBypassCntl = 0; - } else { - pi->graphics_level[i].ClkBypassCntl = 0; - } - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - for (i = 0; i < pi->graphics_dpm_level_count; i++) { - if (pi->caps_enable_dfs_bypass) { - if (kv_get_clock_difference(table->entries[i].sclk_frequency, 40000) < 200) - pi->graphics_level[i].ClkBypassCntl = 3; - else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 30000) < 200) - pi->graphics_level[i].ClkBypassCntl = 2; - else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 26600) < 200) - pi->graphics_level[i].ClkBypassCntl = 7; - else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 20000) < 200) - pi->graphics_level[i].ClkBypassCntl = 6; - else if (kv_get_clock_difference(table->entries[i].sclk_frequency, 10000) < 200) - pi->graphics_level[i].ClkBypassCntl = 8; - else - pi->graphics_level[i].ClkBypassCntl = 0; - } else { - pi->graphics_level[i].ClkBypassCntl = 0; - } - } - } -} - -static int kv_enable_ulv(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ? 
- PPSMC_MSG_EnableULV : PPSMC_MSG_DisableULV); -} - -static void kv_reset_acp_boot_level(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->acp_boot_level = 0xff; -} - -static void kv_update_current_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct kv_ps *new_ps = kv_get_ps(rps); - struct kv_power_info *pi = kv_get_pi(adev); - - pi->current_rps = *rps; - pi->current_ps = *new_ps; - pi->current_rps.ps_priv = &pi->current_ps; - adev->pm.dpm.current_ps = &pi->current_rps; -} - -static void kv_update_requested_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct kv_ps *new_ps = kv_get_ps(rps); - struct kv_power_info *pi = kv_get_pi(adev); - - pi->requested_rps = *rps; - pi->requested_ps = *new_ps; - pi->requested_rps.ps_priv = &pi->requested_ps; - adev->pm.dpm.requested_ps = &pi->requested_rps; -} - -static void kv_dpm_enable_bapm(void *handle, bool enable) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - if (pi->bapm_enable) { - ret = amdgpu_kv_smc_bapm_enable(adev, enable); - if (ret) - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); - } -} - -static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) -{ - switch (sensor) { - case THERMAL_TYPE_RV6XX: - case THERMAL_TYPE_RV770: - case THERMAL_TYPE_EVERGREEN: - case THERMAL_TYPE_SUMO: - case THERMAL_TYPE_NI: - case THERMAL_TYPE_SI: - case THERMAL_TYPE_CI: - case THERMAL_TYPE_KV: - return true; - case THERMAL_TYPE_ADT7473_WITH_INTERNAL: - case THERMAL_TYPE_EMC2103_WITH_INTERNAL: - return false; /* need special handling */ - case THERMAL_TYPE_NONE: - case THERMAL_TYPE_EXTERNAL: - case THERMAL_TYPE_EXTERNAL_GPIO: - default: - return false; - } -} - -static int kv_dpm_enable(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret; - - ret = kv_process_firmware_header(adev); - if (ret) { - DRM_ERROR("kv_process_firmware_header failed\n"); - return ret; - } - kv_init_fps_limits(adev); - kv_init_graphics_levels(adev); - ret = kv_program_bootup_state(adev); - if (ret) { - DRM_ERROR("kv_program_bootup_state failed\n"); - return ret; - } - kv_calculate_dfs_bypass_settings(adev); - ret = kv_upload_dpm_settings(adev); - if (ret) { - DRM_ERROR("kv_upload_dpm_settings failed\n"); - return ret; - } - ret = kv_populate_uvd_table(adev); - if (ret) { - DRM_ERROR("kv_populate_uvd_table failed\n"); - return ret; - } - ret = kv_populate_vce_table(adev); - if (ret) { - DRM_ERROR("kv_populate_vce_table failed\n"); - return ret; - } - ret = kv_populate_samu_table(adev); - if (ret) { - DRM_ERROR("kv_populate_samu_table failed\n"); - return ret; - } - ret = kv_populate_acp_table(adev); - if (ret) { - DRM_ERROR("kv_populate_acp_table failed\n"); - return ret; - } - kv_program_vc(adev); -#if 0 - kv_initialize_hardware_cac_manager(adev); -#endif - kv_start_am(adev); - if (pi->enable_auto_thermal_throttling) { - ret = kv_enable_auto_thermal_throttling(adev); - if (ret) { - DRM_ERROR("kv_enable_auto_thermal_throttling failed\n"); - return ret; - } - } - ret = kv_enable_dpm_voltage_scaling(adev); - if (ret) { - DRM_ERROR("kv_enable_dpm_voltage_scaling failed\n"); - return ret; - } - ret = kv_set_dpm_interval(adev); - if (ret) { - DRM_ERROR("kv_set_dpm_interval failed\n"); - return ret; - } - ret = kv_set_dpm_boot_state(adev); - if (ret) { - DRM_ERROR("kv_set_dpm_boot_state failed\n"); - return ret; - } - ret = kv_enable_ulv(adev, true); - if (ret) { - DRM_ERROR("kv_enable_ulv 
failed\n"); - return ret; - } - kv_start_dpm(adev); - ret = kv_enable_didt(adev, true); - if (ret) { - DRM_ERROR("kv_enable_didt failed\n"); - return ret; - } - ret = kv_enable_smc_cac(adev, true); - if (ret) { - DRM_ERROR("kv_enable_smc_cac failed\n"); - return ret; - } - - kv_reset_acp_boot_level(adev); - - ret = amdgpu_kv_smc_bapm_enable(adev, false); - if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); - return ret; - } - - if (adev->irq.installed && - kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) { - ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX); - if (ret) { - DRM_ERROR("kv_set_thermal_temperature_range failed\n"); - return ret; - } - amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); - amdgpu_irq_get(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); - } - - return ret; -} - -static void kv_dpm_disable(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_LOW_TO_HIGH); - amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq, - AMDGPU_THERMAL_IRQ_HIGH_TO_LOW); - - amdgpu_kv_smc_bapm_enable(adev, false); - - if (adev->asic_type == CHIP_MULLINS) - kv_enable_nb_dpm(adev, false); - - /* powerup blocks */ - kv_dpm_powergate_acp(adev, false); - kv_dpm_powergate_samu(adev, false); - if (pi->caps_vce_pg) /* power on the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); - if (pi->caps_uvd_pg) /* power on the UVD block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); - - kv_enable_smc_cac(adev, false); - kv_enable_didt(adev, false); - kv_clear_vc(adev); - kv_stop_dpm(adev); - kv_enable_ulv(adev, false); - kv_reset_am(adev); - - kv_update_current_ps(adev, adev->pm.dpm.boot_ps); -} - -#if 0 -static int kv_write_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 value) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - return amdgpu_kv_copy_bytes_to_smc(adev, pi->soft_regs_start + reg_offset, - (u8 *)&value, sizeof(u16), pi->sram_end); -} - -static int kv_read_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 *value) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - return amdgpu_kv_read_smc_sram_dword(adev, pi->soft_regs_start + reg_offset, - value, pi->sram_end); -} -#endif - -static void kv_init_sclk_t(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->low_sclk_interrupt_t = 0; -} - -static int kv_init_fps_limits(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret = 0; - - if (pi->caps_fps) { - u16 tmp; - - tmp = 45; - pi->fps_high_t = cpu_to_be16(tmp); - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, FpsHighT), - (u8 *)&pi->fps_high_t, - sizeof(u16), pi->sram_end); - - tmp = 30; - pi->fps_low_t = cpu_to_be16(tmp); - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, FpsLowT), - (u8 *)&pi->fps_low_t, - sizeof(u16), pi->sram_end); - - } - return ret; -} - -static void kv_init_powergate_state(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->uvd_power_gated = false; - pi->vce_power_gated = false; - pi->samu_power_gated = false; - pi->acp_power_gated = false; - -} - -static int kv_enable_uvd_dpm(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ? 
- PPSMC_MSG_UVDDPM_Enable : PPSMC_MSG_UVDDPM_Disable); -} - -static int kv_enable_vce_dpm(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ? - PPSMC_MSG_VCEDPM_Enable : PPSMC_MSG_VCEDPM_Disable); -} - -static int kv_enable_samu_dpm(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ? - PPSMC_MSG_SAMUDPM_Enable : PPSMC_MSG_SAMUDPM_Disable); -} - -static int kv_enable_acp_dpm(struct amdgpu_device *adev, bool enable) -{ - return amdgpu_kv_notify_message_to_smu(adev, enable ? - PPSMC_MSG_ACPDPM_Enable : PPSMC_MSG_ACPDPM_Disable); -} - -static int kv_update_uvd_dpm(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_uvd_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - int ret; - u32 mask; - - if (!gate) { - if (table->count) - pi->uvd_boot_level = table->count - 1; - else - pi->uvd_boot_level = 0; - - if (!pi->caps_uvd_dpm || pi->caps_stable_p_state) { - mask = 1 << pi->uvd_boot_level; - } else { - mask = 0x1f; - } - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, UvdBootLevel), - (uint8_t *)&pi->uvd_boot_level, - sizeof(u8), pi->sram_end); - if (ret) - return ret; - - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_UVDDPM_SetEnabledMask, - mask); - } - - return kv_enable_uvd_dpm(adev, !gate); -} - -static u8 kv_get_vce_boot_level(struct amdgpu_device *adev, u32 evclk) -{ - u8 i; - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - - for (i = 0; i < table->count; i++) { - if (table->entries[i].evclk >= evclk) - break; - } - - return i; -} - -static int kv_update_vce_dpm(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - struct amdgpu_ps *amdgpu_current_state) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - int ret; - - if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) { - if (pi->caps_stable_p_state) - pi->vce_boot_level = table->count - 1; - else - pi->vce_boot_level = kv_get_vce_boot_level(adev, amdgpu_new_state->evclk); - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, VceBootLevel), - (u8 *)&pi->vce_boot_level, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - if (pi->caps_stable_p_state) - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_VCEDPM_SetEnabledMask, - (1 << pi->vce_boot_level)); - kv_enable_vce_dpm(adev, true); - } else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) { - kv_enable_vce_dpm(adev, false); - } - - return 0; -} - -static int kv_update_samu_dpm(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; - int ret; - - if (!gate) { - if (pi->caps_stable_p_state) - pi->samu_boot_level = table->count - 1; - else - pi->samu_boot_level = 0; - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, SamuBootLevel), - (u8 *)&pi->samu_boot_level, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - if (pi->caps_stable_p_state) - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - 
PPSMC_MSG_SAMUDPM_SetEnabledMask, - (1 << pi->samu_boot_level)); - } - - return kv_enable_samu_dpm(adev, !gate); -} - -static u8 kv_get_acp_boot_level(struct amdgpu_device *adev) -{ - u8 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - - for (i = 0; i < table->count; i++) { - if (table->entries[i].clk >= 0) /* XXX */ - break; - } - - if (i >= table->count) - i = table->count - 1; - - return i; -} - -static void kv_update_acp_boot_level(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u8 acp_boot_level; - - if (!pi->caps_stable_p_state) { - acp_boot_level = kv_get_acp_boot_level(adev); - if (acp_boot_level != pi->acp_boot_level) { - pi->acp_boot_level = acp_boot_level; - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ACPDPM_SetEnabledMask, - (1 << pi->acp_boot_level)); - } - } -} - -static int kv_update_acp_dpm(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - int ret; - - if (!gate) { - if (pi->caps_stable_p_state) - pi->acp_boot_level = table->count - 1; - else - pi->acp_boot_level = kv_get_acp_boot_level(adev); - - ret = amdgpu_kv_copy_bytes_to_smc(adev, - pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, AcpBootLevel), - (u8 *)&pi->acp_boot_level, - sizeof(u8), - pi->sram_end); - if (ret) - return ret; - - if (pi->caps_stable_p_state) - amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_ACPDPM_SetEnabledMask, - (1 << pi->acp_boot_level)); - } - - return kv_enable_acp_dpm(adev, !gate); -} - -static void kv_dpm_powergate_uvd(void *handle, bool gate) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - - pi->uvd_power_gated = gate; - - if (gate) { - /* stop the UVD block */ - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_GATE); - kv_update_uvd_dpm(adev, gate); - if (pi->caps_uvd_pg) - /* power off the UVD block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerOFF); - } else { - if (pi->caps_uvd_pg) - /* power on the UVD block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON); - /* re-init the UVD block */ - kv_update_uvd_dpm(adev, gate); - - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_UVD, - AMD_PG_STATE_UNGATE); - } -} - -static void kv_dpm_powergate_vce(void *handle, bool gate) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - - pi->vce_power_gated = gate; - - if (gate) { - /* stop the VCE block */ - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_GATE); - kv_enable_vce_dpm(adev, false); - if (pi->caps_vce_pg) /* power off the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF); - } else { - if (pi->caps_vce_pg) /* power on the VCE block */ - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON); - kv_enable_vce_dpm(adev, true); - /* re-init the VCE block */ - amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE, - AMD_PG_STATE_UNGATE); - } -} - - -static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->samu_power_gated == gate) - return; - - pi->samu_power_gated = gate; - - if (gate) { - kv_update_samu_dpm(adev, true); - if (pi->caps_samu_pg) - 
amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerOFF); - } else { - if (pi->caps_samu_pg) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SAMPowerON); - kv_update_samu_dpm(adev, false); - } -} - -static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->acp_power_gated == gate) - return; - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return; - - pi->acp_power_gated = gate; - - if (gate) { - kv_update_acp_dpm(adev, true); - if (pi->caps_acp_pg) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerOFF); - } else { - if (pi->caps_acp_pg) - amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_ACPPowerON); - kv_update_acp_dpm(adev, false); - } -} - -static void kv_set_valid_clock_range(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps) -{ - struct kv_ps *new_ps = kv_get_ps(new_rps); - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (table && table->count) { - for (i = 0; i < pi->graphics_dpm_level_count; i++) { - if ((table->entries[i].clk >= new_ps->levels[0].sclk) || - (i == (pi->graphics_dpm_level_count - 1))) { - pi->lowest_valid = i; - break; - } - } - - for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { - if (table->entries[i].clk <= new_ps->levels[new_ps->num_levels - 1].sclk) - break; - } - pi->highest_valid = i; - - if (pi->lowest_valid > pi->highest_valid) { - if ((new_ps->levels[0].sclk - table->entries[pi->highest_valid].clk) > - (table->entries[pi->lowest_valid].clk - new_ps->levels[new_ps->num_levels - 1].sclk)) - pi->highest_valid = pi->lowest_valid; - else - pi->lowest_valid = pi->highest_valid; - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - for (i = 0; i < (int)pi->graphics_dpm_level_count; i++) { - if (table->entries[i].sclk_frequency >= new_ps->levels[0].sclk || - i == (int)(pi->graphics_dpm_level_count - 1)) { - pi->lowest_valid = i; - break; - } - } - - for (i = pi->graphics_dpm_level_count - 1; i > 0; i--) { - if (table->entries[i].sclk_frequency <= - new_ps->levels[new_ps->num_levels - 1].sclk) - break; - } - pi->highest_valid = i; - - if (pi->lowest_valid > pi->highest_valid) { - if ((new_ps->levels[0].sclk - - table->entries[pi->highest_valid].sclk_frequency) > - (table->entries[pi->lowest_valid].sclk_frequency - - new_ps->levels[new_ps->num_levels -1].sclk)) - pi->highest_valid = pi->lowest_valid; - else - pi->lowest_valid = pi->highest_valid; - } - } -} - -static int kv_update_dfs_bypass_settings(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps) -{ - struct kv_ps *new_ps = kv_get_ps(new_rps); - struct kv_power_info *pi = kv_get_pi(adev); - int ret = 0; - u8 clk_bypass_cntl; - - if (pi->caps_enable_dfs_bypass) { - clk_bypass_cntl = new_ps->need_dfs_bypass ? 
- pi->graphics_level[pi->graphics_boot_level].ClkBypassCntl : 0; - ret = amdgpu_kv_copy_bytes_to_smc(adev, - (pi->dpm_table_start + - offsetof(SMU7_Fusion_DpmTable, GraphicsLevel) + - (pi->graphics_boot_level * sizeof(SMU7_Fusion_GraphicsLevel)) + - offsetof(SMU7_Fusion_GraphicsLevel, ClkBypassCntl)), - &clk_bypass_cntl, - sizeof(u8), pi->sram_end); - } - - return ret; -} - -static int kv_enable_nb_dpm(struct amdgpu_device *adev, - bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - int ret = 0; - - if (enable) { - if (pi->enable_nb_dpm && !pi->nb_dpm_enabled) { - ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Enable); - if (ret == 0) - pi->nb_dpm_enabled = true; - } - } else { - if (pi->enable_nb_dpm && pi->nb_dpm_enabled) { - ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_NBDPM_Disable); - if (ret == 0) - pi->nb_dpm_enabled = false; - } - } - - return ret; -} - -static int kv_dpm_force_performance_level(void *handle, - enum amd_dpm_forced_level level) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (level == AMD_DPM_FORCED_LEVEL_HIGH) { - ret = kv_force_dpm_highest(adev); - if (ret) - return ret; - } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { - ret = kv_force_dpm_lowest(adev); - if (ret) - return ret; - } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { - ret = kv_unforce_levels(adev); - if (ret) - return ret; - } - - adev->pm.dpm.forced_level = level; - - return 0; -} - -static int kv_dpm_pre_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; - struct amdgpu_ps *new_ps = &requested_ps; - - kv_update_requested_ps(adev, new_ps); - - kv_apply_state_adjust_rules(adev, - &pi->requested_rps, - &pi->current_rps); - - return 0; -} - -static int kv_dpm_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_ps *new_ps = &pi->requested_rps; - struct amdgpu_ps *old_ps = &pi->current_rps; - int ret; - - if (pi->bapm_enable) { - ret = amdgpu_kv_smc_bapm_enable(adev, adev->pm.ac_power); - if (ret) { - DRM_ERROR("amdgpu_kv_smc_bapm_enable failed\n"); - return ret; - } - } - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { - if (pi->enable_dpm) { - kv_set_valid_clock_range(adev, new_ps); - kv_update_dfs_bypass_settings(adev, new_ps); - ret = kv_calculate_ds_divider(adev); - if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); - return ret; - } - kv_calculate_nbps_level_settings(adev); - kv_calculate_dpm_settings(adev); - kv_force_lowest_valid(adev); - kv_enable_new_levels(adev); - kv_upload_dpm_settings(adev); - kv_program_nbps_index_settings(adev, new_ps); - kv_unforce_levels(adev); - kv_set_enabled_levels(adev); - kv_force_lowest_valid(adev); - kv_unforce_levels(adev); - - ret = kv_update_vce_dpm(adev, new_ps, old_ps); - if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); - return ret; - } - kv_update_sclk_t(adev); - if (adev->asic_type == CHIP_MULLINS) - kv_enable_nb_dpm(adev, true); - } - } else { - if (pi->enable_dpm) { - kv_set_valid_clock_range(adev, new_ps); - kv_update_dfs_bypass_settings(adev, new_ps); - ret = kv_calculate_ds_divider(adev); - if (ret) { - DRM_ERROR("kv_calculate_ds_divider failed\n"); - return ret; - } - kv_calculate_nbps_level_settings(adev); - kv_calculate_dpm_settings(adev); - kv_freeze_sclk_dpm(adev, true); - 
kv_upload_dpm_settings(adev); - kv_program_nbps_index_settings(adev, new_ps); - kv_freeze_sclk_dpm(adev, false); - kv_set_enabled_levels(adev); - ret = kv_update_vce_dpm(adev, new_ps, old_ps); - if (ret) { - DRM_ERROR("kv_update_vce_dpm failed\n"); - return ret; - } - kv_update_acp_boot_level(adev); - kv_update_sclk_t(adev); - kv_enable_nb_dpm(adev, true); - } - } - - return 0; -} - -static void kv_dpm_post_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_ps *new_ps = &pi->requested_rps; - - kv_update_current_ps(adev, new_ps); -} - -static void kv_dpm_setup_asic(struct amdgpu_device *adev) -{ - sumo_take_smu_control(adev, true); - kv_init_powergate_state(adev); - kv_init_sclk_t(adev); -} - -#if 0 -static void kv_dpm_reset_asic(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { - kv_force_lowest_valid(adev); - kv_init_graphics_levels(adev); - kv_program_bootup_state(adev); - kv_upload_dpm_settings(adev); - kv_force_lowest_valid(adev); - kv_unforce_levels(adev); - } else { - kv_init_graphics_levels(adev); - kv_program_bootup_state(adev); - kv_freeze_sclk_dpm(adev, true); - kv_upload_dpm_settings(adev); - kv_freeze_sclk_dpm(adev, false); - kv_set_enabled_level(adev, pi->graphics_boot_level); - } -} -#endif - -static void kv_construct_max_power_limits_table(struct amdgpu_device *adev, - struct amdgpu_clock_and_voltage_limits *table) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - if (pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries > 0) { - int idx = pi->sys_info.sclk_voltage_mapping_table.num_max_dpm_entries - 1; - table->sclk = - pi->sys_info.sclk_voltage_mapping_table.entries[idx].sclk_frequency; - table->vddc = - kv_convert_2bit_index_to_voltage(adev, - pi->sys_info.sclk_voltage_mapping_table.entries[idx].vid_2bit); - } - - table->mclk = pi->sys_info.nbp_memory_clock[0]; -} - -static void kv_patch_voltage_values(struct amdgpu_device *adev) -{ - int i; - struct amdgpu_uvd_clock_voltage_dependency_table *uvd_table = - &adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table; - struct amdgpu_vce_clock_voltage_dependency_table *vce_table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table *samu_table = - &adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table; - struct amdgpu_clock_voltage_dependency_table *acp_table = - &adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table; - - if (uvd_table->count) { - for (i = 0; i < uvd_table->count; i++) - uvd_table->entries[i].v = - kv_convert_8bit_index_to_voltage(adev, - uvd_table->entries[i].v); - } - - if (vce_table->count) { - for (i = 0; i < vce_table->count; i++) - vce_table->entries[i].v = - kv_convert_8bit_index_to_voltage(adev, - vce_table->entries[i].v); - } - - if (samu_table->count) { - for (i = 0; i < samu_table->count; i++) - samu_table->entries[i].v = - kv_convert_8bit_index_to_voltage(adev, - samu_table->entries[i].v); - } - - if (acp_table->count) { - for (i = 0; i < acp_table->count; i++) - acp_table->entries[i].v = - kv_convert_8bit_index_to_voltage(adev, - acp_table->entries[i].v); - } - -} - -static void kv_construct_boot_state(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->boot_pl.sclk = pi->sys_info.bootup_sclk; - pi->boot_pl.vddc_index = pi->sys_info.bootup_nb_voltage_index; - 
pi->boot_pl.ds_divider_index = 0; - pi->boot_pl.ss_divider_index = 0; - pi->boot_pl.allow_gnb_slow = 1; - pi->boot_pl.force_nbp_state = 0; - pi->boot_pl.display_wm = 0; - pi->boot_pl.vce_wm = 0; -} - -static int kv_force_dpm_highest(struct amdgpu_device *adev) -{ - int ret; - u32 enable_mask, i; - - ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); - if (ret) - return ret; - - for (i = SMU7_MAX_LEVELS_GRAPHICS - 1; i > 0; i--) { - if (enable_mask & (1 << i)) - break; - } - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); - else - return kv_set_enabled_level(adev, i); -} - -static int kv_force_dpm_lowest(struct amdgpu_device *adev) -{ - int ret; - u32 enable_mask, i; - - ret = amdgpu_kv_dpm_get_enable_mask(adev, &enable_mask); - if (ret) - return ret; - - for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { - if (enable_mask & (1 << i)) - break; - } - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return amdgpu_kv_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_DPM_ForceState, i); - else - return kv_set_enabled_level(adev, i); -} - -static u8 kv_get_sleep_divider_id_from_clock(struct amdgpu_device *adev, - u32 sclk, u32 min_sclk_in_sr) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - u32 temp; - u32 min = max(min_sclk_in_sr, (u32)KV_MINIMUM_ENGINE_CLOCK); - - if (sclk < min) - return 0; - - if (!pi->caps_sclk_ds) - return 0; - - for (i = KV_MAX_DEEPSLEEP_DIVIDER_ID; i > 0; i--) { - temp = sclk >> i; - if (temp >= min) - break; - } - - return (u8)i; -} - -static int kv_get_high_voltage_limit(struct amdgpu_device *adev, int *limit) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - int i; - - if (table && table->count) { - for (i = table->count - 1; i >= 0; i--) { - if (pi->high_voltage_t && - (kv_convert_8bit_index_to_voltage(adev, table->entries[i].v) <= - pi->high_voltage_t)) { - *limit = i; - return 0; - } - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - for (i = table->num_max_dpm_entries - 1; i >= 0; i--) { - if (pi->high_voltage_t && - (kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit) <= - pi->high_voltage_t)) { - *limit = i; - return 0; - } - } - } - - *limit = 0; - return 0; -} - -static void kv_apply_state_adjust_rules(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps, - struct amdgpu_ps *old_rps) -{ - struct kv_ps *ps = kv_get_ps(new_rps); - struct kv_power_info *pi = kv_get_pi(adev); - u32 min_sclk = 10000; /* ??? 
*/ - u32 sclk, mclk = 0; - int i, limit; - bool force_high; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - u32 stable_p_state_sclk = 0; - struct amdgpu_clock_and_voltage_limits *max_limits = - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - - if (new_rps->vce_active) { - new_rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; - new_rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; - } else { - new_rps->evclk = 0; - new_rps->ecclk = 0; - } - - mclk = max_limits->mclk; - sclk = min_sclk; - - if (pi->caps_stable_p_state) { - stable_p_state_sclk = (max_limits->sclk * 75) / 100; - - for (i = table->count - 1; i >= 0; i--) { - if (stable_p_state_sclk >= table->entries[i].clk) { - stable_p_state_sclk = table->entries[i].clk; - break; - } - } - - if (i > 0) - stable_p_state_sclk = table->entries[0].clk; - - sclk = stable_p_state_sclk; - } - - if (new_rps->vce_active) { - if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) - sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; - } - - ps->need_dfs_bypass = true; - - for (i = 0; i < ps->num_levels; i++) { - if (ps->levels[i].sclk < sclk) - ps->levels[i].sclk = sclk; - } - - if (table && table->count) { - for (i = 0; i < ps->num_levels; i++) { - if (pi->high_voltage_t && - (pi->high_voltage_t < - kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { - kv_get_high_voltage_limit(adev, &limit); - ps->levels[i].sclk = table->entries[limit].clk; - } - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - for (i = 0; i < ps->num_levels; i++) { - if (pi->high_voltage_t && - (pi->high_voltage_t < - kv_convert_8bit_index_to_voltage(adev, ps->levels[i].vddc_index))) { - kv_get_high_voltage_limit(adev, &limit); - ps->levels[i].sclk = table->entries[limit].sclk_frequency; - } - } - } - - if (pi->caps_stable_p_state) { - for (i = 0; i < ps->num_levels; i++) { - ps->levels[i].sclk = stable_p_state_sclk; - } - } - - pi->video_start = new_rps->dclk || new_rps->vclk || - new_rps->evclk || new_rps->ecclk; - - if ((new_rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == - ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) - pi->battery_state = true; - else - pi->battery_state = false; - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { - ps->dpm0_pg_nb_ps_lo = 0x1; - ps->dpm0_pg_nb_ps_hi = 0x0; - ps->dpmx_nb_ps_lo = 0x1; - ps->dpmx_nb_ps_hi = 0x0; - } else { - ps->dpm0_pg_nb_ps_lo = 0x3; - ps->dpm0_pg_nb_ps_hi = 0x0; - ps->dpmx_nb_ps_lo = 0x3; - ps->dpmx_nb_ps_hi = 0x0; - - if (pi->sys_info.nb_dpm_enable) { - force_high = (mclk >= pi->sys_info.nbp_memory_clock[3]) || - pi->video_start || (adev->pm.dpm.new_active_crtc_count >= 3) || - pi->disable_nb_ps3_in_battery; - ps->dpm0_pg_nb_ps_lo = force_high ? 0x2 : 0x3; - ps->dpm0_pg_nb_ps_hi = 0x2; - ps->dpmx_nb_ps_lo = force_high ? 0x2 : 0x3; - ps->dpmx_nb_ps_hi = 0x2; - } - } -} - -static void kv_dpm_power_level_enabled_for_throttle(struct amdgpu_device *adev, - u32 index, bool enable) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - pi->graphics_level[index].EnabledForThrottle = enable ? 1 : 0; -} - -static int kv_calculate_ds_divider(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 sclk_in_sr = 10000; /* ??? 
*/ - u32 i; - - if (pi->lowest_valid > pi->highest_valid) - return -EINVAL; - - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { - pi->graphics_level[i].DeepSleepDivId = - kv_get_sleep_divider_id_from_clock(adev, - be32_to_cpu(pi->graphics_level[i].SclkFrequency), - sclk_in_sr); - } - return 0; -} - -static int kv_calculate_nbps_level_settings(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - bool force_high; - struct amdgpu_clock_and_voltage_limits *max_limits = - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - u32 mclk = max_limits->mclk; - - if (pi->lowest_valid > pi->highest_valid) - return -EINVAL; - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) { - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { - pi->graphics_level[i].GnbSlow = 1; - pi->graphics_level[i].ForceNbPs1 = 0; - pi->graphics_level[i].UpH = 0; - } - - if (!pi->sys_info.nb_dpm_enable) - return 0; - - force_high = ((mclk >= pi->sys_info.nbp_memory_clock[3]) || - (adev->pm.dpm.new_active_crtc_count >= 3) || pi->video_start); - - if (force_high) { - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) - pi->graphics_level[i].GnbSlow = 0; - } else { - if (pi->battery_state) - pi->graphics_level[0].ForceNbPs1 = 1; - - pi->graphics_level[1].GnbSlow = 0; - pi->graphics_level[2].GnbSlow = 0; - pi->graphics_level[3].GnbSlow = 0; - pi->graphics_level[4].GnbSlow = 0; - } - } else { - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) { - pi->graphics_level[i].GnbSlow = 1; - pi->graphics_level[i].ForceNbPs1 = 0; - pi->graphics_level[i].UpH = 0; - } - - if (pi->sys_info.nb_dpm_enable && pi->battery_state) { - pi->graphics_level[pi->lowest_valid].UpH = 0x28; - pi->graphics_level[pi->lowest_valid].GnbSlow = 0; - if (pi->lowest_valid != pi->highest_valid) - pi->graphics_level[pi->lowest_valid].ForceNbPs1 = 1; - } - } - return 0; -} - -static int kv_calculate_dpm_settings(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - - if (pi->lowest_valid > pi->highest_valid) - return -EINVAL; - - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) - pi->graphics_level[i].DisplayWatermark = (i == pi->highest_valid) ? 
1 : 0; - - return 0; -} - -static void kv_init_graphics_levels(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - struct amdgpu_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk; - - if (table && table->count) { - u32 vid_2bit; - - pi->graphics_dpm_level_count = 0; - for (i = 0; i < table->count; i++) { - if (pi->high_voltage_t && - (pi->high_voltage_t < - kv_convert_8bit_index_to_voltage(adev, table->entries[i].v))) - break; - - kv_set_divider_value(adev, i, table->entries[i].clk); - vid_2bit = kv_convert_vid7_to_vid2(adev, - &pi->sys_info.vid_mapping_table, - table->entries[i].v); - kv_set_vid(adev, i, vid_2bit); - kv_set_at(adev, i, pi->at[i]); - kv_dpm_power_level_enabled_for_throttle(adev, i, true); - pi->graphics_dpm_level_count++; - } - } else { - struct sumo_sclk_voltage_mapping_table *table = - &pi->sys_info.sclk_voltage_mapping_table; - - pi->graphics_dpm_level_count = 0; - for (i = 0; i < table->num_max_dpm_entries; i++) { - if (pi->high_voltage_t && - pi->high_voltage_t < - kv_convert_2bit_index_to_voltage(adev, table->entries[i].vid_2bit)) - break; - - kv_set_divider_value(adev, i, table->entries[i].sclk_frequency); - kv_set_vid(adev, i, table->entries[i].vid_2bit); - kv_set_at(adev, i, pi->at[i]); - kv_dpm_power_level_enabled_for_throttle(adev, i, true); - pi->graphics_dpm_level_count++; - } - } - - for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) - kv_dpm_power_level_enable(adev, i, false); -} - -static void kv_enable_new_levels(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i; - - for (i = 0; i < SMU7_MAX_LEVELS_GRAPHICS; i++) { - if (i >= pi->lowest_valid && i <= pi->highest_valid) - kv_dpm_power_level_enable(adev, i, true); - } -} - -static int kv_set_enabled_level(struct amdgpu_device *adev, u32 level) -{ - u32 new_mask = (1 << level); - - return amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - new_mask); -} - -static int kv_set_enabled_levels(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - u32 i, new_mask = 0; - - for (i = pi->lowest_valid; i <= pi->highest_valid; i++) - new_mask |= (1 << i); - - return amdgpu_kv_send_msg_to_smc_with_parameter(adev, - PPSMC_MSG_SCLKDPM_SetEnabledMask, - new_mask); -} - -static void kv_program_nbps_index_settings(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps) -{ - struct kv_ps *new_ps = kv_get_ps(new_rps); - struct kv_power_info *pi = kv_get_pi(adev); - u32 nbdpmconfig1; - - if (adev->asic_type == CHIP_KABINI || adev->asic_type == CHIP_MULLINS) - return; - - if (pi->sys_info.nb_dpm_enable) { - nbdpmconfig1 = RREG32_SMC(ixNB_DPM_CONFIG_1); - nbdpmconfig1 &= ~(NB_DPM_CONFIG_1__Dpm0PgNbPsLo_MASK | - NB_DPM_CONFIG_1__Dpm0PgNbPsHi_MASK | - NB_DPM_CONFIG_1__DpmXNbPsLo_MASK | - NB_DPM_CONFIG_1__DpmXNbPsHi_MASK); - nbdpmconfig1 |= (new_ps->dpm0_pg_nb_ps_lo << NB_DPM_CONFIG_1__Dpm0PgNbPsLo__SHIFT) | - (new_ps->dpm0_pg_nb_ps_hi << NB_DPM_CONFIG_1__Dpm0PgNbPsHi__SHIFT) | - (new_ps->dpmx_nb_ps_lo << NB_DPM_CONFIG_1__DpmXNbPsLo__SHIFT) | - (new_ps->dpmx_nb_ps_hi << NB_DPM_CONFIG_1__DpmXNbPsHi__SHIFT); - WREG32_SMC(ixNB_DPM_CONFIG_1, nbdpmconfig1); - } -} - -static int kv_set_thermal_temperature_range(struct amdgpu_device *adev, - int min_temp, int max_temp) -{ - int low_temp = 0 * 1000; - int high_temp = 255 * 1000; - u32 tmp; - - if (low_temp < min_temp) - low_temp = min_temp; - if (high_temp > max_temp) - high_temp = max_temp; - if (high_temp < low_temp) { - 
DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); - return -EINVAL; - } - - tmp = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - tmp &= ~(CG_THERMAL_INT_CTRL__DIG_THERM_INTH_MASK | - CG_THERMAL_INT_CTRL__DIG_THERM_INTL_MASK); - tmp |= ((49 + (high_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTH__SHIFT) | - ((49 + (low_temp / 1000)) << CG_THERMAL_INT_CTRL__DIG_THERM_INTL__SHIFT); - WREG32_SMC(ixCG_THERMAL_INT_CTRL, tmp); - - adev->pm.dpm.thermal.min_temp = low_temp; - adev->pm.dpm.thermal.max_temp = high_temp; - - return 0; -} - -union igp_info { - struct _ATOM_INTEGRATED_SYSTEM_INFO info; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V2 info_2; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V5 info_5; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V6 info_6; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_7 info_7; - struct _ATOM_INTEGRATED_SYSTEM_INFO_V1_8 info_8; -}; - -static int kv_parse_sys_info_table(struct amdgpu_device *adev) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct amdgpu_mode_info *mode_info = &adev->mode_info; - int index = GetIndexIntoMasterTable(DATA, IntegratedSystemInfo); - union igp_info *igp_info; - u8 frev, crev; - u16 data_offset; - int i; - - if (amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) { - igp_info = (union igp_info *)(mode_info->atom_context->bios + - data_offset); - - if (crev != 8) { - DRM_ERROR("Unsupported IGP table: %d %d\n", frev, crev); - return -EINVAL; - } - pi->sys_info.bootup_sclk = le32_to_cpu(igp_info->info_8.ulBootUpEngineClock); - pi->sys_info.bootup_uma_clk = le32_to_cpu(igp_info->info_8.ulBootUpUMAClock); - pi->sys_info.bootup_nb_voltage_index = - le16_to_cpu(igp_info->info_8.usBootUpNBVoltage); - if (igp_info->info_8.ucHtcTmpLmt == 0) - pi->sys_info.htc_tmp_lmt = 203; - else - pi->sys_info.htc_tmp_lmt = igp_info->info_8.ucHtcTmpLmt; - if (igp_info->info_8.ucHtcHystLmt == 0) - pi->sys_info.htc_hyst_lmt = 5; - else - pi->sys_info.htc_hyst_lmt = igp_info->info_8.ucHtcHystLmt; - if (pi->sys_info.htc_tmp_lmt <= pi->sys_info.htc_hyst_lmt) { - DRM_ERROR("The htcTmpLmt should be larger than htcHystLmt.\n"); - } - - if (le32_to_cpu(igp_info->info_8.ulSystemConfig) & (1 << 3)) - pi->sys_info.nb_dpm_enable = true; - else - pi->sys_info.nb_dpm_enable = false; - - for (i = 0; i < KV_NUM_NBPSTATES; i++) { - pi->sys_info.nbp_memory_clock[i] = - le32_to_cpu(igp_info->info_8.ulNbpStateMemclkFreq[i]); - pi->sys_info.nbp_n_clock[i] = - le32_to_cpu(igp_info->info_8.ulNbpStateNClkFreq[i]); - } - if (le32_to_cpu(igp_info->info_8.ulGPUCapInfo) & - SYS_INFO_GPUCAPS__ENABEL_DFS_BYPASS) - pi->caps_enable_dfs_bypass = true; - - sumo_construct_sclk_voltage_mapping_table(adev, - &pi->sys_info.sclk_voltage_mapping_table, - igp_info->info_8.sAvail_SCLK); - - sumo_construct_vid_mapping_table(adev, - &pi->sys_info.vid_mapping_table, - igp_info->info_8.sAvail_SCLK); - - kv_construct_max_power_limits_table(adev, - &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac); - } - return 0; -} - -union power_info { - struct _ATOM_POWERPLAY_INFO info; - struct _ATOM_POWERPLAY_INFO_V2 info_2; - struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; -}; - -union pplib_clock_info { - struct _ATOM_PPLIB_R600_CLOCK_INFO r600; - struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; - struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; - struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; -}; - -union pplib_power_state { - struct _ATOM_PPLIB_STATE v1; - 
struct _ATOM_PPLIB_STATE_V2 v2; -}; - -static void kv_patch_boot_state(struct amdgpu_device *adev, - struct kv_ps *ps) -{ - struct kv_power_info *pi = kv_get_pi(adev); - - ps->num_levels = 1; - ps->levels[0] = pi->boot_pl; -} - -static void kv_parse_pplib_non_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, - u8 table_rev) -{ - struct kv_ps *ps = kv_get_ps(rps); - - rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); - rps->class = le16_to_cpu(non_clock_info->usClassification); - rps->class2 = le16_to_cpu(non_clock_info->usClassification2); - - if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { - rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); - rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); - } else { - rps->vclk = 0; - rps->dclk = 0; - } - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { - adev->pm.dpm.boot_ps = rps; - kv_patch_boot_state(adev, ps); - } - if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - adev->pm.dpm.uvd_ps = rps; -} - -static void kv_parse_pplib_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, int index, - union pplib_clock_info *clock_info) -{ - struct kv_power_info *pi = kv_get_pi(adev); - struct kv_ps *ps = kv_get_ps(rps); - struct kv_pl *pl = &ps->levels[index]; - u32 sclk; - - sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); - sclk |= clock_info->sumo.ucEngineClockHigh << 16; - pl->sclk = sclk; - pl->vddc_index = clock_info->sumo.vddcIndex; - - ps->num_levels = index + 1; - - if (pi->caps_sclk_ds) { - pl->ds_divider_index = 5; - pl->ss_divider_index = 5; - } -} - -static int kv_parse_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; - union pplib_power_state *power_state; - int i, j, k, non_clock_array_index, clock_array_index; - union pplib_clock_info *clock_info; - struct _StateArray *state_array; - struct _ClockInfoArray *clock_info_array; - struct _NonClockInfoArray *non_clock_info_array; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - u8 *power_state_offset; - struct kv_ps *ps; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - amdgpu_add_thermal_controller(adev); - - state_array = (struct _StateArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usStateArrayOffset)); - clock_info_array = (struct _ClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); - non_clock_info_array = (struct _NonClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - - adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, - sizeof(struct amdgpu_ps), - GFP_KERNEL); - if (!adev->pm.dpm.ps) - return -ENOMEM; - power_state_offset = (u8 *)state_array->states; - for (i = 0; i < state_array->ucNumEntries; i++) { - u8 *idx; - power_state = (union pplib_power_state *)power_state_offset; - non_clock_array_index = power_state->v2.nonClockInfoIndex; - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - &non_clock_info_array->nonClockInfo[non_clock_array_index]; - ps = kzalloc(sizeof(struct kv_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(adev->pm.dpm.ps); - return -ENOMEM; 
- } - adev->pm.dpm.ps[i].ps_priv = ps; - k = 0; - idx = (u8 *)&power_state->v2.clockInfoIndex[0]; - for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = idx[j]; - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; - if (k >= SUMO_MAX_HARDWARE_POWERLEVELS) - break; - clock_info = (union pplib_clock_info *) - ((u8 *)&clock_info_array->clockInfo[0] + - (clock_array_index * clock_info_array->ucEntrySize)); - kv_parse_pplib_clock_info(adev, - &adev->pm.dpm.ps[i], k, - clock_info); - k++; - } - kv_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], - non_clock_info, - non_clock_info_array->ucEntrySize); - power_state_offset += 2 + power_state->v2.ucNumDPMLevels; - } - adev->pm.dpm.num_ps = state_array->ucNumEntries; - - /* fill in the vce power states */ - for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { - u32 sclk; - clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; - clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; - sclk = le16_to_cpu(clock_info->sumo.usEngineClockLow); - sclk |= clock_info->sumo.ucEngineClockHigh << 16; - adev->pm.dpm.vce_states[i].sclk = sclk; - adev->pm.dpm.vce_states[i].mclk = 0; - } - - return 0; -} - -static int kv_dpm_init(struct amdgpu_device *adev) -{ - struct kv_power_info *pi; - int ret, i; - - pi = kzalloc(sizeof(struct kv_power_info), GFP_KERNEL); - if (pi == NULL) - return -ENOMEM; - adev->pm.dpm.priv = pi; - - ret = amdgpu_get_platform_caps(adev); - if (ret) - return ret; - - ret = amdgpu_parse_extended_power_table(adev); - if (ret) - return ret; - - for (i = 0; i < SUMO_MAX_HARDWARE_POWERLEVELS; i++) - pi->at[i] = TRINITY_AT_DFLT; - - pi->sram_end = SMC_RAM_END; - - pi->enable_nb_dpm = true; - - pi->caps_power_containment = true; - pi->caps_cac = true; - pi->enable_didt = false; - if (pi->enable_didt) { - pi->caps_sq_ramping = true; - pi->caps_db_ramping = true; - pi->caps_td_ramping = true; - pi->caps_tcp_ramping = true; - } - - if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK) - pi->caps_sclk_ds = true; - else - pi->caps_sclk_ds = false; - - pi->enable_auto_thermal_throttling = true; - pi->disable_nb_ps3_in_battery = false; - if (amdgpu_bapm == 0) - pi->bapm_enable = false; - else - pi->bapm_enable = true; - pi->voltage_drop_t = 0; - pi->caps_sclk_throttle_low_notification = false; - pi->caps_fps = false; /* true? */ - pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false; - pi->caps_uvd_dpm = true; - pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false; - pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false; - pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? 
true : false; - pi->caps_stable_p_state = false; - - ret = kv_parse_sys_info_table(adev); - if (ret) - return ret; - - kv_patch_voltage_values(adev); - kv_construct_boot_state(adev); - - ret = kv_parse_power_table(adev); - if (ret) - return ret; - - pi->enable_dpm = true; - - return 0; -} - -static void -kv_dpm_debugfs_print_current_performance_level(void *handle, - struct seq_file *m) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - u32 current_index = - (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; - u32 sclk, tmp; - u16 vddc; - - if (current_index >= SMU__NUM_SCLK_DPM_STATE) { - seq_printf(m, "invalid dpm profile %d\n", current_index); - } else { - sclk = be32_to_cpu(pi->graphics_level[current_index].SclkFrequency); - tmp = (RREG32_SMC(ixSMU_VOLTAGE_STATUS) & - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL_MASK) >> - SMU_VOLTAGE_STATUS__SMU_VOLTAGE_CURRENT_LEVEL__SHIFT; - vddc = kv_convert_8bit_index_to_voltage(adev, (u16)tmp); - seq_printf(m, "uvd %sabled\n", pi->uvd_power_gated ? "dis" : "en"); - seq_printf(m, "vce %sabled\n", pi->vce_power_gated ? "dis" : "en"); - seq_printf(m, "power level %d sclk: %u vddc: %u\n", - current_index, sclk, vddc); - } -} - -static void -kv_dpm_print_power_state(void *handle, void *request_ps) -{ - int i; - struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; - struct kv_ps *ps = kv_get_ps(rps); - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - printk("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - for (i = 0; i < ps->num_levels; i++) { - struct kv_pl *pl = &ps->levels[i]; - printk("\t\tpower level %d sclk: %u vddc: %u\n", - i, pl->sclk, - kv_convert_8bit_index_to_voltage(adev, pl->vddc_index)); - } - amdgpu_dpm_print_ps_status(adev, rps); -} - -static void kv_dpm_fini(struct amdgpu_device *adev) -{ - int i; - - for (i = 0; i < adev->pm.dpm.num_ps; i++) { - kfree(adev->pm.dpm.ps[i].ps_priv); - } - kfree(adev->pm.dpm.ps); - kfree(adev->pm.dpm.priv); - amdgpu_free_extended_power_table(adev); -} - -static void kv_dpm_display_configuration_changed(void *handle) -{ - -} - -static u32 kv_dpm_get_sclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - struct kv_ps *requested_state = kv_get_ps(&pi->requested_rps); - - if (low) - return requested_state->levels[0].sclk; - else - return requested_state->levels[requested_state->num_levels - 1].sclk; -} - -static u32 kv_dpm_get_mclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - - return pi->sys_info.bootup_uma_clk; -} - -/* get temperature in millidegrees */ -static int kv_dpm_get_temp(void *handle) -{ - u32 temp; - int actual_temp = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - temp = RREG32_SMC(0xC0300E0C); - - if (temp) - actual_temp = (temp / 8) - 49; - else - actual_temp = 0; - - actual_temp = actual_temp * 1000; - - return actual_temp; -} - -static int kv_dpm_early_init(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - adev->powerplay.pp_funcs = &kv_dpm_funcs; - adev->powerplay.pp_handle = adev; - kv_dpm_set_irq_funcs(adev); - - return 0; -} - -static int 
kv_dpm_late_init(void *handle) -{ - /* powerdown unused blocks for now */ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - kv_dpm_powergate_acp(adev, true); - kv_dpm_powergate_samu(adev, true); - - return 0; -} - -static int kv_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, - &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, - &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - /* default to balanced state */ - adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; - adev->pm.default_sclk = adev->clock.default_sclk; - adev->pm.default_mclk = adev->clock.default_mclk; - adev->pm.current_sclk = adev->clock.default_sclk; - adev->pm.current_mclk = adev->clock.default_mclk; - adev->pm.int_thermal_type = THERMAL_TYPE_NONE; - - if (amdgpu_dpm == 0) - return 0; - - INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); - mutex_lock(&adev->pm.mutex); - ret = kv_dpm_init(adev); - if (ret) - goto dpm_failed; - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - if (amdgpu_dpm == 1) - amdgpu_pm_print_power_states(adev); - mutex_unlock(&adev->pm.mutex); - DRM_INFO("amdgpu: dpm initialized\n"); - - return 0; - -dpm_failed: - kv_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - DRM_ERROR("amdgpu: dpm initialization failed\n"); - return ret; -} - -static int kv_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - flush_work(&adev->pm.dpm.thermal.work); - - mutex_lock(&adev->pm.mutex); - kv_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - - return 0; -} - -static int kv_dpm_hw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!amdgpu_dpm) - return 0; - - mutex_lock(&adev->pm.mutex); - kv_dpm_setup_asic(adev); - ret = kv_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); - amdgpu_legacy_dpm_compute_clocks(adev); - return ret; -} - -static int kv_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - kv_dpm_disable(adev); - mutex_unlock(&adev->pm.mutex); - } - - return 0; -} - -static int kv_dpm_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - /* disable dpm */ - kv_dpm_disable(adev); - /* reset the power state */ - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - mutex_unlock(&adev->pm.mutex); - } - return 0; -} - -static int kv_dpm_resume(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - /* asic init will reset to the boot state */ - mutex_lock(&adev->pm.mutex); - kv_dpm_setup_asic(adev); - ret = kv_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); - if (adev->pm.dpm_enabled) - amdgpu_legacy_dpm_compute_clocks(adev); - } - return 0; -} - -static bool kv_dpm_is_idle(void *handle) -{ - return true; -} - -static int kv_dpm_wait_for_idle(void 
*handle) -{ - return 0; -} - - -static int kv_dpm_soft_reset(void *handle) -{ - return 0; -} - -static int kv_dpm_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *src, - unsigned type, - enum amdgpu_interrupt_state state) -{ - u32 cg_thermal_int; - - switch (type) { - case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); - break; - default: - break; - } - break; - - case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT_CTRL); - cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK; - WREG32_SMC(ixCG_THERMAL_INT_CTRL, cg_thermal_int); - break; - default: - break; - } - break; - - default: - break; - } - return 0; -} - -static int kv_dpm_process_interrupt(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - bool queue_thermal = false; - - if (entry == NULL) - return -EINVAL; - - switch (entry->src_id) { - case 230: /* thermal low to high */ - DRM_DEBUG("IH: thermal low to high\n"); - adev->pm.dpm.thermal.high_to_low = false; - queue_thermal = true; - break; - case 231: /* thermal high to low */ - DRM_DEBUG("IH: thermal high to low\n"); - adev->pm.dpm.thermal.high_to_low = true; - queue_thermal = true; - break; - default: - break; - } - - if (queue_thermal) - schedule_work(&adev->pm.dpm.thermal.work); - - return 0; -} - -static int kv_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int kv_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -static inline bool kv_are_power_levels_equal(const struct kv_pl *kv_cpl1, - const struct kv_pl *kv_cpl2) -{ - return ((kv_cpl1->sclk == kv_cpl2->sclk) && - (kv_cpl1->vddc_index == kv_cpl2->vddc_index) && - (kv_cpl1->ds_divider_index == kv_cpl2->ds_divider_index) && - (kv_cpl1->force_nbp_state == kv_cpl2->force_nbp_state)); -} - -static int kv_check_state_equal(void *handle, - void *current_ps, - void *request_ps, - bool *equal) -{ - struct kv_ps *kv_cps; - struct kv_ps *kv_rps; - int i; - struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; - struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) - return -EINVAL; - - kv_cps = kv_get_ps(cps); - kv_rps = kv_get_ps(rps); - - if (kv_cps == NULL) { - *equal = false; - return 0; - } - - if (kv_cps->num_levels != kv_rps->num_levels) { - *equal = false; - return 0; - } - - for (i = 0; i < kv_cps->num_levels; i++) { - if (!kv_are_power_levels_equal(&(kv_cps->levels[i]), - &(kv_rps->levels[i]))) { - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); - *equal &= 
((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); - - return 0; -} - -static int kv_dpm_read_sensor(void *handle, int idx, - void *value, int *size) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct kv_power_info *pi = kv_get_pi(adev); - uint32_t sclk; - u32 pl_index = - (RREG32_SMC(ixTARGET_AND_CURRENT_PROFILE_INDEX) & - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX_MASK) >> - TARGET_AND_CURRENT_PROFILE_INDEX__CURR_SCLK_INDEX__SHIFT; - - /* size must be at least 4 bytes for all sensors */ - if (*size < 4) - return -EINVAL; - - switch (idx) { - case AMDGPU_PP_SENSOR_GFX_SCLK: - if (pl_index < SMU__NUM_SCLK_DPM_STATE) { - sclk = be32_to_cpu( - pi->graphics_level[pl_index].SclkFrequency); - *((uint32_t *)value) = sclk; - *size = 4; - return 0; - } - return -EINVAL; - case AMDGPU_PP_SENSOR_GPU_TEMP: - *((uint32_t *)value) = kv_dpm_get_temp(adev); - *size = 4; - return 0; - default: - return -EOPNOTSUPP; - } -} - -static int kv_set_powergating_by_smu(void *handle, - uint32_t block_type, bool gate) -{ - switch (block_type) { - case AMD_IP_BLOCK_TYPE_UVD: - kv_dpm_powergate_uvd(handle, gate); - break; - case AMD_IP_BLOCK_TYPE_VCE: - kv_dpm_powergate_vce(handle, gate); - break; - default: - break; - } - return 0; -} - -static const struct amd_ip_funcs kv_dpm_ip_funcs = { - .name = "kv_dpm", - .early_init = kv_dpm_early_init, - .late_init = kv_dpm_late_init, - .sw_init = kv_dpm_sw_init, - .sw_fini = kv_dpm_sw_fini, - .hw_init = kv_dpm_hw_init, - .hw_fini = kv_dpm_hw_fini, - .suspend = kv_dpm_suspend, - .resume = kv_dpm_resume, - .is_idle = kv_dpm_is_idle, - .wait_for_idle = kv_dpm_wait_for_idle, - .soft_reset = kv_dpm_soft_reset, - .set_clockgating_state = kv_dpm_set_clockgating_state, - .set_powergating_state = kv_dpm_set_powergating_state, -}; - -const struct amdgpu_ip_block_version kv_smu_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 1, - .minor = 0, - .rev = 0, - .funcs = &kv_dpm_ip_funcs, -}; - -static const struct amd_pm_funcs kv_dpm_funcs = { - .pre_set_power_state = &kv_dpm_pre_set_power_state, - .set_power_state = &kv_dpm_set_power_state, - .post_set_power_state = &kv_dpm_post_set_power_state, - .display_configuration_changed = &kv_dpm_display_configuration_changed, - .get_sclk = &kv_dpm_get_sclk, - .get_mclk = &kv_dpm_get_mclk, - .print_power_state = &kv_dpm_print_power_state, - .debugfs_print_current_performance_level = &kv_dpm_debugfs_print_current_performance_level, - .force_performance_level = &kv_dpm_force_performance_level, - .set_powergating_by_smu = kv_set_powergating_by_smu, - .enable_bapm = &kv_dpm_enable_bapm, - .get_vce_clock_state = amdgpu_get_vce_clock_state, - .check_state_equal = kv_check_state_equal, - .read_sensor = &kv_dpm_read_sensor, - .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, -}; - -static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = { - .set = kv_dpm_set_interrupt_state, - .process = kv_dpm_process_interrupt, -}; - -static void kv_dpm_set_irq_funcs(struct amdgpu_device *adev) -{ - adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; - adev->pm.dpm.thermal.irq.funcs = &kv_dpm_irq_funcs; -} diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h deleted file mode 100644 index 6df0ed41317c..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h +++ /dev/null @@ -1,229 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __KV_DPM_H__ -#define __KV_DPM_H__ - -#define SMU__NUM_SCLK_DPM_STATE 8 -#define SMU__NUM_MCLK_DPM_LEVELS 4 -#define SMU__NUM_LCLK_DPM_LEVELS 8 -#define SMU__NUM_PCIE_DPM_LEVELS 0 /* ??? */ -#include "smu7_fusion.h" -#include "ppsmc.h" - -#define SUMO_MAX_HARDWARE_POWERLEVELS 5 - -#define SUMO_MAX_NUMBER_VOLTAGES 4 - -struct sumo_vid_mapping_entry { - u16 vid_2bit; - u16 vid_7bit; -}; - -struct sumo_vid_mapping_table { - u32 num_entries; - struct sumo_vid_mapping_entry entries[SUMO_MAX_NUMBER_VOLTAGES]; -}; - -struct sumo_sclk_voltage_mapping_entry { - u32 sclk_frequency; - u16 vid_2bit; - u16 rsv; -}; - -struct sumo_sclk_voltage_mapping_table { - u32 num_max_dpm_entries; - struct sumo_sclk_voltage_mapping_entry entries[SUMO_MAX_HARDWARE_POWERLEVELS]; -}; - -#define TRINITY_AT_DFLT 30 - -#define KV_NUM_NBPSTATES 4 - -enum kv_pt_config_reg_type { - KV_CONFIGREG_MMR = 0, - KV_CONFIGREG_SMC_IND, - KV_CONFIGREG_DIDT_IND, - KV_CONFIGREG_CACHE, - KV_CONFIGREG_MAX -}; - -struct kv_pt_config_reg { - u32 offset; - u32 mask; - u32 shift; - u32 value; - enum kv_pt_config_reg_type type; -}; - -struct kv_lcac_config_values { - u32 block_id; - u32 signal_id; - u32 t; -}; - -struct kv_lcac_config_reg { - u32 cntl; - u32 block_mask; - u32 block_shift; - u32 signal_mask; - u32 signal_shift; - u32 t_mask; - u32 t_shift; - u32 enable_mask; - u32 enable_shift; -}; - -struct kv_pl { - u32 sclk; - u8 vddc_index; - u8 ds_divider_index; - u8 ss_divider_index; - u8 allow_gnb_slow; - u8 force_nbp_state; - u8 display_wm; - u8 vce_wm; -}; - -struct kv_ps { - struct kv_pl levels[SUMO_MAX_HARDWARE_POWERLEVELS]; - u32 num_levels; - bool need_dfs_bypass; - u8 dpm0_pg_nb_ps_lo; - u8 dpm0_pg_nb_ps_hi; - u8 dpmx_nb_ps_lo; - u8 dpmx_nb_ps_hi; -}; - -struct kv_sys_info { - u32 bootup_uma_clk; - u32 bootup_sclk; - u32 dentist_vco_freq; - u32 nb_dpm_enable; - u32 nbp_memory_clock[KV_NUM_NBPSTATES]; - u32 nbp_n_clock[KV_NUM_NBPSTATES]; - u16 bootup_nb_voltage_index; - u8 htc_tmp_lmt; - u8 htc_hyst_lmt; - struct sumo_sclk_voltage_mapping_table sclk_voltage_mapping_table; - struct sumo_vid_mapping_table vid_mapping_table; - u32 uma_channel_number; -}; - -struct kv_power_info { - u32 at[SUMO_MAX_HARDWARE_POWERLEVELS]; - u32 voltage_drop_t; - struct kv_sys_info sys_info; - struct kv_pl boot_pl; - bool enable_nb_ps_policy; - bool disable_nb_ps3_in_battery; - bool video_start; - bool battery_state; - u32 lowest_valid; - u32 highest_valid; - 
u16 high_voltage_t; - bool cac_enabled; - bool bapm_enable; - /* smc offsets */ - u32 sram_end; - u32 dpm_table_start; - u32 soft_regs_start; - /* dpm SMU tables */ - u8 graphics_dpm_level_count; - u8 uvd_level_count; - u8 vce_level_count; - u8 acp_level_count; - u8 samu_level_count; - u16 fps_high_t; - SMU7_Fusion_GraphicsLevel graphics_level[SMU__NUM_SCLK_DPM_STATE]; - SMU7_Fusion_ACPILevel acpi_level; - SMU7_Fusion_UvdLevel uvd_level[SMU7_MAX_LEVELS_UVD]; - SMU7_Fusion_ExtClkLevel vce_level[SMU7_MAX_LEVELS_VCE]; - SMU7_Fusion_ExtClkLevel acp_level[SMU7_MAX_LEVELS_ACP]; - SMU7_Fusion_ExtClkLevel samu_level[SMU7_MAX_LEVELS_SAMU]; - u8 uvd_boot_level; - u8 vce_boot_level; - u8 acp_boot_level; - u8 samu_boot_level; - u8 uvd_interval; - u8 vce_interval; - u8 acp_interval; - u8 samu_interval; - u8 graphics_boot_level; - u8 graphics_interval; - u8 graphics_therm_throttle_enable; - u8 graphics_voltage_change_enable; - u8 graphics_clk_slow_enable; - u8 graphics_clk_slow_divider; - u8 fps_low_t; - u32 low_sclk_interrupt_t; - bool uvd_power_gated; - bool vce_power_gated; - bool acp_power_gated; - bool samu_power_gated; - bool nb_dpm_enabled; - /* flags */ - bool enable_didt; - bool enable_dpm; - bool enable_auto_thermal_throttling; - bool enable_nb_dpm; - /* caps */ - bool caps_cac; - bool caps_power_containment; - bool caps_sq_ramping; - bool caps_db_ramping; - bool caps_td_ramping; - bool caps_tcp_ramping; - bool caps_sclk_throttle_low_notification; - bool caps_fps; - bool caps_uvd_dpm; - bool caps_uvd_pg; - bool caps_vce_pg; - bool caps_samu_pg; - bool caps_acp_pg; - bool caps_stable_p_state; - bool caps_enable_dfs_bypass; - bool caps_sclk_ds; - struct amdgpu_ps current_rps; - struct kv_ps current_ps; - struct amdgpu_ps requested_rps; - struct kv_ps requested_ps; -}; - -/* XXX are these ok? */ -#define KV_TEMP_RANGE_MIN (90 * 1000) -#define KV_TEMP_RANGE_MAX (120 * 1000) - -/* kv_smc.c */ -int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id); -int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask); -int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, u32 parameter); -int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit); -int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable); -int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable); -int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit); - -#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c b/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c deleted file mode 100644 index 2d9ab6b8be66..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c +++ /dev/null @@ -1,218 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Alex Deucher - */ - -#include "amdgpu.h" -#include "cikd.h" -#include "kv_dpm.h" - -#include "smu/smu_7_0_0_d.h" -#include "smu/smu_7_0_0_sh_mask.h" - -int amdgpu_kv_notify_message_to_smu(struct amdgpu_device *adev, u32 id) -{ - u32 i; - u32 tmp = 0; - - WREG32(mmSMC_MESSAGE_0, id & SMC_MESSAGE_0__SMC_MSG_MASK); - - for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK) != 0) - break; - udelay(1); - } - tmp = RREG32(mmSMC_RESP_0) & SMC_RESP_0__SMC_RESP_MASK; - - if (tmp != 1) { - if (tmp == 0xFF) - return -EINVAL; - else if (tmp == 0xFE) - return -EINVAL; - } - - return 0; -} - -int amdgpu_kv_dpm_get_enable_mask(struct amdgpu_device *adev, u32 *enable_mask) -{ - int ret; - - ret = amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_SCLKDPM_GetEnabledMask); - - if (ret == 0) - *enable_mask = RREG32_SMC(ixSMC_SYSCON_MSG_ARG_0); - - return ret; -} - -int amdgpu_kv_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, u32 parameter) -{ - - WREG32(mmSMC_MSG_ARG_0, parameter); - - return amdgpu_kv_notify_message_to_smu(adev, msg); -} - -static int kv_set_smc_sram_address(struct amdgpu_device *adev, - u32 smc_address, u32 limit) -{ - if (smc_address & 3) - return -EINVAL; - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(mmSMC_IND_INDEX_0, smc_address); - WREG32_P(mmSMC_IND_ACCESS_CNTL, 0, - ~SMC_IND_ACCESS_CNTL__AUTO_INCREMENT_IND_0_MASK); - - return 0; -} - -int amdgpu_kv_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit) -{ - int ret; - - ret = kv_set_smc_sram_address(adev, smc_address, limit); - if (ret) - return ret; - - *value = RREG32(mmSMC_IND_DATA_0); - return 0; -} - -int amdgpu_kv_smc_dpm_enable(struct amdgpu_device *adev, bool enable) -{ - if (enable) - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Enable); - else - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DPM_Disable); -} - -int amdgpu_kv_smc_bapm_enable(struct amdgpu_device *adev, bool enable) -{ - if (enable) - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_EnableBAPM); - else - return amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_DisableBAPM); -} - -int amdgpu_kv_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit) -{ - int ret; - u32 data, original_data, addr, extra_shift, t_byte, count, mask; - - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - t_byte = addr & 3; - - /* RMW for the initial bytes */ - if (t_byte != 0) { - addr -= t_byte; - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - original_data = RREG32(mmSMC_IND_DATA_0); - - data = 0; - mask = 0; - count = 4; - while (count > 0) { - if (t_byte > 0) { - mask = (mask << 8) | 0xff; - t_byte--; - } else if (byte_count > 0) { - data = (data << 8) + *src++; - byte_count--; - mask <<= 8; - } else { - data <<= 8; - mask = (mask << 8) | 0xff; - } - count--; - } - - data |= original_data & 
mask; - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - WREG32(mmSMC_IND_DATA_0, data); - - addr += 4; - } - - while (byte_count >= 4) { - /* SMC address space is BE */ - data = (src[0] << 24) + (src[1] << 16) + (src[2] << 8) + src[3]; - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - WREG32(mmSMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - /* RMW for the final bytes */ - if (byte_count > 0) { - data = 0; - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - original_data = RREG32(mmSMC_IND_DATA_0); - - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - /* SMC address space is BE */ - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - - data |= (original_data & ~((~0UL) << extra_shift)); - - ret = kv_set_smc_sram_address(adev, addr, limit); - if (ret) - return ret; - - WREG32(mmSMC_IND_DATA_0, data); - } - return 0; -} - diff --git a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c deleted file mode 100644 index 3c6ee493e410..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.c +++ /dev/null @@ -1,1080 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- */ - -#include "amdgpu.h" -#include "amdgpu_i2c.h" -#include "amdgpu_atombios.h" -#include "atom.h" -#include "amd_pcie.h" -#include "legacy_dpm.h" -#include "amdgpu_dpm_internal.h" -#include "amdgpu_display.h" - -#define amdgpu_dpm_pre_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_post_set_power_state(adev) \ - ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_display_configuration_changed(adev) \ - ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_print_power_state(adev, ps) \ - ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps))) - -#define amdgpu_dpm_vblank_too_short(adev) \ - ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle)) - -#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \ - ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal))) - -void amdgpu_dpm_print_class_info(u32 class, u32 class2) -{ - const char *s; - - switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) { - case ATOM_PPLIB_CLASSIFICATION_UI_NONE: - default: - s = "none"; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY: - s = "battery"; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED: - s = "balanced"; - break; - case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE: - s = "performance"; - break; - } - printk("\tui class: %s\n", s); - printk("\tinternal class:"); - if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) && - (class2 == 0)) - pr_cont(" none"); - else { - if (class & ATOM_PPLIB_CLASSIFICATION_BOOT) - pr_cont(" boot"); - if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - pr_cont(" thermal"); - if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE) - pr_cont(" limited_pwr"); - if (class & ATOM_PPLIB_CLASSIFICATION_REST) - pr_cont(" rest"); - if (class & ATOM_PPLIB_CLASSIFICATION_FORCED) - pr_cont(" forced"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - pr_cont(" 3d_perf"); - if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE) - pr_cont(" ovrdrv"); - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - pr_cont(" uvd"); - if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW) - pr_cont(" 3d_low"); - if (class & ATOM_PPLIB_CLASSIFICATION_ACPI) - pr_cont(" acpi"); - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - pr_cont(" uvd_hd2"); - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - pr_cont(" uvd_hd"); - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - pr_cont(" uvd_sd"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2) - pr_cont(" limited_pwr2"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - pr_cont(" ulv"); - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - pr_cont(" uvd_mvc"); - } - pr_cont("\n"); -} - -void amdgpu_dpm_print_cap_info(u32 caps) -{ - printk("\tcaps:"); - if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) - pr_cont(" single_disp"); - if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK) - pr_cont(" video"); - if (caps & ATOM_PPLIB_DISALLOW_ON_DC) - pr_cont(" no_dc"); - pr_cont("\n"); -} - -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - printk("\tstatus:"); - if (rps == adev->pm.dpm.current_ps) - pr_cont(" c"); - if (rps == adev->pm.dpm.requested_ps) - pr_cont(" r"); - if (rps == adev->pm.dpm.boot_ps) - pr_cont(" b"); - pr_cont("\n"); -} - -void amdgpu_pm_print_power_states(struct amdgpu_device *adev) -{ - int i; - - if 
(adev->powerplay.pp_funcs->print_power_state == NULL) - return; - - for (i = 0; i < adev->pm.dpm.num_ps; i++) - amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); - -} - -union power_info { - struct _ATOM_POWERPLAY_INFO info; - struct _ATOM_POWERPLAY_INFO_V2 info_2; - struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; - struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; - struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; -}; - -int amdgpu_get_platform_caps(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps); - adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime); - adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime); - - return 0; -} - -union fan_info { - struct _ATOM_PPLIB_FANTABLE fan; - struct _ATOM_PPLIB_FANTABLE2 fan2; - struct _ATOM_PPLIB_FANTABLE3 fan3; -}; - -static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table, - ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table) -{ - u32 size = atom_table->ucNumEntries * - sizeof(struct amdgpu_clock_voltage_dependency_entry); - int i; - ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry; - - amdgpu_table->entries = kzalloc(size, GFP_KERNEL); - if (!amdgpu_table->entries) - return -ENOMEM; - - entry = &atom_table->entries[0]; - for (i = 0; i < atom_table->ucNumEntries; i++) { - amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) | - (entry->ucClockHigh << 16); - amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record)); - } - amdgpu_table->count = atom_table->ucNumEntries; - - return 0; -} - -/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26 - -int amdgpu_parse_extended_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - union power_info *power_info; - union fan_info *fan_info; - ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - int ret, i; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - /* fan table */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { - if (power_info->pplib3.usFanTableOffset) { - fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset + - 
le16_to_cpu(power_info->pplib3.usFanTableOffset)); - adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst; - adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin); - adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed); - adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh); - adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin); - adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed); - adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh); - if (fan_info->fan.ucFanTableFormat >= 2) - adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax); - else - adev->pm.dpm.fan.t_max = 10900; - adev->pm.dpm.fan.cycle_delay = 100000; - if (fan_info->fan.ucFanTableFormat >= 3) { - adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode; - adev->pm.dpm.fan.default_max_fan_pwm = - le16_to_cpu(fan_info->fan3.usFanPWMMax); - adev->pm.dpm.fan.default_fan_output_sensitivity = 4836; - adev->pm.dpm.fan.fan_output_sensitivity = - le16_to_cpu(fan_info->fan3.usFanOutputSensitivity); - } - adev->pm.dpm.fan.ucode_fan_control = true; - } - } - - /* clock dependancy tables, shedding tables */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) { - if (power_info->pplib4.usVddcDependencyOnSCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usVddciDependencyOnMCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usVddcDependencyOnMCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usMvddDependencyOnMCLKOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset)); - ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk, - dep_table); - if (ret) { - amdgpu_free_extended_power_table(adev); - return ret; - } - } - if (power_info->pplib4.usMaxClockVoltageOnDCOffset) { - ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v = - (ATOM_PPLIB_Clock_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset)); - if (clk_v->ucNumEntries) { - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk = - le16_to_cpu(clk_v->entries[0].usSclkLow) | - (clk_v->entries[0].ucSclkHigh << 16); - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk = - le16_to_cpu(clk_v->entries[0].usMclkLow) | - (clk_v->entries[0].ucMclkHigh << 16); - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc = - 
le16_to_cpu(clk_v->entries[0].usVddc); - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci = - le16_to_cpu(clk_v->entries[0].usVddci); - } - } - if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) { - ATOM_PPLIB_PhaseSheddingLimits_Table *psl = - (ATOM_PPLIB_PhaseSheddingLimits_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset)); - ATOM_PPLIB_PhaseSheddingLimits_Record *entry; - - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries = - kcalloc(psl->ucNumEntries, - sizeof(struct amdgpu_phase_shedding_limits_entry), - GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - - entry = &psl->entries[0]; - for (i = 0; i < psl->ucNumEntries; i++) { - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk = - le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16); - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk = - le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16); - adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record)); - } - adev->pm.dpm.dyn_state.phase_shedding_limits_table.count = - psl->ucNumEntries; - } - } - - /* cac data */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) { - adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit); - adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit); - adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit; - adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit); - if (adev->pm.dpm.tdp_od_limit) - adev->pm.dpm.power_control = true; - else - adev->pm.dpm.power_control = false; - adev->pm.dpm.tdp_adjustment = 0; - adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold); - adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage); - adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope); - if (power_info->pplib5.usCACLeakageTableOffset) { - ATOM_PPLIB_CAC_Leakage_Table *cac_table = - (ATOM_PPLIB_CAC_Leakage_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset)); - ATOM_PPLIB_CAC_Leakage_Record *entry; - u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table); - adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - entry = &cac_table->entries[0]; - for (i = 0; i < cac_table->ucNumEntries; i++) { - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) { - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 = - le16_to_cpu(entry->usVddc1); - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 = - le16_to_cpu(entry->usVddc2); - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 = - le16_to_cpu(entry->usVddc3); - } else { - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc = - le16_to_cpu(entry->usVddc); - adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage = - le32_to_cpu(entry->ulLeakageValue); - } - entry = (ATOM_PPLIB_CAC_Leakage_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record)); - } - 
adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries; - } - } - - /* ext tables */ - if (le16_to_cpu(power_info->pplib.usTableSize) >= - sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) { - ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset)); - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) && - ext_hdr->usVCETableOffset) { - VCEClockInfoArray *array = (VCEClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usVCETableOffset) + 1); - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits = - (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + - 1 + array->ucNumEntries * sizeof(VCEClockInfo)); - ATOM_PPLIB_VCE_State_Table *states = - (ATOM_PPLIB_VCE_State_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usVCETableOffset) + 1 + - 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) + - 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record))); - ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry; - ATOM_PPLIB_VCE_State_Record *state_entry; - VCEClockInfo *vce_clk; - u32 size = limits->numEntries * - sizeof(struct amdgpu_vce_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - state_entry = &states->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - vce_clk = (VCEClockInfo *) - ((u8 *)&array->entries[0] + - (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk = - le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk = - le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); - adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)); - } - adev->pm.dpm.num_of_vce_states = - states->numEntries > AMD_MAX_VCE_LEVELS ? 
- AMD_MAX_VCE_LEVELS : states->numEntries; - for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) { - vce_clk = (VCEClockInfo *) - ((u8 *)&array->entries[0] + - (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo))); - adev->pm.dpm.vce_states[i].evclk = - le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16); - adev->pm.dpm.vce_states[i].ecclk = - le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16); - adev->pm.dpm.vce_states[i].clk_idx = - state_entry->ucClockInfoIndex & 0x3f; - adev->pm.dpm.vce_states[i].pstate = - (state_entry->ucClockInfoIndex & 0xc0) >> 6; - state_entry = (ATOM_PPLIB_VCE_State_Record *) - ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) && - ext_hdr->usUVDTableOffset) { - UVDClockInfoArray *array = (UVDClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usUVDTableOffset) + 1); - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits = - (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 + - 1 + (array->ucNumEntries * sizeof (UVDClockInfo))); - ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry; - u32 size = limits->numEntries * - sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - UVDClockInfo *uvd_clk = (UVDClockInfo *) - ((u8 *)&array->entries[0] + - (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo))); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk = - le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk = - le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16); - adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) && - ext_hdr->usSAMUTableOffset) { - ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits = - (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1); - ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry; - u32 size = limits->numEntries * - sizeof(struct amdgpu_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk = - le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16); - adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = 
(ATOM_PPLIB_SAMClk_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) && - ext_hdr->usPPMTableOffset) { - ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPPMTableOffset)); - adev->pm.dpm.dyn_state.ppm_table = - kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.ppm_table) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign; - adev->pm.dpm.dyn_state.ppm_table->cpu_core_number = - le16_to_cpu(ppm->usCpuCoreNumber); - adev->pm.dpm.dyn_state.ppm_table->platform_tdp = - le32_to_cpu(ppm->ulPlatformTDP); - adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp = - le32_to_cpu(ppm->ulSmallACPlatformTDP); - adev->pm.dpm.dyn_state.ppm_table->platform_tdc = - le32_to_cpu(ppm->ulPlatformTDC); - adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc = - le32_to_cpu(ppm->ulSmallACPlatformTDC); - adev->pm.dpm.dyn_state.ppm_table->apu_tdp = - le32_to_cpu(ppm->ulApuTDP); - adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp = - le32_to_cpu(ppm->ulDGpuTDP); - adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power = - le32_to_cpu(ppm->ulDGpuUlvPower); - adev->pm.dpm.dyn_state.ppm_table->tj_max = - le32_to_cpu(ppm->ulTjmax); - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) && - ext_hdr->usACPTableOffset) { - ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits = - (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usACPTableOffset) + 1); - ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry; - u32 size = limits->numEntries * - sizeof(struct amdgpu_clock_voltage_dependency_entry); - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries = - kzalloc(size, GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count = - limits->numEntries; - entry = &limits->entries[0]; - for (i = 0; i < limits->numEntries; i++) { - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk = - le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16); - adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v = - le16_to_cpu(entry->usVoltage); - entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *) - ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record)); - } - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) && - ext_hdr->usPowerTuneTableOffset) { - u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); - ATOM_PowerTune_Table *pt; - adev->pm.dpm.dyn_state.cac_tdp_table = - kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.cac_tdp_table) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - if (rev > 0) { - ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); - adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = - ppt->usMaximumPowerDeliveryLimit; - pt = &ppt->power_tune_table; - } else { - ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *) - 
(mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usPowerTuneTableOffset)); - adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255; - pt = &ppt->power_tune_table; - } - adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP); - adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp = - le16_to_cpu(pt->usConfigurableTDP); - adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC); - adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit = - le16_to_cpu(pt->usBatteryPowerLimit); - adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit = - le16_to_cpu(pt->usSmallPowerLimit); - adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage = - le16_to_cpu(pt->usLowCACLeakage); - adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage = - le16_to_cpu(pt->usHighCACLeakage); - } - if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) && - ext_hdr->usSclkVddgfxTableOffset) { - dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset)); - ret = amdgpu_parse_clk_voltage_dep_table( - &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk, - dep_table); - if (ret) { - kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries); - return ret; - } - } - } - - return 0; -} - -void amdgpu_free_extended_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state; - - kfree(dyn_state->vddc_dependency_on_sclk.entries); - kfree(dyn_state->vddci_dependency_on_mclk.entries); - kfree(dyn_state->vddc_dependency_on_mclk.entries); - kfree(dyn_state->mvdd_dependency_on_mclk.entries); - kfree(dyn_state->cac_leakage_table.entries); - kfree(dyn_state->phase_shedding_limits_table.entries); - kfree(dyn_state->ppm_table); - kfree(dyn_state->cac_tdp_table); - kfree(dyn_state->vce_clock_voltage_dependency_table.entries); - kfree(dyn_state->uvd_clock_voltage_dependency_table.entries); - kfree(dyn_state->samu_clock_voltage_dependency_table.entries); - kfree(dyn_state->acp_clock_voltage_dependency_table.entries); - kfree(dyn_state->vddgfx_dependency_on_sclk.entries); -} - -static const char *pp_lib_thermal_controller_names[] = { - "NONE", - "lm63", - "adm1032", - "adm1030", - "max6649", - "lm64", - "f75375", - "RV6xx", - "RV770", - "adt7473", - "NONE", - "External GPIO", - "Evergreen", - "emc2103", - "Sumo", - "Northern Islands", - "Southern Islands", - "lm96163", - "Sea Islands", - "Kaveri/Kabini", -}; - -void amdgpu_add_thermal_controller(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - ATOM_PPLIB_POWERPLAYTABLE *power_table; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - ATOM_PPLIB_THERMALCONTROLLER *controller; - struct amdgpu_i2c_bus_rec i2c_bus; - u16 data_offset; - u8 frev, crev; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return; - power_table = (ATOM_PPLIB_POWERPLAYTABLE *) - (mode_info->atom_context->bios + data_offset); - controller = &power_table->sThermalController; - - /* add the i2c bus for thermal/fan chip */ - if (controller->ucType > 0) { - if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN) - adev->pm.no_fan = true; - adev->pm.fan_pulses_per_revolution = - controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK; - if (adev->pm.fan_pulses_per_revolution) { - adev->pm.fan_min_rpm = controller->ucFanMinRPM; - 
adev->pm.fan_max_rpm = controller->ucFanMaxRPM; - } - if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_RV770; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_SUMO; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_NI; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_SI; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_CI; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) { - DRM_INFO("Internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_KV; - } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) { - DRM_INFO("External GPIO thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO; - } else if (controller->ucType == - ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) { - DRM_INFO("ADT7473 with internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL; - } else if (controller->ucType == - ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) { - DRM_INFO("EMC2103 with internal thermal controller %s fan control\n", - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL; - } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) { - DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n", - pp_lib_thermal_controller_names[controller->ucType], - controller->ucI2cAddress >> 1, - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? 
"without" : "with"); - adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL; - i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine); - adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus); - if (adev->pm.i2c_bus) { - struct i2c_board_info info = { }; - const char *name = pp_lib_thermal_controller_names[controller->ucType]; - info.addr = controller->ucI2cAddress >> 1; - strlcpy(info.type, name, sizeof(info.type)); - i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info); - } - } else { - DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n", - controller->ucType, - controller->ucI2cAddress >> 1, - (controller->ucFanParameters & - ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with"); - } - } -} - -struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (idx < adev->pm.dpm.num_of_vce_states) - return &adev->pm.dpm.vce_states[idx]; - - return NULL; -} - -static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, - enum amd_pm_state_type dpm_state) -{ - int i; - struct amdgpu_ps *ps; - u32 ui_class; - bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? - true : false; - - /* check if the vblank period is too short to adjust the mclk */ - if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { - if (amdgpu_dpm_vblank_too_short(adev)) - single_display = false; - } - - /* certain older asics have a separare 3D performance state, - * so try that first if the user selected performance - */ - if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) - dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; - /* balanced states don't exist at the moment */ - if (dpm_state == POWER_STATE_TYPE_BALANCED) - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - -restart_search: - /* Pick the best power state based on current conditions */ - for (i = 0; i < adev->pm.dpm.num_ps; i++) { - ps = &adev->pm.dpm.ps[i]; - ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; - switch (dpm_state) { - /* user states */ - case POWER_STATE_TYPE_BATTERY: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - case POWER_STATE_TYPE_BALANCED: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - case POWER_STATE_TYPE_PERFORMANCE: - if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { - if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { - if (single_display) - return ps; - } else - return ps; - } - break; - /* internal states */ - case POWER_STATE_TYPE_INTERNAL_UVD: - if (adev->pm.dpm.uvd_ps) - return adev->pm.dpm.uvd_ps; - else - break; - case POWER_STATE_TYPE_INTERNAL_UVD_SD: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_HD: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_HD2: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_BOOT: - return adev->pm.dpm.boot_ps; - case POWER_STATE_TYPE_INTERNAL_THERMAL: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_ACPI: - if 
(ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_ULV: - if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) - return ps; - break; - case POWER_STATE_TYPE_INTERNAL_3DPERF: - if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) - return ps; - break; - default: - break; - } - } - /* use a fallback state if we didn't match */ - switch (dpm_state) { - case POWER_STATE_TYPE_INTERNAL_UVD_SD: - dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; - goto restart_search; - case POWER_STATE_TYPE_INTERNAL_UVD_HD: - case POWER_STATE_TYPE_INTERNAL_UVD_HD2: - case POWER_STATE_TYPE_INTERNAL_UVD_MVC: - if (adev->pm.dpm.uvd_ps) { - return adev->pm.dpm.uvd_ps; - } else { - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - goto restart_search; - } - case POWER_STATE_TYPE_INTERNAL_THERMAL: - dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; - goto restart_search; - case POWER_STATE_TYPE_INTERNAL_ACPI: - dpm_state = POWER_STATE_TYPE_BATTERY; - goto restart_search; - case POWER_STATE_TYPE_BATTERY: - case POWER_STATE_TYPE_BALANCED: - case POWER_STATE_TYPE_INTERNAL_3DPERF: - dpm_state = POWER_STATE_TYPE_PERFORMANCE; - goto restart_search; - default: - break; - } - - return NULL; -} - -static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) -{ - struct amdgpu_ps *ps; - enum amd_pm_state_type dpm_state; - int ret; - bool equal = false; - - /* if dpm init failed */ - if (!adev->pm.dpm_enabled) - return 0; - - if (adev->pm.dpm.user_state != adev->pm.dpm.state) { - /* add other state override checks here */ - if ((!adev->pm.dpm.thermal_active) && - (!adev->pm.dpm.uvd_active)) - adev->pm.dpm.state = adev->pm.dpm.user_state; - } - dpm_state = adev->pm.dpm.state; - - ps = amdgpu_dpm_pick_power_state(adev, dpm_state); - if (ps) - adev->pm.dpm.requested_ps = ps; - else - return -EINVAL; - - if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { - printk("switching from power state:\n"); - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); - printk("switching to power state:\n"); - amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); - } - - /* update whether vce is active */ - ps->vce_active = adev->pm.dpm.vce_active; - if (adev->powerplay.pp_funcs->display_configuration_changed) - amdgpu_dpm_display_configuration_changed(adev); - - ret = amdgpu_dpm_pre_set_power_state(adev); - if (ret) - return ret; - - if (adev->powerplay.pp_funcs->check_state_equal) { - if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) - equal = false; - } - - if (equal) - return 0; - - if (adev->powerplay.pp_funcs->set_power_state) - adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle); - - amdgpu_dpm_post_set_power_state(adev); - - adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; - adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; - - if (adev->powerplay.pp_funcs->force_performance_level) { - if (adev->pm.dpm.thermal_active) { - enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; - /* force low perf level for thermal */ - amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); - /* save the user's level */ - adev->pm.dpm.forced_level = level; - } else { - /* otherwise, user selected level */ - amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); - } - } - - return 0; -} - -void amdgpu_legacy_dpm_compute_clocks(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - int i = 0; - - if 
(adev->mode_info.num_crtc) - amdgpu_display_bandwidth_update(adev); - - for (i = 0; i < AMDGPU_MAX_RINGS; i++) { - struct amdgpu_ring *ring = adev->rings[i]; - if (ring && ring->sched.ready) - amdgpu_fence_wait_empty(ring); - } - - amdgpu_dpm_get_active_displays(adev); - - amdgpu_dpm_change_power_state_locked(adev); -} - -void amdgpu_dpm_thermal_work_handler(struct work_struct *work) -{ - struct amdgpu_device *adev = - container_of(work, struct amdgpu_device, - pm.dpm.thermal.work); - const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - /* switch to the thermal state */ - enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; - int temp, size = sizeof(temp); - - if (!adev->pm.dpm_enabled) - return; - - if (!pp_funcs->read_sensor(adev->powerplay.pp_handle, - AMDGPU_PP_SENSOR_GPU_TEMP, - (void *)&temp, - &size)) { - if (temp < adev->pm.dpm.thermal.min_temp) - /* switch back the user state */ - dpm_state = adev->pm.dpm.user_state; - } else { - if (adev->pm.dpm.thermal.high_to_low) - /* switch back the user state */ - dpm_state = adev->pm.dpm.user_state; - } - - if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) - adev->pm.dpm.thermal_active = true; - else - adev->pm.dpm.thermal_active = false; - - adev->pm.dpm.state = dpm_state; - - amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle); -} diff --git a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h deleted file mode 100644 index 93bd3973330c..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/legacy_dpm.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2021 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef __LEGACY_DPM_H__ -#define __LEGACY_DPM_H__ - -void amdgpu_dpm_print_class_info(u32 class, u32 class2); -void amdgpu_dpm_print_cap_info(u32 caps); -void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev, - struct amdgpu_ps *rps); -int amdgpu_get_platform_caps(struct amdgpu_device *adev); -int amdgpu_parse_extended_power_table(struct amdgpu_device *adev); -void amdgpu_free_extended_power_table(struct amdgpu_device *adev); -void amdgpu_add_thermal_controller(struct amdgpu_device *adev); -struct amd_vce_state* amdgpu_get_vce_clock_state(void *handle, u32 idx); -void amdgpu_pm_print_power_states(struct amdgpu_device *adev); -void amdgpu_legacy_dpm_compute_clocks(void *handle); -void amdgpu_dpm_thermal_work_handler(struct work_struct *work); -#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h deleted file mode 100644 index 8463245f424f..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h +++ /dev/null @@ -1,200 +0,0 @@ -/* - * Copyright 2011 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef PP_SMC_H -#define PP_SMC_H - -#pragma pack(push, 1) - -#define PPSMC_SWSTATE_FLAG_DC 0x01 -#define PPSMC_SWSTATE_FLAG_UVD 0x02 -#define PPSMC_SWSTATE_FLAG_VCE 0x04 -#define PPSMC_SWSTATE_FLAG_PCIE_X1 0x08 - -#define PPSMC_THERMAL_PROTECT_TYPE_INTERNAL 0x00 -#define PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL 0x01 -#define PPSMC_THERMAL_PROTECT_TYPE_NONE 0xff - -#define PPSMC_SYSTEMFLAG_GPIO_DC 0x01 -#define PPSMC_SYSTEMFLAG_STEPVDDC 0x02 -#define PPSMC_SYSTEMFLAG_GDDR5 0x04 -#define PPSMC_SYSTEMFLAG_DISABLE_BABYSTEP 0x08 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT 0x10 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_ANALOG 0x20 -#define PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO 0x40 - -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_MASK 0x07 -#define PPSMC_EXTRAFLAGS_AC2DC_DONT_WAIT_FOR_VBLANK 0x08 -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTODPMLOWSTATE 0x00 -#define PPSMC_EXTRAFLAGS_AC2DC_ACTION_GOTOINITIALSTATE 0x01 -#define PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH 0x02 - -#define PPSMC_DISPLAY_WATERMARK_LOW 0 -#define PPSMC_DISPLAY_WATERMARK_HIGH 1 - -#define PPSMC_STATEFLAG_AUTO_PULSE_SKIP 0x01 -#define PPSMC_STATEFLAG_POWERBOOST 0x02 -#define PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE 0x20 -#define PPSMC_STATEFLAG_DEEPSLEEP_BYPASS 0x40 - -#define FDO_MODE_HARDWARE 0 -#define FDO_MODE_PIECE_WISE_LINEAR 1 - -enum FAN_CONTROL { - FAN_CONTROL_FUZZY, - FAN_CONTROL_TABLE -}; - -#define PPSMC_Result_OK ((uint8_t)0x01) -#define PPSMC_Result_Failed ((uint8_t)0xFF) - -typedef uint8_t PPSMC_Result; - -#define PPSMC_MSG_Halt ((uint8_t)0x10) -#define PPSMC_MSG_Resume ((uint8_t)0x11) -#define PPSMC_MSG_ZeroLevelsDisabled ((uint8_t)0x13) -#define PPSMC_MSG_OneLevelsDisabled ((uint8_t)0x14) -#define PPSMC_MSG_TwoLevelsDisabled ((uint8_t)0x15) -#define PPSMC_MSG_EnableThermalInterrupt ((uint8_t)0x16) -#define PPSMC_MSG_RunningOnAC ((uint8_t)0x17) -#define PPSMC_MSG_SwitchToSwState ((uint8_t)0x20) -#define PPSMC_MSG_SwitchToInitialState ((uint8_t)0x40) -#define PPSMC_MSG_NoForcedLevel ((uint8_t)0x41) -#define PPSMC_MSG_ForceHigh ((uint8_t)0x42) -#define PPSMC_MSG_ForceMediumOrHigh ((uint8_t)0x43) -#define PPSMC_MSG_SwitchToMinimumPower ((uint8_t)0x51) -#define PPSMC_MSG_ResumeFromMinimumPower ((uint8_t)0x52) -#define PPSMC_MSG_EnableCac ((uint8_t)0x53) -#define PPSMC_MSG_DisableCac ((uint8_t)0x54) -#define PPSMC_TDPClampingActive ((uint8_t)0x59) -#define PPSMC_TDPClampingInactive ((uint8_t)0x5A) -#define PPSMC_StartFanControl ((uint8_t)0x5B) -#define PPSMC_StopFanControl ((uint8_t)0x5C) -#define PPSMC_MSG_NoDisplay ((uint8_t)0x5D) -#define PPSMC_NoDisplay ((uint8_t)0x5D) -#define PPSMC_MSG_HasDisplay ((uint8_t)0x5E) -#define PPSMC_HasDisplay ((uint8_t)0x5E) -#define PPSMC_MSG_UVDPowerOFF ((uint8_t)0x60) -#define PPSMC_MSG_UVDPowerON ((uint8_t)0x61) -#define PPSMC_MSG_EnableULV ((uint8_t)0x62) -#define PPSMC_MSG_DisableULV ((uint8_t)0x63) -#define PPSMC_MSG_EnterULV ((uint8_t)0x64) -#define PPSMC_MSG_ExitULV ((uint8_t)0x65) -#define PPSMC_CACLongTermAvgEnable ((uint8_t)0x6E) -#define PPSMC_CACLongTermAvgDisable ((uint8_t)0x6F) -#define PPSMC_MSG_CollectCAC_PowerCorreln ((uint8_t)0x7A) -#define PPSMC_FlushDataCache ((uint8_t)0x80) -#define PPSMC_MSG_SetEnabledLevels ((uint8_t)0x82) -#define PPSMC_MSG_SetForcedLevels ((uint8_t)0x83) -#define PPSMC_MSG_ResetToDefaults ((uint8_t)0x84) -#define PPSMC_MSG_EnableDTE ((uint8_t)0x87) -#define PPSMC_MSG_DisableDTE ((uint8_t)0x88) -#define PPSMC_MSG_ThrottleOVRDSCLKDS ((uint8_t)0x96) -#define PPSMC_MSG_CancelThrottleOVRDSCLKDS ((uint8_t)0x97) -#define 
PPSMC_MSG_EnableACDCGPIOInterrupt ((uint16_t) 0x149) - -/* CI/KV/KB */ -#define PPSMC_MSG_UVDDPM_SetEnabledMask ((uint16_t) 0x12D) -#define PPSMC_MSG_VCEDPM_SetEnabledMask ((uint16_t) 0x12E) -#define PPSMC_MSG_ACPDPM_SetEnabledMask ((uint16_t) 0x12F) -#define PPSMC_MSG_SAMUDPM_SetEnabledMask ((uint16_t) 0x130) -#define PPSMC_MSG_MCLKDPM_ForceState ((uint16_t) 0x131) -#define PPSMC_MSG_MCLKDPM_NoForcedLevel ((uint16_t) 0x132) -#define PPSMC_MSG_Thermal_Cntl_Disable ((uint16_t) 0x133) -#define PPSMC_MSG_Voltage_Cntl_Disable ((uint16_t) 0x135) -#define PPSMC_MSG_PCIeDPM_Enable ((uint16_t) 0x136) -#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) -#define PPSMC_MSG_ACPPowerOFF ((uint16_t) 0x137) -#define PPSMC_MSG_ACPPowerON ((uint16_t) 0x138) -#define PPSMC_MSG_SAMPowerOFF ((uint16_t) 0x139) -#define PPSMC_MSG_SAMPowerON ((uint16_t) 0x13a) -#define PPSMC_MSG_PCIeDPM_Disable ((uint16_t) 0x13d) -#define PPSMC_MSG_NBDPM_Enable ((uint16_t) 0x140) -#define PPSMC_MSG_NBDPM_Disable ((uint16_t) 0x141) -#define PPSMC_MSG_SCLKDPM_SetEnabledMask ((uint16_t) 0x145) -#define PPSMC_MSG_MCLKDPM_SetEnabledMask ((uint16_t) 0x146) -#define PPSMC_MSG_PCIeDPM_ForceLevel ((uint16_t) 0x147) -#define PPSMC_MSG_PCIeDPM_UnForceLevel ((uint16_t) 0x148) -#define PPSMC_MSG_EnableVRHotGPIOInterrupt ((uint16_t) 0x14a) -#define PPSMC_MSG_DPM_Enable ((uint16_t) 0x14e) -#define PPSMC_MSG_DPM_Disable ((uint16_t) 0x14f) -#define PPSMC_MSG_MCLKDPM_Enable ((uint16_t) 0x150) -#define PPSMC_MSG_MCLKDPM_Disable ((uint16_t) 0x151) -#define PPSMC_MSG_UVDDPM_Enable ((uint16_t) 0x154) -#define PPSMC_MSG_UVDDPM_Disable ((uint16_t) 0x155) -#define PPSMC_MSG_SAMUDPM_Enable ((uint16_t) 0x156) -#define PPSMC_MSG_SAMUDPM_Disable ((uint16_t) 0x157) -#define PPSMC_MSG_ACPDPM_Enable ((uint16_t) 0x158) -#define PPSMC_MSG_ACPDPM_Disable ((uint16_t) 0x159) -#define PPSMC_MSG_VCEDPM_Enable ((uint16_t) 0x15a) -#define PPSMC_MSG_VCEDPM_Disable ((uint16_t) 0x15b) -#define PPSMC_MSG_VddC_Request ((uint16_t) 0x15f) -#define PPSMC_MSG_SCLKDPM_GetEnabledMask ((uint16_t) 0x162) -#define PPSMC_MSG_PCIeDPM_SetEnabledMask ((uint16_t) 0x167) -#define PPSMC_MSG_TDCLimitEnable ((uint16_t) 0x169) -#define PPSMC_MSG_TDCLimitDisable ((uint16_t) 0x16a) -#define PPSMC_MSG_PkgPwrLimitEnable ((uint16_t) 0x185) -#define PPSMC_MSG_PkgPwrLimitDisable ((uint16_t) 0x186) -#define PPSMC_MSG_PkgPwrSetLimit ((uint16_t) 0x187) -#define PPSMC_MSG_OverDriveSetTargetTdp ((uint16_t) 0x188) -#define PPSMC_MSG_SCLKDPM_FreezeLevel ((uint16_t) 0x189) -#define PPSMC_MSG_SCLKDPM_UnfreezeLevel ((uint16_t) 0x18A) -#define PPSMC_MSG_MCLKDPM_FreezeLevel ((uint16_t) 0x18B) -#define PPSMC_MSG_MCLKDPM_UnfreezeLevel ((uint16_t) 0x18C) -#define PPSMC_MSG_MASTER_DeepSleep_ON ((uint16_t) 0x18F) -#define PPSMC_MSG_MASTER_DeepSleep_OFF ((uint16_t) 0x190) -#define PPSMC_MSG_Remove_DC_Clamp ((uint16_t) 0x191) -#define PPSMC_MSG_SetFanPwmMax ((uint16_t) 0x19A) -#define PPSMC_MSG_SetFanRpmMax ((uint16_t) 0x205) - -#define PPSMC_MSG_ENABLE_THERMAL_DPM ((uint16_t) 0x19C) -#define PPSMC_MSG_DISABLE_THERMAL_DPM ((uint16_t) 0x19D) - -#define PPSMC_MSG_API_GetSclkFrequency ((uint16_t) 0x200) -#define PPSMC_MSG_API_GetMclkFrequency ((uint16_t) 0x201) - -/* TN */ -#define PPSMC_MSG_DPM_Config ((uint32_t) 0x102) -#define PPSMC_MSG_DPM_ForceState ((uint32_t) 0x104) -#define PPSMC_MSG_PG_SIMD_Config ((uint32_t) 0x108) -#define PPSMC_MSG_Voltage_Cntl_Enable ((uint32_t) 0x109) -#define PPSMC_MSG_Thermal_Cntl_Enable ((uint32_t) 0x10a) -#define PPSMC_MSG_VCEPowerOFF ((uint32_t) 0x10e) -#define PPSMC_MSG_VCEPowerON 
((uint32_t) 0x10f) -#define PPSMC_MSG_DPM_N_LevelsDisabled ((uint32_t) 0x112) -#define PPSMC_MSG_DCE_RemoveVoltageAdjustment ((uint32_t) 0x11d) -#define PPSMC_MSG_DCE_AllowVoltageAdjustment ((uint32_t) 0x11e) -#define PPSMC_MSG_EnableBAPM ((uint32_t) 0x120) -#define PPSMC_MSG_DisableBAPM ((uint32_t) 0x121) -#define PPSMC_MSG_UVD_DPM_Config ((uint32_t) 0x124) - -#define PPSMC_MSG_DRV_DRAM_ADDR_HI ((uint16_t) 0x250) -#define PPSMC_MSG_DRV_DRAM_ADDR_LO ((uint16_t) 0x251) -#define PPSMC_MSG_SMU_DRAM_ADDR_HI ((uint16_t) 0x252) -#define PPSMC_MSG_SMU_DRAM_ADDR_LO ((uint16_t) 0x253) -#define PPSMC_MSG_LoadUcodes ((uint16_t) 0x254) - -typedef uint16_t PPSMC_Msg; - -#pragma pack(pop) - -#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h deleted file mode 100644 index 055321f61ca7..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Copyright 2011 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef __R600_DPM_H__ -#define __R600_DPM_H__ - -#define R600_ASI_DFLT 10000 -#define R600_BSP_DFLT 0x41EB -#define R600_BSU_DFLT 0x2 -#define R600_AH_DFLT 5 -#define R600_RLP_DFLT 25 -#define R600_RMP_DFLT 65 -#define R600_LHP_DFLT 40 -#define R600_LMP_DFLT 15 -#define R600_TD_DFLT 0 -#define R600_UTC_DFLT_00 0x24 -#define R600_UTC_DFLT_01 0x22 -#define R600_UTC_DFLT_02 0x22 -#define R600_UTC_DFLT_03 0x22 -#define R600_UTC_DFLT_04 0x22 -#define R600_UTC_DFLT_05 0x22 -#define R600_UTC_DFLT_06 0x22 -#define R600_UTC_DFLT_07 0x22 -#define R600_UTC_DFLT_08 0x22 -#define R600_UTC_DFLT_09 0x22 -#define R600_UTC_DFLT_10 0x22 -#define R600_UTC_DFLT_11 0x22 -#define R600_UTC_DFLT_12 0x22 -#define R600_UTC_DFLT_13 0x22 -#define R600_UTC_DFLT_14 0x22 -#define R600_DTC_DFLT_00 0x24 -#define R600_DTC_DFLT_01 0x22 -#define R600_DTC_DFLT_02 0x22 -#define R600_DTC_DFLT_03 0x22 -#define R600_DTC_DFLT_04 0x22 -#define R600_DTC_DFLT_05 0x22 -#define R600_DTC_DFLT_06 0x22 -#define R600_DTC_DFLT_07 0x22 -#define R600_DTC_DFLT_08 0x22 -#define R600_DTC_DFLT_09 0x22 -#define R600_DTC_DFLT_10 0x22 -#define R600_DTC_DFLT_11 0x22 -#define R600_DTC_DFLT_12 0x22 -#define R600_DTC_DFLT_13 0x22 -#define R600_DTC_DFLT_14 0x22 -#define R600_VRC_DFLT 0x0000C003 -#define R600_VOLTAGERESPONSETIME_DFLT 1000 -#define R600_BACKBIASRESPONSETIME_DFLT 1000 -#define R600_VRU_DFLT 0x3 -#define R600_SPLLSTEPTIME_DFLT 0x1000 -#define R600_SPLLSTEPUNIT_DFLT 0x3 -#define R600_TPU_DFLT 0 -#define R600_TPC_DFLT 0x200 -#define R600_SSTU_DFLT 0 -#define R600_SST_DFLT 0x00C8 -#define R600_GICST_DFLT 0x200 -#define R600_FCT_DFLT 0x0400 -#define R600_FCTU_DFLT 0 -#define R600_CTXCGTT3DRPHC_DFLT 0x20 -#define R600_CTXCGTT3DRSDC_DFLT 0x40 -#define R600_VDDC3DOORPHC_DFLT 0x100 -#define R600_VDDC3DOORSDC_DFLT 0x7 -#define R600_VDDC3DOORSU_DFLT 0 -#define R600_MPLLLOCKTIME_DFLT 100 -#define R600_MPLLRESETTIME_DFLT 150 -#define R600_VCOSTEPPCT_DFLT 20 -#define R600_ENDINGVCOSTEPPCT_DFLT 5 -#define R600_REFERENCEDIVIDER_DFLT 4 - -#define R600_PM_NUMBER_OF_TC 15 -#define R600_PM_NUMBER_OF_SCLKS 20 -#define R600_PM_NUMBER_OF_MCLKS 4 -#define R600_PM_NUMBER_OF_VOLTAGE_LEVELS 4 -#define R600_PM_NUMBER_OF_ACTIVITY_LEVELS 3 - -/* XXX are these ok? */ -#define R600_TEMP_RANGE_MIN (90 * 1000) -#define R600_TEMP_RANGE_MAX (120 * 1000) - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - -enum r600_power_level { - R600_POWER_LEVEL_LOW = 0, - R600_POWER_LEVEL_MEDIUM = 1, - R600_POWER_LEVEL_HIGH = 2, - R600_POWER_LEVEL_CTXSW = 3, -}; - -enum r600_td { - R600_TD_AUTO, - R600_TD_UP, - R600_TD_DOWN, -}; - -enum r600_display_watermark { - R600_DISPLAY_WATERMARK_LOW = 0, - R600_DISPLAY_WATERMARK_HIGH = 1, -}; - -enum r600_display_gap -{ - R600_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, - R600_PM_DISPLAY_GAP_VBLANK = 1, - R600_PM_DISPLAY_GAP_WATERMARK = 2, - R600_PM_DISPLAY_GAP_IGNORE = 3, -}; -#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c deleted file mode 100644 index 9f8cc81cb7ca..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c +++ /dev/null @@ -1,8153 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. 
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ - -#include -#include - -#include "amdgpu.h" -#include "amdgpu_pm.h" -#include "amdgpu_dpm.h" -#include "amdgpu_atombios.h" -#include "amd_pcie.h" -#include "sid.h" -#include "r600_dpm.h" -#include "si_dpm.h" -#include "atom.h" -#include "../include/pptable.h" -#include -#include -#include -#include - -#define MC_CG_ARB_FREQ_F0 0x0a -#define MC_CG_ARB_FREQ_F1 0x0b -#define MC_CG_ARB_FREQ_F2 0x0c -#define MC_CG_ARB_FREQ_F3 0x0d - -#define SMC_RAM_END 0x20000 - -#define SCLK_MIN_DEEPSLEEP_FREQ 1350 - - -/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */ -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20 -#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22 - -#define BIOS_SCRATCH_4 0x5cd - -MODULE_FIRMWARE("amdgpu/tahiti_smc.bin"); -MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin"); -MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/verde_smc.bin"); -MODULE_FIRMWARE("amdgpu/verde_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/oland_smc.bin"); -MODULE_FIRMWARE("amdgpu/oland_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/hainan_smc.bin"); -MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin"); -MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin"); - -static const struct amd_pm_funcs si_dpm_funcs; - -union power_info { - struct _ATOM_POWERPLAY_INFO info; - struct _ATOM_POWERPLAY_INFO_V2 info_2; - struct _ATOM_POWERPLAY_INFO_V3 info_3; - struct _ATOM_PPLIB_POWERPLAYTABLE pplib; - struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2; - struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3; - struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4; - struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5; -}; - -union fan_info { - struct _ATOM_PPLIB_FANTABLE fan; - struct _ATOM_PPLIB_FANTABLE2 fan2; - struct _ATOM_PPLIB_FANTABLE3 fan3; -}; - -union pplib_clock_info { - struct _ATOM_PPLIB_R600_CLOCK_INFO r600; - struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780; - struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen; - struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo; - struct _ATOM_PPLIB_SI_CLOCK_INFO si; -}; - -enum si_dpm_auto_throttle_src { - SI_DPM_AUTO_THROTTLE_SRC_THERMAL, - SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL -}; - -enum si_dpm_event_src { - SI_DPM_EVENT_SRC_ANALOG = 0, - SI_DPM_EVENT_SRC_EXTERNAL = 1, - SI_DPM_EVENT_SRC_DIGITAL = 2, - SI_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3, - SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4 -}; - 
-static const u32 r600_utc[R600_PM_NUMBER_OF_TC] = -{ - R600_UTC_DFLT_00, - R600_UTC_DFLT_01, - R600_UTC_DFLT_02, - R600_UTC_DFLT_03, - R600_UTC_DFLT_04, - R600_UTC_DFLT_05, - R600_UTC_DFLT_06, - R600_UTC_DFLT_07, - R600_UTC_DFLT_08, - R600_UTC_DFLT_09, - R600_UTC_DFLT_10, - R600_UTC_DFLT_11, - R600_UTC_DFLT_12, - R600_UTC_DFLT_13, - R600_UTC_DFLT_14, -}; - -static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] = -{ - R600_DTC_DFLT_00, - R600_DTC_DFLT_01, - R600_DTC_DFLT_02, - R600_DTC_DFLT_03, - R600_DTC_DFLT_04, - R600_DTC_DFLT_05, - R600_DTC_DFLT_06, - R600_DTC_DFLT_07, - R600_DTC_DFLT_08, - R600_DTC_DFLT_09, - R600_DTC_DFLT_10, - R600_DTC_DFLT_11, - R600_DTC_DFLT_12, - R600_DTC_DFLT_13, - R600_DTC_DFLT_14, -}; - -static const struct si_cac_config_reg cac_weights_tahiti[] = -{ - { 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x0, 
SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_tahiti[] = -{ - { 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND }, - { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x119, 0x0001fffe, 1, 0x8, 
SISLANDS_CACCONFIG_CGIND }, - { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } - -}; - -static const struct si_cac_config_reg cac_override_tahiti[] = -{ - { 0xFFFFFFFF } -}; - -static const struct si_powertune_data powertune_data_tahiti = -{ - ((1 << 16) | 27027), - 6, - 0, - 4, - 95, - { - 0UL, - 0UL, - 4521550UL, - 309631529UL, - -1270850L, - 4513710L, - 40 - }, - 595000000UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static const struct si_dte_data dte_data_tahiti = -{ - { 1159409, 0, 0, 0, 0 }, - { 777, 0, 0, 0, 0 }, - 2, - 54000, - 127000, - 25, - 2, - 10, - 13, - { 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 }, - { 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 }, - { 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 }, - 85, - false -}; - -static const struct si_dte_data dte_data_tahiti_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 
0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_new_zealand = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 }, - { 0x29B, 0x3E9, 0x537, 0x7D2, 0 }, - 0x5, - 0xAFC8, - 0x69, - 0x32, - 1, - 0, - 0x10, - { 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 }, - 85, - true -}; - -static const struct si_dte_data dte_data_aruba_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_malta = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_cac_config_reg cac_weights_pitcairn[] = -{ - { 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 
0x1f, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_pitcairn[] = -{ - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9e, 0x00000001, 0, 0x1, 
SISLANDS_CACCONFIG_CGIND }, - { 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, 
SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_override_pitcairn[] = -{ - { 0xFFFFFFFF } -}; - -static const struct si_powertune_data powertune_data_pitcairn = -{ - ((1 << 16) | 27027), - 5, - 0, - 6, - 100, - { - 51600000UL, - 1800000UL, - 7194395UL, - 309631529UL, - -1270850L, - 4513710L, - 100 - }, - 117830498UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static const struct si_dte_data dte_data_pitcairn = -{ - { 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0 }, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - 0, - false -}; - -static const struct si_dte_data dte_data_curacao_xt = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_curacao_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_neptune_xt = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 45000, - 100, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_cac_config_reg cac_weights_chelsea_pro[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, 
SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_chelsea_xt[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - 
{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_heathrow[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, 
SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_cape_verde_pro[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND 
}, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_cape_verde[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 
0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_cape_verde[] = -{ - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 
0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_override_cape_verde[] = -{ - { 0xFFFFFFFF } -}; - -static const struct si_powertune_data powertune_data_cape_verde = -{ - ((1 << 16) | 0x6993), - 5, - 0, - 7, - 105, - { - 0UL, - 0UL, - 7194395UL, - 309631529UL, - -1270850L, - 4513710L, - 100 - }, - 117830498UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static const struct si_dte_data dte_data_cape_verde = -{ - { 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0 }, - 0, - 0, - 0, - 0, - 0, - 0, - 0, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - 0, - false -}; - -static const struct si_dte_data dte_data_venus_xtx = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 }, - 5, - 55000, - 0x69, - 0xA, - 1, - 0, - 0x3, - { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data 
dte_data_venus_xt = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 }, - 5, - 55000, - 0x69, - 0xA, - 1, - 0, - 0x3, - { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_venus_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 }, - 5, - 55000, - 0x69, - 0xA, - 1, - 0, - 0x3, - { 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - { 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_cac_config_reg cac_weights_oland[] = -{ - { 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, 
- { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_mars_pro[] = -{ - { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_mars_xt[] = -{ - { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, 
SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_oland_pro[] = -{ - { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7E, 
SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_weights_oland_xt[] = -{ - { 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0x0000ffff, 0, 0x32, 
SISLANDS_CACCONFIG_CGIND }, - { 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND }, - { 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND }, - { 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND }, - { 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND }, - { 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_oland[] = -{ - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x1, 
SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg lcac_mars_pro[] = -{ - { 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND }, - { 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND }, - { 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND }, - { 0xFFFFFFFF } -}; - -static const struct si_cac_config_reg cac_override_oland[] = -{ - { 0xFFFFFFFF } -}; - -static const struct si_powertune_data powertune_data_oland = -{ - ((1 << 16) | 0x6993), - 5, - 0, - 7, - 105, - { - 0UL, - 0UL, - 7194395UL, - 309631529UL, - -1270850L, - 4513710L, - 100 - }, - 117830498UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static const struct si_powertune_data powertune_data_mars_pro = -{ - ((1 << 16) | 0x6993), - 5, - 0, - 7, - 105, - { - 0UL, - 0UL, - 7194395UL, - 309631529UL, - -1270850L, - 4513710L, - 100 - }, - 117830498UL, - 12, - { - 0, - 0, - 0, - 0, - 0, - 0, - 0, - 0 - }, - true -}; - -static const struct si_dte_data dte_data_oland = -{ - { 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0 }, - 0, - 0, - 0, - 0, - 0, - 0, - 
0, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }, - 0, - false -}; - -static const struct si_dte_data dte_data_mars_pro = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 55000, - 105, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - -static const struct si_dte_data dte_data_sun_xt = -{ - { 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 }, - { 0x0, 0x0, 0x0, 0x0, 0x0 }, - 5, - 55000, - 105, - 0xA, - 1, - 0, - 0x10, - { 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF }, - { 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 }, - { 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 }, - 90, - true -}; - - -static const struct si_cac_config_reg cac_weights_hainan[] = -{ - { 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND }, - { 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND }, - { 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND }, - { 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND }, - { 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND }, - { 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND }, - { 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND }, - { 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND }, - { 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND }, - { 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND }, - { 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND }, - { 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND }, - { 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND }, - { 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND }, - { 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND }, - { 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND }, - { 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND }, - { 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND }, - { 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND }, - { 
0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
-	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
-	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
-	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
-	{ 0xFFFFFFFF }
-};
-
-static const struct si_powertune_data powertune_data_hainan =
-{
-	((1 << 16) | 0x6993),
-	5,
-	0,
-	9,
-	105,
-	{
-		0UL,
-		0UL,
-		7194395UL,
-		309631529UL,
-		-1270850L,
-		4513710L,
-		100
-	},
-	117830498UL,
-	12,
-	{
-		0,
-		0,
-		0,
-		0,
-		0,
-		0,
-		0,
-		0
-	},
-	true
-};
-
-static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
-static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
-static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
-static struct si_ps *si_get_ps(struct amdgpu_ps *rps);
-
-static int si_populate_voltage_value(struct amdgpu_device *adev,
-				     const struct atom_voltage_table *table,
-				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
-static int si_get_std_voltage_value(struct amdgpu_device *adev,
-				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
-				    u16 *std_voltage);
-static int si_write_smc_soft_register(struct amdgpu_device *adev,
-				      u16 reg_offset, u32 value);
-static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
-					 struct rv7xx_pl *pl,
-					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
-static int si_calculate_sclk_params(struct amdgpu_device *adev,
-				    u32 engine_clock,
-				    SISLANDS_SMC_SCLK_VALUE *sclk);
-
-static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
-static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
-static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
-
-static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
-{
-	struct si_power_info *pi = adev->pm.dpm.priv;
-	return pi;
-}
-
-static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
-						      u16 v, s32 t, u32 ileakage, u32 *leakage)
-{
-	s64 kt, kv, leakage_w, i_leakage, vddc;
-	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
-	s64 tmp;
-
-	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
-	vddc = div64_s64(drm_int2fixp(v), 1000);
-	temperature = div64_s64(drm_int2fixp(t), 1000);
-
-	t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
-	t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
-	av = div64_s64(drm_int2fixp(coeff->av), 100000000);
-	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
-	t_ref = drm_int2fixp(coeff->t_ref);
-
-	tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
-	kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
-	kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
-	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
-
-	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
-
-	*leakage = drm_fixp2int(leakage_w * 1000);
-}
-
-static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
-					     const struct ni_leakage_coeffients *coeff,
-					     u16 v,
-					     s32 t,
-					     u32 i_leakage,
-					     u32 *leakage)
-{
-	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
-}
-
-static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
-					       const u32 fixed_kt, u16 v,
-					       u32 ileakage, u32 *leakage)
-{
-	s64 kt, kv, leakage_w, i_leakage, vddc;
-
-	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
-	vddc = div64_s64(drm_int2fixp(v), 1000);
-
-	kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
-	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
-			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
-
-	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
-
-	*leakage = drm_fixp2int(leakage_w * 1000);
-}
-
-static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
-				       const struct ni_leakage_coeffients *coeff,
-				       const u32 fixed_kt,
-				       u16 v,
-				       u32 i_leakage,
-				       u32 *leakage)
-{
-	si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
-}
-
-
-static void si_update_dte_from_pl2(struct amdgpu_device *adev,
-				   struct si_dte_data *dte_data)
-{
-	u32 p_limit1 = adev->pm.dpm.tdp_limit;
-	u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
-	u32 k = dte_data->k;
-	u32 t_max = dte_data->max_t;
-	u32 t_split[5] = { 10, 15, 20, 25, 30 };
-	u32 t_0 = dte_data->t0;
-	u32 i;
-
-	if (p_limit2 != 0 && p_limit2 <= p_limit1) {
-		dte_data->tdep_count = 3;
-
-		for (i = 0; i < k; i++) {
-			dte_data->r[i] =
-				(t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
-				(p_limit2 * (u32)100);
-		}
-
-		dte_data->tdep_r[1] = dte_data->r[4] * 2;
-
-		for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
-			dte_data->tdep_r[i] = dte_data->r[4];
-		}
-	} else {
-		DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
-	}
-}
-
-static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
-{
-	struct rv7xx_power_info *pi = adev->pm.dpm.priv;
-
-	return pi;
-}
-
-static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
-{
-	struct ni_power_info *pi = adev->pm.dpm.priv;
-
-	return pi;
-}
-
-static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
-{
-	struct si_ps *ps = aps->ps_priv;
-
-	return ps;
-}
-
-static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
-{
-	struct ni_power_info *ni_pi = ni_get_pi(adev);
-	struct si_power_info *si_pi = si_get_pi(adev);
-	bool update_dte_from_pl2 = false;
-
-	if (adev->asic_type == CHIP_TAHITI) {
-		si_pi->cac_weights = cac_weights_tahiti;
-		si_pi->lcac_config = lcac_tahiti;
-		si_pi->cac_override = cac_override_tahiti;
-		si_pi->powertune_data = &powertune_data_tahiti;
-		si_pi->dte_data = dte_data_tahiti;
-
-		switch (adev->pdev->device) {
-		case 0x6798:
-			si_pi->dte_data.enable_dte_by_default = true;
-			break;
-		case 0x6799:
-			si_pi->dte_data = dte_data_new_zealand;
-			break;
-		case 0x6790:
-		case 0x6791:
-		case 0x6792:
-		case 0x679E:
-			si_pi->dte_data = dte_data_aruba_pro;
-			update_dte_from_pl2 = true;
-			break;
-		case 0x679B:
-			si_pi->dte_data = dte_data_malta;
-			update_dte_from_pl2 = true;
-			break;
-		case 0x679A:
-			si_pi->dte_data = dte_data_tahiti_pro;
-			update_dte_from_pl2 = true;
-			break;
-		default:
-			if (si_pi->dte_data.enable_dte_by_default == true)
-				DRM_ERROR("DTE is not enabled!\n");
-			break;
-		}
-	} else if (adev->asic_type == CHIP_PITCAIRN) {
-		si_pi->cac_weights = cac_weights_pitcairn;
-		si_pi->lcac_config = lcac_pitcairn;
-		si_pi->cac_override = cac_override_pitcairn;
-		si_pi->powertune_data = &powertune_data_pitcairn;
-
-		switch (adev->pdev->device) {
-		case 0x6810:
-		case 0x6818:
-			si_pi->dte_data = dte_data_curacao_xt;
-			update_dte_from_pl2 = true;
-			break;
-		case 0x6819:
-		case 0x6811:
-			si_pi->dte_data = dte_data_curacao_pro;
-			update_dte_from_pl2 = true;
-			break;
-		case 0x6800:
-		case 0x6806:
-			si_pi->dte_data = dte_data_neptune_xt;
-			update_dte_from_pl2 = true;
-			break;
-		default:
-			si_pi->dte_data = dte_data_pitcairn;
-			break;
-		}
-	} else if (adev->asic_type == CHIP_VERDE) {
-		si_pi->lcac_config = lcac_cape_verde;
-		si_pi->cac_override = cac_override_cape_verde;
-		si_pi->powertune_data = &powertune_data_cape_verde;
-
-		switch (adev->pdev->device) {
-		case 0x683B:
-		case 0x683F:
-		case 0x6829:
-		case 0x6835:
-			si_pi->cac_weights = cac_weights_cape_verde_pro;
-			si_pi->dte_data = dte_data_cape_verde;
-			break;
-		case 0x682C:
-			si_pi->cac_weights = cac_weights_cape_verde_pro;
-			si_pi->dte_data = dte_data_sun_xt;
-			update_dte_from_pl2 = true;
-			break;
-		case 0x6825:
-		case 0x6827:
-			si_pi->cac_weights = cac_weights_heathrow;
-			si_pi->dte_data = dte_data_cape_verde;
-			break;
-		case 0x6824:
-		case 0x682D:
-			si_pi->cac_weights = cac_weights_chelsea_xt;
-			si_pi->dte_data = dte_data_cape_verde;
-			break;
-		case 0x682F:
-			si_pi->cac_weights = cac_weights_chelsea_pro;
-			si_pi->dte_data = dte_data_cape_verde;
-			break;
-		case 0x6820:
-			si_pi->cac_weights = cac_weights_heathrow;
-			si_pi->dte_data = dte_data_venus_xtx;
-			break;
-		case 0x6821:
-			si_pi->cac_weights = cac_weights_heathrow;
-			si_pi->dte_data = dte_data_venus_xt;
-			break;
-		case 0x6823:
-		case 0x682B:
-		case 0x6822:
-		case 0x682A:
-			si_pi->cac_weights = cac_weights_chelsea_pro;
-			si_pi->dte_data = dte_data_venus_pro;
-			break;
-		default:
-			si_pi->cac_weights = cac_weights_cape_verde;
-			si_pi->dte_data = dte_data_cape_verde;
-			break;
-		}
-	} 
else if (adev->asic_type == CHIP_OLAND) { - si_pi->lcac_config = lcac_mars_pro; - si_pi->cac_override = cac_override_oland; - si_pi->powertune_data = &powertune_data_mars_pro; - si_pi->dte_data = dte_data_mars_pro; - - switch (adev->pdev->device) { - case 0x6601: - case 0x6621: - case 0x6603: - case 0x6605: - si_pi->cac_weights = cac_weights_mars_pro; - update_dte_from_pl2 = true; - break; - case 0x6600: - case 0x6606: - case 0x6620: - case 0x6604: - si_pi->cac_weights = cac_weights_mars_xt; - update_dte_from_pl2 = true; - break; - case 0x6611: - case 0x6613: - case 0x6608: - si_pi->cac_weights = cac_weights_oland_pro; - update_dte_from_pl2 = true; - break; - case 0x6610: - si_pi->cac_weights = cac_weights_oland_xt; - update_dte_from_pl2 = true; - break; - default: - si_pi->cac_weights = cac_weights_oland; - si_pi->lcac_config = lcac_oland; - si_pi->cac_override = cac_override_oland; - si_pi->powertune_data = &powertune_data_oland; - si_pi->dte_data = dte_data_oland; - break; - } - } else if (adev->asic_type == CHIP_HAINAN) { - si_pi->cac_weights = cac_weights_hainan; - si_pi->lcac_config = lcac_oland; - si_pi->cac_override = cac_override_oland; - si_pi->powertune_data = &powertune_data_hainan; - si_pi->dte_data = dte_data_sun_xt; - update_dte_from_pl2 = true; - } else { - DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n"); - return; - } - - ni_pi->enable_power_containment = false; - ni_pi->enable_cac = false; - ni_pi->enable_sq_ramping = false; - si_pi->enable_dte = false; - - if (si_pi->powertune_data->enable_powertune_by_default) { - ni_pi->enable_power_containment = true; - ni_pi->enable_cac = true; - if (si_pi->dte_data.enable_dte_by_default) { - si_pi->enable_dte = true; - if (update_dte_from_pl2) - si_update_dte_from_pl2(adev, &si_pi->dte_data); - - } - ni_pi->enable_sq_ramping = true; - } - - ni_pi->driver_calculate_cac_leakage = true; - ni_pi->cac_configuration_required = true; - - if (ni_pi->cac_configuration_required) { - ni_pi->support_cac_long_term_average = true; - si_pi->dyn_powertune_data.l2_lta_window_size = - si_pi->powertune_data->l2_lta_window_size_default; - si_pi->dyn_powertune_data.lts_truncate = - si_pi->powertune_data->lts_truncate_default; - } else { - ni_pi->support_cac_long_term_average = false; - si_pi->dyn_powertune_data.l2_lta_window_size = 0; - si_pi->dyn_powertune_data.lts_truncate = 0; - } - - si_pi->dyn_powertune_data.disable_uvd_powertune = false; -} - -static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev) -{ - return 1; -} - -static u32 si_calculate_cac_wintime(struct amdgpu_device *adev) -{ - u32 xclk; - u32 wintime; - u32 cac_window; - u32 cac_window_size; - - xclk = amdgpu_asic_get_xclk(adev); - - if (xclk == 0) - return 0; - - cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK; - cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF); - - wintime = (cac_window_size * 100) / xclk; - - return wintime; -} - -static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor) -{ - return power_in_watts; -} - -static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev, - bool adjust_polarity, - u32 tdp_adjustment, - u32 *tdp_limit, - u32 *near_tdp_limit) -{ - u32 adjustment_delta, max_tdp_limit; - - if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit) - return -EINVAL; - - max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100; - - if (adjust_polarity) { - *tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; - *near_tdp_limit = 
adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit); - } else { - *tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100; - adjustment_delta = adev->pm.dpm.tdp_limit - *tdp_limit; - if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted) - *near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta; - else - *near_tdp_limit = 0; - } - - if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit)) - return -EINVAL; - if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit)) - return -EINVAL; - - return 0; -} - -static int si_populate_smc_tdp_limits(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - - if (ni_pi->enable_power_containment) { - SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; - PP_SIslands_PAPMParameters *papm_parm; - struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table; - u32 scaling_factor = si_get_smc_power_scaling_factor(adev); - u32 tdp_limit; - u32 near_tdp_limit; - int ret; - - if (scaling_factor == 0) - return -EINVAL; - - memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); - - ret = si_calculate_adjusted_tdp_limits(adev, - false, /* ??? */ - adev->pm.dpm.tdp_adjustment, - &tdp_limit, - &near_tdp_limit); - if (ret) - return ret; - - smc_table->dpm2Params.TDPLimit = - cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000); - smc_table->dpm2Params.NearTDPLimit = - cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000); - smc_table->dpm2Params.SafePowerLimit = - cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); - - ret = amdgpu_si_copy_bytes_to_smc(adev, - (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) + - offsetof(PP_SIslands_DPM2Parameters, TDPLimit)), - (u8 *)(&(smc_table->dpm2Params.TDPLimit)), - sizeof(u32) * 3, - si_pi->sram_end); - if (ret) - return ret; - - if (si_pi->enable_ppm) { - papm_parm = &si_pi->papm_parm; - memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters)); - papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp); - papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max); - papm_parm->dGPU_T_Warning = cpu_to_be32(95); - papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5); - papm_parm->PlatformPowerLimit = 0xffffffff; - papm_parm->NearTDPLimitPAPM = 0xffffffff; - - ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start, - (u8 *)papm_parm, - sizeof(PP_SIslands_PAPMParameters), - si_pi->sram_end); - if (ret) - return ret; - } - } - return 0; -} - -static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - - if (ni_pi->enable_power_containment) { - SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable; - u32 scaling_factor = si_get_smc_power_scaling_factor(adev); - int ret; - - memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE)); - - smc_table->dpm2Params.NearTDPLimit = - cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000); - smc_table->dpm2Params.SafePowerLimit = - cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000); - - ret = amdgpu_si_copy_bytes_to_smc(adev, - (si_pi->state_table_start + - offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) 
+ - offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)), - (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)), - sizeof(u32) * 2, - si_pi->sram_end); - if (ret) - return ret; - } - - return 0; -} - -static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev, - const u16 prev_std_vddc, - const u16 curr_std_vddc) -{ - u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN; - u64 prev_vddc = (u64)prev_std_vddc; - u64 curr_vddc = (u64)curr_std_vddc; - u64 pwr_efficiency_ratio, n, d; - - if ((prev_vddc == 0) || (curr_vddc == 0)) - return 0; - - n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000); - d = prev_vddc * prev_vddc; - pwr_efficiency_ratio = div64_u64(n, d); - - if (pwr_efficiency_ratio > (u64)0xFFFF) - return 0; - - return (u16)pwr_efficiency_ratio; -} - -static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - if (si_pi->dyn_powertune_data.disable_uvd_powertune && - amdgpu_state->vclk && amdgpu_state->dclk) - return true; - - return false; -} - -struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev) -{ - struct evergreen_power_info *pi = adev->pm.dpm.priv; - - return pi; -} - -static int si_populate_power_containment_values(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state, - SISLANDS_SMC_SWSTATE *smc_state) -{ - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_ps *state = si_get_ps(amdgpu_state); - SISLANDS_SMC_VOLTAGE_VALUE vddc; - u32 prev_sclk; - u32 max_sclk; - u32 min_sclk; - u16 prev_std_vddc; - u16 curr_std_vddc; - int i; - u16 pwr_efficiency_ratio; - u8 max_ps_percent; - bool disable_uvd_power_tune; - int ret; - - if (ni_pi->enable_power_containment == false) - return 0; - - if (state->performance_level_count == 0) - return -EINVAL; - - if (smc_state->levelCount != state->performance_level_count) - return -EINVAL; - - disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state); - - smc_state->levels[0].dpm2.MaxPS = 0; - smc_state->levels[0].dpm2.NearTDPDec = 0; - smc_state->levels[0].dpm2.AboveSafeInc = 0; - smc_state->levels[0].dpm2.BelowSafeInc = 0; - smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0; - - for (i = 1; i < state->performance_level_count; i++) { - prev_sclk = state->performance_levels[i-1].sclk; - max_sclk = state->performance_levels[i].sclk; - if (i == 1) - max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M; - else - max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H; - - if (prev_sclk > max_sclk) - return -EINVAL; - - if ((max_ps_percent == 0) || - (prev_sclk == max_sclk) || - disable_uvd_power_tune) - min_sclk = max_sclk; - else if (i == 1) - min_sclk = prev_sclk; - else - min_sclk = (prev_sclk * (u32)max_ps_percent) / 100; - - if (min_sclk < state->performance_levels[0].sclk) - min_sclk = state->performance_levels[0].sclk; - - if (min_sclk == 0) - return -EINVAL; - - ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, - state->performance_levels[i-1].vddc, &vddc); - if (ret) - return ret; - - ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc); - if (ret) - return ret; - - ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, - state->performance_levels[i].vddc, &vddc); - if (ret) - return ret; - - ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc); - if (ret) - return ret; - - pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev, - prev_std_vddc, 
curr_std_vddc); - - smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk); - smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC; - smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC; - smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC; - smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio); - } - - return 0; -} - -static int si_populate_sq_ramping_values(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state, - SISLANDS_SMC_SWSTATE *smc_state) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_ps *state = si_get_ps(amdgpu_state); - u32 sq_power_throttle, sq_power_throttle2; - bool enable_sq_ramping = ni_pi->enable_sq_ramping; - int i; - - if (state->performance_level_count == 0) - return -EINVAL; - - if (smc_state->levelCount != state->performance_level_count) - return -EINVAL; - - if (adev->pm.dpm.sq_ramping_threshold == 0) - return -EINVAL; - - if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT)) - enable_sq_ramping = false; - - if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT)) - enable_sq_ramping = false; - - if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT)) - enable_sq_ramping = false; - - if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT)) - enable_sq_ramping = false; - - if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT)) - enable_sq_ramping = false; - - for (i = 0; i < state->performance_level_count; i++) { - sq_power_throttle = 0; - sq_power_throttle2 = 0; - - if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) && - enable_sq_ramping) { - sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER); - sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER); - sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA); - sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE); - sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO); - } else { - sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK; - sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; - } - - smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle); - smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2); - } - - return 0; -} - -static int si_enable_power_containment(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - bool enable) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - PPSMC_Result smc_result; - int ret = 0; - - if (ni_pi->enable_power_containment) { - if (enable) { - if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive); - if (smc_result != PPSMC_Result_OK) { - ret = -EINVAL; - ni_pi->pc_enabled = false; - } else { - ni_pi->pc_enabled = true; - } - } - } else { - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive); - if (smc_result != PPSMC_Result_OK) - ret = -EINVAL; - ni_pi->pc_enabled = false; - } - } - - return ret; -} - -static int si_initialize_smc_dte_tables(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - int ret = 0; - struct si_dte_data *dte_data = &si_pi->dte_data; - Smc_SIslands_DTE_Configuration *dte_tables = NULL; - u32 table_size; - u8 tdep_count; - u32 i; - - if (dte_data == NULL) - si_pi->enable_dte = false; - - if 
(si_pi->enable_dte == false) - return 0; - - if (dte_data->k <= 0) - return -EINVAL; - - dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL); - if (dte_tables == NULL) { - si_pi->enable_dte = false; - return -ENOMEM; - } - - table_size = dte_data->k; - - if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES) - table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES; - - tdep_count = dte_data->tdep_count; - if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE) - tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; - - dte_tables->K = cpu_to_be32(table_size); - dte_tables->T0 = cpu_to_be32(dte_data->t0); - dte_tables->MaxT = cpu_to_be32(dte_data->max_t); - dte_tables->WindowSize = dte_data->window_size; - dte_tables->temp_select = dte_data->temp_select; - dte_tables->DTE_mode = dte_data->dte_mode; - dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold); - - if (tdep_count > 0) - table_size--; - - for (i = 0; i < table_size; i++) { - dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]); - dte_tables->R[i] = cpu_to_be32(dte_data->r[i]); - } - - dte_tables->Tdep_count = tdep_count; - - for (i = 0; i < (u32)tdep_count; i++) { - dte_tables->T_limits[i] = dte_data->t_limits[i]; - dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]); - dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]); - } - - ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start, - (u8 *)dte_tables, - sizeof(Smc_SIslands_DTE_Configuration), - si_pi->sram_end); - kfree(dte_tables); - - return ret; -} - -static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev, - u16 *max, u16 *min) -{ - struct si_power_info *si_pi = si_get_pi(adev); - struct amdgpu_cac_leakage_table *table = - &adev->pm.dpm.dyn_state.cac_leakage_table; - u32 i; - u32 v0_loadline; - - if (table == NULL) - return -EINVAL; - - *max = 0; - *min = 0xFFFF; - - for (i = 0; i < table->count; i++) { - if (table->entries[i].vddc > *max) - *max = table->entries[i].vddc; - if (table->entries[i].vddc < *min) - *min = table->entries[i].vddc; - } - - if (si_pi->powertune_data->lkge_lut_v0_percent > 100) - return -EINVAL; - - v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100; - - if (v0_loadline > 0xFFFFUL) - return -EINVAL; - - *min = (u16)v0_loadline; - - if ((*min > *max) || (*max == 0) || (*min == 0)) - return -EINVAL; - - return 0; -} - -static u16 si_get_cac_std_voltage_step(u16 max, u16 min) -{ - return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) / - SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; -} - -static int si_init_dte_leakage_table(struct amdgpu_device *adev, - PP_SIslands_CacConfig *cac_tables, - u16 vddc_max, u16 vddc_min, u16 vddc_step, - u16 t0, u16 t_step) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 leakage; - unsigned int i, j; - s32 t; - u32 smc_leakage; - u32 scaling_factor; - u16 voltage; - - scaling_factor = si_get_smc_power_scaling_factor(adev); - - for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) { - t = (1000 * (i * t_step + t0)); - - for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { - voltage = vddc_max - (vddc_step * j); - - si_calculate_leakage_for_v_and_t(adev, - &si_pi->powertune_data->leakage_coefficients, - voltage, - t, - si_pi->dyn_powertune_data.cac_leakage, - &leakage); - - smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; - - if (smc_leakage > 0xFFFF) - smc_leakage = 0xFFFF; - - cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] 
= - cpu_to_be16((u16)smc_leakage); - } - } - return 0; -} - -static int si_init_simplified_leakage_table(struct amdgpu_device *adev, - PP_SIslands_CacConfig *cac_tables, - u16 vddc_max, u16 vddc_min, u16 vddc_step) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 leakage; - unsigned int i, j; - u32 smc_leakage; - u32 scaling_factor; - u16 voltage; - - scaling_factor = si_get_smc_power_scaling_factor(adev); - - for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) { - voltage = vddc_max - (vddc_step * j); - - si_calculate_leakage_for_v(adev, - &si_pi->powertune_data->leakage_coefficients, - si_pi->powertune_data->fixed_kt, - voltage, - si_pi->dyn_powertune_data.cac_leakage, - &leakage); - - smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4; - - if (smc_leakage > 0xFFFF) - smc_leakage = 0xFFFF; - - for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) - cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] = - cpu_to_be16((u16)smc_leakage); - } - return 0; -} - -static int si_initialize_smc_cac_tables(struct amdgpu_device *adev) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - PP_SIslands_CacConfig *cac_tables = NULL; - u16 vddc_max, vddc_min, vddc_step; - u16 t0, t_step; - u32 load_line_slope, reg; - int ret = 0; - u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100; - - if (ni_pi->enable_cac == false) - return 0; - - cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL); - if (!cac_tables) - return -ENOMEM; - - reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK; - reg |= CAC_WINDOW(si_pi->powertune_data->cac_window); - WREG32(CG_CAC_CTRL, reg); - - si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage; - si_pi->dyn_powertune_data.dc_pwr_value = - si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0]; - si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev); - si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default; - - si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000; - - ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min); - if (ret) - goto done_free; - - vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min); - vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)); - t_step = 4; - t0 = 60; - - if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage) - ret = si_init_dte_leakage_table(adev, cac_tables, - vddc_max, vddc_min, vddc_step, - t0, t_step); - else - ret = si_init_simplified_leakage_table(adev, cac_tables, - vddc_max, vddc_min, vddc_step); - if (ret) - goto done_free; - - load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100; - - cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size); - cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate; - cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n; - cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min); - cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step); - cac_tables->R_LL = cpu_to_be32(load_line_slope); - cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime); - cac_tables->calculation_repeats = cpu_to_be32(2); - cac_tables->dc_cac = cpu_to_be32(0); - cac_tables->log2_PG_LKG_SCALE = 12; - cac_tables->cac_temp = si_pi->powertune_data->operating_temp; - cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0); - cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step); - - ret = amdgpu_si_copy_bytes_to_smc(adev, 
si_pi->cac_table_start, - (u8 *)cac_tables, - sizeof(PP_SIslands_CacConfig), - si_pi->sram_end); - - if (ret) - goto done_free; - - ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us); - -done_free: - if (ret) { - ni_pi->enable_cac = false; - ni_pi->enable_power_containment = false; - } - - kfree(cac_tables); - - return ret; -} - -static int si_program_cac_config_registers(struct amdgpu_device *adev, - const struct si_cac_config_reg *cac_config_regs) -{ - const struct si_cac_config_reg *config_regs = cac_config_regs; - u32 data = 0, offset; - - if (!config_regs) - return -EINVAL; - - while (config_regs->offset != 0xFFFFFFFF) { - switch (config_regs->type) { - case SISLANDS_CACCONFIG_CGIND: - offset = SMC_CG_IND_START + config_regs->offset; - if (offset < SMC_CG_IND_END) - data = RREG32_SMC(offset); - break; - default: - data = RREG32(config_regs->offset); - break; - } - - data &= ~config_regs->mask; - data |= ((config_regs->value << config_regs->shift) & config_regs->mask); - - switch (config_regs->type) { - case SISLANDS_CACCONFIG_CGIND: - offset = SMC_CG_IND_START + config_regs->offset; - if (offset < SMC_CG_IND_END) - WREG32_SMC(offset, data); - break; - default: - WREG32(config_regs->offset, data); - break; - } - config_regs++; - } - return 0; -} - -static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - int ret; - - if ((ni_pi->enable_cac == false) || - (ni_pi->cac_configuration_required == false)) - return 0; - - ret = si_program_cac_config_registers(adev, si_pi->lcac_config); - if (ret) - return ret; - ret = si_program_cac_config_registers(adev, si_pi->cac_override); - if (ret) - return ret; - ret = si_program_cac_config_registers(adev, si_pi->cac_weights); - if (ret) - return ret; - - return 0; -} - -static int si_enable_smc_cac(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - bool enable) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - PPSMC_Result smc_result; - int ret = 0; - - if (ni_pi->enable_cac) { - if (enable) { - if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) { - if (ni_pi->support_cac_long_term_average) { - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable); - if (smc_result != PPSMC_Result_OK) - ni_pi->support_cac_long_term_average = false; - } - - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac); - if (smc_result != PPSMC_Result_OK) { - ret = -EINVAL; - ni_pi->cac_enabled = false; - } else { - ni_pi->cac_enabled = true; - } - - if (si_pi->enable_dte) { - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE); - if (smc_result != PPSMC_Result_OK) - ret = -EINVAL; - } - } - } else if (ni_pi->cac_enabled) { - if (si_pi->enable_dte) - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE); - - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac); - - ni_pi->cac_enabled = false; - - if (ni_pi->support_cac_long_term_average) - smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable); - } - } - return ret; -} - -static int si_init_smc_spll_table(struct amdgpu_device *adev) -{ - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - SMC_SISLANDS_SPLL_DIV_TABLE *spll_table; - SISLANDS_SMC_SCLK_VALUE sclk_params; - u32 fb_div, p_div; - u32 clk_s, clk_v; - u32 sclk = 0; - int ret = 0; - u32 tmp; - int i; - 
- if (si_pi->spll_table_start == 0) - return -EINVAL; - - spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL); - if (spll_table == NULL) - return -ENOMEM; - - for (i = 0; i < 256; i++) { - ret = si_calculate_sclk_params(adev, sclk, &sclk_params); - if (ret) - break; - p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT; - fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT; - clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT; - clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT; - - fb_div &= ~0x00001FFF; - fb_div >>= 1; - clk_v >>= 6; - - if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT)) - ret = -EINVAL; - if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT)) - ret = -EINVAL; - if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT)) - ret = -EINVAL; - if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT)) - ret = -EINVAL; - - if (ret) - break; - - tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) | - ((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK); - spll_table->freq[i] = cpu_to_be32(tmp); - - tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) | - ((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK); - spll_table->ss[i] = cpu_to_be32(tmp); - - sclk += 512; - } - - - if (!ret) - ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start, - (u8 *)spll_table, - sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), - si_pi->sram_end); - - if (ret) - ni_pi->enable_power_containment = false; - - kfree(spll_table); - - return ret; -} - -static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev, - u16 vce_voltage) -{ - u16 highest_leakage = 0; - struct si_power_info *si_pi = si_get_pi(adev); - int i; - - for (i = 0; i < si_pi->leakage_voltage.count; i++){ - if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage) - highest_leakage = si_pi->leakage_voltage.entries[i].voltage; - } - - if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage)) - return highest_leakage; - - return vce_voltage; -} - -static int si_get_vce_clock_voltage(struct amdgpu_device *adev, - u32 evclk, u32 ecclk, u16 *voltage) -{ - u32 i; - int ret = -EINVAL; - struct amdgpu_vce_clock_voltage_dependency_table *table = - &adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table; - - if (((evclk == 0) && (ecclk == 0)) || - (table && (table->count == 0))) { - *voltage = 0; - return 0; - } - - for (i = 0; i < table->count; i++) { - if ((evclk <= table->entries[i].evclk) && - (ecclk <= table->entries[i].ecclk)) { - *voltage = table->entries[i].v; - ret = 0; - break; - } - } - - /* if no match return the highest voltage */ - if (ret) - *voltage = table->entries[table->count - 1].v; - - *voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage); - - return ret; -} - -static bool si_dpm_vblank_too_short(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - u32 vblank_time = amdgpu_dpm_get_vblank_time(adev); - /* we never hit the non-gddr5 limit so disable it */ - u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 
450 : 0; - - if (vblank_time < switch_limit) - return true; - else - return false; - -} - -static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev, - u32 arb_freq_src, u32 arb_freq_dest) -{ - u32 mc_arb_dram_timing; - u32 mc_arb_dram_timing2; - u32 burst_time; - u32 mc_cg_config; - - switch (arb_freq_src) { - case MC_CG_ARB_FREQ_F0: - mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING); - mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); - burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT; - break; - case MC_CG_ARB_FREQ_F1: - mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_1); - mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1); - burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT; - break; - case MC_CG_ARB_FREQ_F2: - mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_2); - mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2); - burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT; - break; - case MC_CG_ARB_FREQ_F3: - mc_arb_dram_timing = RREG32(MC_ARB_DRAM_TIMING_3); - mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3); - burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT; - break; - default: - return -EINVAL; - } - - switch (arb_freq_dest) { - case MC_CG_ARB_FREQ_F0: - WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing); - WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2); - WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK); - break; - case MC_CG_ARB_FREQ_F1: - WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing); - WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2); - WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK); - break; - case MC_CG_ARB_FREQ_F2: - WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing); - WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2); - WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK); - break; - case MC_CG_ARB_FREQ_F3: - WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing); - WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2); - WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK); - break; - default: - return -EINVAL; - } - - mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F; - WREG32(MC_CG_CONFIG, mc_cg_config); - WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK); - - return 0; -} - -static void ni_update_current_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct si_ps *new_ps = si_get_ps(rps); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct ni_power_info *ni_pi = ni_get_pi(adev); - - eg_pi->current_rps = *rps; - ni_pi->current_ps = *new_ps; - eg_pi->current_rps.ps_priv = &ni_pi->current_ps; - adev->pm.dpm.current_ps = &eg_pi->current_rps; -} - -static void ni_update_requested_ps(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - struct si_ps *new_ps = si_get_ps(rps); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct ni_power_info *ni_pi = ni_get_pi(adev); - - eg_pi->requested_rps = *rps; - ni_pi->requested_ps = *new_ps; - eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps; - adev->pm.dpm.requested_ps = &eg_pi->requested_rps; -} - -static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev, - struct amdgpu_ps *new_ps, - struct amdgpu_ps *old_ps) -{ - struct si_ps *new_state = si_get_ps(new_ps); - struct si_ps *current_state = si_get_ps(old_ps); - - if ((new_ps->vclk == old_ps->vclk) && - (new_ps->dclk == old_ps->dclk)) - return; - - if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >= - 
current_state->performance_levels[current_state->performance_level_count - 1].sclk) - return; - - amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk); -} - -static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev, - struct amdgpu_ps *new_ps, - struct amdgpu_ps *old_ps) -{ - struct si_ps *new_state = si_get_ps(new_ps); - struct si_ps *current_state = si_get_ps(old_ps); - - if ((new_ps->vclk == old_ps->vclk) && - (new_ps->dclk == old_ps->dclk)) - return; - - if (new_state->performance_levels[new_state->performance_level_count - 1].sclk < - current_state->performance_levels[current_state->performance_level_count - 1].sclk) - return; - - amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk); -} - -static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage) -{ - unsigned int i; - - for (i = 0; i < table->count; i++) - if (voltage <= table->entries[i].value) - return table->entries[i].value; - - return table->entries[table->count - 1].value; -} - -static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks, - u32 max_clock, u32 requested_clock) -{ - unsigned int i; - - if ((clocks == NULL) || (clocks->count == 0)) - return (requested_clock < max_clock) ? requested_clock : max_clock; - - for (i = 0; i < clocks->count; i++) { - if (clocks->values[i] >= requested_clock) - return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock; - } - - return (clocks->values[clocks->count - 1] < max_clock) ? - clocks->values[clocks->count - 1] : max_clock; -} - -static u32 btc_get_valid_mclk(struct amdgpu_device *adev, - u32 max_mclk, u32 requested_mclk) -{ - return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values, - max_mclk, requested_mclk); -} - -static u32 btc_get_valid_sclk(struct amdgpu_device *adev, - u32 max_sclk, u32 requested_sclk) -{ - return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values, - max_sclk, requested_sclk); -} - -static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table, - u32 *max_clock) -{ - u32 i, clock = 0; - - if ((table == NULL) || (table->count == 0)) { - *max_clock = clock; - return; - } - - for (i = 0; i < table->count; i++) { - if (clock < table->entries[i].clk) - clock = table->entries[i].clk; - } - *max_clock = clock; -} - -static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table, - u32 clock, u16 max_voltage, u16 *voltage) -{ - u32 i; - - if ((table == NULL) || (table->count == 0)) - return; - - for (i= 0; i < table->count; i++) { - if (clock <= table->entries[i].clk) { - if (*voltage < table->entries[i].v) - *voltage = (u16)((table->entries[i].v < max_voltage) ? - table->entries[i].v : max_voltage); - return; - } - } - - *voltage = (*voltage > max_voltage) ? 
*voltage : max_voltage; -} - -static void btc_adjust_clock_combinations(struct amdgpu_device *adev, - const struct amdgpu_clock_and_voltage_limits *max_limits, - struct rv7xx_pl *pl) -{ - - if ((pl->mclk == 0) || (pl->sclk == 0)) - return; - - if (pl->mclk == pl->sclk) - return; - - if (pl->mclk > pl->sclk) { - if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio) - pl->sclk = btc_get_valid_sclk(adev, - max_limits->sclk, - (pl->mclk + - (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) / - adev->pm.dpm.dyn_state.mclk_sclk_ratio); - } else { - if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta) - pl->mclk = btc_get_valid_mclk(adev, - max_limits->mclk, - pl->sclk - - adev->pm.dpm.dyn_state.sclk_mclk_delta); - } -} - -static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev, - u16 max_vddc, u16 max_vddci, - u16 *vddc, u16 *vddci) -{ - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - u16 new_voltage; - - if ((0 == *vddc) || (0 == *vddci)) - return; - - if (*vddc > *vddci) { - if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { - new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table, - (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta)); - *vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci; - } - } else { - if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) { - new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table, - (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta)); - *vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc; - } - } -} - -static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b, - u32 *p, u32 *u) -{ - u32 b_c = 0; - u32 i_c; - u32 tmp; - - i_c = (i * r_c) / 100; - tmp = i_c >> p_b; - - while (tmp) { - b_c++; - tmp >>= 1; - } - - *u = (b_c + 1) / 2; - *p = i_c / (1 << (2 * (*u))); -} - -static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th) -{ - u32 k, a, ah, al; - u32 t1; - - if ((fl == 0) || (fh == 0) || (fl > fh)) - return -EINVAL; - - k = (100 * fh) / fl; - t1 = (t * (k - 100)); - a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100)); - a = (a + 5) / 10; - ah = ((a * t) + 5000) / 10000; - al = a - ah; - - *th = t - ah; - *tl = t + al; - - return 0; -} - -static bool r600_is_uvd_state(u32 class, u32 class2) -{ - if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - return true; - if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) - return true; - if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) - return true; - if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) - return true; - if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) - return true; - return false; -} - -static u8 rv770_get_memory_module_index(struct amdgpu_device *adev) -{ - return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff); -} - -static void rv770_get_max_vddc(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - u16 vddc; - - if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc)) - pi->max_vddc = 0; - else - pi->max_vddc = vddc; -} - -static void rv770_get_engine_memory_ss(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct amdgpu_atom_ss ss; - - pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, - ASIC_INTERNAL_ENGINE_SS, 0); - pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss, - ASIC_INTERNAL_MEMORY_SS, 0); - - if (pi->sclk_ss || pi->mclk_ss) - pi->dynamic_ss = true; - else - pi->dynamic_ss = false; -} - - -static void si_apply_state_adjust_rules(struct amdgpu_device *adev, - struct amdgpu_ps *rps) -{ - 
struct si_ps *ps = si_get_ps(rps); - struct amdgpu_clock_and_voltage_limits *max_limits; - bool disable_mclk_switching = false; - bool disable_sclk_switching = false; - u32 mclk, sclk; - u16 vddc, vddci, min_vce_voltage = 0; - u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc; - u32 max_sclk = 0, max_mclk = 0; - int i; - - if (adev->asic_type == CHIP_HAINAN) { - if ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0xC3) || - (adev->pdev->device == 0x6664) || - (adev->pdev->device == 0x6665) || - (adev->pdev->device == 0x6667)) { - max_sclk = 75000; - } - if ((adev->pdev->revision == 0xC3) || - (adev->pdev->device == 0x6665)) { - max_sclk = 60000; - max_mclk = 80000; - } - } else if (adev->asic_type == CHIP_OLAND) { - if ((adev->pdev->revision == 0xC7) || - (adev->pdev->revision == 0x80) || - (adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87) || - (adev->pdev->device == 0x6604) || - (adev->pdev->device == 0x6605)) { - max_sclk = 75000; - } - } - - if (rps->vce_active) { - rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk; - rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk; - si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk, - &min_vce_voltage); - } else { - rps->evclk = 0; - rps->ecclk = 0; - } - - if ((adev->pm.dpm.new_active_crtc_count > 1) || - si_dpm_vblank_too_short(adev)) - disable_mclk_switching = true; - - if (rps->vclk || rps->dclk) { - disable_mclk_switching = true; - disable_sclk_switching = true; - } - - if (adev->pm.ac_power) - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - else - max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc; - - for (i = ps->performance_level_count - 2; i >= 0; i--) { - if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc) - ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc; - } - if (adev->pm.ac_power == false) { - for (i = 0; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].mclk > max_limits->mclk) - ps->performance_levels[i].mclk = max_limits->mclk; - if (ps->performance_levels[i].sclk > max_limits->sclk) - ps->performance_levels[i].sclk = max_limits->sclk; - if (ps->performance_levels[i].vddc > max_limits->vddc) - ps->performance_levels[i].vddc = max_limits->vddc; - if (ps->performance_levels[i].vddci > max_limits->vddci) - ps->performance_levels[i].vddci = max_limits->vddci; - } - } - - /* limit clocks to max supported clocks based on voltage dependency tables */ - btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - &max_sclk_vddc); - btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - &max_mclk_vddci); - btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - &max_mclk_vddc); - - for (i = 0; i < ps->performance_level_count; i++) { - if (max_sclk_vddc) { - if (ps->performance_levels[i].sclk > max_sclk_vddc) - ps->performance_levels[i].sclk = max_sclk_vddc; - } - if (max_mclk_vddci) { - if (ps->performance_levels[i].mclk > max_mclk_vddci) - ps->performance_levels[i].mclk = max_mclk_vddci; - } - if (max_mclk_vddc) { - if (ps->performance_levels[i].mclk > max_mclk_vddc) - ps->performance_levels[i].mclk = max_mclk_vddc; - } - if (max_mclk) { - if (ps->performance_levels[i].mclk > max_mclk) - ps->performance_levels[i].mclk = max_mclk; - } - if (max_sclk) { - if (ps->performance_levels[i].sclk > max_sclk) - ps->performance_levels[i].sclk 
= max_sclk; - } - } - - /* XXX validate the min clocks required for display */ - - if (disable_mclk_switching) { - mclk = ps->performance_levels[ps->performance_level_count - 1].mclk; - vddci = ps->performance_levels[ps->performance_level_count - 1].vddci; - } else { - mclk = ps->performance_levels[0].mclk; - vddci = ps->performance_levels[0].vddci; - } - - if (disable_sclk_switching) { - sclk = ps->performance_levels[ps->performance_level_count - 1].sclk; - vddc = ps->performance_levels[ps->performance_level_count - 1].vddc; - } else { - sclk = ps->performance_levels[0].sclk; - vddc = ps->performance_levels[0].vddc; - } - - if (rps->vce_active) { - if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk) - sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk; - if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk) - mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk; - } - - /* adjusted low state */ - ps->performance_levels[0].sclk = sclk; - ps->performance_levels[0].mclk = mclk; - ps->performance_levels[0].vddc = vddc; - ps->performance_levels[0].vddci = vddci; - - if (disable_sclk_switching) { - sclk = ps->performance_levels[0].sclk; - for (i = 1; i < ps->performance_level_count; i++) { - if (sclk < ps->performance_levels[i].sclk) - sclk = ps->performance_levels[i].sclk; - } - for (i = 0; i < ps->performance_level_count; i++) { - ps->performance_levels[i].sclk = sclk; - ps->performance_levels[i].vddc = vddc; - } - } else { - for (i = 1; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk) - ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk; - if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc) - ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc; - } - } - - if (disable_mclk_switching) { - mclk = ps->performance_levels[0].mclk; - for (i = 1; i < ps->performance_level_count; i++) { - if (mclk < ps->performance_levels[i].mclk) - mclk = ps->performance_levels[i].mclk; - } - for (i = 0; i < ps->performance_level_count; i++) { - ps->performance_levels[i].mclk = mclk; - ps->performance_levels[i].vddci = vddci; - } - } else { - for (i = 1; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk) - ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk; - if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci) - ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci; - } - } - - for (i = 0; i < ps->performance_level_count; i++) - btc_adjust_clock_combinations(adev, max_limits, - &ps->performance_levels[i]); - - for (i = 0; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].vddc < min_vce_voltage) - ps->performance_levels[i].vddc = min_vce_voltage; - btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk, - ps->performance_levels[i].sclk, - max_limits->vddc, &ps->performance_levels[i].vddc); - btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - ps->performance_levels[i].mclk, - max_limits->vddci, &ps->performance_levels[i].vddci); - btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - ps->performance_levels[i].mclk, - max_limits->vddc, &ps->performance_levels[i].vddc); - btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk, - adev->clock.current_dispclk, - max_limits->vddc, 
&ps->performance_levels[i].vddc); - } - - for (i = 0; i < ps->performance_level_count; i++) { - btc_apply_voltage_delta_rules(adev, - max_limits->vddc, max_limits->vddci, - &ps->performance_levels[i].vddc, - &ps->performance_levels[i].vddci); - } - - ps->dc_compatible = true; - for (i = 0; i < ps->performance_level_count; i++) { - if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc) - ps->dc_compatible = false; - } -} - -#if 0 -static int si_read_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 *value) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - return amdgpu_si_read_smc_sram_dword(adev, - si_pi->soft_regs_start + reg_offset, value, - si_pi->sram_end); -} -#endif - -static int si_write_smc_soft_register(struct amdgpu_device *adev, - u16 reg_offset, u32 value) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - return amdgpu_si_write_smc_sram_dword(adev, - si_pi->soft_regs_start + reg_offset, - value, si_pi->sram_end); -} - -static bool si_is_special_1gb_platform(struct amdgpu_device *adev) -{ - bool ret = false; - u32 tmp, width, row, column, bank, density; - bool is_memory_gddr5, is_special; - - tmp = RREG32(MC_SEQ_MISC0); - is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT)); - is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT)) - & (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT)); - - WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb); - width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32; - - tmp = RREG32(MC_ARB_RAMCFG); - row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10; - column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8; - bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2; - - density = (1 << (row + column - 20 + bank)) * width; - - if ((adev->pdev->device == 0x6819) && - is_memory_gddr5 && is_special && (density == 0x400)) - ret = true; - - return ret; -} - -static void si_get_leakage_vddc(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u16 vddc, count = 0; - int i, ret; - - for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) { - ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i); - - if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) { - si_pi->leakage_voltage.entries[count].voltage = vddc; - si_pi->leakage_voltage.entries[count].leakage_index = - SISLANDS_LEAKAGE_INDEX0 + i; - count++; - } - } - si_pi->leakage_voltage.count = count; -} - -static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev, - u32 index, u16 *leakage_voltage) -{ - struct si_power_info *si_pi = si_get_pi(adev); - int i; - - if (leakage_voltage == NULL) - return -EINVAL; - - if ((index & 0xff00) != 0xff00) - return -EINVAL; - - if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1) - return -EINVAL; - - if (index < SISLANDS_LEAKAGE_INDEX0) - return -EINVAL; - - for (i = 0; i < si_pi->leakage_voltage.count; i++) { - if (si_pi->leakage_voltage.entries[i].leakage_index == index) { - *leakage_voltage = si_pi->leakage_voltage.entries[i].voltage; - return 0; - } - } - return -EAGAIN; -} - -static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - bool want_thermal_protection; - enum si_dpm_event_src dpm_event_src; - - switch (sources) { - case 0: - default: - want_thermal_protection = false; - 
break; - case (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL): - want_thermal_protection = true; - dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL; - break; - case (1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL): - want_thermal_protection = true; - dpm_event_src = SI_DPM_EVENT_SRC_EXTERNAL; - break; - case ((1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL) | - (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL)): - want_thermal_protection = true; - dpm_event_src = SI_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL; - break; - } - - if (want_thermal_protection) { - WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK); - if (pi->thermal_protection) - WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); - } else { - WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); - } -} - -static void si_enable_auto_throttle_source(struct amdgpu_device *adev, - enum si_dpm_auto_throttle_src source, - bool enable) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - - if (enable) { - if (!(pi->active_auto_throttle_sources & (1 << source))) { - pi->active_auto_throttle_sources |= 1 << source; - si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); - } - } else { - if (pi->active_auto_throttle_sources & (1 << source)) { - pi->active_auto_throttle_sources &= ~(1 << source); - si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources); - } - } -} - -static void si_start_dpm(struct amdgpu_device *adev) -{ - WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN); -} - -static void si_stop_dpm(struct amdgpu_device *adev) -{ - WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN); -} - -static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable) -{ - if (enable) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF); - else - WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF); - -} - -#if 0 -static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev, - u32 thermal_level) -{ - PPSMC_Result ret; - - if (thermal_level == 0) { - ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); - if (ret == PPSMC_Result_OK) - return 0; - else - return -EINVAL; - } - return 0; -} - -static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev) -{ - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true); -} -#endif - -#if 0 -static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power) -{ - if (ac_power) - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ? - 0 : -EINVAL; - - return 0; -} -#endif - -static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev, - PPSMC_Msg msg, u32 parameter) -{ - WREG32(SMC_SCRATCH0, parameter); - return amdgpu_si_send_msg_to_smc(adev, msg); -} - -static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev) -{ - if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK) - return -EINVAL; - - return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ? 
- 0 : -EINVAL; -} - -static int si_dpm_force_performance_level(void *handle, - enum amd_dpm_forced_level level) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ps *rps = adev->pm.dpm.current_ps; - struct si_ps *ps = si_get_ps(rps); - u32 levels = ps->performance_level_count; - - if (level == AMD_DPM_FORCED_LEVEL_HIGH) { - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) - return -EINVAL; - - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK) - return -EINVAL; - } else if (level == AMD_DPM_FORCED_LEVEL_LOW) { - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) - return -EINVAL; - - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK) - return -EINVAL; - } else if (level == AMD_DPM_FORCED_LEVEL_AUTO) { - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK) - return -EINVAL; - - if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK) - return -EINVAL; - } - - adev->pm.dpm.forced_level = level; - - return 0; -} - -#if 0 -static int si_set_boot_state(struct amdgpu_device *adev) -{ - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} -#endif - -static int si_set_powergating_by_smu(void *handle, - uint32_t block_type, - bool gate) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - switch (block_type) { - case AMD_IP_BLOCK_TYPE_UVD: - if (!gate) { - adev->pm.dpm.uvd_active = true; - adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD; - } else { - adev->pm.dpm.uvd_active = false; - } - - amdgpu_legacy_dpm_compute_clocks(handle); - break; - case AMD_IP_BLOCK_TYPE_VCE: - if (!gate) { - adev->pm.dpm.vce_active = true; - /* XXX select vce level based on ring/task */ - adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL; - } else { - adev->pm.dpm.vce_active = false; - } - - amdgpu_legacy_dpm_compute_clocks(handle); - break; - default: - break; - } - return 0; -} - -static int si_set_sw_state(struct amdgpu_device *adev) -{ - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static int si_halt_smc(struct amdgpu_device *adev) -{ - if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK) - return -EINVAL; - - return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static int si_resume_smc(struct amdgpu_device *adev) -{ - if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK) - return -EINVAL; - - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ? 
- 0 : -EINVAL; -} - -static void si_dpm_start_smc(struct amdgpu_device *adev) -{ - amdgpu_si_program_jump_on_start(adev); - amdgpu_si_start_smc(adev); - amdgpu_si_smc_clock(adev, true); -} - -static void si_dpm_stop_smc(struct amdgpu_device *adev) -{ - amdgpu_si_reset_smc(adev); - amdgpu_si_smc_clock(adev, false); -} - -static int si_process_firmware_header(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 tmp; - int ret; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_stateTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->state_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_softRegisters, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->soft_regs_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->mc_reg_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_fanTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->fan_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->arb_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->cac_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->dte_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_spllTable, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->spll_table_start = tmp; - - ret = amdgpu_si_read_smc_sram_dword(adev, - SISLANDS_SMC_FIRMWARE_HEADER_LOCATION + - SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - si_pi->papm_cfg_table_start = tmp; - - return ret; -} - -static void si_read_clock_registers(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL); - si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2); - si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3); - si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4); - si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM); - si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2); - si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL); - si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL); - si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL); - si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL); - si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL); - si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1); - si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2); - 
si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1); - si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2); -} - -static void si_enable_thermal_protection(struct amdgpu_device *adev, - bool enable) -{ - if (enable) - WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS); - else - WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS); -} - -static void si_enable_acpi_power_management(struct amdgpu_device *adev) -{ - WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN); -} - -#if 0 -static int si_enter_ulp_state(struct amdgpu_device *adev) -{ - WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower); - - udelay(25000); - - return 0; -} - -static int si_exit_ulp_state(struct amdgpu_device *adev) -{ - int i; - - WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower); - - udelay(7000); - - for (i = 0; i < adev->usec_timeout; i++) { - if (RREG32(SMC_RESP_0) == 1) - break; - udelay(1000); - } - - return 0; -} -#endif - -static int si_notify_smc_display_change(struct amdgpu_device *adev, - bool has_display) -{ - PPSMC_Msg msg = has_display ? - PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay; - - return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static void si_program_response_times(struct amdgpu_device *adev) -{ - u32 voltage_response_time, acpi_delay_time, vbi_time_out; - u32 vddc_dly, acpi_dly, vbi_dly; - u32 reference_clock; - - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1); - - voltage_response_time = (u32)adev->pm.dpm.voltage_response_time; - - if (voltage_response_time == 0) - voltage_response_time = 1000; - - acpi_delay_time = 15000; - vbi_time_out = 100000; - - reference_clock = amdgpu_asic_get_xclk(adev); - - vddc_dly = (voltage_response_time * reference_clock) / 100; - acpi_dly = (acpi_delay_time * reference_clock) / 100; - vbi_dly = (vbi_time_out * reference_clock) / 100; - - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg, vddc_dly); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi, acpi_dly); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA); -} - -static void si_program_ds_registers(struct amdgpu_device *adev) -{ - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - u32 tmp; - - /* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */ - if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0) - tmp = 0x10; - else - tmp = 0x1; - - if (eg_pi->sclk_deep_sleep) { - WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK); - WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR, - ~AUTOSCALE_ON_SS_CLEAR); - } -} - -static void si_program_display_gap(struct amdgpu_device *adev) -{ - u32 tmp, pipe; - int i; - - tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK); - if (adev->pm.dpm.new_active_crtc_count > 0) - tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); - else - tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE); - - if (adev->pm.dpm.new_active_crtc_count > 1) - tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM); - else - tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE); - - WREG32(CG_DISPLAY_GAP_CNTL, tmp); - - tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG); - pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT; - - if ((adev->pm.dpm.new_active_crtc_count > 0) && - (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) { - /* find the first active crtc */ - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if 
(adev->pm.dpm.new_active_crtcs & (1 << i)) - break; - } - if (i == adev->mode_info.num_crtc) - pipe = 0; - else - pipe = i; - - tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK; - tmp |= DCCG_DISP1_SLOW_SELECT(pipe); - WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp); - } - - /* Setting this to false forces the performance state to low if the crtcs are disabled. - * This can be a problem on PowerXpress systems or if you want to use the card - * for offscreen rendering or compute if there are no crtcs enabled. - */ - si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0); -} - -static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - - if (enable) { - if (pi->sclk_ss) - WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN); - } else { - WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN); - WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN); - } -} - -static void si_setup_bsp(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - u32 xclk = amdgpu_asic_get_xclk(adev); - - r600_calculate_u_and_p(pi->asi, - xclk, - 16, - &pi->bsp, - &pi->bsu); - - r600_calculate_u_and_p(pi->pasi, - xclk, - 16, - &pi->pbsp, - &pi->pbsu); - - - pi->dsp = BSP(pi->bsp) | BSU(pi->bsu); - pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu); - - WREG32(CG_BSP, pi->dsp); -} - -static void si_program_git(struct amdgpu_device *adev) -{ - WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK); -} - -static void si_program_tp(struct amdgpu_device *adev) -{ - int i; - enum r600_td td = R600_TD_DFLT; - - for (i = 0; i < R600_PM_NUMBER_OF_TC; i++) - WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i]))); - - if (td == R600_TD_AUTO) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL); - else - WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL); - - if (td == R600_TD_UP) - WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE); - - if (td == R600_TD_DOWN) - WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE); -} - -static void si_program_tpp(struct amdgpu_device *adev) -{ - WREG32(CG_TPC, R600_TPC_DFLT); -} - -static void si_program_sstp(struct amdgpu_device *adev) -{ - WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT))); -} - -static void si_enable_display_gap(struct amdgpu_device *adev) -{ - u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL); - - tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK); - tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) | - DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE)); - - tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK); - tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) | - DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE)); - WREG32(CG_DISPLAY_GAP_CNTL, tmp); -} - -static void si_program_vc(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - - WREG32(CG_FTV, pi->vrc); -} - -static void si_clear_vc(struct amdgpu_device *adev) -{ - WREG32(CG_FTV, 0); -} - -static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock) -{ - u8 mc_para_index; - - if (memory_clock < 10000) - mc_para_index = 0; - else if (memory_clock >= 80000) - mc_para_index = 0x0f; - else - mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1); - return mc_para_index; -} - -static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode) -{ - u8 mc_para_index; - - if (strobe_mode) { - if (memory_clock < 12500) - mc_para_index = 0x00; - else if (memory_clock > 47500) - mc_para_index = 0x0f; - else - mc_para_index = (u8)((memory_clock - 10000) / 2500); - } else { - if 
(memory_clock < 65000) - mc_para_index = 0x00; - else if (memory_clock > 135000) - mc_para_index = 0x0f; - else - mc_para_index = (u8)((memory_clock - 60000) / 5000); - } - return mc_para_index; -} - -static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - bool strobe_mode = false; - u8 result = 0; - - if (mclk <= pi->mclk_strobe_mode_threshold) - strobe_mode = true; - - if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) - result = si_get_mclk_frequency_ratio(mclk, strobe_mode); - else - result = si_get_ddr3_mclk_frequency_ratio(mclk); - - if (strobe_mode) - result |= SISLANDS_SMC_STROBE_ENABLE; - - return result; -} - -static int si_upload_firmware(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - amdgpu_si_reset_smc(adev); - amdgpu_si_smc_clock(adev, false); - - return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end); -} - -static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev, - const struct atom_voltage_table *table, - const struct amdgpu_phase_shedding_limits_table *limits) -{ - u32 data, num_bits, num_levels; - - if ((table == NULL) || (limits == NULL)) - return false; - - data = table->mask_low; - - num_bits = hweight32(data); - - if (num_bits == 0) - return false; - - num_levels = (1 << num_bits); - - if (table->count != num_levels) - return false; - - if (limits->count != (num_levels - 1)) - return false; - - return true; -} - -static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev, - u32 max_voltage_steps, - struct atom_voltage_table *voltage_table) -{ - unsigned int i, diff; - - if (voltage_table->count <= max_voltage_steps) - return; - - diff = voltage_table->count - max_voltage_steps; - - for (i= 0; i < max_voltage_steps; i++) - voltage_table->entries[i] = voltage_table->entries[i + diff]; - - voltage_table->count = max_voltage_steps; -} - -static int si_get_svi2_voltage_table(struct amdgpu_device *adev, - struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table, - struct atom_voltage_table *voltage_table) -{ - u32 i; - - if (voltage_dependency_table == NULL) - return -EINVAL; - - voltage_table->mask_low = 0; - voltage_table->phase_delay = 0; - - voltage_table->count = voltage_dependency_table->count; - for (i = 0; i < voltage_table->count; i++) { - voltage_table->entries[i].value = voltage_dependency_table->entries[i].v; - voltage_table->entries[i].smio_low = 0; - } - - return 0; -} - -static int si_construct_voltage_tables(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - int ret; - - if (pi->voltage_control) { - ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, - VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table); - if (ret) - return ret; - - if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) - si_trim_voltage_table_to_fit_state_table(adev, - SISLANDS_MAX_NO_VREG_STEPS, - &eg_pi->vddc_voltage_table); - } else if (si_pi->voltage_control_svi2) { - ret = si_get_svi2_voltage_table(adev, - &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk, - &eg_pi->vddc_voltage_table); - if (ret) - return ret; - } else { - return -EINVAL; - } - - if (eg_pi->vddci_control) { - ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI, - VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table); - if (ret) - return ret; - - if (eg_pi->vddci_voltage_table.count 
> SISLANDS_MAX_NO_VREG_STEPS) - si_trim_voltage_table_to_fit_state_table(adev, - SISLANDS_MAX_NO_VREG_STEPS, - &eg_pi->vddci_voltage_table); - } - if (si_pi->vddci_control_svi2) { - ret = si_get_svi2_voltage_table(adev, - &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk, - &eg_pi->vddci_voltage_table); - if (ret) - return ret; - } - - if (pi->mvdd_control) { - ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC, - VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table); - - if (ret) { - pi->mvdd_control = false; - return ret; - } - - if (si_pi->mvdd_voltage_table.count == 0) { - pi->mvdd_control = false; - return -EINVAL; - } - - if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS) - si_trim_voltage_table_to_fit_state_table(adev, - SISLANDS_MAX_NO_VREG_STEPS, - &si_pi->mvdd_voltage_table); - } - - if (si_pi->vddc_phase_shed_control) { - ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC, - VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table); - if (ret) - si_pi->vddc_phase_shed_control = false; - - if ((si_pi->vddc_phase_shed_table.count == 0) || - (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS)) - si_pi->vddc_phase_shed_control = false; - } - - return 0; -} - -static void si_populate_smc_voltage_table(struct amdgpu_device *adev, - const struct atom_voltage_table *voltage_table, - SISLANDS_SMC_STATETABLE *table) -{ - unsigned int i; - - for (i = 0; i < voltage_table->count; i++) - table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low); -} - -static int si_populate_smc_voltage_tables(struct amdgpu_device *adev, - SISLANDS_SMC_STATETABLE *table) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - u8 i; - - if (si_pi->voltage_control_svi2) { - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc, - si_pi->svc_gpio_id); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd, - si_pi->svd_gpio_id); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type, - 2); - } else { - if (eg_pi->vddc_voltage_table.count) { - si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table); - table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] = - cpu_to_be32(eg_pi->vddc_voltage_table.mask_low); - - for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) { - if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) { - table->maxVDDCIndexInPPTable = i; - break; - } - } - } - - if (eg_pi->vddci_voltage_table.count) { - si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table); - - table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] = - cpu_to_be32(eg_pi->vddci_voltage_table.mask_low); - } - - - if (si_pi->mvdd_voltage_table.count) { - si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table); - - table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] = - cpu_to_be32(si_pi->mvdd_voltage_table.mask_low); - } - - if (si_pi->vddc_phase_shed_control) { - if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table, - &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) { - si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table); - - table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] = - cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low); - - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay, - 
(u32)si_pi->vddc_phase_shed_table.phase_delay); - } else { - si_pi->vddc_phase_shed_control = false; - } - } - } - - return 0; -} - -static int si_populate_voltage_value(struct amdgpu_device *adev, - const struct atom_voltage_table *table, - u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage) -{ - unsigned int i; - - for (i = 0; i < table->count; i++) { - if (value <= table->entries[i].value) { - voltage->index = (u8)i; - voltage->value = cpu_to_be16(table->entries[i].value); - break; - } - } - - if (i >= table->count) - return -EINVAL; - - return 0; -} - -static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk, - SISLANDS_SMC_VOLTAGE_VALUE *voltage) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - - if (pi->mvdd_control) { - if (mclk <= pi->mvdd_split_frequency) - voltage->index = 0; - else - voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1; - - voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value); - } - return 0; -} - -static int si_get_std_voltage_value(struct amdgpu_device *adev, - SISLANDS_SMC_VOLTAGE_VALUE *voltage, - u16 *std_voltage) -{ - u16 v_index; - bool voltage_found = false; - *std_voltage = be16_to_cpu(voltage->value); - - if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) { - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) { - if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL) - return -EINVAL; - - for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { - if (be16_to_cpu(voltage->value) == - (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { - voltage_found = true; - if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) - *std_voltage = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; - else - *std_voltage = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; - break; - } - } - - if (!voltage_found) { - for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) { - if (be16_to_cpu(voltage->value) <= - (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) { - voltage_found = true; - if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count) - *std_voltage = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc; - else - *std_voltage = - adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc; - break; - } - } - } - } else { - if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count) - *std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc; - } - } - - return 0; -} - -static int si_populate_std_voltage_value(struct amdgpu_device *adev, - u16 value, u8 index, - SISLANDS_SMC_VOLTAGE_VALUE *voltage) -{ - voltage->index = index; - voltage->value = cpu_to_be16(value); - - return 0; -} - -static int si_populate_phase_shedding_value(struct amdgpu_device *adev, - const struct amdgpu_phase_shedding_limits_table *limits, - u16 voltage, u32 sclk, u32 mclk, - SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage) -{ - unsigned int i; - - for (i = 0; i < limits->count; i++) { - if ((voltage <= limits->entries[i].voltage) && - (sclk <= limits->entries[i].sclk) && - (mclk <= limits->entries[i].mclk)) - break; - } - - smc_voltage->phase_settings = (u8)i; - - return 0; -} - -static int si_init_arb_table_index(struct amdgpu_device *adev) -{ 
- struct si_power_info *si_pi = si_get_pi(adev); - u32 tmp; - int ret; - - ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - tmp &= 0x00FFFFFF; - tmp |= MC_CG_ARB_FREQ_F1 << 24; - - return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start, - tmp, si_pi->sram_end); -} - -static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev) -{ - return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1); -} - -static int si_reset_to_default(struct amdgpu_device *adev) -{ - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ? - 0 : -EINVAL; -} - -static int si_force_switch_to_arb_f0(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 tmp; - int ret; - - ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start, - &tmp, si_pi->sram_end); - if (ret) - return ret; - - tmp = (tmp >> 24) & 0xff; - - if (tmp == MC_CG_ARB_FREQ_F0) - return 0; - - return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0); -} - -static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev, - u32 engine_clock) -{ - u32 dram_rows; - u32 dram_refresh_rate; - u32 mc_arb_rfsh_rate; - u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT; - - if (tmp >= 4) - dram_rows = 16384; - else - dram_rows = 1 << (tmp + 10); - - dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3); - mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64; - - return mc_arb_rfsh_rate; -} - -static int si_populate_memory_timing_parameters(struct amdgpu_device *adev, - struct rv7xx_pl *pl, - SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs) -{ - u32 dram_timing; - u32 dram_timing2; - u32 burst_time; - - arb_regs->mc_arb_rfsh_rate = - (u8)si_calculate_memory_refresh_rate(adev, pl->sclk); - - amdgpu_atombios_set_engine_dram_timings(adev, - pl->sclk, - pl->mclk); - - dram_timing = RREG32(MC_ARB_DRAM_TIMING); - dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2); - burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK; - - arb_regs->mc_arb_dram_timing = cpu_to_be32(dram_timing); - arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2); - arb_regs->mc_arb_burst_time = (u8)burst_time; - - return 0; -} - -static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state, - unsigned int first_arb_set) -{ - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ps *state = si_get_ps(amdgpu_state); - SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 }; - int i, ret = 0; - - for (i = 0; i < state->performance_level_count; i++) { - ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs); - if (ret) - break; - ret = amdgpu_si_copy_bytes_to_smc(adev, - si_pi->arb_table_start + - offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) + - sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i), - (u8 *)&arb_regs, - sizeof(SMC_SIslands_MCArbDramTimingRegisterSet), - si_pi->sram_end); - if (ret) - break; - } - - return ret; -} - -static int si_program_memory_timing_parameters(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state) -{ - return si_do_program_memory_timing_parameters(adev, amdgpu_new_state, - SISLANDS_DRIVER_STATE_ARB_INDEX); -} - -static int si_populate_initial_mvdd_value(struct amdgpu_device *adev, - struct SISLANDS_SMC_VOLTAGE_VALUE *voltage) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct 
si_power_info *si_pi = si_get_pi(adev); - - if (pi->mvdd_control) - return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table, - si_pi->mvdd_bootup_value, voltage); - - return 0; -} - -static int si_populate_smc_initial_state(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_initial_state, - SISLANDS_SMC_STATETABLE *table) -{ - struct si_ps *initial_state = si_get_ps(amdgpu_initial_state); - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - u32 reg; - int ret; - - table->initialState.level.mclk.vDLL_CNTL = - cpu_to_be32(si_pi->clock_registers.dll_cntl); - table->initialState.level.mclk.vMCLK_PWRMGT_CNTL = - cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl); - table->initialState.level.mclk.vMPLL_AD_FUNC_CNTL = - cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl); - table->initialState.level.mclk.vMPLL_DQ_FUNC_CNTL = - cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl); - table->initialState.level.mclk.vMPLL_FUNC_CNTL = - cpu_to_be32(si_pi->clock_registers.mpll_func_cntl); - table->initialState.level.mclk.vMPLL_FUNC_CNTL_1 = - cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1); - table->initialState.level.mclk.vMPLL_FUNC_CNTL_2 = - cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2); - table->initialState.level.mclk.vMPLL_SS = - cpu_to_be32(si_pi->clock_registers.mpll_ss1); - table->initialState.level.mclk.vMPLL_SS2 = - cpu_to_be32(si_pi->clock_registers.mpll_ss2); - - table->initialState.level.mclk.mclk_value = - cpu_to_be32(initial_state->performance_levels[0].mclk); - - table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL = - cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl); - table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = - cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2); - table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = - cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3); - table->initialState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = - cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4); - table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM = - cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum); - table->initialState.level.sclk.vCG_SPLL_SPREAD_SPECTRUM_2 = - cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2); - - table->initialState.level.sclk.sclk_value = - cpu_to_be32(initial_state->performance_levels[0].sclk); - - table->initialState.level.arbRefreshState = - SISLANDS_INITIAL_STATE_ARB_INDEX; - - table->initialState.level.ACIndex = 0; - - ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, - initial_state->performance_levels[0].vddc, - &table->initialState.level.vddc); - - if (!ret) { - u16 std_vddc; - - ret = si_get_std_voltage_value(adev, - &table->initialState.level.vddc, - &std_vddc); - if (!ret) - si_populate_std_voltage_value(adev, std_vddc, - table->initialState.level.vddc.index, - &table->initialState.level.std_vddc); - } - - if (eg_pi->vddci_control) - si_populate_voltage_value(adev, - &eg_pi->vddci_voltage_table, - initial_state->performance_levels[0].vddci, - &table->initialState.level.vddci); - - if (si_pi->vddc_phase_shed_control) - si_populate_phase_shedding_value(adev, - &adev->pm.dpm.dyn_state.phase_shedding_limits_table, - initial_state->performance_levels[0].vddc, - initial_state->performance_levels[0].sclk, - initial_state->performance_levels[0].mclk, - &table->initialState.level.vddc); - - si_populate_initial_mvdd_value(adev, &table->initialState.level.mvdd); - - 
reg = CG_R(0xffff) | CG_L(0); - table->initialState.level.aT = cpu_to_be32(reg); - table->initialState.level.bSP = cpu_to_be32(pi->dsp); - table->initialState.level.gen2PCIE = (u8)si_pi->boot_pcie_gen; - - if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { - table->initialState.level.strobeMode = - si_get_strobe_mode_settings(adev, - initial_state->performance_levels[0].mclk); - - if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold) - table->initialState.level.mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG; - else - table->initialState.level.mcFlags = 0; - } - - table->initialState.levelCount = 1; - - table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC; - - table->initialState.level.dpm2.MaxPS = 0; - table->initialState.level.dpm2.NearTDPDec = 0; - table->initialState.level.dpm2.AboveSafeInc = 0; - table->initialState.level.dpm2.BelowSafeInc = 0; - table->initialState.level.dpm2.PwrEfficiencyRatio = 0; - - reg = MIN_POWER_MASK | MAX_POWER_MASK; - table->initialState.level.SQPowerThrottle = cpu_to_be32(reg); - - reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; - table->initialState.level.SQPowerThrottle_2 = cpu_to_be32(reg); - - return 0; -} - -static enum si_pcie_gen si_gen_pcie_gen_support(struct amdgpu_device *adev, - u32 sys_mask, - enum si_pcie_gen asic_gen, - enum si_pcie_gen default_gen) -{ - switch (asic_gen) { - case SI_PCIE_GEN1: - return SI_PCIE_GEN1; - case SI_PCIE_GEN2: - return SI_PCIE_GEN2; - case SI_PCIE_GEN3: - return SI_PCIE_GEN3; - default: - if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) && - (default_gen == SI_PCIE_GEN3)) - return SI_PCIE_GEN3; - else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) && - (default_gen == SI_PCIE_GEN2)) - return SI_PCIE_GEN2; - else - return SI_PCIE_GEN1; - } - return SI_PCIE_GEN1; -} - -static int si_populate_smc_acpi_state(struct amdgpu_device *adev, - SISLANDS_SMC_STATETABLE *table) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl; - u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2; - u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3; - u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4; - u32 dll_cntl = si_pi->clock_registers.dll_cntl; - u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl; - u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl; - u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl; - u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl; - u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1; - u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2; - u32 reg; - int ret; - - table->ACPIState = table->initialState; - - table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC; - - if (pi->acpi_vddc) { - ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, - pi->acpi_vddc, &table->ACPIState.level.vddc); - if (!ret) { - u16 std_vddc; - - ret = si_get_std_voltage_value(adev, - &table->ACPIState.level.vddc, &std_vddc); - if (!ret) - si_populate_std_voltage_value(adev, std_vddc, - table->ACPIState.level.vddc.index, - &table->ACPIState.level.std_vddc); - } - table->ACPIState.level.gen2PCIE = si_pi->acpi_pcie_gen; - - if (si_pi->vddc_phase_shed_control) { - si_populate_phase_shedding_value(adev, - &adev->pm.dpm.dyn_state.phase_shedding_limits_table, - pi->acpi_vddc, - 0, - 
0, - &table->ACPIState.level.vddc); - } - } else { - ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table, - pi->min_vddc_in_table, &table->ACPIState.level.vddc); - if (!ret) { - u16 std_vddc; - - ret = si_get_std_voltage_value(adev, - &table->ACPIState.level.vddc, &std_vddc); - - if (!ret) - si_populate_std_voltage_value(adev, std_vddc, - table->ACPIState.level.vddc.index, - &table->ACPIState.level.std_vddc); - } - table->ACPIState.level.gen2PCIE = - (u8)si_gen_pcie_gen_support(adev, - si_pi->sys_pcie_mask, - si_pi->boot_pcie_gen, - SI_PCIE_GEN1); - - if (si_pi->vddc_phase_shed_control) - si_populate_phase_shedding_value(adev, - &adev->pm.dpm.dyn_state.phase_shedding_limits_table, - pi->min_vddc_in_table, - 0, - 0, - &table->ACPIState.level.vddc); - } - - if (pi->acpi_vddc) { - if (eg_pi->acpi_vddci) - si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, - eg_pi->acpi_vddci, - &table->ACPIState.level.vddci); - } - - mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET; - mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); - - dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS); - - spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; - spll_func_cntl_2 |= SCLK_MUX_SEL(4); - - table->ACPIState.level.mclk.vDLL_CNTL = - cpu_to_be32(dll_cntl); - table->ACPIState.level.mclk.vMCLK_PWRMGT_CNTL = - cpu_to_be32(mclk_pwrmgt_cntl); - table->ACPIState.level.mclk.vMPLL_AD_FUNC_CNTL = - cpu_to_be32(mpll_ad_func_cntl); - table->ACPIState.level.mclk.vMPLL_DQ_FUNC_CNTL = - cpu_to_be32(mpll_dq_func_cntl); - table->ACPIState.level.mclk.vMPLL_FUNC_CNTL = - cpu_to_be32(mpll_func_cntl); - table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_1 = - cpu_to_be32(mpll_func_cntl_1); - table->ACPIState.level.mclk.vMPLL_FUNC_CNTL_2 = - cpu_to_be32(mpll_func_cntl_2); - table->ACPIState.level.mclk.vMPLL_SS = - cpu_to_be32(si_pi->clock_registers.mpll_ss1); - table->ACPIState.level.mclk.vMPLL_SS2 = - cpu_to_be32(si_pi->clock_registers.mpll_ss2); - - table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL = - cpu_to_be32(spll_func_cntl); - table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_2 = - cpu_to_be32(spll_func_cntl_2); - table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_3 = - cpu_to_be32(spll_func_cntl_3); - table->ACPIState.level.sclk.vCG_SPLL_FUNC_CNTL_4 = - cpu_to_be32(spll_func_cntl_4); - - table->ACPIState.level.mclk.mclk_value = 0; - table->ACPIState.level.sclk.sclk_value = 0; - - si_populate_mvdd_value(adev, 0, &table->ACPIState.level.mvdd); - - if (eg_pi->dynamic_ac_timing) - table->ACPIState.level.ACIndex = 0; - - table->ACPIState.level.dpm2.MaxPS = 0; - table->ACPIState.level.dpm2.NearTDPDec = 0; - table->ACPIState.level.dpm2.AboveSafeInc = 0; - table->ACPIState.level.dpm2.BelowSafeInc = 0; - table->ACPIState.level.dpm2.PwrEfficiencyRatio = 0; - - reg = MIN_POWER_MASK | MAX_POWER_MASK; - table->ACPIState.level.SQPowerThrottle = cpu_to_be32(reg); - - reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK; - table->ACPIState.level.SQPowerThrottle_2 = cpu_to_be32(reg); - - return 0; -} - -static int si_populate_ulv_state(struct amdgpu_device *adev, - struct SISLANDS_SMC_SWSTATE_SINGLE *state) -{ - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ulv_param *ulv = &si_pi->ulv; - u32 sclk_in_sr = 1350; /* ??? 
*/ - int ret; - - ret = si_convert_power_level_to_smc(adev, &ulv->pl, - &state->level); - if (!ret) { - if (eg_pi->sclk_deep_sleep) { - if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ) - state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS; - else - state->level.stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE; - } - if (ulv->one_pcie_lane_in_ulv) - state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1; - state->level.arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX); - state->level.ACIndex = 1; - state->level.std_vddc = state->level.vddc; - state->levelCount = 1; - - state->flags |= PPSMC_SWSTATE_FLAG_DC; - } - - return ret; -} - -static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ulv_param *ulv = &si_pi->ulv; - SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 }; - int ret; - - ret = si_populate_memory_timing_parameters(adev, &ulv->pl, - &arb_regs); - if (ret) - return ret; - - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay, - ulv->volt_change_delay); - - ret = amdgpu_si_copy_bytes_to_smc(adev, - si_pi->arb_table_start + - offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) + - sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX, - (u8 *)&arb_regs, - sizeof(SMC_SIslands_MCArbDramTimingRegisterSet), - si_pi->sram_end); - - return ret; -} - -static void si_get_mvdd_configuration(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - - pi->mvdd_split_frequency = 30000; -} - -static int si_init_smc_table(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps; - const struct si_ulv_param *ulv = &si_pi->ulv; - SISLANDS_SMC_STATETABLE *table = &si_pi->smc_statetable; - int ret; - u32 lane_width; - u32 vr_hot_gpio; - - si_populate_smc_voltage_tables(adev, table); - - switch (adev->pm.int_thermal_type) { - case THERMAL_TYPE_SI: - case THERMAL_TYPE_EMC2103_WITH_INTERNAL: - table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL; - break; - case THERMAL_TYPE_NONE: - table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE; - break; - default: - table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL; - break; - } - - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC) - table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC; - - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) { - if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819)) - table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT; - } - - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC) - table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC; - - if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) - table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5; - - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY) - table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH; - - if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) { - table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO; - vr_hot_gpio = adev->pm.dpm.backbias_response_time; - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio, - vr_hot_gpio); - } - - ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table); - if (ret) - return ret; - - ret = si_populate_smc_acpi_state(adev, table); - if (ret) - return ret; - - table->driverState.flags = table->initialState.flags; - 
table->driverState.levelCount = table->initialState.levelCount; - table->driverState.levels[0] = table->initialState.level; - - ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state, - SISLANDS_INITIAL_STATE_ARB_INDEX); - if (ret) - return ret; - - if (ulv->supported && ulv->pl.vddc) { - ret = si_populate_ulv_state(adev, &table->ULVState); - if (ret) - return ret; - - ret = si_program_ulv_memory_timing_parameters(adev); - if (ret) - return ret; - - WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control); - WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter); - - lane_width = amdgpu_get_pcie_lanes(adev); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); - } else { - table->ULVState = table->initialState; - } - - return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start, - (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE), - si_pi->sram_end); -} - -static int si_calculate_sclk_params(struct amdgpu_device *adev, - u32 engine_clock, - SISLANDS_SMC_SCLK_VALUE *sclk) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - struct atom_clock_dividers dividers; - u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl; - u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2; - u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3; - u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4; - u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2; - u64 tmp; - u32 reference_clock = adev->clock.spll.reference_freq; - u32 reference_divider; - u32 fbdiv; - int ret; - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - engine_clock, false, &dividers); - if (ret) - return ret; - - reference_divider = 1 + dividers.ref_div; - - tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384; - do_div(tmp, reference_clock); - fbdiv = (u32) tmp; - - spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK); - spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div); - spll_func_cntl |= SPLL_PDIV_A(dividers.post_div); - - spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK; - spll_func_cntl_2 |= SCLK_MUX_SEL(2); - - spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK; - spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv); - spll_func_cntl_3 |= SPLL_DITHEN; - - if (pi->sclk_ss) { - struct amdgpu_atom_ss ss; - u32 vco_freq = engine_clock * dividers.post_div; - - if (amdgpu_atombios_get_asic_ss_info(adev, &ss, - ASIC_INTERNAL_ENGINE_SS, vco_freq)) { - u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate); - u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000); - - cg_spll_spread_spectrum &= ~CLK_S_MASK; - cg_spll_spread_spectrum |= CLK_S(clk_s); - cg_spll_spread_spectrum |= SSEN; - - cg_spll_spread_spectrum_2 &= ~CLK_V_MASK; - cg_spll_spread_spectrum_2 |= CLK_V(clk_v); - } - } - - sclk->sclk_value = engine_clock; - sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl; - sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2; - sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3; - sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4; - sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum; - sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2; - - return 0; -} - -static int si_populate_sclk_value(struct amdgpu_device *adev, - u32 engine_clock, - SISLANDS_SMC_SCLK_VALUE *sclk) -{ - SISLANDS_SMC_SCLK_VALUE sclk_tmp; - int ret; - - ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp); - if (!ret)
{ - sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value); - sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL); - sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2); - sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3); - sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4); - sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM); - sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2); - } - - return ret; -} - -static int si_populate_mclk_value(struct amdgpu_device *adev, - u32 engine_clock, - u32 memory_clock, - SISLANDS_SMC_MCLK_VALUE *mclk, - bool strobe_mode, - bool dll_state_on) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - u32 dll_cntl = si_pi->clock_registers.dll_cntl; - u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl; - u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl; - u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl; - u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl; - u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1; - u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2; - u32 mpll_ss1 = si_pi->clock_registers.mpll_ss1; - u32 mpll_ss2 = si_pi->clock_registers.mpll_ss2; - struct atom_mpll_param mpll_param; - int ret; - - ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param); - if (ret) - return ret; - - mpll_func_cntl &= ~BWCTRL_MASK; - mpll_func_cntl |= BWCTRL(mpll_param.bwcntl); - - mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK); - mpll_func_cntl_1 |= CLKF(mpll_param.clkf) | - CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode); - - mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK; - mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div); - - if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { - mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK); - mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) | - YCLK_POST_DIV(mpll_param.post_div); - } - - if (pi->mclk_ss) { - struct amdgpu_atom_ss ss; - u32 freq_nom; - u32 tmp; - u32 reference_clock = adev->clock.mpll.reference_freq; - - if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) - freq_nom = memory_clock * 4; - else - freq_nom = memory_clock * 2; - - tmp = freq_nom / reference_clock; - tmp = tmp * tmp; - if (amdgpu_atombios_get_asic_ss_info(adev, &ss, - ASIC_INTERNAL_MEMORY_SS, freq_nom)) { - u32 clks = reference_clock * 5 / ss.rate; - u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom); - - mpll_ss1 &= ~CLKV_MASK; - mpll_ss1 |= CLKV(clkv); - - mpll_ss2 &= ~CLKS_MASK; - mpll_ss2 |= CLKS(clks); - } - } - - mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK; - mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed); - - if (dll_state_on) - mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB; - else - mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB); - - mclk->mclk_value = cpu_to_be32(memory_clock); - mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl); - mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1); - mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2); - mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl); - mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl); - mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl); - mclk->vDLL_CNTL = cpu_to_be32(dll_cntl); - mclk->vMPLL_SS = cpu_to_be32(mpll_ss1); - mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2); - - return 0; -} - 
-static void si_populate_smc_sp(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state, - SISLANDS_SMC_SWSTATE *smc_state) -{ - struct si_ps *ps = si_get_ps(amdgpu_state); - struct rv7xx_power_info *pi = rv770_get_pi(adev); - int i; - - for (i = 0; i < ps->performance_level_count - 1; i++) - smc_state->levels[i].bSP = cpu_to_be32(pi->dsp); - - smc_state->levels[ps->performance_level_count - 1].bSP = - cpu_to_be32(pi->psp); -} - -static int si_convert_power_level_to_smc(struct amdgpu_device *adev, - struct rv7xx_pl *pl, - SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - int ret; - bool dll_state_on; - u16 std_vddc; - bool gmc_pg = false; - - if (eg_pi->pcie_performance_request && - (si_pi->force_pcie_gen != SI_PCIE_GEN_INVALID)) - level->gen2PCIE = (u8)si_pi->force_pcie_gen; - else - level->gen2PCIE = (u8)pl->pcie_gen; - - ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk); - if (ret) - return ret; - - level->mcFlags = 0; - - if (pi->mclk_stutter_mode_threshold && - (pl->mclk <= pi->mclk_stutter_mode_threshold) && - !eg_pi->uvd_enabled && - (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) && - (adev->pm.dpm.new_active_crtc_count <= 2)) { - level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN; - - if (gmc_pg) - level->mcFlags |= SISLANDS_SMC_MC_PG_EN; - } - - if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) { - if (pl->mclk > pi->mclk_edc_enable_threshold) - level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG; - - if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold) - level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG; - - level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk); - - if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) { - if (si_get_mclk_frequency_ratio(pl->mclk, true) >= - ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf)) - dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false; - else - dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false; - } else { - dll_state_on = false; - } - } else { - level->strobeMode = si_get_strobe_mode_settings(adev, - pl->mclk); - - dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? 
true : false; - } - - ret = si_populate_mclk_value(adev, - pl->sclk, - pl->mclk, - &level->mclk, - (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on); - if (ret) - return ret; - - ret = si_populate_voltage_value(adev, - &eg_pi->vddc_voltage_table, - pl->vddc, &level->vddc); - if (ret) - return ret; - - - ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc); - if (ret) - return ret; - - ret = si_populate_std_voltage_value(adev, std_vddc, - level->vddc.index, &level->std_vddc); - if (ret) - return ret; - - if (eg_pi->vddci_control) { - ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table, - pl->vddci, &level->vddci); - if (ret) - return ret; - } - - if (si_pi->vddc_phase_shed_control) { - ret = si_populate_phase_shedding_value(adev, - &adev->pm.dpm.dyn_state.phase_shedding_limits_table, - pl->vddc, - pl->sclk, - pl->mclk, - &level->vddc); - if (ret) - return ret; - } - - level->MaxPoweredUpCU = si_pi->max_cu; - - ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd); - - return ret; -} - -static int si_populate_smc_t(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state, - SISLANDS_SMC_SWSTATE *smc_state) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct si_ps *state = si_get_ps(amdgpu_state); - u32 a_t; - u32 t_l, t_h; - u32 high_bsp; - int i, ret; - - if (state->performance_level_count >= 9) - return -EINVAL; - - if (state->performance_level_count < 2) { - a_t = CG_R(0xffff) | CG_L(0); - smc_state->levels[0].aT = cpu_to_be32(a_t); - return 0; - } - - smc_state->levels[0].aT = cpu_to_be32(0); - - for (i = 0; i <= state->performance_level_count - 2; i++) { - ret = r600_calculate_at( - (50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1), - 100 * R600_AH_DFLT, - state->performance_levels[i + 1].sclk, - state->performance_levels[i].sclk, - &t_l, - &t_h); - - if (ret) { - t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT; - t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT; - } - - a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK; - a_t |= CG_R(t_l * pi->bsp / 20000); - smc_state->levels[i].aT = cpu_to_be32(a_t); - - high_bsp = (i == state->performance_level_count - 2) ? - pi->pbsp : pi->bsp; - a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000); - smc_state->levels[i + 1].aT = cpu_to_be32(a_t); - } - - return 0; -} - -static int si_disable_ulv(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ulv_param *ulv = &si_pi->ulv; - - if (ulv->supported) - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ? - 0 : -EINVAL; - - return 0; -} - -static bool si_is_state_ulv_compatible(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - const struct si_power_info *si_pi = si_get_pi(adev); - const struct si_ulv_param *ulv = &si_pi->ulv; - const struct si_ps *state = si_get_ps(amdgpu_state); - int i; - - if (state->performance_levels[0].mclk != ulv->pl.mclk) - return false; - - /* XXX validate against display requirements! 
*/ - - for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) { - if (adev->clock.current_dispclk <= - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) { - if (ulv->pl.vddc < - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v) - return false; - } - } - - if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0)) - return false; - - return true; -} - -static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state) -{ - const struct si_power_info *si_pi = si_get_pi(adev); - const struct si_ulv_param *ulv = &si_pi->ulv; - - if (ulv->supported) { - if (si_is_state_ulv_compatible(adev, amdgpu_new_state)) - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ? - 0 : -EINVAL; - } - return 0; -} - -static int si_convert_power_state_to_smc(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state, - SISLANDS_SMC_SWSTATE *smc_state) -{ - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct ni_power_info *ni_pi = ni_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ps *state = si_get_ps(amdgpu_state); - int i, ret; - u32 threshold; - u32 sclk_in_sr = 1350; /* ??? */ - - if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS) - return -EINVAL; - - threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100; - - if (amdgpu_state->vclk && amdgpu_state->dclk) { - eg_pi->uvd_enabled = true; - if (eg_pi->smu_uvd_hs) - smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD; - } else { - eg_pi->uvd_enabled = false; - } - - if (state->dc_compatible) - smc_state->flags |= PPSMC_SWSTATE_FLAG_DC; - - smc_state->levelCount = 0; - for (i = 0; i < state->performance_level_count; i++) { - if (eg_pi->sclk_deep_sleep) { - if ((i == 0) || si_pi->sclk_deep_sleep_above_low) { - if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ) - smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS; - else - smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE; - } - } - - ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i], - &smc_state->levels[i]); - smc_state->levels[i].arbRefreshState = - (u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i); - - if (ret) - return ret; - - if (ni_pi->enable_power_containment) - smc_state->levels[i].displayWatermark = - (state->performance_levels[i].sclk < threshold) ? - PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH; - else - smc_state->levels[i].displayWatermark = (i < 2) ? 
- PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH; - - if (eg_pi->dynamic_ac_timing) - smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i; - else - smc_state->levels[i].ACIndex = 0; - - smc_state->levelCount++; - } - - si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_watermark_threshold, - threshold / 512); - - si_populate_smc_sp(adev, amdgpu_state, smc_state); - - ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state); - if (ret) - ni_pi->enable_power_containment = false; - - ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state); - if (ret) - ni_pi->enable_sq_ramping = false; - - return si_populate_smc_t(adev, amdgpu_state, smc_state); -} - -static int si_upload_sw_state(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state) -{ - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ps *new_state = si_get_ps(amdgpu_new_state); - int ret; - u32 address = si_pi->state_table_start + - offsetof(SISLANDS_SMC_STATETABLE, driverState); - SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState; - size_t state_size = struct_size(smc_state, levels, - new_state->performance_level_count); - memset(smc_state, 0, state_size); - - ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state); - if (ret) - return ret; - - return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state, - state_size, si_pi->sram_end); -} - -static int si_upload_ulv_state(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ulv_param *ulv = &si_pi->ulv; - int ret = 0; - - if (ulv->supported && ulv->pl.vddc) { - u32 address = si_pi->state_table_start + - offsetof(SISLANDS_SMC_STATETABLE, ULVState); - struct SISLANDS_SMC_SWSTATE_SINGLE *smc_state = &si_pi->smc_statetable.ULVState; - u32 state_size = sizeof(struct SISLANDS_SMC_SWSTATE_SINGLE); - - memset(smc_state, 0, state_size); - - ret = si_populate_ulv_state(adev, smc_state); - if (!ret) - ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state, - state_size, si_pi->sram_end); - } - - return ret; -} - -static int si_upload_smc_data(struct amdgpu_device *adev) -{ - struct amdgpu_crtc *amdgpu_crtc = NULL; - int i; - - if (adev->pm.dpm.new_active_crtc_count == 0) - return 0; - - for (i = 0; i < adev->mode_info.num_crtc; i++) { - if (adev->pm.dpm.new_active_crtcs & (1 << i)) { - amdgpu_crtc = adev->mode_info.crtcs[i]; - break; - } - } - - if (amdgpu_crtc == NULL) - return 0; - - if (amdgpu_crtc->line_time <= 0) - return 0; - - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_crtc_index, - amdgpu_crtc->crtc_id) != PPSMC_Result_OK) - return 0; - - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min, - amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK) - return 0; - - if (si_write_smc_soft_register(adev, - SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max, - amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK) - return 0; - - return 0; -} - -static int si_set_mc_special_registers(struct amdgpu_device *adev, - struct si_mc_reg_table *table) -{ - u8 i, j, k; - u32 temp_reg; - - for (i = 0, j = table->last; i < table->last; i++) { - if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - switch (table->mc_reg_address[i].s1) { - case MC_SEQ_MISC1: - temp_reg = RREG32(MC_PMG_CMD_EMRS); - table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS; - table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP; - for (k = 0; k < table->num_entries; k++) - 
table->mc_reg_table_entry[k].mc_data[j] = - ((temp_reg & 0xffff0000)) | - ((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16); - j++; - - if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - temp_reg = RREG32(MC_PMG_CMD_MRS); - table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS; - table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP; - for (k = 0; k < table->num_entries; k++) { - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | - (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) - table->mc_reg_table_entry[k].mc_data[j] |= 0x100; - } - j++; - - if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) { - if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD; - table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD; - for (k = 0; k < table->num_entries; k++) - table->mc_reg_table_entry[k].mc_data[j] = - (table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16; - j++; - } - break; - case MC_SEQ_RESERVE_M: - temp_reg = RREG32(MC_PMG_CMD_MRS1); - table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1; - table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP; - for(k = 0; k < table->num_entries; k++) - table->mc_reg_table_entry[k].mc_data[j] = - (temp_reg & 0xffff0000) | - (table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff); - j++; - break; - default: - break; - } - } - - table->last = j; - - return 0; -} - -static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg) -{ - bool result = true; - switch (in_reg) { - case MC_SEQ_RAS_TIMING: - *out_reg = MC_SEQ_RAS_TIMING_LP; - break; - case MC_SEQ_CAS_TIMING: - *out_reg = MC_SEQ_CAS_TIMING_LP; - break; - case MC_SEQ_MISC_TIMING: - *out_reg = MC_SEQ_MISC_TIMING_LP; - break; - case MC_SEQ_MISC_TIMING2: - *out_reg = MC_SEQ_MISC_TIMING2_LP; - break; - case MC_SEQ_RD_CTL_D0: - *out_reg = MC_SEQ_RD_CTL_D0_LP; - break; - case MC_SEQ_RD_CTL_D1: - *out_reg = MC_SEQ_RD_CTL_D1_LP; - break; - case MC_SEQ_WR_CTL_D0: - *out_reg = MC_SEQ_WR_CTL_D0_LP; - break; - case MC_SEQ_WR_CTL_D1: - *out_reg = MC_SEQ_WR_CTL_D1_LP; - break; - case MC_PMG_CMD_EMRS: - *out_reg = MC_SEQ_PMG_CMD_EMRS_LP; - break; - case MC_PMG_CMD_MRS: - *out_reg = MC_SEQ_PMG_CMD_MRS_LP; - break; - case MC_PMG_CMD_MRS1: - *out_reg = MC_SEQ_PMG_CMD_MRS1_LP; - break; - case MC_SEQ_PMG_TIMING: - *out_reg = MC_SEQ_PMG_TIMING_LP; - break; - case MC_PMG_CMD_MRS2: - *out_reg = MC_SEQ_PMG_CMD_MRS2_LP; - break; - case MC_SEQ_WR_CTL_2: - *out_reg = MC_SEQ_WR_CTL_2_LP; - break; - default: - result = false; - break; - } - - return result; -} - -static void si_set_valid_flag(struct si_mc_reg_table *table) -{ - u8 i, j; - - for (i = 0; i < table->last; i++) { - for (j = 1; j < table->num_entries; j++) { - if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) { - table->valid_flag |= 1 << i; - break; - } - } - } -} - -static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table) -{ - u32 i; - u16 address; - - for (i = 0; i < table->last; i++) - table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ? 
- address : table->mc_reg_address[i].s1; - -} - -static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table, - struct si_mc_reg_table *si_table) -{ - u8 i, j; - - if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) - return -EINVAL; - if (table->num_entries > MAX_AC_TIMING_ENTRIES) - return -EINVAL; - - for (i = 0; i < table->last; i++) - si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1; - si_table->last = table->last; - - for (i = 0; i < table->num_entries; i++) { - si_table->mc_reg_table_entry[i].mclk_max = - table->mc_reg_table_entry[i].mclk_max; - for (j = 0; j < table->last; j++) { - si_table->mc_reg_table_entry[i].mc_data[j] = - table->mc_reg_table_entry[i].mc_data[j]; - } - } - si_table->num_entries = table->num_entries; - - return 0; -} - -static int si_initialize_mc_reg_table(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - struct atom_mc_reg_table *table; - struct si_mc_reg_table *si_table = &si_pi->mc_reg_table; - u8 module_index = rv770_get_memory_module_index(adev); - int ret; - - table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL); - if (!table) - return -ENOMEM; - - WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING)); - WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING)); - WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING)); - WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2)); - WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS)); - WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS)); - WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1)); - WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0)); - WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1)); - WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0)); - WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1)); - WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING)); - WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2)); - WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2)); - - ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table); - if (ret) - goto init_mc_done; - - ret = si_copy_vbios_mc_reg_table(table, si_table); - if (ret) - goto init_mc_done; - - si_set_s0_mc_reg_index(si_table); - - ret = si_set_mc_special_registers(adev, si_table); - if (ret) - goto init_mc_done; - - si_set_valid_flag(si_table); - -init_mc_done: - kfree(table); - - return ret; - -} - -static void si_populate_mc_reg_addresses(struct amdgpu_device *adev, - SMC_SIslands_MCRegisters *mc_reg_table) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 i, j; - - for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) { - if (si_pi->mc_reg_table.valid_flag & (1 << j)) { - if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE) - break; - mc_reg_table->address[i].s0 = - cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0); - mc_reg_table->address[i].s1 = - cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1); - i++; - } - } - mc_reg_table->last = (u8)i; -} - -static void si_convert_mc_registers(const struct si_mc_reg_entry *entry, - SMC_SIslands_MCRegisterSet *data, - u32 num_entries, u32 valid_flag) -{ - u32 i, j; - - for(i = 0, j = 0; j < num_entries; j++) { - if (valid_flag & (1 << j)) { - data->value[i] = cpu_to_be32(entry->mc_data[j]); - i++; - } - } -} - -static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev, - struct rv7xx_pl *pl, - SMC_SIslands_MCRegisterSet *mc_reg_table_data) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 i = 0; - - for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) { - if 
(pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max) - break; - } - - if ((i == si_pi->mc_reg_table.num_entries) && (i > 0)) - --i; - - si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i], - mc_reg_table_data, si_pi->mc_reg_table.last, - si_pi->mc_reg_table.valid_flag); -} - -static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state, - SMC_SIslands_MCRegisters *mc_reg_table) -{ - struct si_ps *state = si_get_ps(amdgpu_state); - int i; - - for (i = 0; i < state->performance_level_count; i++) { - si_convert_mc_reg_table_entry_to_smc(adev, - &state->performance_levels[i], - &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]); - } -} - -static int si_populate_mc_reg_table(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_boot_state) -{ - struct si_ps *boot_state = si_get_ps(amdgpu_boot_state); - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ulv_param *ulv = &si_pi->ulv; - SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; - - memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); - - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1); - - si_populate_mc_reg_addresses(adev, smc_mc_reg_table); - - si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0], - &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]); - - si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], - &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT], - si_pi->mc_reg_table.last, - si_pi->mc_reg_table.valid_flag); - - if (ulv->supported && ulv->pl.vddc != 0) - si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl, - &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]); - else - si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0], - &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT], - si_pi->mc_reg_table.last, - si_pi->mc_reg_table.valid_flag); - - si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table); - - return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start, - (u8 *)smc_mc_reg_table, - sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end); -} - -static int si_upload_mc_reg_table(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state) -{ - struct si_ps *new_state = si_get_ps(amdgpu_new_state); - struct si_power_info *si_pi = si_get_pi(adev); - u32 address = si_pi->mc_reg_table_start + - offsetof(SMC_SIslands_MCRegisters, - data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]); - SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table; - - memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters)); - - si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table); - - return amdgpu_si_copy_bytes_to_smc(adev, address, - (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT], - sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count, - si_pi->sram_end); -} - -static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable) -{ - if (enable) - WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN); - else - WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN); -} - -static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_state) -{ - struct si_ps *state = si_get_ps(amdgpu_state); - int i; - u16 pcie_speed, max_speed = 0; - - for (i = 0; i < state->performance_level_count; i++) { - pcie_speed = state->performance_levels[i].pcie_gen; - 
if (max_speed < pcie_speed) - max_speed = pcie_speed; - } - return max_speed; -} - -static u16 si_get_current_pcie_speed(struct amdgpu_device *adev) -{ - u32 speed_cntl; - - speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK; - speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT; - - return (u16)speed_cntl; -} - -static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - struct amdgpu_ps *amdgpu_current_state) -{ - struct si_power_info *si_pi = si_get_pi(adev); - enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); - enum si_pcie_gen current_link_speed; - - if (si_pi->force_pcie_gen == SI_PCIE_GEN_INVALID) - current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state); - else - current_link_speed = si_pi->force_pcie_gen; - - si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID; - si_pi->pspp_notify_required = false; - if (target_link_speed > current_link_speed) { - switch (target_link_speed) { -#if defined(CONFIG_ACPI) - case SI_PCIE_GEN3: - if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0) - break; - si_pi->force_pcie_gen = SI_PCIE_GEN2; - if (current_link_speed == SI_PCIE_GEN2) - break; - fallthrough; - case SI_PCIE_GEN2: - if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0) - break; - fallthrough; -#endif - default: - si_pi->force_pcie_gen = si_get_current_pcie_speed(adev); - break; - } - } else { - if (target_link_speed < current_link_speed) - si_pi->pspp_notify_required = true; - } -} - -static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - struct amdgpu_ps *amdgpu_current_state) -{ - struct si_power_info *si_pi = si_get_pi(adev); - enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state); - u8 request; - - if (si_pi->pspp_notify_required) { - if (target_link_speed == SI_PCIE_GEN3) - request = PCIE_PERF_REQ_PECI_GEN3; - else if (target_link_speed == SI_PCIE_GEN2) - request = PCIE_PERF_REQ_PECI_GEN2; - else - request = PCIE_PERF_REQ_PECI_GEN1; - - if ((request == PCIE_PERF_REQ_PECI_GEN1) && - (si_get_current_pcie_speed(adev) > 0)) - return; - -#if defined(CONFIG_ACPI) - amdgpu_acpi_pcie_performance_request(adev, request, false); -#endif - } -} - -#if 0 -static int si_ds_request(struct amdgpu_device *adev, - bool ds_status_on, u32 count_write) -{ - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - - if (eg_pi->sclk_deep_sleep) { - if (ds_status_on) - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) == - PPSMC_Result_OK) ? - 0 : -EINVAL; - else - return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) == - PPSMC_Result_OK) ? 
0 : -EINVAL; - } - return 0; -} -#endif - -static void si_set_max_cu_value(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - - if (adev->asic_type == CHIP_VERDE) { - switch (adev->pdev->device) { - case 0x6820: - case 0x6825: - case 0x6821: - case 0x6823: - case 0x6827: - si_pi->max_cu = 10; - break; - case 0x682D: - case 0x6824: - case 0x682F: - case 0x6826: - si_pi->max_cu = 8; - break; - case 0x6828: - case 0x6830: - case 0x6831: - case 0x6838: - case 0x6839: - case 0x683D: - si_pi->max_cu = 10; - break; - case 0x683B: - case 0x683F: - case 0x6829: - si_pi->max_cu = 8; - break; - default: - si_pi->max_cu = 0; - break; - } - } else { - si_pi->max_cu = 0; - } -} - -static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev, - struct amdgpu_clock_voltage_dependency_table *table) -{ - u32 i; - int j; - u16 leakage_voltage; - - if (table) { - for (i = 0; i < table->count; i++) { - switch (si_get_leakage_voltage_from_leakage_index(adev, - table->entries[i].v, - &leakage_voltage)) { - case 0: - table->entries[i].v = leakage_voltage; - break; - case -EAGAIN: - return -EINVAL; - case -EINVAL: - default: - break; - } - } - - for (j = (table->count - 2); j >= 0; j--) { - table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ? - table->entries[j].v : table->entries[j + 1].v; - } - } - return 0; -} - -static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev) -{ - int ret = 0; - - ret = si_patch_single_dependency_table_based_on_leakage(adev, - &adev->pm.dpm.dyn_state.vddc_dependency_on_sclk); - if (ret) - DRM_ERROR("Could not patch vddc_on_sclk leakage table\n"); - ret = si_patch_single_dependency_table_based_on_leakage(adev, - &adev->pm.dpm.dyn_state.vddc_dependency_on_mclk); - if (ret) - DRM_ERROR("Could not patch vddc_on_mclk leakage table\n"); - ret = si_patch_single_dependency_table_based_on_leakage(adev, - &adev->pm.dpm.dyn_state.vddci_dependency_on_mclk); - if (ret) - DRM_ERROR("Could not patch vddci_on_mclk leakage table\n"); - return ret; -} - -static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev, - struct amdgpu_ps *amdgpu_new_state, - struct amdgpu_ps *amdgpu_current_state) -{ - u32 lane_width; - u32 new_lane_width = - ((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; - u32 current_lane_width = - ((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1; - - if (new_lane_width != current_lane_width) { - amdgpu_set_pcie_lanes(adev, new_lane_width); - lane_width = amdgpu_get_pcie_lanes(adev); - si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width); - } -} - -static void si_dpm_setup_asic(struct amdgpu_device *adev) -{ - si_read_clock_registers(adev); - si_enable_acpi_power_management(adev); -} - -static int si_thermal_enable_alert(struct amdgpu_device *adev, - bool enable) -{ - u32 thermal_int = RREG32(CG_THERMAL_INT); - - if (enable) { - PPSMC_Result result; - - thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW); - WREG32(CG_THERMAL_INT, thermal_int); - result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt); - if (result != PPSMC_Result_OK) { - DRM_DEBUG_KMS("Could not enable thermal interrupts.\n"); - return -EINVAL; - } - } else { - thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW; - WREG32(CG_THERMAL_INT, thermal_int); - } - - return 0; -} - -static int si_thermal_set_temperature_range(struct 
amdgpu_device *adev, - int min_temp, int max_temp) -{ - int low_temp = 0 * 1000; - int high_temp = 255 * 1000; - - if (low_temp < min_temp) - low_temp = min_temp; - if (high_temp > max_temp) - high_temp = max_temp; - if (high_temp < low_temp) { - DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp); - return -EINVAL; - } - - WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK); - WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK); - WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK); - - adev->pm.dpm.thermal.min_temp = low_temp; - adev->pm.dpm.thermal.max_temp = high_temp; - - return 0; -} - -static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 tmp; - - if (si_pi->fan_ctrl_is_in_default_mode) { - tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT; - si_pi->fan_ctrl_default_mode = tmp; - tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT; - si_pi->t_min = tmp; - si_pi->fan_ctrl_is_in_default_mode = false; - } - - tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; - tmp |= TMIN(0); - WREG32(CG_FDO_CTRL2, tmp); - - tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; - tmp |= FDO_PWM_MODE(mode); - WREG32(CG_FDO_CTRL2, tmp); -} - -static int si_thermal_setup_fan_table(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE }; - u32 duty100; - u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2; - u16 fdo_min, slope1, slope2; - u32 reference_clock, tmp; - int ret; - u64 tmp64; - - if (!si_pi->fan_table_start) { - adev->pm.dpm.fan.ucode_fan_control = false; - return 0; - } - - duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; - - if (duty100 == 0) { - adev->pm.dpm.fan.ucode_fan_control = false; - return 0; - } - - tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100; - do_div(tmp64, 10000); - fdo_min = (u16)tmp64; - - t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min; - t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med; - - pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min; - pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med; - - slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100); - slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100); - - fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100); - fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100); - fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100); - fan_table.slope1 = cpu_to_be16(slope1); - fan_table.slope2 = cpu_to_be16(slope2); - fan_table.fdo_min = cpu_to_be16(fdo_min); - fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst); - fan_table.hys_up = cpu_to_be16(1); - fan_table.hys_slope = cpu_to_be16(1); - fan_table.temp_resp_lim = cpu_to_be16(5); - reference_clock = amdgpu_asic_get_xclk(adev); - - fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay * - reference_clock) / 1600); - fan_table.fdo_max = cpu_to_be16((u16)duty100); - - tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT; - fan_table.temp_src = (uint8_t)tmp; - - ret = amdgpu_si_copy_bytes_to_smc(adev, - si_pi->fan_table_start, - (u8 *)(&fan_table), - sizeof(fan_table), - si_pi->sram_end); - - if (ret) { - DRM_ERROR("Failed to load fan table to the SMC."); - adev->pm.dpm.fan.ucode_fan_control = false; - } - - return ret; -} - -static 
int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - PPSMC_Result ret; - - ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl); - if (ret == PPSMC_Result_OK) { - si_pi->fan_is_controlled_by_smc = true; - return 0; - } else { - return -EINVAL; - } -} - -static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - PPSMC_Result ret; - - ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl); - - if (ret == PPSMC_Result_OK) { - si_pi->fan_is_controlled_by_smc = false; - return 0; - } else { - return -EINVAL; - } -} - -static int si_dpm_get_fan_speed_pwm(void *handle, - u32 *speed) -{ - u32 duty, duty100; - u64 tmp64; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.no_fan) - return -ENOENT; - - duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; - duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT; - - if (duty100 == 0) - return -EINVAL; - - tmp64 = (u64)duty * 255; - do_div(tmp64, duty100); - *speed = MIN((u32)tmp64, 255); - - return 0; -} - -static int si_dpm_set_fan_speed_pwm(void *handle, - u32 speed) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct si_power_info *si_pi = si_get_pi(adev); - u32 tmp; - u32 duty, duty100; - u64 tmp64; - - if (adev->pm.no_fan) - return -ENOENT; - - if (si_pi->fan_is_controlled_by_smc) - return -EINVAL; - - if (speed > 255) - return -EINVAL; - - duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT; - - if (duty100 == 0) - return -EINVAL; - - tmp64 = (u64)speed * duty100; - do_div(tmp64, 255); - duty = (u32)tmp64; - - tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK; - tmp |= FDO_STATIC_DUTY(duty); - WREG32(CG_FDO_CTRL0, tmp); - - return 0; -} - -static void si_dpm_set_fan_control_mode(void *handle, u32 mode) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (mode) { - /* stop auto-manage */ - if (adev->pm.dpm.fan.ucode_fan_control) - si_fan_ctrl_stop_smc_fan_control(adev); - si_fan_ctrl_set_static_mode(adev, mode); - } else { - /* restart auto-manage */ - if (adev->pm.dpm.fan.ucode_fan_control) - si_thermal_start_smc_fan_control(adev); - else - si_fan_ctrl_set_default_mode(adev); - } -} - -static u32 si_dpm_get_fan_control_mode(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct si_power_info *si_pi = si_get_pi(adev); - u32 tmp; - - if (si_pi->fan_is_controlled_by_smc) - return 0; - - tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; - return (tmp >> FDO_PWM_MODE_SHIFT); -} - -#if 0 -static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev, - u32 *speed) -{ - u32 tach_period; - u32 xclk = amdgpu_asic_get_xclk(adev); - - if (adev->pm.no_fan) - return -ENOENT; - - if (adev->pm.fan_pulses_per_revolution == 0) - return -ENOENT; - - tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT; - if (tach_period == 0) - return -ENOENT; - - *speed = 60 * xclk * 10000 / tach_period; - - return 0; -} - -static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev, - u32 speed) -{ - u32 tach_period, tmp; - u32 xclk = amdgpu_asic_get_xclk(adev); - - if (adev->pm.no_fan) - return -ENOENT; - - if (adev->pm.fan_pulses_per_revolution == 0) - return -ENOENT; - - if ((speed < adev->pm.fan_min_rpm) || - (speed > adev->pm.fan_max_rpm)) - return -EINVAL; - - if (adev->pm.dpm.fan.ucode_fan_control) - 
si_fan_ctrl_stop_smc_fan_control(adev); - - tach_period = 60 * xclk * 10000 / (8 * speed); - tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK; - tmp |= TARGET_PERIOD(tach_period); - WREG32(CG_TACH_CTRL, tmp); - - si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM); - - return 0; -} -#endif - -static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev) -{ - struct si_power_info *si_pi = si_get_pi(adev); - u32 tmp; - - if (!si_pi->fan_ctrl_is_in_default_mode) { - tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK; - tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode); - WREG32(CG_FDO_CTRL2, tmp); - - tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK; - tmp |= TMIN(si_pi->t_min); - WREG32(CG_FDO_CTRL2, tmp); - si_pi->fan_ctrl_is_in_default_mode = true; - } -} - -static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev) -{ - if (adev->pm.dpm.fan.ucode_fan_control) { - si_fan_ctrl_start_smc_fan_control(adev); - si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC); - } -} - -static void si_thermal_initialize(struct amdgpu_device *adev) -{ - u32 tmp; - - if (adev->pm.fan_pulses_per_revolution) { - tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK; - tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1); - WREG32(CG_TACH_CTRL, tmp); - } - - tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK; - tmp |= TACH_PWM_RESP_RATE(0x28); - WREG32(CG_FDO_CTRL2, tmp); -} - -static int si_thermal_start_thermal_controller(struct amdgpu_device *adev) -{ - int ret; - - si_thermal_initialize(adev); - ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = si_thermal_enable_alert(adev, true); - if (ret) - return ret; - if (adev->pm.dpm.fan.ucode_fan_control) { - ret = si_halt_smc(adev); - if (ret) - return ret; - ret = si_thermal_setup_fan_table(adev); - if (ret) - return ret; - ret = si_resume_smc(adev); - if (ret) - return ret; - si_thermal_start_smc_fan_control(adev); - } - - return 0; -} - -static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev) -{ - if (!adev->pm.no_fan) { - si_fan_ctrl_set_default_mode(adev); - si_fan_ctrl_stop_smc_fan_control(adev); - } -} - -static int si_dpm_enable(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; - int ret; - - if (amdgpu_si_is_smc_running(adev)) - return -EINVAL; - if (pi->voltage_control || si_pi->voltage_control_svi2) - si_enable_voltage_control(adev, true); - if (pi->mvdd_control) - si_get_mvdd_configuration(adev); - if (pi->voltage_control || si_pi->voltage_control_svi2) { - ret = si_construct_voltage_tables(adev); - if (ret) { - DRM_ERROR("si_construct_voltage_tables failed\n"); - return ret; - } - } - if (eg_pi->dynamic_ac_timing) { - ret = si_initialize_mc_reg_table(adev); - if (ret) - eg_pi->dynamic_ac_timing = false; - } - if (pi->dynamic_ss) - si_enable_spread_spectrum(adev, true); - if (pi->thermal_protection) - si_enable_thermal_protection(adev, true); - si_setup_bsp(adev); - si_program_git(adev); - si_program_tp(adev); - si_program_tpp(adev); - si_program_sstp(adev); - si_enable_display_gap(adev); - si_program_vc(adev); - ret = si_upload_firmware(adev); - if (ret) { - DRM_ERROR("si_upload_firmware failed\n"); - return ret; - } - ret = si_process_firmware_header(adev); - if (ret) { - DRM_ERROR("si_process_firmware_header failed\n"); - return ret; - } - ret = 
si_initial_switch_from_arb_f0_to_f1(adev); - if (ret) { - DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n"); - return ret; - } - ret = si_init_smc_table(adev); - if (ret) { - DRM_ERROR("si_init_smc_table failed\n"); - return ret; - } - ret = si_init_smc_spll_table(adev); - if (ret) { - DRM_ERROR("si_init_smc_spll_table failed\n"); - return ret; - } - ret = si_init_arb_table_index(adev); - if (ret) { - DRM_ERROR("si_init_arb_table_index failed\n"); - return ret; - } - if (eg_pi->dynamic_ac_timing) { - ret = si_populate_mc_reg_table(adev, boot_ps); - if (ret) { - DRM_ERROR("si_populate_mc_reg_table failed\n"); - return ret; - } - } - ret = si_initialize_smc_cac_tables(adev); - if (ret) { - DRM_ERROR("si_initialize_smc_cac_tables failed\n"); - return ret; - } - ret = si_initialize_hardware_cac_manager(adev); - if (ret) { - DRM_ERROR("si_initialize_hardware_cac_manager failed\n"); - return ret; - } - ret = si_initialize_smc_dte_tables(adev); - if (ret) { - DRM_ERROR("si_initialize_smc_dte_tables failed\n"); - return ret; - } - ret = si_populate_smc_tdp_limits(adev, boot_ps); - if (ret) { - DRM_ERROR("si_populate_smc_tdp_limits failed\n"); - return ret; - } - ret = si_populate_smc_tdp_limits_2(adev, boot_ps); - if (ret) { - DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n"); - return ret; - } - si_program_response_times(adev); - si_program_ds_registers(adev); - si_dpm_start_smc(adev); - ret = si_notify_smc_display_change(adev, false); - if (ret) { - DRM_ERROR("si_notify_smc_display_change failed\n"); - return ret; - } - si_enable_sclk_control(adev, true); - si_start_dpm(adev); - - si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, true); - si_thermal_start_thermal_controller(adev); - - ni_update_current_ps(adev, boot_ps); - - return 0; -} - -static int si_set_temperature_range(struct amdgpu_device *adev) -{ - int ret; - - ret = si_thermal_enable_alert(adev, false); - if (ret) - return ret; - ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX); - if (ret) - return ret; - ret = si_thermal_enable_alert(adev, true); - if (ret) - return ret; - - return ret; -} - -static void si_dpm_disable(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps; - - if (!amdgpu_si_is_smc_running(adev)) - return; - si_thermal_stop_thermal_controller(adev); - si_disable_ulv(adev); - si_clear_vc(adev); - if (pi->thermal_protection) - si_enable_thermal_protection(adev, false); - si_enable_power_containment(adev, boot_ps, false); - si_enable_smc_cac(adev, boot_ps, false); - si_enable_spread_spectrum(adev, false); - si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, false); - si_stop_dpm(adev); - si_reset_to_default(adev); - si_dpm_stop_smc(adev); - si_force_switch_to_arb_f0(adev); - - ni_update_current_ps(adev, boot_ps); -} - -static int si_dpm_pre_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps; - struct amdgpu_ps *new_ps = &requested_ps; - - ni_update_requested_ps(adev, new_ps); - si_apply_state_adjust_rules(adev, &eg_pi->requested_rps); - - return 0; -} - -static int si_power_control_set_level(struct amdgpu_device *adev) -{ - struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps; - int ret; - - ret = si_restrict_performance_levels_before_switch(adev); - if (ret) - return ret; - ret = 
si_halt_smc(adev); - if (ret) - return ret; - ret = si_populate_smc_tdp_limits(adev, new_ps); - if (ret) - return ret; - ret = si_populate_smc_tdp_limits_2(adev, new_ps); - if (ret) - return ret; - ret = si_resume_smc(adev); - if (ret) - return ret; - ret = si_set_sw_state(adev); - if (ret) - return ret; - return 0; -} - -static void si_set_vce_clock(struct amdgpu_device *adev, - struct amdgpu_ps *new_rps, - struct amdgpu_ps *old_rps) -{ - if ((old_rps->evclk != new_rps->evclk) || - (old_rps->ecclk != new_rps->ecclk)) { - /* Turn the clocks on when encoding, off otherwise */ - if (new_rps->evclk || new_rps->ecclk) { - /* Place holder for future VCE1.0 porting to amdgpu - vce_v1_0_enable_mgcg(adev, false, false);*/ - } else { - /* Place holder for future VCE1.0 porting to amdgpu - vce_v1_0_enable_mgcg(adev, true, false); - amdgpu_asic_set_vce_clocks(adev, new_rps->evclk, new_rps->ecclk);*/ - } - } -} - -static int si_dpm_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct amdgpu_ps *new_ps = &eg_pi->requested_rps; - struct amdgpu_ps *old_ps = &eg_pi->current_rps; - int ret; - - ret = si_disable_ulv(adev); - if (ret) { - DRM_ERROR("si_disable_ulv failed\n"); - return ret; - } - ret = si_restrict_performance_levels_before_switch(adev); - if (ret) { - DRM_ERROR("si_restrict_performance_levels_before_switch failed\n"); - return ret; - } - if (eg_pi->pcie_performance_request) - si_request_link_speed_change_before_state_change(adev, new_ps, old_ps); - ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps); - ret = si_enable_power_containment(adev, new_ps, false); - if (ret) { - DRM_ERROR("si_enable_power_containment failed\n"); - return ret; - } - ret = si_enable_smc_cac(adev, new_ps, false); - if (ret) { - DRM_ERROR("si_enable_smc_cac failed\n"); - return ret; - } - ret = si_halt_smc(adev); - if (ret) { - DRM_ERROR("si_halt_smc failed\n"); - return ret; - } - ret = si_upload_sw_state(adev, new_ps); - if (ret) { - DRM_ERROR("si_upload_sw_state failed\n"); - return ret; - } - ret = si_upload_smc_data(adev); - if (ret) { - DRM_ERROR("si_upload_smc_data failed\n"); - return ret; - } - ret = si_upload_ulv_state(adev); - if (ret) { - DRM_ERROR("si_upload_ulv_state failed\n"); - return ret; - } - if (eg_pi->dynamic_ac_timing) { - ret = si_upload_mc_reg_table(adev, new_ps); - if (ret) { - DRM_ERROR("si_upload_mc_reg_table failed\n"); - return ret; - } - } - ret = si_program_memory_timing_parameters(adev, new_ps); - if (ret) { - DRM_ERROR("si_program_memory_timing_parameters failed\n"); - return ret; - } - si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps); - - ret = si_resume_smc(adev); - if (ret) { - DRM_ERROR("si_resume_smc failed\n"); - return ret; - } - ret = si_set_sw_state(adev); - if (ret) { - DRM_ERROR("si_set_sw_state failed\n"); - return ret; - } - ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps); - si_set_vce_clock(adev, new_ps, old_ps); - if (eg_pi->pcie_performance_request) - si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps); - ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps); - if (ret) { - DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n"); - return ret; - } - ret = si_enable_smc_cac(adev, new_ps, true); - if (ret) { - DRM_ERROR("si_enable_smc_cac failed\n"); - return ret; - } - ret = si_enable_power_containment(adev, new_ps, true); - if (ret) { - DRM_ERROR("si_enable_power_containment failed\n"); - return ret; 
- } - - ret = si_power_control_set_level(adev); - if (ret) { - DRM_ERROR("si_power_control_set_level failed\n"); - return ret; - } - - return 0; -} - -static void si_dpm_post_set_power_state(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct amdgpu_ps *new_ps = &eg_pi->requested_rps; - - ni_update_current_ps(adev, new_ps); -} - -#if 0 -void si_dpm_reset_asic(struct amdgpu_device *adev) -{ - si_restrict_performance_levels_before_switch(adev); - si_disable_ulv(adev); - si_set_boot_state(adev); -} -#endif - -static void si_dpm_display_configuration_changed(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - si_program_display_gap(adev); -} - - -static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info, - u8 table_rev) -{ - rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings); - rps->class = le16_to_cpu(non_clock_info->usClassification); - rps->class2 = le16_to_cpu(non_clock_info->usClassification2); - - if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) { - rps->vclk = le32_to_cpu(non_clock_info->ulVCLK); - rps->dclk = le32_to_cpu(non_clock_info->ulDCLK); - } else if (r600_is_uvd_state(rps->class, rps->class2)) { - rps->vclk = RV770_DEFAULT_VCLK_FREQ; - rps->dclk = RV770_DEFAULT_DCLK_FREQ; - } else { - rps->vclk = 0; - rps->dclk = 0; - } - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) - adev->pm.dpm.boot_ps = rps; - if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE) - adev->pm.dpm.uvd_ps = rps; -} - -static void si_parse_pplib_clock_info(struct amdgpu_device *adev, - struct amdgpu_ps *rps, int index, - union pplib_clock_info *clock_info) -{ - struct rv7xx_power_info *pi = rv770_get_pi(adev); - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_power_info *si_pi = si_get_pi(adev); - struct si_ps *ps = si_get_ps(rps); - u16 leakage_voltage; - struct rv7xx_pl *pl = &ps->performance_levels[index]; - int ret; - - ps->performance_level_count = index + 1; - - pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow); - pl->sclk |= clock_info->si.ucEngineClockHigh << 16; - pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); - pl->mclk |= clock_info->si.ucMemoryClockHigh << 16; - - pl->vddc = le16_to_cpu(clock_info->si.usVDDC); - pl->vddci = le16_to_cpu(clock_info->si.usVDDCI); - pl->flags = le32_to_cpu(clock_info->si.ulFlags); - pl->pcie_gen = si_gen_pcie_gen_support(adev, - si_pi->sys_pcie_mask, - si_pi->boot_pcie_gen, - clock_info->si.ucPCIEGen); - - /* patch up vddc if necessary */ - ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc, - &leakage_voltage); - if (ret == 0) - pl->vddc = leakage_voltage; - - if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) { - pi->acpi_vddc = pl->vddc; - eg_pi->acpi_vddci = pl->vddci; - si_pi->acpi_pcie_gen = pl->pcie_gen; - } - - if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) && - index == 0) { - /* XXX disable for A0 tahiti */ - si_pi->ulv.supported = false; - si_pi->ulv.pl = *pl; - si_pi->ulv.one_pcie_lane_in_ulv = false; - si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT; - si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT; - si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT; - } - - if (pi->min_vddc_in_table > pl->vddc) - pi->min_vddc_in_table = pl->vddc; - - if (pi->max_vddc_in_table < pl->vddc) - pi->max_vddc_in_table = pl->vddc; - - /* patch up boot state */ - if 
(rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) { - u16 vddc, vddci, mvdd; - amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd); - pl->mclk = adev->clock.default_mclk; - pl->sclk = adev->clock.default_sclk; - pl->vddc = vddc; - pl->vddci = vddci; - si_pi->mvdd_bootup_value = mvdd; - } - - if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) == - ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk; - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk; - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc; - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci; - } -} - -union pplib_power_state { - struct _ATOM_PPLIB_STATE v1; - struct _ATOM_PPLIB_STATE_V2 v2; -}; - -static int si_parse_power_table(struct amdgpu_device *adev) -{ - struct amdgpu_mode_info *mode_info = &adev->mode_info; - struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info; - union pplib_power_state *power_state; - int i, j, k, non_clock_array_index, clock_array_index; - union pplib_clock_info *clock_info; - struct _StateArray *state_array; - struct _ClockInfoArray *clock_info_array; - struct _NonClockInfoArray *non_clock_info_array; - union power_info *power_info; - int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo); - u16 data_offset; - u8 frev, crev; - u8 *power_state_offset; - struct si_ps *ps; - - if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL, - &frev, &crev, &data_offset)) - return -EINVAL; - power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); - - amdgpu_add_thermal_controller(adev); - - state_array = (struct _StateArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usStateArrayOffset)); - clock_info_array = (struct _ClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usClockInfoArrayOffset)); - non_clock_info_array = (struct _NonClockInfoArray *) - (mode_info->atom_context->bios + data_offset + - le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); - - adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries, - sizeof(struct amdgpu_ps), - GFP_KERNEL); - if (!adev->pm.dpm.ps) - return -ENOMEM; - power_state_offset = (u8 *)state_array->states; - for (i = 0; i < state_array->ucNumEntries; i++) { - u8 *idx; - power_state = (union pplib_power_state *)power_state_offset; - non_clock_array_index = power_state->v2.nonClockInfoIndex; - non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *) - &non_clock_info_array->nonClockInfo[non_clock_array_index]; - ps = kzalloc(sizeof(struct si_ps), GFP_KERNEL); - if (ps == NULL) { - kfree(adev->pm.dpm.ps); - return -ENOMEM; - } - adev->pm.dpm.ps[i].ps_priv = ps; - si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i], - non_clock_info, - non_clock_info_array->ucEntrySize); - k = 0; - idx = (u8 *)&power_state->v2.clockInfoIndex[0]; - for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) { - clock_array_index = idx[j]; - if (clock_array_index >= clock_info_array->ucNumEntries) - continue; - if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS) - break; - clock_info = (union pplib_clock_info *) - ((u8 *)&clock_info_array->clockInfo[0] + - (clock_array_index * clock_info_array->ucEntrySize)); - si_parse_pplib_clock_info(adev, - &adev->pm.dpm.ps[i], k, - clock_info); - k++; - } - power_state_offset += 2 + power_state->v2.ucNumDPMLevels; - } - adev->pm.dpm.num_ps = state_array->ucNumEntries; - - /* fill in the vce power states */ - for (i = 0; i < 
adev->pm.dpm.num_of_vce_states; i++) { - u32 sclk, mclk; - clock_array_index = adev->pm.dpm.vce_states[i].clk_idx; - clock_info = (union pplib_clock_info *) - &clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize]; - sclk = le16_to_cpu(clock_info->si.usEngineClockLow); - sclk |= clock_info->si.ucEngineClockHigh << 16; - mclk = le16_to_cpu(clock_info->si.usMemoryClockLow); - mclk |= clock_info->si.ucMemoryClockHigh << 16; - adev->pm.dpm.vce_states[i].sclk = sclk; - adev->pm.dpm.vce_states[i].mclk = mclk; - } - - return 0; -} - -static int si_dpm_init(struct amdgpu_device *adev) -{ - struct rv7xx_power_info *pi; - struct evergreen_power_info *eg_pi; - struct ni_power_info *ni_pi; - struct si_power_info *si_pi; - struct atom_clock_dividers dividers; - int ret; - - si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL); - if (si_pi == NULL) - return -ENOMEM; - adev->pm.dpm.priv = si_pi; - ni_pi = &si_pi->ni; - eg_pi = &ni_pi->eg; - pi = &eg_pi->rv7xx; - - si_pi->sys_pcie_mask = - adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK; - si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID; - si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev); - - si_set_max_cu_value(adev); - - rv770_get_max_vddc(adev); - si_get_leakage_vddc(adev); - si_patch_dependency_tables_based_on_leakage(adev); - - pi->acpi_vddc = 0; - eg_pi->acpi_vddci = 0; - pi->min_vddc_in_table = 0; - pi->max_vddc_in_table = 0; - - ret = amdgpu_get_platform_caps(adev); - if (ret) - return ret; - - ret = amdgpu_parse_extended_power_table(adev); - if (ret) - return ret; - - ret = si_parse_power_table(adev); - if (ret) - return ret; - - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries = - kcalloc(4, - sizeof(struct amdgpu_clock_voltage_dependency_entry), - GFP_KERNEL); - if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) { - amdgpu_free_extended_power_table(adev); - return -ENOMEM; - } - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000; - adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900; - - if (adev->pm.dpm.voltage_response_time == 0) - adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT; - if (adev->pm.dpm.backbias_response_time == 0) - adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT; - - ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM, - 0, false, ÷rs); - if (ret) - pi->ref_div = dividers.ref_div + 1; - else - pi->ref_div = R600_REFERENCEDIVIDER_DFLT; - - eg_pi->smu_uvd_hs = false; - - pi->mclk_strobe_mode_threshold = 40000; - if (si_is_special_1gb_platform(adev)) - pi->mclk_stutter_mode_threshold = 0; - else - pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold; - pi->mclk_edc_enable_threshold = 40000; - eg_pi->mclk_edc_wr_enable_threshold = 40000; - - ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold; - - pi->voltage_control = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, - VOLTAGE_OBJ_GPIO_LUT); - if (!pi->voltage_control) { - 
si_pi->voltage_control_svi2 = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, - VOLTAGE_OBJ_SVID2); - if (si_pi->voltage_control_svi2) - amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, - &si_pi->svd_gpio_id, &si_pi->svc_gpio_id); - } - - pi->mvdd_control = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC, - VOLTAGE_OBJ_GPIO_LUT); - - eg_pi->vddci_control = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI, - VOLTAGE_OBJ_GPIO_LUT); - if (!eg_pi->vddci_control) - si_pi->vddci_control_svi2 = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI, - VOLTAGE_OBJ_SVID2); - - si_pi->vddc_phase_shed_control = - amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC, - VOLTAGE_OBJ_PHASE_LUT); - - rv770_get_engine_memory_ss(adev); - - pi->asi = RV770_ASI_DFLT; - pi->pasi = CYPRESS_HASI_DFLT; - pi->vrc = SISLANDS_VRC_DFLT; - - pi->gfx_clock_gating = true; - - eg_pi->sclk_deep_sleep = true; - si_pi->sclk_deep_sleep_above_low = false; - - if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE) - pi->thermal_protection = true; - else - pi->thermal_protection = false; - - eg_pi->dynamic_ac_timing = true; - - eg_pi->light_sleep = true; -#if defined(CONFIG_ACPI) - eg_pi->pcie_performance_request = - amdgpu_acpi_is_pcie_performance_request_supported(adev); -#else - eg_pi->pcie_performance_request = false; -#endif - - si_pi->sram_end = SMC_RAM_END; - - adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4; - adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000; - adev->pm.dpm.dyn_state.vddc_vddci_delta = 200; - adev->pm.dpm.dyn_state.valid_sclk_values.count = 0; - adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL; - adev->pm.dpm.dyn_state.valid_mclk_values.count = 0; - adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL; - - si_initialize_powertune_defaults(adev); - - /* make sure dc limits are valid */ - if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) || - (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0)) - adev->pm.dpm.dyn_state.max_clock_voltage_on_dc = - adev->pm.dpm.dyn_state.max_clock_voltage_on_ac; - - si_pi->fan_ctrl_is_in_default_mode = true; - - return 0; -} - -static void si_dpm_fini(struct amdgpu_device *adev) -{ - int i; - - if (adev->pm.dpm.ps) - for (i = 0; i < adev->pm.dpm.num_ps; i++) - kfree(adev->pm.dpm.ps[i].ps_priv); - kfree(adev->pm.dpm.ps); - kfree(adev->pm.dpm.priv); - kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries); - amdgpu_free_extended_power_table(adev); -} - -static void si_dpm_debugfs_print_current_performance_level(void *handle, - struct seq_file *m) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct amdgpu_ps *rps = &eg_pi->current_rps; - struct si_ps *ps = si_get_ps(rps); - struct rv7xx_pl *pl; - u32 current_index = - (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> - CURRENT_STATE_INDEX_SHIFT; - - if (current_index >= ps->performance_level_count) { - seq_printf(m, "invalid dpm profile %d\n", current_index); - } else { - pl = &ps->performance_levels[current_index]; - seq_printf(m, "uvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - seq_printf(m, "power level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", - current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); - } -} - -static int si_dpm_set_interrupt_state(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - unsigned type, - enum amdgpu_interrupt_state 
state) -{ - u32 cg_thermal_int; - - switch (type) { - case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int |= THERM_INT_MASK_HIGH; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int &= ~THERM_INT_MASK_HIGH; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); - break; - default: - break; - } - break; - - case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW: - switch (state) { - case AMDGPU_IRQ_STATE_DISABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int |= THERM_INT_MASK_LOW; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); - break; - case AMDGPU_IRQ_STATE_ENABLE: - cg_thermal_int = RREG32_SMC(CG_THERMAL_INT); - cg_thermal_int &= ~THERM_INT_MASK_LOW; - WREG32_SMC(CG_THERMAL_INT, cg_thermal_int); - break; - default: - break; - } - break; - - default: - break; - } - return 0; -} - -static int si_dpm_process_interrupt(struct amdgpu_device *adev, - struct amdgpu_irq_src *source, - struct amdgpu_iv_entry *entry) -{ - bool queue_thermal = false; - - if (entry == NULL) - return -EINVAL; - - switch (entry->src_id) { - case 230: /* thermal low to high */ - DRM_DEBUG("IH: thermal low to high\n"); - adev->pm.dpm.thermal.high_to_low = false; - queue_thermal = true; - break; - case 231: /* thermal high to low */ - DRM_DEBUG("IH: thermal high to low\n"); - adev->pm.dpm.thermal.high_to_low = true; - queue_thermal = true; - break; - default: - break; - } - - if (queue_thermal) - schedule_work(&adev->pm.dpm.thermal.work); - - return 0; -} - -static int si_dpm_late_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!adev->pm.dpm_enabled) - return 0; - - ret = si_set_temperature_range(adev); - if (ret) - return ret; -#if 0 //TODO ? - si_dpm_powergate_uvd(adev, true); -#endif - return 0; -} - -/** - * si_dpm_init_microcode - load ucode images from disk - * - * @adev: amdgpu_device pointer - * - * Use the firmware interface to load the ucode images into - * the driver (not loaded into hw). - * Returns 0 on success, error on failure. 
- */ -static int si_dpm_init_microcode(struct amdgpu_device *adev) -{ - const char *chip_name; - char fw_name[30]; - int err; - - DRM_DEBUG("\n"); - switch (adev->asic_type) { - case CHIP_TAHITI: - chip_name = "tahiti"; - break; - case CHIP_PITCAIRN: - if ((adev->pdev->revision == 0x81) && - ((adev->pdev->device == 0x6810) || - (adev->pdev->device == 0x6811))) - chip_name = "pitcairn_k"; - else - chip_name = "pitcairn"; - break; - case CHIP_VERDE: - if (((adev->pdev->device == 0x6820) && - ((adev->pdev->revision == 0x81) || - (adev->pdev->revision == 0x83))) || - ((adev->pdev->device == 0x6821) && - ((adev->pdev->revision == 0x83) || - (adev->pdev->revision == 0x87))) || - ((adev->pdev->revision == 0x87) && - ((adev->pdev->device == 0x6823) || - (adev->pdev->device == 0x682b)))) - chip_name = "verde_k"; - else - chip_name = "verde"; - break; - case CHIP_OLAND: - if (((adev->pdev->revision == 0x81) && - ((adev->pdev->device == 0x6600) || - (adev->pdev->device == 0x6604) || - (adev->pdev->device == 0x6605) || - (adev->pdev->device == 0x6610))) || - ((adev->pdev->revision == 0x83) && - (adev->pdev->device == 0x6610))) - chip_name = "oland_k"; - else - chip_name = "oland"; - break; - case CHIP_HAINAN: - if (((adev->pdev->revision == 0x81) && - (adev->pdev->device == 0x6660)) || - ((adev->pdev->revision == 0x83) && - ((adev->pdev->device == 0x6660) || - (adev->pdev->device == 0x6663) || - (adev->pdev->device == 0x6665) || - (adev->pdev->device == 0x6667)))) - chip_name = "hainan_k"; - else if ((adev->pdev->revision == 0xc3) && - (adev->pdev->device == 0x6665)) - chip_name = "banks_k_2"; - else - chip_name = "hainan"; - break; - default: BUG(); - } - - snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name); - err = request_firmware(&adev->pm.fw, fw_name, adev->dev); - if (err) - goto out; - err = amdgpu_ucode_validate(adev->pm.fw); - -out: - if (err) { - DRM_ERROR("si_smc: Failed to load firmware. 
err = %d\"%s\"\n", - err, fw_name); - release_firmware(adev->pm.fw); - adev->pm.fw = NULL; - } - return err; - -} - -static int si_dpm_sw_init(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq); - if (ret) - return ret; - - /* default to balanced state */ - adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED; - adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO; - adev->pm.default_sclk = adev->clock.default_sclk; - adev->pm.default_mclk = adev->clock.default_mclk; - adev->pm.current_sclk = adev->clock.default_sclk; - adev->pm.current_mclk = adev->clock.default_mclk; - adev->pm.int_thermal_type = THERMAL_TYPE_NONE; - - if (amdgpu_dpm == 0) - return 0; - - ret = si_dpm_init_microcode(adev); - if (ret) - return ret; - - INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); - mutex_lock(&adev->pm.mutex); - ret = si_dpm_init(adev); - if (ret) - goto dpm_failed; - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - if (amdgpu_dpm == 1) - amdgpu_pm_print_power_states(adev); - mutex_unlock(&adev->pm.mutex); - DRM_INFO("amdgpu: dpm initialized\n"); - - return 0; - -dpm_failed: - si_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - DRM_ERROR("amdgpu: dpm initialization failed\n"); - return ret; -} - -static int si_dpm_sw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - flush_work(&adev->pm.dpm.thermal.work); - - mutex_lock(&adev->pm.mutex); - si_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); - - return 0; -} - -static int si_dpm_hw_init(void *handle) -{ - int ret; - - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (!amdgpu_dpm) - return 0; - - mutex_lock(&adev->pm.mutex); - si_dpm_setup_asic(adev); - ret = si_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); - amdgpu_legacy_dpm_compute_clocks(adev); - return ret; -} - -static int si_dpm_hw_fini(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - si_dpm_disable(adev); - mutex_unlock(&adev->pm.mutex); - } - - return 0; -} - -static int si_dpm_suspend(void *handle) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); - /* disable dpm */ - si_dpm_disable(adev); - /* reset the power state */ - adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - mutex_unlock(&adev->pm.mutex); - } - return 0; -} - -static int si_dpm_resume(void *handle) -{ - int ret; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev->pm.dpm_enabled) { - /* asic init will reset to the boot state */ - mutex_lock(&adev->pm.mutex); - si_dpm_setup_asic(adev); - ret = si_dpm_enable(adev); - if (ret) - adev->pm.dpm_enabled = false; - else - adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); - if (adev->pm.dpm_enabled) - amdgpu_legacy_dpm_compute_clocks(adev); - } - return 0; -} - -static bool si_dpm_is_idle(void *handle) -{ - /* XXX */ - return true; -} - -static int si_dpm_wait_for_idle(void *handle) -{ - /* XXX */ - return 0; -} - -static int si_dpm_soft_reset(void *handle) -{ - 
return 0; -} - -static int si_dpm_set_clockgating_state(void *handle, - enum amd_clockgating_state state) -{ - return 0; -} - -static int si_dpm_set_powergating_state(void *handle, - enum amd_powergating_state state) -{ - return 0; -} - -/* get temperature in millidegrees */ -static int si_dpm_get_temp(void *handle) -{ - u32 temp; - int actual_temp = 0; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >> - CTF_TEMP_SHIFT; - - if (temp & 0x200) - actual_temp = 255; - else - actual_temp = temp & 0x1ff; - - actual_temp = (actual_temp * 1000); - - return actual_temp; -} - -static u32 si_dpm_get_sclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); - - if (low) - return requested_state->performance_levels[0].sclk; - else - return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk; -} - -static u32 si_dpm_get_mclk(void *handle, bool low) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct si_ps *requested_state = si_get_ps(&eg_pi->requested_rps); - - if (low) - return requested_state->performance_levels[0].mclk; - else - return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk; -} - -static void si_dpm_print_power_state(void *handle, - void *current_ps) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps; - struct si_ps *ps = si_get_ps(rps); - struct rv7xx_pl *pl; - int i; - - amdgpu_dpm_print_class_info(rps->class, rps->class2); - amdgpu_dpm_print_cap_info(rps->caps); - DRM_INFO("\tuvd vclk: %d dclk: %d\n", rps->vclk, rps->dclk); - for (i = 0; i < ps->performance_level_count; i++) { - pl = &ps->performance_levels[i]; - if (adev->asic_type >= CHIP_TAHITI) - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n", - i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1); - else - DRM_INFO("\t\tpower level %d sclk: %u mclk: %u vddc: %u vddci: %u\n", - i, pl->sclk, pl->mclk, pl->vddc, pl->vddci); - } - amdgpu_dpm_print_ps_status(adev, rps); -} - -static int si_dpm_early_init(void *handle) -{ - - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - adev->powerplay.pp_funcs = &si_dpm_funcs; - adev->powerplay.pp_handle = adev; - si_dpm_set_irq_funcs(adev); - return 0; -} - -static inline bool si_are_power_levels_equal(const struct rv7xx_pl *si_cpl1, - const struct rv7xx_pl *si_cpl2) -{ - return ((si_cpl1->mclk == si_cpl2->mclk) && - (si_cpl1->sclk == si_cpl2->sclk) && - (si_cpl1->pcie_gen == si_cpl2->pcie_gen) && - (si_cpl1->vddc == si_cpl2->vddc) && - (si_cpl1->vddci == si_cpl2->vddci)); -} - -static int si_check_state_equal(void *handle, - void *current_ps, - void *request_ps, - bool *equal) -{ - struct si_ps *si_cps; - struct si_ps *si_rps; - int i; - struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps; - struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps; - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - - if (adev == NULL || cps == NULL || rps == NULL || equal == NULL) - return -EINVAL; - - si_cps = si_get_ps((struct amdgpu_ps *)cps); - si_rps = si_get_ps((struct amdgpu_ps *)rps); - - if (si_cps == NULL) { - printk("si_cps is NULL\n"); - *equal = false; - return 
0; - } - - if (si_cps->performance_level_count != si_rps->performance_level_count) { - *equal = false; - return 0; - } - - for (i = 0; i < si_cps->performance_level_count; i++) { - if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]), - &(si_rps->performance_levels[i]))) { - *equal = false; - return 0; - } - } - - /* If all performance levels are the same try to use the UVD clocks to break the tie.*/ - *equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk)); - *equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk)); - - return 0; -} - -static int si_dpm_read_sensor(void *handle, int idx, - void *value, int *size) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)handle; - struct evergreen_power_info *eg_pi = evergreen_get_pi(adev); - struct amdgpu_ps *rps = &eg_pi->current_rps; - struct si_ps *ps = si_get_ps(rps); - uint32_t sclk, mclk; - u32 pl_index = - (RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >> - CURRENT_STATE_INDEX_SHIFT; - - /* size must be at least 4 bytes for all sensors */ - if (*size < 4) - return -EINVAL; - - switch (idx) { - case AMDGPU_PP_SENSOR_GFX_SCLK: - if (pl_index < ps->performance_level_count) { - sclk = ps->performance_levels[pl_index].sclk; - *((uint32_t *)value) = sclk; - *size = 4; - return 0; - } - return -EINVAL; - case AMDGPU_PP_SENSOR_GFX_MCLK: - if (pl_index < ps->performance_level_count) { - mclk = ps->performance_levels[pl_index].mclk; - *((uint32_t *)value) = mclk; - *size = 4; - return 0; - } - return -EINVAL; - case AMDGPU_PP_SENSOR_GPU_TEMP: - *((uint32_t *)value) = si_dpm_get_temp(adev); - *size = 4; - return 0; - default: - return -EOPNOTSUPP; - } -} - -static const struct amd_ip_funcs si_dpm_ip_funcs = { - .name = "si_dpm", - .early_init = si_dpm_early_init, - .late_init = si_dpm_late_init, - .sw_init = si_dpm_sw_init, - .sw_fini = si_dpm_sw_fini, - .hw_init = si_dpm_hw_init, - .hw_fini = si_dpm_hw_fini, - .suspend = si_dpm_suspend, - .resume = si_dpm_resume, - .is_idle = si_dpm_is_idle, - .wait_for_idle = si_dpm_wait_for_idle, - .soft_reset = si_dpm_soft_reset, - .set_clockgating_state = si_dpm_set_clockgating_state, - .set_powergating_state = si_dpm_set_powergating_state, -}; - -const struct amdgpu_ip_block_version si_smu_ip_block = -{ - .type = AMD_IP_BLOCK_TYPE_SMC, - .major = 6, - .minor = 0, - .rev = 0, - .funcs = &si_dpm_ip_funcs, -}; - -static const struct amd_pm_funcs si_dpm_funcs = { - .pre_set_power_state = &si_dpm_pre_set_power_state, - .set_power_state = &si_dpm_set_power_state, - .post_set_power_state = &si_dpm_post_set_power_state, - .display_configuration_changed = &si_dpm_display_configuration_changed, - .get_sclk = &si_dpm_get_sclk, - .get_mclk = &si_dpm_get_mclk, - .print_power_state = &si_dpm_print_power_state, - .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level, - .force_performance_level = &si_dpm_force_performance_level, - .set_powergating_by_smu = &si_set_powergating_by_smu, - .vblank_too_short = &si_dpm_vblank_too_short, - .set_fan_control_mode = &si_dpm_set_fan_control_mode, - .get_fan_control_mode = &si_dpm_get_fan_control_mode, - .set_fan_speed_pwm = &si_dpm_set_fan_speed_pwm, - .get_fan_speed_pwm = &si_dpm_get_fan_speed_pwm, - .check_state_equal = &si_check_state_equal, - .get_vce_clock_state = amdgpu_get_vce_clock_state, - .read_sensor = &si_dpm_read_sensor, - .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks, -}; - -static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = { - .set = 
si_dpm_set_interrupt_state, - .process = si_dpm_process_interrupt, -}; - -static void si_dpm_set_irq_funcs(struct amdgpu_device *adev) -{ - adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST; - adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs; -} - diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h b/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h deleted file mode 100644 index 11cb7874a6bb..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h +++ /dev/null @@ -1,1022 +0,0 @@ -/* - * Copyright 2012 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - */ -#ifndef __SI_DPM_H__ -#define __SI_DPM_H__ - -#include "amdgpu_atombios.h" -#include "sislands_smc.h" - -#define MC_CG_CONFIG 0x96f -#define MC_ARB_CG 0x9fa -#define CG_ARB_REQ(x) ((x) << 0) -#define CG_ARB_REQ_MASK (0xff << 0) - -#define MC_ARB_DRAM_TIMING_1 0x9fc -#define MC_ARB_DRAM_TIMING_2 0x9fd -#define MC_ARB_DRAM_TIMING_3 0x9fe -#define MC_ARB_DRAM_TIMING2_1 0x9ff -#define MC_ARB_DRAM_TIMING2_2 0xa00 -#define MC_ARB_DRAM_TIMING2_3 0xa01 - -#define MAX_NO_OF_MVDD_VALUES 2 -#define MAX_NO_VREG_STEPS 32 -#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -#define SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE 32 -#define SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 -#define RV770_ASI_DFLT 1000 -#define CYPRESS_HASI_DFLT 400000 -#define PCIE_PERF_REQ_PECI_GEN1 2 -#define PCIE_PERF_REQ_PECI_GEN2 3 -#define PCIE_PERF_REQ_PECI_GEN3 4 -#define RV770_DEFAULT_VCLK_FREQ 53300 /* 10 khz */ -#define RV770_DEFAULT_DCLK_FREQ 40000 /* 10 khz */ - -#define SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE 16 - -#define RV770_SMC_TABLE_ADDRESS 0xB000 -#define RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 3 - -#define SMC_STROBE_RATIO 0x0F -#define SMC_STROBE_ENABLE 0x10 - -#define SMC_MC_EDC_RD_FLAG 0x01 -#define SMC_MC_EDC_WR_FLAG 0x02 -#define SMC_MC_RTT_ENABLE 0x04 -#define SMC_MC_STUTTER_EN 0x08 - -#define RV770_SMC_VOLTAGEMASK_VDDC 0 -#define RV770_SMC_VOLTAGEMASK_MVDD 1 -#define RV770_SMC_VOLTAGEMASK_VDDCI 2 -#define RV770_SMC_VOLTAGEMASK_MAX 4 - -#define NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 -#define NISLANDS_SMC_STROBE_RATIO 0x0F -#define NISLANDS_SMC_STROBE_ENABLE 0x10 - -#define NISLANDS_SMC_MC_EDC_RD_FLAG 0x01 -#define NISLANDS_SMC_MC_EDC_WR_FLAG 0x02 -#define NISLANDS_SMC_MC_RTT_ENABLE 0x04 -#define NISLANDS_SMC_MC_STUTTER_EN 0x08 - -#define MAX_NO_VREG_STEPS 32 - -#define NISLANDS_SMC_VOLTAGEMASK_VDDC 0 -#define NISLANDS_SMC_VOLTAGEMASK_MVDD 1 -#define 
NISLANDS_SMC_VOLTAGEMASK_VDDCI 2 -#define NISLANDS_SMC_VOLTAGEMASK_MAX 4 - -#define SISLANDS_MCREGISTERTABLE_INITIAL_SLOT 0 -#define SISLANDS_MCREGISTERTABLE_ACPI_SLOT 1 -#define SISLANDS_MCREGISTERTABLE_ULV_SLOT 2 -#define SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT 3 - -#define SISLANDS_LEAKAGE_INDEX0 0xff01 -#define SISLANDS_MAX_LEAKAGE_COUNT 4 - -#define SISLANDS_MAX_HARDWARE_POWERLEVELS 5 -#define SISLANDS_INITIAL_STATE_ARB_INDEX 0 -#define SISLANDS_ACPI_STATE_ARB_INDEX 1 -#define SISLANDS_ULV_STATE_ARB_INDEX 2 -#define SISLANDS_DRIVER_STATE_ARB_INDEX 3 - -#define SISLANDS_DPM2_MAX_PULSE_SKIP 256 - -#define SISLANDS_DPM2_NEAR_TDP_DEC 10 -#define SISLANDS_DPM2_ABOVE_SAFE_INC 5 -#define SISLANDS_DPM2_BELOW_SAFE_INC 20 - -#define SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT 80 - -#define SISLANDS_DPM2_MAXPS_PERCENT_H 99 -#define SISLANDS_DPM2_MAXPS_PERCENT_M 99 - -#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER 0x3FFF -#define SISLANDS_DPM2_SQ_RAMP_MIN_POWER 0x12 -#define SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA 0x15 -#define SISLANDS_DPM2_SQ_RAMP_STI_SIZE 0x1E -#define SISLANDS_DPM2_SQ_RAMP_LTI_RATIO 0xF - -#define SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN 10 - -#define SISLANDS_VRC_DFLT 0xC000B3 -#define SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT 1687 -#define SISLANDS_CGULVPARAMETER_DFLT 0x00040035 -#define SISLANDS_CGULVCONTROL_DFLT 0x1f007550 - -#define SI_ASI_DFLT 10000 -#define SI_BSP_DFLT 0x41EB -#define SI_BSU_DFLT 0x2 -#define SI_AH_DFLT 5 -#define SI_RLP_DFLT 25 -#define SI_RMP_DFLT 65 -#define SI_LHP_DFLT 40 -#define SI_LMP_DFLT 15 -#define SI_TD_DFLT 0 -#define SI_UTC_DFLT_00 0x24 -#define SI_UTC_DFLT_01 0x22 -#define SI_UTC_DFLT_02 0x22 -#define SI_UTC_DFLT_03 0x22 -#define SI_UTC_DFLT_04 0x22 -#define SI_UTC_DFLT_05 0x22 -#define SI_UTC_DFLT_06 0x22 -#define SI_UTC_DFLT_07 0x22 -#define SI_UTC_DFLT_08 0x22 -#define SI_UTC_DFLT_09 0x22 -#define SI_UTC_DFLT_10 0x22 -#define SI_UTC_DFLT_11 0x22 -#define SI_UTC_DFLT_12 0x22 -#define SI_UTC_DFLT_13 0x22 -#define SI_UTC_DFLT_14 0x22 -#define SI_DTC_DFLT_00 0x24 -#define SI_DTC_DFLT_01 0x22 -#define SI_DTC_DFLT_02 0x22 -#define SI_DTC_DFLT_03 0x22 -#define SI_DTC_DFLT_04 0x22 -#define SI_DTC_DFLT_05 0x22 -#define SI_DTC_DFLT_06 0x22 -#define SI_DTC_DFLT_07 0x22 -#define SI_DTC_DFLT_08 0x22 -#define SI_DTC_DFLT_09 0x22 -#define SI_DTC_DFLT_10 0x22 -#define SI_DTC_DFLT_11 0x22 -#define SI_DTC_DFLT_12 0x22 -#define SI_DTC_DFLT_13 0x22 -#define SI_DTC_DFLT_14 0x22 -#define SI_VRC_DFLT 0x0000C003 -#define SI_VOLTAGERESPONSETIME_DFLT 1000 -#define SI_BACKBIASRESPONSETIME_DFLT 1000 -#define SI_VRU_DFLT 0x3 -#define SI_SPLLSTEPTIME_DFLT 0x1000 -#define SI_SPLLSTEPUNIT_DFLT 0x3 -#define SI_TPU_DFLT 0 -#define SI_TPC_DFLT 0x200 -#define SI_SSTU_DFLT 0 -#define SI_SST_DFLT 0x00C8 -#define SI_GICST_DFLT 0x200 -#define SI_FCT_DFLT 0x0400 -#define SI_FCTU_DFLT 0 -#define SI_CTXCGTT3DRPHC_DFLT 0x20 -#define SI_CTXCGTT3DRSDC_DFLT 0x40 -#define SI_VDDC3DOORPHC_DFLT 0x100 -#define SI_VDDC3DOORSDC_DFLT 0x7 -#define SI_VDDC3DOORSU_DFLT 0 -#define SI_MPLLLOCKTIME_DFLT 100 -#define SI_MPLLRESETTIME_DFLT 150 -#define SI_VCOSTEPPCT_DFLT 20 -#define SI_ENDINGVCOSTEPPCT_DFLT 5 -#define SI_REFERENCEDIVIDER_DFLT 4 - -#define SI_PM_NUMBER_OF_TC 15 -#define SI_PM_NUMBER_OF_SCLKS 20 -#define SI_PM_NUMBER_OF_MCLKS 4 -#define SI_PM_NUMBER_OF_VOLTAGE_LEVELS 4 -#define SI_PM_NUMBER_OF_ACTIVITY_LEVELS 3 - -/* XXX are these ok? 
*/ -#define SI_TEMP_RANGE_MIN (90 * 1000) -#define SI_TEMP_RANGE_MAX (120 * 1000) - -#define FDO_PWM_MODE_STATIC 1 -#define FDO_PWM_MODE_STATIC_RPM 5 - -enum ni_dc_cac_level -{ - NISLANDS_DCCAC_LEVEL_0 = 0, - NISLANDS_DCCAC_LEVEL_1, - NISLANDS_DCCAC_LEVEL_2, - NISLANDS_DCCAC_LEVEL_3, - NISLANDS_DCCAC_LEVEL_4, - NISLANDS_DCCAC_LEVEL_5, - NISLANDS_DCCAC_LEVEL_6, - NISLANDS_DCCAC_LEVEL_7, - NISLANDS_DCCAC_MAX_LEVELS -}; - -enum si_cac_config_reg_type -{ - SISLANDS_CACCONFIG_MMR = 0, - SISLANDS_CACCONFIG_CGIND, - SISLANDS_CACCONFIG_MAX -}; - -enum si_power_level { - SI_POWER_LEVEL_LOW = 0, - SI_POWER_LEVEL_MEDIUM = 1, - SI_POWER_LEVEL_HIGH = 2, - SI_POWER_LEVEL_CTXSW = 3, -}; - -enum si_td { - SI_TD_AUTO, - SI_TD_UP, - SI_TD_DOWN, -}; - -enum si_display_watermark { - SI_DISPLAY_WATERMARK_LOW = 0, - SI_DISPLAY_WATERMARK_HIGH = 1, -}; - -enum si_display_gap -{ - SI_PM_DISPLAY_GAP_VBLANK_OR_WM = 0, - SI_PM_DISPLAY_GAP_VBLANK = 1, - SI_PM_DISPLAY_GAP_WATERMARK = 2, - SI_PM_DISPLAY_GAP_IGNORE = 3, -}; - -extern const struct amdgpu_ip_block_version si_smu_ip_block; - -struct ni_leakage_coeffients -{ - u32 at; - u32 bt; - u32 av; - u32 bv; - s32 t_slope; - s32 t_intercept; - u32 t_ref; -}; - -struct SMC_Evergreen_MCRegisterAddress -{ - uint16_t s0; - uint16_t s1; -}; - -typedef struct SMC_Evergreen_MCRegisterAddress SMC_Evergreen_MCRegisterAddress; - -struct evergreen_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -struct evergreen_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct evergreen_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMC_Evergreen_MCRegisterAddress mc_reg_address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -struct SMC_Evergreen_MCRegisterSet -{ - uint32_t value[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_Evergreen_MCRegisterSet SMC_Evergreen_MCRegisterSet; - -struct SMC_Evergreen_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_Evergreen_MCRegisterAddress address[SMC_EVERGREEN_MC_REGISTER_ARRAY_SIZE]; - SMC_Evergreen_MCRegisterSet data[5]; -}; - -typedef struct SMC_Evergreen_MCRegisters SMC_Evergreen_MCRegisters; - -struct SMC_NIslands_MCRegisterSet -{ - uint32_t value[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_NIslands_MCRegisterSet SMC_NIslands_MCRegisterSet; - -struct ni_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct SMC_NIslands_MCRegisterAddress -{ - uint16_t s0; - uint16_t s1; -}; - -typedef struct SMC_NIslands_MCRegisterAddress SMC_NIslands_MCRegisterAddress; - -struct SMC_NIslands_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_NIslands_MCRegisterAddress address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; - SMC_NIslands_MCRegisterSet data[SMC_NISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; -}; - -typedef struct SMC_NIslands_MCRegisters SMC_NIslands_MCRegisters; - -struct evergreen_ulv_param { - bool supported; - struct rv7xx_pl *pl; -}; - -struct evergreen_arb_registers { - u32 mc_arb_dram_timing; - u32 mc_arb_dram_timing2; - u32 mc_arb_rfsh_rate; - u32 mc_arb_burst_time; -}; - -struct at { - u32 rlp; - u32 rmp; - u32 lhp; - u32 lmp; -}; - -struct ni_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_func_cntl_4; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_ad_func_cntl; - u32 mpll_ad_func_cntl_2; - u32 mpll_dq_func_cntl; - u32 mpll_dq_func_cntl_2; - u32 mpll_ss1; - u32 
mpll_ss2; -}; - -struct RV770_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct RV770_SMC_SCLK_VALUE RV770_SMC_SCLK_VALUE; - -struct RV770_SMC_MCLK_VALUE -{ - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL_2; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL_2; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct RV770_SMC_MCLK_VALUE RV770_SMC_MCLK_VALUE; - - -struct RV730_SMC_MCLK_VALUE -{ - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL2; - uint32_t vMPLL_FUNC_CNTL3; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct RV730_SMC_MCLK_VALUE RV730_SMC_MCLK_VALUE; - -struct RV770_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t padding; -}; - -typedef struct RV770_SMC_VOLTAGE_VALUE RV770_SMC_VOLTAGE_VALUE; - -union RV7XX_SMC_MCLK_VALUE -{ - RV770_SMC_MCLK_VALUE mclk770; - RV730_SMC_MCLK_VALUE mclk730; -}; - -typedef union RV7XX_SMC_MCLK_VALUE RV7XX_SMC_MCLK_VALUE, *LPRV7XX_SMC_MCLK_VALUE; - -struct RV770_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t arbValue; - union{ - uint8_t seqValue; - uint8_t ACIndex; - }; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t gen2XSP; - uint8_t backbias; - uint8_t strobeMode; - uint8_t mcFlags; - uint32_t aT; - uint32_t bSP; - RV770_SMC_SCLK_VALUE sclk; - RV7XX_SMC_MCLK_VALUE mclk; - RV770_SMC_VOLTAGE_VALUE vddc; - RV770_SMC_VOLTAGE_VALUE mvdd; - RV770_SMC_VOLTAGE_VALUE vddci; - uint8_t reserved1; - uint8_t reserved2; - uint8_t stateFlags; - uint8_t padding; -}; - -typedef struct RV770_SMC_HW_PERFORMANCE_LEVEL RV770_SMC_HW_PERFORMANCE_LEVEL; - -struct RV770_SMC_SWSTATE -{ - uint8_t flags; - uint8_t padding1; - uint8_t padding2; - uint8_t padding3; - RV770_SMC_HW_PERFORMANCE_LEVEL levels[RV770_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; -}; - -typedef struct RV770_SMC_SWSTATE RV770_SMC_SWSTATE; - -struct RV770_SMC_VOLTAGEMASKTABLE -{ - uint8_t highMask[RV770_SMC_VOLTAGEMASK_MAX]; - uint32_t lowMask[RV770_SMC_VOLTAGEMASK_MAX]; -}; - -typedef struct RV770_SMC_VOLTAGEMASKTABLE RV770_SMC_VOLTAGEMASKTABLE; - -struct RV770_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint8_t highSMIO[MAX_NO_VREG_STEPS]; - uint32_t lowSMIO[MAX_NO_VREG_STEPS]; - RV770_SMC_VOLTAGEMASKTABLE voltageMaskTable; - RV770_SMC_SWSTATE initialState; - RV770_SMC_SWSTATE ACPIState; - RV770_SMC_SWSTATE driverState; - RV770_SMC_SWSTATE ULVState; -}; - -typedef struct RV770_SMC_STATETABLE RV770_SMC_STATETABLE; - -struct vddc_table_entry { - u16 vddc; - u8 vddc_index; - u8 high_smio; - u32 low_smio; -}; - -struct rv770_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mpll_ad_func_cntl; - u32 mpll_ad_func_cntl_2; - u32 mpll_dq_func_cntl; - u32 mpll_dq_func_cntl_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_ss1; - u32 mpll_ss2; -}; - -struct rv730_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 mclk_pwrmgt_cntl; - u32 dll_cntl; - u32 mpll_func_cntl; - u32 mpll_func_cntl2; - u32 
mpll_func_cntl3; - u32 mpll_ss; - u32 mpll_ss2; -}; - -union r7xx_clock_registers { - struct rv770_clock_registers rv770; - struct rv730_clock_registers rv730; -}; - -struct rv7xx_power_info { - /* flags */ - bool mem_gddr5; - bool pcie_gen2; - bool dynamic_pcie_gen2; - bool acpi_pcie_gen2; - bool boot_in_gen2; - bool voltage_control; /* vddc */ - bool mvdd_control; - bool sclk_ss; - bool mclk_ss; - bool dynamic_ss; - bool gfx_clock_gating; - bool mg_clock_gating; - bool mgcgtssm; - bool power_gating; - bool thermal_protection; - bool display_gap; - bool dcodt; - bool ulps; - /* registers */ - union r7xx_clock_registers clk_regs; - u32 s0_vid_lower_smio_cntl; - /* voltage */ - u32 vddc_mask_low; - u32 mvdd_mask_low; - u32 mvdd_split_frequency; - u32 mvdd_low_smio[MAX_NO_OF_MVDD_VALUES]; - u16 max_vddc; - u16 max_vddc_in_table; - u16 min_vddc_in_table; - struct vddc_table_entry vddc_table[MAX_NO_VREG_STEPS]; - u8 valid_vddc_entries; - /* dc odt */ - u32 mclk_odt_threshold; - u8 odt_value_0[2]; - u8 odt_value_1[2]; - /* stored values */ - u32 boot_sclk; - u16 acpi_vddc; - u32 ref_div; - u32 active_auto_throttle_sources; - u32 mclk_stutter_mode_threshold; - u32 mclk_strobe_mode_threshold; - u32 mclk_edc_enable_threshold; - u32 bsp; - u32 bsu; - u32 pbsp; - u32 pbsu; - u32 dsp; - u32 psp; - u32 asi; - u32 pasi; - u32 vrc; - u32 restricted_levels; - u32 rlp; - u32 rmp; - u32 lhp; - u32 lmp; - /* smc offsets */ - u16 state_table_start; - u16 soft_regs_start; - u16 sram_end; - /* scratch structs */ - RV770_SMC_STATETABLE smc_statetable; -}; - -enum si_pcie_gen { - SI_PCIE_GEN1 = 0, - SI_PCIE_GEN2 = 1, - SI_PCIE_GEN3 = 2, - SI_PCIE_GEN_INVALID = 0xffff -}; - -struct rv7xx_pl { - u32 sclk; - u32 mclk; - u16 vddc; - u16 vddci; /* eg+ only */ - u32 flags; - enum si_pcie_gen pcie_gen; /* si+ only */ -}; - -struct rv7xx_ps { - struct rv7xx_pl high; - struct rv7xx_pl medium; - struct rv7xx_pl low; - bool dc_compatible; -}; - -struct si_ps { - u16 performance_level_count; - bool dc_compatible; - struct rv7xx_pl performance_levels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; -}; - -struct ni_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct ni_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_NISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct ni_cac_data -{ - struct ni_leakage_coeffients leakage_coefficients; - u32 i_leakage; - s32 leakage_minimum_temperature; - u32 pwr_const; - u32 dc_cac_value; - u32 bif_cac_value; - u32 lkge_pwr; - u8 mc_wr_weight; - u8 mc_rd_weight; - u8 allow_ovrflw; - u8 num_win_tdp; - u8 l2num_win_tdp; - u8 lts_truncate_n; -}; - -struct evergreen_power_info { - /* must be first! 
*/ - struct rv7xx_power_info rv7xx; - /* flags */ - bool vddci_control; - bool dynamic_ac_timing; - bool abm; - bool mcls; - bool light_sleep; - bool memory_transition; - bool pcie_performance_request; - bool pcie_performance_request_registered; - bool sclk_deep_sleep; - bool dll_default_on; - bool ls_clock_gating; - bool smu_uvd_hs; - bool uvd_enabled; - /* stored values */ - u16 acpi_vddci; - u8 mvdd_high_index; - u8 mvdd_low_index; - u32 mclk_edc_wr_enable_threshold; - struct evergreen_mc_reg_table mc_reg_table; - struct atom_voltage_table vddc_voltage_table; - struct atom_voltage_table vddci_voltage_table; - struct evergreen_arb_registers bootup_arb_registers; - struct evergreen_ulv_param ulv; - struct at ats[2]; - /* smc offsets */ - u16 mc_reg_table_start; - struct amdgpu_ps current_rps; - struct rv7xx_ps current_ps; - struct amdgpu_ps requested_rps; - struct rv7xx_ps requested_ps; -}; - -struct PP_NIslands_Dpm2PerfLevel -{ - uint8_t MaxPS; - uint8_t TgtAct; - uint8_t MaxPS_StepInc; - uint8_t MaxPS_StepDec; - uint8_t PSST; - uint8_t NearTDPDec; - uint8_t AboveSafeInc; - uint8_t BelowSafeInc; - uint8_t PSDeltaLimit; - uint8_t PSDeltaWin; - uint8_t Reserved[6]; -}; - -typedef struct PP_NIslands_Dpm2PerfLevel PP_NIslands_Dpm2PerfLevel; - -struct PP_NIslands_DPM2Parameters -{ - uint32_t TDPLimit; - uint32_t NearTDPLimit; - uint32_t SafePowerLimit; - uint32_t PowerBoostLimit; -}; -typedef struct PP_NIslands_DPM2Parameters PP_NIslands_DPM2Parameters; - -struct NISLANDS_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct NISLANDS_SMC_SCLK_VALUE NISLANDS_SMC_SCLK_VALUE; - -struct NISLANDS_SMC_MCLK_VALUE -{ - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_AD_FUNC_CNTL_2; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL_2; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct NISLANDS_SMC_MCLK_VALUE NISLANDS_SMC_MCLK_VALUE; - -struct NISLANDS_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t padding; -}; - -typedef struct NISLANDS_SMC_VOLTAGE_VALUE NISLANDS_SMC_VOLTAGE_VALUE; - -struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t arbValue; - uint8_t ACIndex; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t reserved1; - uint8_t reserved2; - uint8_t strobeMode; - uint8_t mcFlags; - uint32_t aT; - uint32_t bSP; - NISLANDS_SMC_SCLK_VALUE sclk; - NISLANDS_SMC_MCLK_VALUE mclk; - NISLANDS_SMC_VOLTAGE_VALUE vddc; - NISLANDS_SMC_VOLTAGE_VALUE mvdd; - NISLANDS_SMC_VOLTAGE_VALUE vddci; - NISLANDS_SMC_VOLTAGE_VALUE std_vddc; - uint32_t powergate_en; - uint8_t hUp; - uint8_t hDown; - uint8_t stateFlags; - uint8_t arbRefreshState; - uint32_t SQPowerThrottle; - uint32_t SQPowerThrottle_2; - uint32_t reserved[2]; - PP_NIslands_Dpm2PerfLevel dpm2; -}; - -typedef struct NISLANDS_SMC_HW_PERFORMANCE_LEVEL NISLANDS_SMC_HW_PERFORMANCE_LEVEL; - -struct NISLANDS_SMC_SWSTATE -{ - uint8_t flags; - uint8_t levelCount; - uint8_t padding2; - uint8_t padding3; - NISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[]; -}; - -typedef struct NISLANDS_SMC_SWSTATE NISLANDS_SMC_SWSTATE; - -struct NISLANDS_SMC_VOLTAGEMASKTABLE -{ - uint8_t highMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; - uint32_t 
lowMask[NISLANDS_SMC_VOLTAGEMASK_MAX]; -}; - -typedef struct NISLANDS_SMC_VOLTAGEMASKTABLE NISLANDS_SMC_VOLTAGEMASKTABLE; - -#define NISLANDS_MAX_NO_VREG_STEPS 32 - -struct NISLANDS_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint8_t highSMIO[NISLANDS_MAX_NO_VREG_STEPS]; - uint32_t lowSMIO[NISLANDS_MAX_NO_VREG_STEPS]; - NISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; - PP_NIslands_DPM2Parameters dpm2Params; - NISLANDS_SMC_SWSTATE initialState; - NISLANDS_SMC_SWSTATE ACPIState; - NISLANDS_SMC_SWSTATE ULVState; - NISLANDS_SMC_SWSTATE driverState; - NISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[NISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE - 1]; -}; - -typedef struct NISLANDS_SMC_STATETABLE NISLANDS_SMC_STATETABLE; - -struct ni_power_info { - /* must be first! */ - struct evergreen_power_info eg; - struct ni_clock_registers clock_registers; - struct ni_mc_reg_table mc_reg_table; - u32 mclk_rtt_mode_threshold; - /* flags */ - bool use_power_boost_limit; - bool support_cac_long_term_average; - bool cac_enabled; - bool cac_configuration_required; - bool driver_calculate_cac_leakage; - bool pc_enabled; - bool enable_power_containment; - bool enable_cac; - bool enable_sq_ramping; - /* smc offsets */ - u16 arb_table_start; - u16 fan_table_start; - u16 cac_table_start; - u16 spll_table_start; - /* CAC stuff */ - struct ni_cac_data cac_data; - u32 dc_cac_table[NISLANDS_DCCAC_MAX_LEVELS]; - const struct ni_cac_weights *cac_weights; - u8 lta_window_size; - u8 lts_truncate; - struct si_ps current_ps; - struct si_ps requested_ps; - /* scratch structs */ - SMC_NIslands_MCRegisters smc_mc_reg_table; - NISLANDS_SMC_STATETABLE smc_statetable; -}; - -struct si_cac_config_reg -{ - u32 offset; - u32 mask; - u32 shift; - u32 value; - enum si_cac_config_reg_type type; -}; - -struct si_powertune_data -{ - u32 cac_window; - u32 l2_lta_window_size_default; - u8 lts_truncate_default; - u8 shift_n_default; - u8 operating_temp; - struct ni_leakage_coeffients leakage_coefficients; - u32 fixed_kt; - u32 lkge_lut_v0_percent; - u8 dc_cac[NISLANDS_DCCAC_MAX_LEVELS]; - bool enable_powertune_by_default; -}; - -struct si_dyn_powertune_data -{ - u32 cac_leakage; - s32 leakage_minimum_temperature; - u32 wintime; - u32 l2_lta_window_size; - u8 lts_truncate; - u8 shift_n; - u8 dc_pwr_value; - bool disable_uvd_powertune; -}; - -struct si_dte_data -{ - u32 tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; - u32 r[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; - u32 k; - u32 t0; - u32 max_t; - u8 window_size; - u8 temp_select; - u8 dte_mode; - u8 tdep_count; - u8 t_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - u32 tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - u32 tdep_r[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - u32 t_threshold; - bool enable_dte_by_default; -}; - -struct si_clock_registers { - u32 cg_spll_func_cntl; - u32 cg_spll_func_cntl_2; - u32 cg_spll_func_cntl_3; - u32 cg_spll_func_cntl_4; - u32 cg_spll_spread_spectrum; - u32 cg_spll_spread_spectrum_2; - u32 dll_cntl; - u32 mclk_pwrmgt_cntl; - u32 mpll_ad_func_cntl; - u32 mpll_dq_func_cntl; - u32 mpll_func_cntl; - u32 mpll_func_cntl_1; - u32 mpll_func_cntl_2; - u32 mpll_ss1; - u32 mpll_ss2; -}; - -struct si_mc_reg_entry { - u32 mclk_max; - u32 mc_data[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct si_mc_reg_table { - u8 last; - u8 num_entries; - u16 valid_flag; - struct si_mc_reg_entry mc_reg_table_entry[MAX_AC_TIMING_ENTRIES]; - 
SMC_NIslands_MCRegisterAddress mc_reg_address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -struct si_leakage_voltage_entry -{ - u16 voltage; - u16 leakage_index; -}; - -struct si_leakage_voltage -{ - u16 count; - struct si_leakage_voltage_entry entries[SISLANDS_MAX_LEAKAGE_COUNT]; -}; - - -struct si_ulv_param { - bool supported; - u32 cg_ulv_control; - u32 cg_ulv_parameter; - u32 volt_change_delay; - struct rv7xx_pl pl; - bool one_pcie_lane_in_ulv; -}; - -struct si_power_info { - /* must be first! */ - struct ni_power_info ni; - struct si_clock_registers clock_registers; - struct si_mc_reg_table mc_reg_table; - struct atom_voltage_table mvdd_voltage_table; - struct atom_voltage_table vddc_phase_shed_table; - struct si_leakage_voltage leakage_voltage; - u16 mvdd_bootup_value; - struct si_ulv_param ulv; - u32 max_cu; - /* pcie gen */ - enum si_pcie_gen force_pcie_gen; - enum si_pcie_gen boot_pcie_gen; - enum si_pcie_gen acpi_pcie_gen; - u32 sys_pcie_mask; - /* flags */ - bool enable_dte; - bool enable_ppm; - bool vddc_phase_shed_control; - bool pspp_notify_required; - bool sclk_deep_sleep_above_low; - bool voltage_control_svi2; - bool vddci_control_svi2; - /* smc offsets */ - u32 sram_end; - u32 state_table_start; - u32 soft_regs_start; - u32 mc_reg_table_start; - u32 arb_table_start; - u32 cac_table_start; - u32 dte_table_start; - u32 spll_table_start; - u32 papm_cfg_table_start; - u32 fan_table_start; - /* CAC stuff */ - const struct si_cac_config_reg *cac_weights; - const struct si_cac_config_reg *lcac_config; - const struct si_cac_config_reg *cac_override; - const struct si_powertune_data *powertune_data; - struct si_dyn_powertune_data dyn_powertune_data; - /* DTE stuff */ - struct si_dte_data dte_data; - /* scratch structs */ - SMC_SIslands_MCRegisters smc_mc_reg_table; - SISLANDS_SMC_STATETABLE smc_statetable; - PP_SIslands_PAPMParameters papm_parm; - /* SVI2 */ - u8 svd_gpio_id; - u8 svc_gpio_id; - /* fan control */ - bool fan_ctrl_is_in_default_mode; - u32 t_min; - u32 fan_ctrl_default_mode; - bool fan_is_controlled_by_smc; -}; - -#endif diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_smc.c b/drivers/gpu/drm/amd/pm/powerplay/si_smc.c deleted file mode 100644 index 8f994ffa9cd1..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/si_smc.c +++ /dev/null @@ -1,273 +0,0 @@ -/* - * Copyright 2011 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: Alex Deucher - */ - -#include - -#include "amdgpu.h" -#include "sid.h" -#include "ppsmc.h" -#include "amdgpu_ucode.h" -#include "sislands_smc.h" - -static int si_set_smc_sram_address(struct amdgpu_device *adev, - u32 smc_address, u32 limit) -{ - if (smc_address & 3) - return -EINVAL; - if ((smc_address + 3) > limit) - return -EINVAL; - - WREG32(SMC_IND_INDEX_0, smc_address); - WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); - - return 0; -} - -int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit) -{ - unsigned long flags; - int ret = 0; - u32 data, original_data, addr, extra_shift; - - if (smc_start_address & 3) - return -EINVAL; - if ((smc_start_address + byte_count) > limit) - return -EINVAL; - - addr = smc_start_address; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - while (byte_count >= 4) { - /* SMC address space is BE */ - data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; - - ret = si_set_smc_sram_address(adev, addr, limit); - if (ret) - goto done; - - WREG32(SMC_IND_DATA_0, data); - - src += 4; - byte_count -= 4; - addr += 4; - } - - /* RMW for the final bytes */ - if (byte_count > 0) { - data = 0; - - ret = si_set_smc_sram_address(adev, addr, limit); - if (ret) - goto done; - - original_data = RREG32(SMC_IND_DATA_0); - extra_shift = 8 * (4 - byte_count); - - while (byte_count > 0) { - /* SMC address space is BE */ - data = (data << 8) + *src++; - byte_count--; - } - - data <<= extra_shift; - data |= (original_data & ~((~0UL) << extra_shift)); - - ret = si_set_smc_sram_address(adev, addr, limit); - if (ret) - goto done; - - WREG32(SMC_IND_DATA_0, data); - } - -done: - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return ret; -} - -void amdgpu_si_start_smc(struct amdgpu_device *adev) -{ - u32 tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL); - - tmp &= ~RST_REG; - - WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); -} - -void amdgpu_si_reset_smc(struct amdgpu_device *adev) -{ - u32 tmp; - - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - RREG32(CB_CGTT_SCLK_CTRL); - - tmp = RREG32_SMC(SMC_SYSCON_RESET_CNTL) | - RST_REG; - WREG32_SMC(SMC_SYSCON_RESET_CNTL, tmp); -} - -int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev) -{ - static const u8 data[] = { 0x0E, 0x00, 0x40, 0x40 }; - - return amdgpu_si_copy_bytes_to_smc(adev, 0x0, data, 4, sizeof(data)+1); -} - -void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable) -{ - u32 tmp = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); - - if (enable) - tmp &= ~CK_DISABLE; - else - tmp |= CK_DISABLE; - - WREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0, tmp); -} - -bool amdgpu_si_is_smc_running(struct amdgpu_device *adev) -{ - u32 rst = RREG32_SMC(SMC_SYSCON_RESET_CNTL); - u32 clk = RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); - - if (!(rst & RST_REG) && !(clk & CK_DISABLE)) - return true; - - return false; -} - -PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, - PPSMC_Msg msg) -{ - u32 tmp; - int i; - - if (!amdgpu_si_is_smc_running(adev)) - return PPSMC_Result_Failed; - - WREG32(SMC_MESSAGE_0, msg); - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = RREG32(SMC_RESP_0); - if (tmp != 0) - break; - udelay(1); - } - - return (PPSMC_Result)RREG32(SMC_RESP_0); -} - -PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev) -{ - u32 tmp; - int i; - - if (!amdgpu_si_is_smc_running(adev)) - return PPSMC_Result_OK; - - for (i = 0; i < adev->usec_timeout; i++) { - tmp = 
RREG32_SMC(SMC_SYSCON_CLOCK_CNTL_0); - if ((tmp & CKEN) == 0) - break; - udelay(1); - } - - return PPSMC_Result_OK; -} - -int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit) -{ - const struct smc_firmware_header_v1_0 *hdr; - unsigned long flags; - u32 ucode_start_address; - u32 ucode_size; - const u8 *src; - u32 data; - - if (!adev->pm.fw) - return -EINVAL; - - hdr = (const struct smc_firmware_header_v1_0 *)adev->pm.fw->data; - - amdgpu_ucode_print_smc_hdr(&hdr->header); - - adev->pm.fw_version = le32_to_cpu(hdr->header.ucode_version); - ucode_start_address = le32_to_cpu(hdr->ucode_start_addr); - ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes); - src = (const u8 *) - (adev->pm.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes)); - if (ucode_size & 3) - return -EINVAL; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - WREG32(SMC_IND_INDEX_0, ucode_start_address); - WREG32_P(SMC_IND_ACCESS_CNTL, AUTO_INCREMENT_IND_0, ~AUTO_INCREMENT_IND_0); - while (ucode_size >= 4) { - /* SMC address space is BE */ - data = (src[0] << 24) | (src[1] << 16) | (src[2] << 8) | src[3]; - - WREG32(SMC_IND_DATA_0, data); - - src += 4; - ucode_size -= 4; - } - WREG32_P(SMC_IND_ACCESS_CNTL, 0, ~AUTO_INCREMENT_IND_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return 0; -} - -int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - ret = si_set_smc_sram_address(adev, smc_address, limit); - if (ret == 0) - *value = RREG32(SMC_IND_DATA_0); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return ret; -} - -int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 value, u32 limit) -{ - unsigned long flags; - int ret; - - spin_lock_irqsave(&adev->smc_idx_lock, flags); - ret = si_set_smc_sram_address(adev, smc_address, limit); - if (ret == 0) - WREG32(SMC_IND_DATA_0, value); - spin_unlock_irqrestore(&adev->smc_idx_lock, flags); - - return ret; -} diff --git a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h b/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h deleted file mode 100644 index c7dc117a688c..000000000000 --- a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h +++ /dev/null @@ -1,431 +0,0 @@ -/* - * Copyright 2013 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - */ -#ifndef PP_SISLANDS_SMC_H -#define PP_SISLANDS_SMC_H - -#include "ppsmc.h" - -#pragma pack(push, 1) - -#define SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE 16 - -struct PP_SIslands_Dpm2PerfLevel -{ - uint8_t MaxPS; - uint8_t TgtAct; - uint8_t MaxPS_StepInc; - uint8_t MaxPS_StepDec; - uint8_t PSSamplingTime; - uint8_t NearTDPDec; - uint8_t AboveSafeInc; - uint8_t BelowSafeInc; - uint8_t PSDeltaLimit; - uint8_t PSDeltaWin; - uint16_t PwrEfficiencyRatio; - uint8_t Reserved[4]; -}; - -typedef struct PP_SIslands_Dpm2PerfLevel PP_SIslands_Dpm2PerfLevel; - -struct PP_SIslands_DPM2Status -{ - uint32_t dpm2Flags; - uint8_t CurrPSkip; - uint8_t CurrPSkipPowerShift; - uint8_t CurrPSkipTDP; - uint8_t CurrPSkipOCP; - uint8_t MaxSPLLIndex; - uint8_t MinSPLLIndex; - uint8_t CurrSPLLIndex; - uint8_t InfSweepMode; - uint8_t InfSweepDir; - uint8_t TDPexceeded; - uint8_t reserved; - uint8_t SwitchDownThreshold; - uint32_t SwitchDownCounter; - uint32_t SysScalingFactor; -}; - -typedef struct PP_SIslands_DPM2Status PP_SIslands_DPM2Status; - -struct PP_SIslands_DPM2Parameters -{ - uint32_t TDPLimit; - uint32_t NearTDPLimit; - uint32_t SafePowerLimit; - uint32_t PowerBoostLimit; - uint32_t MinLimitDelta; -}; -typedef struct PP_SIslands_DPM2Parameters PP_SIslands_DPM2Parameters; - -struct PP_SIslands_PAPMStatus -{ - uint32_t EstimatedDGPU_T; - uint32_t EstimatedDGPU_P; - uint32_t EstimatedAPU_T; - uint32_t EstimatedAPU_P; - uint8_t dGPU_T_Limit_Exceeded; - uint8_t reserved[3]; -}; -typedef struct PP_SIslands_PAPMStatus PP_SIslands_PAPMStatus; - -struct PP_SIslands_PAPMParameters -{ - uint32_t NearTDPLimitTherm; - uint32_t NearTDPLimitPAPM; - uint32_t PlatformPowerLimit; - uint32_t dGPU_T_Limit; - uint32_t dGPU_T_Warning; - uint32_t dGPU_T_Hysteresis; -}; -typedef struct PP_SIslands_PAPMParameters PP_SIslands_PAPMParameters; - -struct SISLANDS_SMC_SCLK_VALUE -{ - uint32_t vCG_SPLL_FUNC_CNTL; - uint32_t vCG_SPLL_FUNC_CNTL_2; - uint32_t vCG_SPLL_FUNC_CNTL_3; - uint32_t vCG_SPLL_FUNC_CNTL_4; - uint32_t vCG_SPLL_SPREAD_SPECTRUM; - uint32_t vCG_SPLL_SPREAD_SPECTRUM_2; - uint32_t sclk_value; -}; - -typedef struct SISLANDS_SMC_SCLK_VALUE SISLANDS_SMC_SCLK_VALUE; - -struct SISLANDS_SMC_MCLK_VALUE -{ - uint32_t vMPLL_FUNC_CNTL; - uint32_t vMPLL_FUNC_CNTL_1; - uint32_t vMPLL_FUNC_CNTL_2; - uint32_t vMPLL_AD_FUNC_CNTL; - uint32_t vMPLL_DQ_FUNC_CNTL; - uint32_t vMCLK_PWRMGT_CNTL; - uint32_t vDLL_CNTL; - uint32_t vMPLL_SS; - uint32_t vMPLL_SS2; - uint32_t mclk_value; -}; - -typedef struct SISLANDS_SMC_MCLK_VALUE SISLANDS_SMC_MCLK_VALUE; - -struct SISLANDS_SMC_VOLTAGE_VALUE -{ - uint16_t value; - uint8_t index; - uint8_t phase_settings; -}; - -typedef struct SISLANDS_SMC_VOLTAGE_VALUE SISLANDS_SMC_VOLTAGE_VALUE; - -struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL -{ - uint8_t ACIndex; - uint8_t displayWatermark; - uint8_t gen2PCIE; - uint8_t UVDWatermark; - uint8_t VCEWatermark; - uint8_t strobeMode; - uint8_t mcFlags; - uint8_t padding; - uint32_t aT; - uint32_t bSP; - SISLANDS_SMC_SCLK_VALUE sclk; - SISLANDS_SMC_MCLK_VALUE mclk; - SISLANDS_SMC_VOLTAGE_VALUE vddc; - SISLANDS_SMC_VOLTAGE_VALUE mvdd; - SISLANDS_SMC_VOLTAGE_VALUE vddci; - SISLANDS_SMC_VOLTAGE_VALUE std_vddc; - uint8_t hysteresisUp; - uint8_t hysteresisDown; - uint8_t stateFlags; - uint8_t arbRefreshState; - uint32_t SQPowerThrottle; - uint32_t SQPowerThrottle_2; - uint32_t MaxPoweredUpCU; - SISLANDS_SMC_VOLTAGE_VALUE high_temp_vddc; - SISLANDS_SMC_VOLTAGE_VALUE low_temp_vddc; - uint32_t reserved[2]; - PP_SIslands_Dpm2PerfLevel dpm2; -}; - -#define 
SISLANDS_SMC_STROBE_RATIO 0x0F -#define SISLANDS_SMC_STROBE_ENABLE 0x10 - -#define SISLANDS_SMC_MC_EDC_RD_FLAG 0x01 -#define SISLANDS_SMC_MC_EDC_WR_FLAG 0x02 -#define SISLANDS_SMC_MC_RTT_ENABLE 0x04 -#define SISLANDS_SMC_MC_STUTTER_EN 0x08 -#define SISLANDS_SMC_MC_PG_EN 0x10 - -typedef struct SISLANDS_SMC_HW_PERFORMANCE_LEVEL SISLANDS_SMC_HW_PERFORMANCE_LEVEL; - -struct SISLANDS_SMC_SWSTATE -{ - uint8_t flags; - uint8_t levelCount; - uint8_t padding2; - uint8_t padding3; - SISLANDS_SMC_HW_PERFORMANCE_LEVEL levels[]; -}; - -typedef struct SISLANDS_SMC_SWSTATE SISLANDS_SMC_SWSTATE; - -struct SISLANDS_SMC_SWSTATE_SINGLE { - uint8_t flags; - uint8_t levelCount; - uint8_t padding2; - uint8_t padding3; - SISLANDS_SMC_HW_PERFORMANCE_LEVEL level; -}; - -#define SISLANDS_SMC_VOLTAGEMASK_VDDC 0 -#define SISLANDS_SMC_VOLTAGEMASK_MVDD 1 -#define SISLANDS_SMC_VOLTAGEMASK_VDDCI 2 -#define SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING 3 -#define SISLANDS_SMC_VOLTAGEMASK_MAX 4 - -struct SISLANDS_SMC_VOLTAGEMASKTABLE -{ - uint32_t lowMask[SISLANDS_SMC_VOLTAGEMASK_MAX]; -}; - -typedef struct SISLANDS_SMC_VOLTAGEMASKTABLE SISLANDS_SMC_VOLTAGEMASKTABLE; - -#define SISLANDS_MAX_NO_VREG_STEPS 32 - -struct SISLANDS_SMC_STATETABLE -{ - uint8_t thermalProtectType; - uint8_t systemFlags; - uint8_t maxVDDCIndexInPPTable; - uint8_t extraFlags; - uint32_t lowSMIO[SISLANDS_MAX_NO_VREG_STEPS]; - SISLANDS_SMC_VOLTAGEMASKTABLE voltageMaskTable; - SISLANDS_SMC_VOLTAGEMASKTABLE phaseMaskTable; - PP_SIslands_DPM2Parameters dpm2Params; - struct SISLANDS_SMC_SWSTATE_SINGLE initialState; - struct SISLANDS_SMC_SWSTATE_SINGLE ACPIState; - struct SISLANDS_SMC_SWSTATE_SINGLE ULVState; - SISLANDS_SMC_SWSTATE driverState; - SISLANDS_SMC_HW_PERFORMANCE_LEVEL dpmLevels[SISLANDS_MAX_SMC_PERFORMANCE_LEVELS_PER_SWSTATE]; -}; - -typedef struct SISLANDS_SMC_STATETABLE SISLANDS_SMC_STATETABLE; - -#define SI_SMC_SOFT_REGISTER_mclk_chg_timeout 0x0 -#define SI_SMC_SOFT_REGISTER_delay_vreg 0xC -#define SI_SMC_SOFT_REGISTER_delay_acpi 0x28 -#define SI_SMC_SOFT_REGISTER_seq_index 0x5C -#define SI_SMC_SOFT_REGISTER_mvdd_chg_time 0x60 -#define SI_SMC_SOFT_REGISTER_mclk_switch_lim 0x70 -#define SI_SMC_SOFT_REGISTER_watermark_threshold 0x78 -#define SI_SMC_SOFT_REGISTER_phase_shedding_delay 0x88 -#define SI_SMC_SOFT_REGISTER_ulv_volt_change_delay 0x8C -#define SI_SMC_SOFT_REGISTER_mc_block_delay 0x98 -#define SI_SMC_SOFT_REGISTER_ticks_per_us 0xA8 -#define SI_SMC_SOFT_REGISTER_crtc_index 0xC4 -#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min 0xC8 -#define SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max 0xCC -#define SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width 0xF4 -#define SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen 0xFC -#define SI_SMC_SOFT_REGISTER_vr_hot_gpio 0x100 -#define SI_SMC_SOFT_REGISTER_svi_rework_plat_type 0x118 -#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd 0x11c -#define SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc 0x120 - -struct PP_SIslands_FanTable -{ - uint8_t fdo_mode; - uint8_t padding; - int16_t temp_min; - int16_t temp_med; - int16_t temp_max; - int16_t slope1; - int16_t slope2; - int16_t fdo_min; - int16_t hys_up; - int16_t hys_down; - int16_t hys_slope; - int16_t temp_resp_lim; - int16_t temp_curr; - int16_t slope_curr; - int16_t pwm_curr; - uint32_t refresh_period; - int16_t fdo_max; - uint8_t temp_src; - int8_t padding2; -}; - -typedef struct PP_SIslands_FanTable PP_SIslands_FanTable; - -#define SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES 16 -#define SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES 32 - -#define 
SMC_SISLANDS_SCALE_I 7 -#define SMC_SISLANDS_SCALE_R 12 - -struct PP_SIslands_CacConfig -{ - uint16_t cac_lkge_lut[SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES]; - uint32_t lkge_lut_V0; - uint32_t lkge_lut_Vstep; - uint32_t WinTime; - uint32_t R_LL; - uint32_t calculation_repeats; - uint32_t l2numWin_TDP; - uint32_t dc_cac; - uint8_t lts_truncate_n; - uint8_t SHIFT_N; - uint8_t log2_PG_LKG_SCALE; - uint8_t cac_temp; - uint32_t lkge_lut_T0; - uint32_t lkge_lut_Tstep; -}; - -typedef struct PP_SIslands_CacConfig PP_SIslands_CacConfig; - -#define SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE 16 -#define SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT 20 - -struct SMC_SIslands_MCRegisterAddress -{ - uint16_t s0; - uint16_t s1; -}; - -typedef struct SMC_SIslands_MCRegisterAddress SMC_SIslands_MCRegisterAddress; - -struct SMC_SIslands_MCRegisterSet -{ - uint32_t value[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; -}; - -typedef struct SMC_SIslands_MCRegisterSet SMC_SIslands_MCRegisterSet; - -struct SMC_SIslands_MCRegisters -{ - uint8_t last; - uint8_t reserved[3]; - SMC_SIslands_MCRegisterAddress address[SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE]; - SMC_SIslands_MCRegisterSet data[SMC_SISLANDS_MC_REGISTER_ARRAY_SET_COUNT]; -}; - -typedef struct SMC_SIslands_MCRegisters SMC_SIslands_MCRegisters; - -struct SMC_SIslands_MCArbDramTimingRegisterSet -{ - uint32_t mc_arb_dram_timing; - uint32_t mc_arb_dram_timing2; - uint8_t mc_arb_rfsh_rate; - uint8_t mc_arb_burst_time; - uint8_t padding[2]; -}; - -typedef struct SMC_SIslands_MCArbDramTimingRegisterSet SMC_SIslands_MCArbDramTimingRegisterSet; - -struct SMC_SIslands_MCArbDramTimingRegisters -{ - uint8_t arb_current; - uint8_t reserved[3]; - SMC_SIslands_MCArbDramTimingRegisterSet data[16]; -}; - -typedef struct SMC_SIslands_MCArbDramTimingRegisters SMC_SIslands_MCArbDramTimingRegisters; - -struct SMC_SISLANDS_SPLL_DIV_TABLE -{ - uint32_t freq[256]; - uint32_t ss[256]; -}; - -#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK 0x01ffffff -#define SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT 0 -#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK 0xfe000000 -#define SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT 25 -#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK 0x000fffff -#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT 0 -#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK 0xfff00000 -#define SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT 20 - -typedef struct SMC_SISLANDS_SPLL_DIV_TABLE SMC_SISLANDS_SPLL_DIV_TABLE; - -#define SMC_SISLANDS_DTE_MAX_FILTER_STAGES 5 - -#define SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE 16 - -struct Smc_SIslands_DTE_Configuration -{ - uint32_t tau[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; - uint32_t R[SMC_SISLANDS_DTE_MAX_FILTER_STAGES]; - uint32_t K; - uint32_t T0; - uint32_t MaxT; - uint8_t WindowSize; - uint8_t Tdep_count; - uint8_t temp_select; - uint8_t DTE_mode; - uint8_t T_limits[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - uint32_t Tdep_tau[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - uint32_t Tdep_R[SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE]; - uint32_t Tthreshold; -}; - -typedef struct Smc_SIslands_DTE_Configuration Smc_SIslands_DTE_Configuration; - -#define SMC_SISLANDS_DTE_STATUS_FLAG_DTE_ON 1 - -#define SISLANDS_SMC_FIRMWARE_HEADER_LOCATION 0x10000 - -#define SISLANDS_SMC_FIRMWARE_HEADER_version 0x0 -#define SISLANDS_SMC_FIRMWARE_HEADER_flags 0x4 -#define SISLANDS_SMC_FIRMWARE_HEADER_softRegisters 0xC -#define SISLANDS_SMC_FIRMWARE_HEADER_stateTable 0x10 -#define SISLANDS_SMC_FIRMWARE_HEADER_fanTable 
0x14 -#define SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable 0x18 -#define SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable 0x24 -#define SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable 0x30 -#define SISLANDS_SMC_FIRMWARE_HEADER_spllTable 0x38 -#define SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration 0x40 -#define SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters 0x48 - -#pragma pack(pop) - -int amdgpu_si_copy_bytes_to_smc(struct amdgpu_device *adev, - u32 smc_start_address, - const u8 *src, u32 byte_count, u32 limit); -void amdgpu_si_start_smc(struct amdgpu_device *adev); -void amdgpu_si_reset_smc(struct amdgpu_device *adev); -int amdgpu_si_program_jump_on_start(struct amdgpu_device *adev); -void amdgpu_si_smc_clock(struct amdgpu_device *adev, bool enable); -bool amdgpu_si_is_smc_running(struct amdgpu_device *adev); -PPSMC_Result amdgpu_si_send_msg_to_smc(struct amdgpu_device *adev, PPSMC_Msg msg); -PPSMC_Result amdgpu_si_wait_for_smc_inactive(struct amdgpu_device *adev); -int amdgpu_si_load_smc_ucode(struct amdgpu_device *adev, u32 limit); -int amdgpu_si_read_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 *value, u32 limit); -int amdgpu_si_write_smc_sram_dword(struct amdgpu_device *adev, u32 smc_address, - u32 value, u32 limit); - -#endif - diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h new file mode 100644 index 000000000000..8a689baeaf82 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -0,0 +1,1418 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef __AMDGPU_SMU_H__ +#define __AMDGPU_SMU_H__ + +#include "amdgpu.h" +#include "kgd_pp_interface.h" +#include "dm_pp_interface.h" +#include "dm_pp_smu.h" +#include "smu_types.h" + +#define SMU_THERMAL_MINIMUM_ALERT_TEMP 0 +#define SMU_THERMAL_MAXIMUM_ALERT_TEMP 255 +#define SMU_TEMPERATURE_UNITS_PER_CENTIGRADES 1000 +#define SMU_FW_NAME_LEN 0x24 + +#define SMU_DPM_USER_PROFILE_RESTORE (1 << 0) +#define SMU_CUSTOM_FAN_SPEED_RPM (1 << 1) +#define SMU_CUSTOM_FAN_SPEED_PWM (1 << 2) + +// Power Throttlers +#define SMU_THROTTLER_PPT0_BIT 0 +#define SMU_THROTTLER_PPT1_BIT 1 +#define SMU_THROTTLER_PPT2_BIT 2 +#define SMU_THROTTLER_PPT3_BIT 3 +#define SMU_THROTTLER_SPL_BIT 4 +#define SMU_THROTTLER_FPPT_BIT 5 +#define SMU_THROTTLER_SPPT_BIT 6 +#define SMU_THROTTLER_SPPT_APU_BIT 7 + +// Current Throttlers +#define SMU_THROTTLER_TDC_GFX_BIT 16 +#define SMU_THROTTLER_TDC_SOC_BIT 17 +#define SMU_THROTTLER_TDC_MEM_BIT 18 +#define SMU_THROTTLER_TDC_VDD_BIT 19 +#define SMU_THROTTLER_TDC_CVIP_BIT 20 +#define SMU_THROTTLER_EDC_CPU_BIT 21 +#define SMU_THROTTLER_EDC_GFX_BIT 22 +#define SMU_THROTTLER_APCC_BIT 23 + +// Temperature +#define SMU_THROTTLER_TEMP_GPU_BIT 32 +#define SMU_THROTTLER_TEMP_CORE_BIT 33 +#define SMU_THROTTLER_TEMP_MEM_BIT 34 +#define SMU_THROTTLER_TEMP_EDGE_BIT 35 +#define SMU_THROTTLER_TEMP_HOTSPOT_BIT 36 +#define SMU_THROTTLER_TEMP_SOC_BIT 37 +#define SMU_THROTTLER_TEMP_VR_GFX_BIT 38 +#define SMU_THROTTLER_TEMP_VR_SOC_BIT 39 +#define SMU_THROTTLER_TEMP_VR_MEM0_BIT 40 +#define SMU_THROTTLER_TEMP_VR_MEM1_BIT 41 +#define SMU_THROTTLER_TEMP_LIQUID0_BIT 42 +#define SMU_THROTTLER_TEMP_LIQUID1_BIT 43 +#define SMU_THROTTLER_VRHOT0_BIT 44 +#define SMU_THROTTLER_VRHOT1_BIT 45 +#define SMU_THROTTLER_PROCHOT_CPU_BIT 46 +#define SMU_THROTTLER_PROCHOT_GFX_BIT 47 + +// Other +#define SMU_THROTTLER_PPM_BIT 56 +#define SMU_THROTTLER_FIT_BIT 57 + +struct smu_hw_power_state { + unsigned int magic; +}; + +struct smu_power_state; + +enum smu_state_ui_label { + SMU_STATE_UI_LABEL_NONE, + SMU_STATE_UI_LABEL_BATTERY, + SMU_STATE_UI_TABEL_MIDDLE_LOW, + SMU_STATE_UI_LABEL_BALLANCED, + SMU_STATE_UI_LABEL_MIDDLE_HIGHT, + SMU_STATE_UI_LABEL_PERFORMANCE, + SMU_STATE_UI_LABEL_BACO, +}; + +enum smu_state_classification_flag { + SMU_STATE_CLASSIFICATION_FLAG_BOOT = 0x0001, + SMU_STATE_CLASSIFICATION_FLAG_THERMAL = 0x0002, + SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE = 0x0004, + SMU_STATE_CLASSIFICATION_FLAG_RESET = 0x0008, + SMU_STATE_CLASSIFICATION_FLAG_FORCED = 0x0010, + SMU_STATE_CLASSIFICATION_FLAG_USER_3D_PERFORMANCE = 0x0020, + SMU_STATE_CLASSIFICATION_FLAG_USER_2D_PERFORMANCE = 0x0040, + SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE = 0x0080, + SMU_STATE_CLASSIFICATION_FLAG_AC_OVERDIRVER_TEMPLATE = 0x0100, + SMU_STATE_CLASSIFICATION_FLAG_UVD = 0x0200, + SMU_STATE_CLASSIFICATION_FLAG_3D_PERFORMANCE_LOW = 0x0400, + SMU_STATE_CLASSIFICATION_FLAG_ACPI = 0x0800, + SMU_STATE_CLASSIFICATION_FLAG_HD2 = 0x1000, + SMU_STATE_CLASSIFICATION_FLAG_UVD_HD = 0x2000, + SMU_STATE_CLASSIFICATION_FLAG_UVD_SD = 0x4000, + SMU_STATE_CLASSIFICATION_FLAG_USER_DC_PERFORMANCE = 0x8000, + SMU_STATE_CLASSIFICATION_FLAG_DC_OVERDIRVER_TEMPLATE = 0x10000, + SMU_STATE_CLASSIFICATION_FLAG_BACO = 0x20000, + SMU_STATE_CLASSIFICATIN_FLAG_LIMITED_POWER_SOURCE2 = 0x40000, + SMU_STATE_CLASSIFICATION_FLAG_ULV = 0x80000, + SMU_STATE_CLASSIFICATION_FLAG_UVD_MVC = 0x100000, +}; + +struct smu_state_classification_block { + enum smu_state_ui_label ui_label; + enum smu_state_classification_flag flags; + int bios_index; + bool temporary_state; 
+ bool to_be_deleted; +}; + +struct smu_state_pcie_block { + unsigned int lanes; +}; + +enum smu_refreshrate_source { + SMU_REFRESHRATE_SOURCE_EDID, + SMU_REFRESHRATE_SOURCE_EXPLICIT +}; + +struct smu_state_display_block { + bool disable_frame_modulation; + bool limit_refreshrate; + enum smu_refreshrate_source refreshrate_source; + int explicit_refreshrate; + int edid_refreshrate_index; + bool enable_vari_bright; +}; + +struct smu_state_memory_block { + bool dll_off; + uint8_t m3arb; + uint8_t unused[3]; +}; + +struct smu_state_software_algorithm_block { + bool disable_load_balancing; + bool enable_sleep_for_timestamps; +}; + +struct smu_temperature_range { + int min; + int max; + int edge_emergency_max; + int hotspot_min; + int hotspot_crit_max; + int hotspot_emergency_max; + int mem_min; + int mem_crit_max; + int mem_emergency_max; + int software_shutdown_temp; +}; + +struct smu_state_validation_block { + bool single_display_only; + bool disallow_on_dc; + uint8_t supported_power_levels; +}; + +struct smu_uvd_clocks { + uint32_t vclk; + uint32_t dclk; +}; + +/** +* Structure to hold a SMU Power State. +*/ +struct smu_power_state { + uint32_t id; + struct list_head ordered_list; + struct list_head all_states_list; + + struct smu_state_classification_block classification; + struct smu_state_validation_block validation; + struct smu_state_pcie_block pcie; + struct smu_state_display_block display; + struct smu_state_memory_block memory; + struct smu_state_software_algorithm_block software; + struct smu_uvd_clocks uvd_clocks; + struct smu_hw_power_state hardware; +}; + +enum smu_power_src_type +{ + SMU_POWER_SOURCE_AC, + SMU_POWER_SOURCE_DC, + SMU_POWER_SOURCE_COUNT, +}; + +enum smu_ppt_limit_type +{ + SMU_DEFAULT_PPT_LIMIT = 0, + SMU_FAST_PPT_LIMIT, +}; + +enum smu_ppt_limit_level +{ + SMU_PPT_LIMIT_MIN = -1, + SMU_PPT_LIMIT_CURRENT, + SMU_PPT_LIMIT_DEFAULT, + SMU_PPT_LIMIT_MAX, +}; + +enum smu_memory_pool_size +{ + SMU_MEMORY_POOL_SIZE_ZERO = 0, + SMU_MEMORY_POOL_SIZE_256_MB = 0x10000000, + SMU_MEMORY_POOL_SIZE_512_MB = 0x20000000, + SMU_MEMORY_POOL_SIZE_1_GB = 0x40000000, + SMU_MEMORY_POOL_SIZE_2_GB = 0x80000000, +}; + +struct smu_user_dpm_profile { + uint32_t fan_mode; + uint32_t power_limit; + uint32_t fan_speed_pwm; + uint32_t fan_speed_rpm; + uint32_t flags; + uint32_t user_od; + + /* user clock state information */ + uint32_t clk_mask[SMU_CLK_COUNT]; + uint32_t clk_dependency; +}; + +#define SMU_TABLE_INIT(tables, table_id, s, a, d) \ + do { \ + tables[table_id].size = s; \ + tables[table_id].align = a; \ + tables[table_id].domain = d; \ + } while (0) + +struct smu_table { + uint64_t size; + uint32_t align; + uint8_t domain; + uint64_t mc_address; + void *cpu_addr; + struct amdgpu_bo *bo; +}; + +enum smu_perf_level_designation { + PERF_LEVEL_ACTIVITY, + PERF_LEVEL_POWER_CONTAINMENT, +}; + +struct smu_performance_level { + uint32_t core_clock; + uint32_t memory_clock; + uint32_t vddc; + uint32_t vddci; + uint32_t non_local_mem_freq; + uint32_t non_local_mem_width; +}; + +struct smu_clock_info { + uint32_t min_mem_clk; + uint32_t max_mem_clk; + uint32_t min_eng_clk; + uint32_t max_eng_clk; + uint32_t min_bus_bandwidth; + uint32_t max_bus_bandwidth; +}; + +struct smu_bios_boot_up_values +{ + uint32_t revision; + uint32_t gfxclk; + uint32_t uclk; + uint32_t socclk; + uint32_t dcefclk; + uint32_t eclk; + uint32_t vclk; + uint32_t dclk; + uint16_t vddc; + uint16_t vddci; + uint16_t mvddc; + uint16_t vdd_gfx; + uint8_t cooling_id; + uint32_t pp_table_id; + uint32_t format_revision; + uint32_t 
content_revision; + uint32_t fclk; + uint32_t lclk; + uint32_t firmware_caps; +}; + +enum smu_table_id +{ + SMU_TABLE_PPTABLE = 0, + SMU_TABLE_WATERMARKS, + SMU_TABLE_CUSTOM_DPM, + SMU_TABLE_DPMCLOCKS, + SMU_TABLE_AVFS, + SMU_TABLE_AVFS_PSM_DEBUG, + SMU_TABLE_AVFS_FUSE_OVERRIDE, + SMU_TABLE_PMSTATUSLOG, + SMU_TABLE_SMU_METRICS, + SMU_TABLE_DRIVER_SMU_CONFIG, + SMU_TABLE_ACTIVITY_MONITOR_COEFF, + SMU_TABLE_OVERDRIVE, + SMU_TABLE_I2C_COMMANDS, + SMU_TABLE_PACE, + SMU_TABLE_ECCINFO, + SMU_TABLE_COUNT, +}; + +struct smu_table_context +{ + void *power_play_table; + uint32_t power_play_table_size; + void *hardcode_pptable; + unsigned long metrics_time; + void *metrics_table; + void *clocks_table; + void *watermarks_table; + + void *max_sustainable_clocks; + struct smu_bios_boot_up_values boot_values; + void *driver_pptable; + void *ecc_table; + struct smu_table tables[SMU_TABLE_COUNT]; + /* + * The driver table is just a staging buffer for + * uploading/downloading content from the SMU. + * + * And the table_id for SMU_MSG_TransferTableSmu2Dram/ + * SMU_MSG_TransferTableDram2Smu instructs SMU + * which content driver is interested. + */ + struct smu_table driver_table; + struct smu_table memory_pool; + struct smu_table dummy_read_1_table; + uint8_t thermal_controller_type; + + void *overdrive_table; + void *boot_overdrive_table; + void *user_overdrive_table; + + uint32_t gpu_metrics_table_size; + void *gpu_metrics_table; +}; + +struct smu_dpm_context { + uint32_t dpm_context_size; + void *dpm_context; + void *golden_dpm_context; + bool enable_umd_pstate; + enum amd_dpm_forced_level dpm_level; + enum amd_dpm_forced_level saved_dpm_level; + enum amd_dpm_forced_level requested_dpm_level; + struct smu_power_state *dpm_request_power_state; + struct smu_power_state *dpm_current_power_state; + struct mclock_latency_table *mclk_latency_table; +}; + +struct smu_power_gate { + bool uvd_gated; + bool vce_gated; + atomic_t vcn_gated; + atomic_t jpeg_gated; + struct mutex vcn_gate_lock; + struct mutex jpeg_gate_lock; +}; + +struct smu_power_context { + void *power_context; + uint32_t power_context_size; + struct smu_power_gate power_gate; +}; + +#define SMU_FEATURE_MAX (64) +struct smu_feature +{ + uint32_t feature_num; + DECLARE_BITMAP(supported, SMU_FEATURE_MAX); + DECLARE_BITMAP(allowed, SMU_FEATURE_MAX); + DECLARE_BITMAP(enabled, SMU_FEATURE_MAX); + struct mutex mutex; +}; + +struct smu_clocks { + uint32_t engine_clock; + uint32_t memory_clock; + uint32_t bus_bandwidth; + uint32_t engine_clock_in_sr; + uint32_t dcef_clock; + uint32_t dcef_clock_in_sr; +}; + +#define MAX_REGULAR_DPM_NUM 16 +struct mclk_latency_entries { + uint32_t frequency; + uint32_t latency; +}; +struct mclock_latency_table { + uint32_t count; + struct mclk_latency_entries entries[MAX_REGULAR_DPM_NUM]; +}; + +enum smu_reset_mode +{ + SMU_RESET_MODE_0, + SMU_RESET_MODE_1, + SMU_RESET_MODE_2, +}; + +enum smu_baco_state +{ + SMU_BACO_STATE_ENTER = 0, + SMU_BACO_STATE_EXIT, +}; + +struct smu_baco_context +{ + struct mutex mutex; + uint32_t state; + bool platform_support; +}; + +struct smu_freq_info { + uint32_t min; + uint32_t max; + uint32_t freq_level; +}; + +struct pstates_clk_freq { + uint32_t min; + uint32_t standard; + uint32_t peak; + struct smu_freq_info custom; + struct smu_freq_info curr; +}; + +struct smu_umd_pstate_table { + struct pstates_clk_freq gfxclk_pstate; + struct pstates_clk_freq socclk_pstate; + struct pstates_clk_freq uclk_pstate; + struct pstates_clk_freq vclk_pstate; + struct pstates_clk_freq dclk_pstate; +}; + 
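
SMU_TABLE_INIT() and struct smu_table above only record bookkeeping (size, alignment and memory domain); the backing buffers are allocated later by common code. A minimal sketch of how an ASIC backend would typically fill the table array, assuming hypothetical example_init_tables(), PPTable_t and SmuMetrics_t names (the real table types come from the per-ASIC smu*_driver_if headers, and the usual <linux/slab.h> and amdgpu GEM domain definitions are assumed):

/* Sketch only: fill smu_table_context bookkeeping before buffer allocation. */
static int example_init_tables(struct smu_table_context *smu_table)
{
        struct smu_table *tables = smu_table->tables;

        /* Record size/alignment/domain for each firmware-visible table. */
        SMU_TABLE_INIT(tables, SMU_TABLE_PPTABLE, sizeof(PPTable_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
        SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
                       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);

        /* CPU-side cache of the metrics table; metrics_time rate-limits re-reads. */
        smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
        if (!smu_table->metrics_table)
                return -ENOMEM;
        smu_table->metrics_time = 0;

        return 0;
}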
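
The driver_table comment above describes a staging-buffer protocol: on request, the firmware copies a table between its own memory and the shared driver_table buffer, and the driver then memcpy()s the data via driver_table.cpu_addr. A rough sketch of the read direction, with the transfer message hidden behind a hypothetical callback because the real dispatch goes through the ASIC's message and table mappings:

/* Sketch only: read one firmware table through the shared staging buffer. */
static int example_read_table(struct smu_table_context *smu_table,
                              int table_index, void *out, size_t size,
                              int (*transfer_smu2dram)(int table_index))
{
        int ret;

        /* Ask the firmware to dump the table into the staging buffer. */
        ret = transfer_smu2dram(table_index);
        if (ret)
                return ret;

        /* The staging buffer is CPU visible through driver_table.cpu_addr. */
        memcpy(out, smu_table->driver_table.cpu_addr, size);

        return 0;
}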
+struct cmn2asic_msg_mapping { + int valid_mapping; + int map_to; + int valid_in_vf; +}; + +struct cmn2asic_mapping { + int valid_mapping; + int map_to; +}; + +struct stb_context { + uint32_t stb_buf_size; + bool enabled; + spinlock_t lock; +}; + +#define WORKLOAD_POLICY_MAX 7 + +struct smu_context +{ + struct amdgpu_device *adev; + struct amdgpu_irq_src irq_source; + + const struct pptable_funcs *ppt_funcs; + const struct cmn2asic_msg_mapping *message_map; + const struct cmn2asic_mapping *clock_map; + const struct cmn2asic_mapping *feature_map; + const struct cmn2asic_mapping *table_map; + const struct cmn2asic_mapping *pwr_src_map; + const struct cmn2asic_mapping *workload_map; + struct mutex mutex; + struct mutex sensor_lock; + struct mutex metrics_lock; + struct mutex message_lock; + uint64_t pool_size; + + struct smu_table_context smu_table; + struct smu_dpm_context smu_dpm; + struct smu_power_context smu_power; + struct smu_feature smu_feature; + struct amd_pp_display_configuration *display_config; + struct smu_baco_context smu_baco; + struct smu_temperature_range thermal_range; + void *od_settings; + + struct smu_umd_pstate_table pstate_table; + uint32_t pstate_sclk; + uint32_t pstate_mclk; + + bool od_enabled; + uint32_t current_power_limit; + uint32_t default_power_limit; + uint32_t max_power_limit; + + /* soft pptable */ + uint32_t ppt_offset_bytes; + uint32_t ppt_size_bytes; + uint8_t *ppt_start_addr; + + bool support_power_containment; + bool disable_watermark; + +#define WATERMARKS_EXIST (1 << 0) +#define WATERMARKS_LOADED (1 << 1) + uint32_t watermarks_bitmap; + uint32_t hard_min_uclk_req_from_dal; + bool disable_uclk_switch; + + uint32_t workload_mask; + uint32_t workload_prority[WORKLOAD_POLICY_MAX]; + uint32_t workload_setting[WORKLOAD_POLICY_MAX]; + uint32_t power_profile_mode; + uint32_t default_power_profile_mode; + bool pm_enabled; + bool is_apu; + + uint32_t smc_driver_if_version; + uint32_t smc_fw_if_version; + uint32_t smc_fw_version; + + bool uploading_custom_pp_table; + bool dc_controlled_by_gpio; + + struct work_struct throttling_logging_work; + atomic64_t throttle_int_counter; + struct work_struct interrupt_work; + + unsigned fan_max_rpm; + unsigned manual_fan_speed_pwm; + + uint32_t gfx_default_hard_min_freq; + uint32_t gfx_default_soft_max_freq; + uint32_t gfx_actual_hard_min_freq; + uint32_t gfx_actual_soft_max_freq; + + /* APU only */ + uint32_t cpu_default_soft_min_freq; + uint32_t cpu_default_soft_max_freq; + uint32_t cpu_actual_soft_min_freq; + uint32_t cpu_actual_soft_max_freq; + uint32_t cpu_core_id_select; + uint16_t cpu_core_num; + + struct smu_user_dpm_profile user_dpm_profile; + + struct stb_context stb_context; +}; + +struct i2c_adapter; + +/** + * struct pptable_funcs - Callbacks used to interact with the SMU. + */ +struct pptable_funcs { + /** + * @run_btc: Calibrate voltage/frequency curve to fit the system's + * power delivery and voltage margins. Required for adaptive + * voltage frequency scaling (AVFS). + */ + int (*run_btc)(struct smu_context *smu); + + /** + * @get_allowed_feature_mask: Get allowed feature mask. + * &feature_mask: Array to store feature mask. + * &num: Elements in &feature_mask. + */ + int (*get_allowed_feature_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); + + /** + * @get_current_power_state: Get the current power state. + * + * Return: Current power state on success, negative errno on failure. 
+ */ + enum amd_pm_state_type (*get_current_power_state)(struct smu_context *smu); + + /** + * @set_default_dpm_table: Retrieve the default overdrive settings from + * the SMU. + */ + int (*set_default_dpm_table)(struct smu_context *smu); + + int (*set_power_state)(struct smu_context *smu); + + /** + * @populate_umd_state_clk: Populate the UMD power state table with + * defaults. + */ + int (*populate_umd_state_clk)(struct smu_context *smu); + + /** + * @print_clk_levels: Print DPM clock levels for a clock domain + * to buffer. Star current level. + * + * Used for sysfs interfaces. + */ + int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf); + + /** + * @force_clk_levels: Set a range of allowed DPM levels for a clock + * domain. + * &clk_type: Clock domain. + * &mask: Range of allowed DPM levels. + */ + int (*force_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask); + + /** + * @od_edit_dpm_table: Edit the custom overdrive DPM table. + * &type: Type of edit. + * &input: Edit parameters. + * &size: Size of &input. + */ + int (*od_edit_dpm_table)(struct smu_context *smu, + enum PP_OD_DPM_TABLE_COMMAND type, + long *input, uint32_t size); + + /** + * @restore_user_od_settings: Restore the user customized + * OD settings on S3/S4/Runpm resume. + */ + int (*restore_user_od_settings)(struct smu_context *smu); + + /** + * @get_clock_by_type_with_latency: Get the speed and latency of a clock + * domain. + */ + int (*get_clock_by_type_with_latency)(struct smu_context *smu, + enum smu_clk_type clk_type, + struct + pp_clock_levels_with_latency + *clocks); + /** + * @get_clock_by_type_with_voltage: Get the speed and voltage of a clock + * domain. + */ + int (*get_clock_by_type_with_voltage)(struct smu_context *smu, + enum amd_pp_clock_type type, + struct + pp_clock_levels_with_voltage + *clocks); + + /** + * @get_power_profile_mode: Print all power profile modes to + * buffer. Star current mode. + */ + int (*get_power_profile_mode)(struct smu_context *smu, char *buf); + + /** + * @set_power_profile_mode: Set a power profile mode. Also used to + * create/set custom power profile modes. + * &input: Power profile mode parameters. + * &size: Size of &input. + */ + int (*set_power_profile_mode)(struct smu_context *smu, long *input, uint32_t size); + + /** + * @dpm_set_vcn_enable: Enable/disable VCN engine dynamic power + * management. + */ + int (*dpm_set_vcn_enable)(struct smu_context *smu, bool enable); + + /** + * @dpm_set_jpeg_enable: Enable/disable JPEG engine dynamic power + * management. + */ + int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable); + + /** + * @read_sensor: Read data from a sensor. + * &sensor: Sensor to read data from. + * &data: Sensor reading. + * &size: Size of &data. + */ + int (*read_sensor)(struct smu_context *smu, enum amd_pp_sensors sensor, + void *data, uint32_t *size); + + /** + * @pre_display_config_changed: Prepare GPU for a display configuration + * change. + * + * Disable display tracking and pin memory clock speed to maximum. Used + * in display component synchronization. + */ + int (*pre_display_config_changed)(struct smu_context *smu); + + /** + * @display_config_changed: Notify the SMU of the current display + * configuration. + * + * Allows SMU to properly track blanking periods for memory clock + * adjustment. Used in display component synchronization. 
+ */ + int (*display_config_changed)(struct smu_context *smu); + + int (*apply_clocks_adjust_rules)(struct smu_context *smu); + + /** + * @notify_smc_display_config: Applies display requirements to the + * current power state. + * + * Optimize deep sleep DCEFclk and mclk for the current display + * configuration. Used in display component synchronization. + */ + int (*notify_smc_display_config)(struct smu_context *smu); + + /** + * @is_dpm_running: Check if DPM is running. + * + * Return: True if DPM is running, false otherwise. + */ + bool (*is_dpm_running)(struct smu_context *smu); + + /** + * @get_fan_speed_pwm: Get the current fan speed in PWM. + */ + int (*get_fan_speed_pwm)(struct smu_context *smu, uint32_t *speed); + + /** + * @get_fan_speed_rpm: Get the current fan speed in rpm. + */ + int (*get_fan_speed_rpm)(struct smu_context *smu, uint32_t *speed); + + /** + * @set_watermarks_table: Configure and upload the watermarks tables to + * the SMU. + */ + int (*set_watermarks_table)(struct smu_context *smu, + struct pp_smu_wm_range_sets *clock_ranges); + + /** + * @get_thermal_temperature_range: Get safe thermal limits in Celcius. + */ + int (*get_thermal_temperature_range)(struct smu_context *smu, struct smu_temperature_range *range); + + /** + * @get_uclk_dpm_states: Get memory clock DPM levels in kHz. + * &clocks_in_khz: Array of DPM levels. + * &num_states: Elements in &clocks_in_khz. + */ + int (*get_uclk_dpm_states)(struct smu_context *smu, uint32_t *clocks_in_khz, uint32_t *num_states); + + /** + * @set_default_od_settings: Set the overdrive tables to defaults. + */ + int (*set_default_od_settings)(struct smu_context *smu); + + /** + * @set_performance_level: Set a performance level. + */ + int (*set_performance_level)(struct smu_context *smu, enum amd_dpm_forced_level level); + + /** + * @display_disable_memory_clock_switch: Enable/disable dynamic memory + * clock switching. + * + * Disabling this feature forces memory clock speed to maximum. + * Enabling sets the minimum memory clock capable of driving the + * current display configuration. + */ + int (*display_disable_memory_clock_switch)(struct smu_context *smu, bool disable_memory_clock_switch); + + /** + * @dump_pptable: Print the power play table to the system log. + */ + void (*dump_pptable)(struct smu_context *smu); + + /** + * @get_power_limit: Get the device's power limits. + */ + int (*get_power_limit)(struct smu_context *smu, + uint32_t *current_power_limit, + uint32_t *default_power_limit, + uint32_t *max_power_limit); + + /** + * @get_ppt_limit: Get the device's ppt limits. + */ + int (*get_ppt_limit)(struct smu_context *smu, uint32_t *ppt_limit, + enum smu_ppt_limit_type limit_type, enum smu_ppt_limit_level limit_level); + + /** + * @set_df_cstate: Set data fabric cstate. + */ + int (*set_df_cstate)(struct smu_context *smu, enum pp_df_cstate state); + + /** + * @allow_xgmi_power_down: Enable/disable external global memory + * interconnect power down. + */ + int (*allow_xgmi_power_down)(struct smu_context *smu, bool en); + + /** + * @update_pcie_parameters: Update and upload the system's PCIe + * capabilites to the SMU. + * &pcie_gen_cap: Maximum allowed PCIe generation. + * &pcie_width_cap: Maximum allowed PCIe width. + */ + int (*update_pcie_parameters)(struct smu_context *smu, uint32_t pcie_gen_cap, uint32_t pcie_width_cap); + + /** + * @i2c_init: Initialize i2c. + * + * The i2c bus is used internally by the SMU voltage regulators and + * other devices. 
The i2c's EEPROM also stores bad page tables on boards
+ * with ECC.
+ */
+ int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+
+ /**
+ * @i2c_fini: Tear down i2c.
+ */
+ void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
+
+ /**
+ * @get_unique_id: Get the GPU's unique id. Used for asset tracking.
+ */
+ void (*get_unique_id)(struct smu_context *smu);
+
+ /**
+ * @get_dpm_clock_table: Get a copy of the DPM clock table.
+ *
+ * Used by display component in bandwidth and watermark calculations.
+ */
+ int (*get_dpm_clock_table)(struct smu_context *smu, struct dpm_clocks *clock_table);
+
+ /**
+ * @init_microcode: Request the SMU's firmware from the kernel.
+ */
+ int (*init_microcode)(struct smu_context *smu);
+
+ /**
+ * @load_microcode: Load firmware onto the SMU.
+ */
+ int (*load_microcode)(struct smu_context *smu);
+
+ /**
+ * @fini_microcode: Release the SMU's firmware.
+ */
+ void (*fini_microcode)(struct smu_context *smu);
+
+ /**
+ * @init_smc_tables: Initialize the SMU tables.
+ */
+ int (*init_smc_tables)(struct smu_context *smu);
+
+ /**
+ * @fini_smc_tables: Release the SMU tables.
+ */
+ int (*fini_smc_tables)(struct smu_context *smu);
+
+ /**
+ * @init_power: Initialize the power gate table context.
+ */
+ int (*init_power)(struct smu_context *smu);
+
+ /**
+ * @fini_power: Release the power gate table context.
+ */
+ int (*fini_power)(struct smu_context *smu);
+
+ /**
+ * @check_fw_status: Check the SMU's firmware status.
+ *
+ * Return: Zero if check passes, negative errno on failure.
+ */
+ int (*check_fw_status)(struct smu_context *smu);
+
+ /**
+ * @set_mp1_state: Put SMU into a correct state for the coming
+ * resume from runpm or gpu reset.
+ */
+ int (*set_mp1_state)(struct smu_context *smu,
+ enum pp_mp1_state mp1_state);
+
+ /**
+ * @setup_pptable: Initialize the power play table and populate it with
+ * default values.
+ */
+ int (*setup_pptable)(struct smu_context *smu);
+
+ /**
+ * @get_vbios_bootup_values: Get default boot values from the VBIOS.
+ */
+ int (*get_vbios_bootup_values)(struct smu_context *smu);
+
+ /**
+ * @check_fw_version: Print driver and SMU interface versions to the
+ * system log.
+ *
+ * Interface mismatch is not a critical failure.
+ */
+ int (*check_fw_version)(struct smu_context *smu);
+
+ /**
+ * @powergate_sdma: Power up/down system direct memory access.
+ */
+ int (*powergate_sdma)(struct smu_context *smu, bool gate);
+
+ /**
+ * @set_gfx_cgpg: Enable/disable graphics engine coarse grain power
+ * gating.
+ */
+ int (*set_gfx_cgpg)(struct smu_context *smu, bool enable);
+
+ /**
+ * @write_pptable: Write the power play table to the SMU.
+ */
+ int (*write_pptable)(struct smu_context *smu);
+
+ /**
+ * @set_driver_table_location: Send the location of the driver table to
+ * the SMU.
+ */
+ int (*set_driver_table_location)(struct smu_context *smu);
+
+ /**
+ * @set_tool_table_location: Send the location of the tool table to the
+ * SMU.
+ */
+ int (*set_tool_table_location)(struct smu_context *smu);
+
+ /**
+ * @notify_memory_pool_location: Send the location of the memory pool to
+ * the SMU.
+ */
+ int (*notify_memory_pool_location)(struct smu_context *smu);
+
+ /**
+ * @system_features_control: Enable/disable all SMU features.
+ */
+ int (*system_features_control)(struct smu_context *smu, bool en);
+
+ /**
+ * @send_smc_msg_with_param: Send a message with a parameter to the SMU.
+ * &msg: Type of message.
+ * &param: Message parameter.
+ * &read_arg: SMU response (optional).
+ */ + int (*send_smc_msg_with_param)(struct smu_context *smu, + enum smu_message_type msg, uint32_t param, uint32_t *read_arg); + + /** + * @send_smc_msg: Send a message to the SMU. + * &msg: Type of message. + * &read_arg: SMU response (optional). + */ + int (*send_smc_msg)(struct smu_context *smu, + enum smu_message_type msg, + uint32_t *read_arg); + + /** + * @init_display_count: Notify the SMU of the number of display + * components in current display configuration. + */ + int (*init_display_count)(struct smu_context *smu, uint32_t count); + + /** + * @set_allowed_mask: Notify the SMU of the features currently allowed + * by the driver. + */ + int (*set_allowed_mask)(struct smu_context *smu); + + /** + * @get_enabled_mask: Get a mask of features that are currently enabled + * on the SMU. + * &feature_mask: Array representing enabled feature mask. + * &num: Elements in &feature_mask. + */ + int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num); + + /** + * @feature_is_enabled: Test if a feature is enabled. + * + * Return: One if enabled, zero if disabled. + */ + int (*feature_is_enabled)(struct smu_context *smu, enum smu_feature_mask mask); + + /** + * @disable_all_features_with_exception: Disable all features with + * exception to those in &mask. + */ + int (*disable_all_features_with_exception)(struct smu_context *smu, + bool no_hw_disablement, + enum smu_feature_mask mask); + + /** + * @notify_display_change: Enable fast memory clock switching. + * + * Allows for fine grained memory clock switching but has more stringent + * timing requirements. + */ + int (*notify_display_change)(struct smu_context *smu); + + /** + * @set_power_limit: Set power limit in watts. + */ + int (*set_power_limit)(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); + + /** + * @init_max_sustainable_clocks: Populate max sustainable clock speed + * table with values from the SMU. + */ + int (*init_max_sustainable_clocks)(struct smu_context *smu); + + /** + * @enable_thermal_alert: Enable thermal alert interrupts. + */ + int (*enable_thermal_alert)(struct smu_context *smu); + + /** + * @disable_thermal_alert: Disable thermal alert interrupts. + */ + int (*disable_thermal_alert)(struct smu_context *smu); + + /** + * @set_min_dcef_deep_sleep: Set a minimum display fabric deep sleep + * clock speed in MHz. + */ + int (*set_min_dcef_deep_sleep)(struct smu_context *smu, uint32_t clk); + + /** + * @display_clock_voltage_request: Set a hard minimum frequency + * for a clock domain. + */ + int (*display_clock_voltage_request)(struct smu_context *smu, struct + pp_display_clock_request + *clock_req); + + /** + * @get_fan_control_mode: Get the current fan control mode. + */ + uint32_t (*get_fan_control_mode)(struct smu_context *smu); + + /** + * @set_fan_control_mode: Set the fan control mode. + */ + int (*set_fan_control_mode)(struct smu_context *smu, uint32_t mode); + + /** + * @set_fan_speed_pwm: Set a static fan speed in PWM. + */ + int (*set_fan_speed_pwm)(struct smu_context *smu, uint32_t speed); + + /** + * @set_fan_speed_rpm: Set a static fan speed in rpm. + */ + int (*set_fan_speed_rpm)(struct smu_context *smu, uint32_t speed); + + /** + * @set_xgmi_pstate: Set inter-chip global memory interconnect pstate. + * &pstate: Pstate to set. D0 if Nonzero, D3 otherwise. + */ + int (*set_xgmi_pstate)(struct smu_context *smu, uint32_t pstate); + + /** + * @gfx_off_control: Enable/disable graphics engine poweroff. 
+ */ + int (*gfx_off_control)(struct smu_context *smu, bool enable); + + + /** + * @get_gfx_off_status: Get graphics engine poweroff status. + * + * Return: + * 0 - GFXOFF(default). + * 1 - Transition out of GFX State. + * 2 - Not in GFXOFF. + * 3 - Transition into GFXOFF. + */ + uint32_t (*get_gfx_off_status)(struct smu_context *smu); + + /** + * @register_irq_handler: Register interupt request handlers. + */ + int (*register_irq_handler)(struct smu_context *smu); + + /** + * @set_azalia_d3_pme: Wake the audio decode engine from d3 sleep. + */ + int (*set_azalia_d3_pme)(struct smu_context *smu); + + /** + * @get_max_sustainable_clocks_by_dc: Get a copy of the max sustainable + * clock speeds table. + * + * Provides a way for the display component (DC) to get the max + * sustainable clocks from the SMU. + */ + int (*get_max_sustainable_clocks_by_dc)(struct smu_context *smu, struct pp_smu_nv_clock_table *max_clocks); + + /** + * @baco_is_support: Check if GPU supports BACO (Bus Active, Chip Off). + */ + bool (*baco_is_support)(struct smu_context *smu); + + /** + * @baco_get_state: Get the current BACO state. + * + * Return: Current BACO state. + */ + enum smu_baco_state (*baco_get_state)(struct smu_context *smu); + + /** + * @baco_set_state: Enter/exit BACO. + */ + int (*baco_set_state)(struct smu_context *smu, enum smu_baco_state state); + + /** + * @baco_enter: Enter BACO. + */ + int (*baco_enter)(struct smu_context *smu); + + /** + * @baco_exit: Exit Baco. + */ + int (*baco_exit)(struct smu_context *smu); + + /** + * @mode1_reset_is_support: Check if GPU supports mode1 reset. + */ + bool (*mode1_reset_is_support)(struct smu_context *smu); + /** + * @mode2_reset_is_support: Check if GPU supports mode2 reset. + */ + bool (*mode2_reset_is_support)(struct smu_context *smu); + + /** + * @mode1_reset: Perform mode1 reset. + * + * Complete GPU reset. + */ + int (*mode1_reset)(struct smu_context *smu); + + /** + * @mode2_reset: Perform mode2 reset. + * + * Mode2 reset generally does not reset as many IPs as mode1 reset. The + * IPs reset varies by asic. + */ + int (*mode2_reset)(struct smu_context *smu); + + /** + * @get_dpm_ultimate_freq: Get the hard frequency range of a clock + * domain in MHz. + */ + int (*get_dpm_ultimate_freq)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max); + + /** + * @set_soft_freq_limited_range: Set the soft frequency range of a clock + * domain in MHz. + */ + int (*set_soft_freq_limited_range)(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t min, uint32_t max); + + /** + * @set_power_source: Notify the SMU of the current power source. + */ + int (*set_power_source)(struct smu_context *smu, enum smu_power_src_type power_src); + + /** + * @log_thermal_throttling_event: Print a thermal throttling warning to + * the system's log. + */ + void (*log_thermal_throttling_event)(struct smu_context *smu); + + /** + * @get_pp_feature_mask: Print a human readable table of enabled + * features to buffer. + */ + size_t (*get_pp_feature_mask)(struct smu_context *smu, char *buf); + + /** + * @set_pp_feature_mask: Request the SMU enable/disable features to + * match those enabled in &new_mask. + */ + int (*set_pp_feature_mask)(struct smu_context *smu, uint64_t new_mask); + + /** + * @get_gpu_metrics: Get a copy of the GPU metrics table from the SMU. + * + * Return: Size of &table + */ + ssize_t (*get_gpu_metrics)(struct smu_context *smu, void **table); + + /** + * @enable_mgpu_fan_boost: Enable multi-GPU fan boost. 
+ */
+ int (*enable_mgpu_fan_boost)(struct smu_context *smu);
+
+ /**
+ * @gfx_ulv_control: Enable/disable ultra low voltage.
+ */
+ int (*gfx_ulv_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @deep_sleep_control: Enable/disable deep sleep.
+ */
+ int (*deep_sleep_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @get_fan_parameters: Get fan parameters.
+ *
+ * Get maximum fan speed from the power play table.
+ */
+ int (*get_fan_parameters)(struct smu_context *smu);
+
+ /**
+ * @post_init: Helper function for asic specific workarounds.
+ */
+ int (*post_init)(struct smu_context *smu);
+
+ /**
+ * @interrupt_work: Work task scheduled from SMU interrupt handler.
+ */
+ void (*interrupt_work)(struct smu_context *smu);
+
+ /**
+ * @gpo_control: Enable/disable graphics power optimization if supported.
+ */
+ int (*gpo_control)(struct smu_context *smu, bool enablement);
+
+ /**
+ * @gfx_state_change_set: Send the current graphics state to the SMU.
+ */
+ int (*gfx_state_change_set)(struct smu_context *smu, uint32_t state);
+
+ /**
+ * @set_fine_grain_gfx_freq_parameters: Set fine grain graphics clock
+ * parameters to defaults.
+ */
+ int (*set_fine_grain_gfx_freq_parameters)(struct smu_context *smu);
+
+ /**
+ * @smu_handle_passthrough_sbr: Send message to SMU about special handling for SBR.
+ */
+ int (*smu_handle_passthrough_sbr)(struct smu_context *smu, bool enable);
+
+ /**
+ * @wait_for_event: Wait for events from SMU.
+ */
+ int (*wait_for_event)(struct smu_context *smu,
+ enum smu_event_type event, uint64_t event_arg);
+
+ /**
+ * @send_hbm_bad_pages_num: Message SMU to update the bad page number
+ * of the SMUBUS table.
+ */
+ int (*send_hbm_bad_pages_num)(struct smu_context *smu, uint32_t size);
+
+ /**
+ * @get_ecc_info: Message SMU to get the ECC INFO table.
+ */
+ ssize_t (*get_ecc_info)(struct smu_context *smu, void *table);
+
+
+ /**
+ * @stb_collect_info: Collects Smart Trace Buffers data.
+ */ + int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size); +}; + +typedef enum { + METRICS_CURR_GFXCLK, + METRICS_CURR_SOCCLK, + METRICS_CURR_UCLK, + METRICS_CURR_VCLK, + METRICS_CURR_VCLK1, + METRICS_CURR_DCLK, + METRICS_CURR_DCLK1, + METRICS_CURR_FCLK, + METRICS_CURR_DCEFCLK, + METRICS_AVERAGE_CPUCLK, + METRICS_AVERAGE_GFXCLK, + METRICS_AVERAGE_SOCCLK, + METRICS_AVERAGE_FCLK, + METRICS_AVERAGE_UCLK, + METRICS_AVERAGE_VCLK, + METRICS_AVERAGE_DCLK, + METRICS_AVERAGE_GFXACTIVITY, + METRICS_AVERAGE_MEMACTIVITY, + METRICS_AVERAGE_VCNACTIVITY, + METRICS_AVERAGE_SOCKETPOWER, + METRICS_TEMPERATURE_EDGE, + METRICS_TEMPERATURE_HOTSPOT, + METRICS_TEMPERATURE_MEM, + METRICS_TEMPERATURE_VRGFX, + METRICS_TEMPERATURE_VRSOC, + METRICS_TEMPERATURE_VRMEM, + METRICS_THROTTLER_STATUS, + METRICS_CURR_FANSPEED, + METRICS_VOLTAGE_VDDSOC, + METRICS_VOLTAGE_VDDGFX, + METRICS_SS_APU_SHARE, + METRICS_SS_DGPU_SHARE, +} MetricsMember_t; + +enum smu_cmn2asic_mapping_type { + CMN2ASIC_MAPPING_MSG, + CMN2ASIC_MAPPING_CLK, + CMN2ASIC_MAPPING_FEATURE, + CMN2ASIC_MAPPING_TABLE, + CMN2ASIC_MAPPING_PWR, + CMN2ASIC_MAPPING_WORKLOAD, +}; + +#define MSG_MAP(msg, index, valid_in_vf) \ + [SMU_MSG_##msg] = {1, (index), (valid_in_vf)} + +#define CLK_MAP(clk, index) \ + [SMU_##clk] = {1, (index)} + +#define FEA_MAP(fea) \ + [SMU_FEATURE_##fea##_BIT] = {1, FEATURE_##fea##_BIT} + +#define FEA_MAP_REVERSE(fea) \ + [SMU_FEATURE_DPM_##fea##_BIT] = {1, FEATURE_##fea##_DPM_BIT} + +#define FEA_MAP_HALF_REVERSE(fea) \ + [SMU_FEATURE_DPM_##fea##CLK_BIT] = {1, FEATURE_##fea##_DPM_BIT} + +#define TAB_MAP(tab) \ + [SMU_TABLE_##tab] = {1, TABLE_##tab} + +#define TAB_MAP_VALID(tab) \ + [SMU_TABLE_##tab] = {1, TABLE_##tab} + +#define TAB_MAP_INVALID(tab) \ + [SMU_TABLE_##tab] = {0, TABLE_##tab} + +#define PWR_MAP(tab) \ + [SMU_POWER_SOURCE_##tab] = {1, POWER_SOURCE_##tab} + +#define WORKLOAD_MAP(profile, workload) \ + [profile] = {1, (workload)} + +/** + * smu_memcpy_trailing - Copy the end of one structure into the middle of another + * + * @dst: Pointer to destination struct + * @first_dst_member: The member name in @dst where the overwrite begins + * @last_dst_member: The member name in @dst where the overwrite ends after + * @src: Pointer to the source struct + * @first_src_member: The member name in @src where the copy begins + * + */ +#define smu_memcpy_trailing(dst, first_dst_member, last_dst_member, \ + src, first_src_member) \ +({ \ + size_t __src_offset = offsetof(typeof(*(src)), first_src_member); \ + size_t __src_size = sizeof(*(src)) - __src_offset; \ + size_t __dst_offset = offsetof(typeof(*(dst)), first_dst_member); \ + size_t __dst_size = offsetofend(typeof(*(dst)), last_dst_member) - \ + __dst_offset; \ + BUILD_BUG_ON(__src_size != __dst_size); \ + __builtin_memcpy((u8 *)(dst) + __dst_offset, \ + (u8 *)(src) + __src_offset, \ + __dst_size); \ +}) + +#if !defined(SWSMU_CODE_LAYER_L2) && !defined(SWSMU_CODE_LAYER_L3) && !defined(SWSMU_CODE_LAYER_L4) +int smu_get_power_limit(void *handle, + uint32_t *limit, + enum pp_power_limit_level pp_limit_level, + enum pp_power_type pp_power_type); + +bool smu_mode1_reset_is_support(struct smu_context *smu); +bool smu_mode2_reset_is_support(struct smu_context *smu); +int smu_mode1_reset(struct smu_context *smu); + +extern const struct amd_ip_funcs smu_ip_funcs; + +bool is_support_sw_smu(struct amdgpu_device *adev); +bool is_support_cclk_dpm(struct amdgpu_device *adev); +int smu_write_watermarks_table(struct smu_context *smu); + +int smu_get_dpm_freq_range(struct 
smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); + +int smu_set_ac_dc(struct smu_context *smu); + +int smu_allow_xgmi_power_down(struct smu_context *smu, bool en); + +int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value); + +int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable); + +int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, + uint64_t event_arg); +int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc); +int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size); +void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev); +int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size); +#endif +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h new file mode 100644 index 000000000000..ab66a4b9e438 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h @@ -0,0 +1,130 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef ALDEBARAN_PP_SMC_H +#define ALDEBARAN_PP_SMC_H + +#pragma pack(push, 1) + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GfxDriverReset 0x3 +#define PPSMC_MSG_GetDriverIfVersion 0x4 +#define PPSMC_MSG_spare1 0x5 +#define PPSMC_MSG_spare2 0x6 +#define PPSMC_MSG_EnableAllSmuFeatures 0x7 +#define PPSMC_MSG_DisableAllSmuFeatures 0x8 +#define PPSMC_MSG_spare3 0x9 +#define PPSMC_MSG_spare4 0xA +#define PPSMC_MSG_spare5 0xB +#define PPSMC_MSG_spare6 0xC +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xD +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrHigh 0xF +#define PPSMC_MSG_SetDriverDramAddrLow 0x10 +#define PPSMC_MSG_SetToolsDramAddrHigh 0x11 +#define PPSMC_MSG_SetToolsDramAddrLow 0x12 +#define PPSMC_MSG_TransferTableSmu2Dram 0x13 +#define PPSMC_MSG_TransferTableDram2Smu 0x14 +#define PPSMC_MSG_UseDefaultPPTable 0x15 +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17 +#define PPSMC_MSG_SetSoftMinByFreq 0x18 +#define PPSMC_MSG_SetSoftMaxByFreq 0x19 +#define PPSMC_MSG_SetHardMinByFreq 0x1A +#define PPSMC_MSG_SetHardMaxByFreq 0x1B +#define PPSMC_MSG_GetMinDpmFreq 0x1C +#define PPSMC_MSG_GetMaxDpmFreq 0x1D +#define PPSMC_MSG_GetDpmFreqByIndex 0x1E +#define PPSMC_MSG_SetWorkloadMask 0x1F +#define PPSMC_MSG_GetVoltageByDpm 0x20 +#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x21 +#define PPSMC_MSG_SetPptLimit 0x22 +#define PPSMC_MSG_GetPptLimit 0x23 +#define PPSMC_MSG_PrepareMp1ForUnload 0x24 +#define PPSMC_MSG_PrepareMp1ForReset 0x25 //retired in 68.07 +#define PPSMC_MSG_SoftReset 0x26 //retired in 68.07 +#define PPSMC_MSG_RunDcBtc 0x27 +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x28 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x29 +#define PPSMC_MSG_DramLogSetDramSize 0x2A +#define PPSMC_MSG_GetDebugData 0x2B +#define PPSMC_MSG_WaflTest 0x2C +#define PPSMC_MSG_spare7 0x2D +#define PPSMC_MSG_SetMemoryChannelEnable 0x2E +#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x2F +#define PPSMC_MSG_DFCstateControl 0x32 +#define PPSMC_MSG_GetGmiPwrDnHyst 0x33 +#define PPSMC_MSG_SetGmiPwrDnHyst 0x34 +#define PPSMC_MSG_GmiPwrDnControl 0x35 +#define PPSMC_MSG_EnterGfxoff 0x36 +#define PPSMC_MSG_ExitGfxoff 0x37 +#define PPSMC_MSG_SetExecuteDMATest 0x38 +#define PPSMC_MSG_EnableDeterminism 0x39 +#define PPSMC_MSG_DisableDeterminism 0x3A +#define PPSMC_MSG_SetUclkDpmMode 0x3B + +//STB to dram log +#define PPSMC_MSG_DumpSTBtoDram 0x3C +#define PPSMC_MSG_STBtoDramLogSetDramAddrHigh 0x3D +#define PPSMC_MSG_STBtoDramLogSetDramAddrLow 0x3E +#define PPSMC_MSG_STBtoDramLogSetDramSize 0x3F +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrHigh 0x40 +#define PPSMC_MSG_SetSystemVirtualSTBtoDramAddrLow 0x41 + +#define PPSMC_MSG_GfxDriverResetRecovery 0x42 +#define PPSMC_MSG_BoardPowerCalibration 0x43 +#define PPSMC_MSG_HeavySBR 0x45 +#define PPSMC_Message_Count 0x46 + + +//PPSMC Reset Types +#define PPSMC_RESET_TYPE_WARM_RESET 0x00 +#define PPSMC_RESET_TYPE_DRIVER_MODE_1_RESET 0x01 //driver msg argument should be 1 for mode-1 +#define PPSMC_RESET_TYPE_DRIVER_MODE_2_RESET 0x02 //and 2 for mode-2 +#define PPSMC_RESET_TYPE_PCIE_LINK_RESET 0x03 +#define PPSMC_RESET_TYPE_BIF_LINK_RESET 0x04 +#define PPSMC_RESET_TYPE_PF0_FLR_RESET 0x05 + + +typedef enum { + 
GFXOFF_ERROR_NO_ERROR, + GFXOFF_ERROR_DISALLOWED, + GFXOFF_ERROR_GFX_BUSY, + GFXOFF_ERROR_GFX_OFF, + GFXOFF_ERROR_GFX_ON, +} GFXOFF_ERROR_e; + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_Msg; +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h new file mode 100644 index 000000000000..45f5d29bc705 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h @@ -0,0 +1,134 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef ARCTURUS_PP_SMC_H +#define ARCTURUS_PP_SMC_H + +#pragma pack(push, 1) + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +// BASIC +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrLow 0xF +#define PPSMC_MSG_SetToolsDramAddrHigh 0x10 +#define PPSMC_MSG_SetToolsDramAddrLow 0x11 +#define PPSMC_MSG_TransferTableSmu2Dram 0x12 +#define PPSMC_MSG_TransferTableDram2Smu 0x13 +#define PPSMC_MSG_UseDefaultPPTable 0x14 +#define PPSMC_MSG_UseBackupPPTable 0x15 +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17 + +//BACO/BAMACO/BOMACO +#define PPSMC_MSG_EnterBaco 0x18 +#define PPSMC_MSG_ExitBaco 0x19 +#define PPSMC_MSG_ArmD3 0x1A + +//DPM +#define PPSMC_MSG_SetSoftMinByFreq 0x1B +#define PPSMC_MSG_SetSoftMaxByFreq 0x1C +#define PPSMC_MSG_SetHardMinByFreq 0x1D +#define PPSMC_MSG_SetHardMaxByFreq 0x1E +#define PPSMC_MSG_GetMinDpmFreq 0x1F +#define PPSMC_MSG_GetMaxDpmFreq 0x20 +#define PPSMC_MSG_GetDpmFreqByIndex 0x21 + +#define PPSMC_MSG_SetWorkloadMask 0x22 +#define PPSMC_MSG_SetDfSwitchType 0x23 +#define 
PPSMC_MSG_GetVoltageByDpm 0x24 +#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x25 + +#define PPSMC_MSG_SetPptLimit 0x26 +#define PPSMC_MSG_GetPptLimit 0x27 + +//Power Gating +#define PPSMC_MSG_PowerUpVcn0 0x28 +#define PPSMC_MSG_PowerDownVcn0 0x29 +#define PPSMC_MSG_PowerUpVcn1 0x2A +#define PPSMC_MSG_PowerDownVcn1 0x2B + +//Resets and reload +#define PPSMC_MSG_PrepareMp1ForUnload 0x2C +#define PPSMC_MSG_PrepareMp1ForReset 0x2D +#define PPSMC_MSG_PrepareMp1ForShutdown 0x2E +#define PPSMC_MSG_SoftReset 0x2F + +//BTC +#define PPSMC_MSG_RunAfllBtc 0x30 +#define PPSMC_MSG_RunDcBtc 0x31 + +//Debug +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x33 +#define PPSMC_MSG_DramLogSetDramAddrLow 0x34 +#define PPSMC_MSG_DramLogSetDramSize 0x35 +#define PPSMC_MSG_GetDebugData 0x36 + +//WAFL and XGMI +#define PPSMC_MSG_WaflTest 0x37 +#define PPSMC_MSG_SetXgmiMode 0x38 + +//Others +#define PPSMC_MSG_SetMemoryChannelEnable 0x39 + +//OOB +#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x3A + +#define PPSMC_MSG_DFCstateControl 0x3B +#define PPSMC_MSG_GmiPwrDnControl 0x3D +#define PPSMC_Message_Count 0x3E + +#define PPSMC_MSG_ReadSerialNumTop32 0x40 +#define PPSMC_MSG_ReadSerialNumBottom32 0x41 + +/* paramater for MSG_LightSBR + * 1 -- Enable light secondary bus reset, only do nbio respond without further handling, + * leave driver to handle the real reset + * 0 -- Disable LightSBR, default behavior, SMU will pass the reset to PSP + */ +#define PPSMC_MSG_LightSBR 0x42 + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_Msg; +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h new file mode 100644 index 000000000000..43d43d6addc0 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h @@ -0,0 +1,931 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef SMU11_DRIVER_IF_ARCTURUS_H +#define SMU11_DRIVER_IF_ARCTURUS_H + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +//#define SMU11_DRIVER_IF_VERSION 0x09 + +#define PPTABLE_ARCTURUS_SMU_VERSION 4 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_XGMI_LEVELS 2 +#define NUM_XGMI_PSTATE_LEVELS 4 + +#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) +#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) +#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) +#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) +#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) +#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) +#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1) +#define MAX_XGMI_LEVEL (NUM_XGMI_LEVELS - 1) +#define MAX_XGMI_PSTATE_LEVEL (NUM_XGMI_PSTATE_LEVELS - 1) + +// Feature Control Defines +// DPM +#define FEATURE_DPM_PREFETCHER_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_UCLK_BIT 2 +#define FEATURE_DPM_SOCCLK_BIT 3 +#define FEATURE_DPM_FCLK_BIT 4 +#define FEATURE_DPM_MP0CLK_BIT 5 +#define FEATURE_DPM_XGMI_BIT 6 +// Idle +#define FEATURE_DS_GFXCLK_BIT 7 +#define FEATURE_DS_SOCCLK_BIT 8 +#define FEATURE_DS_LCLK_BIT 9 +#define FEATURE_DS_FCLK_BIT 10 +#define FEATURE_DS_UCLK_BIT 11 +#define FEATURE_GFX_ULV_BIT 12 +#define FEATURE_DPM_VCN_BIT 13 +#define FEATURE_RSMU_SMN_CG_BIT 14 +#define FEATURE_WAFL_CG_BIT 15 +// Throttler/Response +#define FEATURE_PPT_BIT 16 +#define FEATURE_TDC_BIT 17 +#define FEATURE_APCC_PLUS_BIT 18 +#define FEATURE_VR0HOT_BIT 19 +#define FEATURE_VR1HOT_BIT 20 +#define FEATURE_FW_CTF_BIT 21 +#define FEATURE_FAN_CONTROL_BIT 22 +#define FEATURE_THERMAL_BIT 23 +// Other +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 24 +#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 25 +#define FEATURE_PER_PART_VMIN_BIT 26 + +#define FEATURE_SPARE_27_BIT 27 +#define FEATURE_SPARE_28_BIT 28 +#define FEATURE_SPARE_29_BIT 29 +#define FEATURE_SPARE_30_BIT 30 +#define FEATURE_SPARE_31_BIT 31 +#define FEATURE_SPARE_32_BIT 32 +#define FEATURE_SPARE_33_BIT 33 +#define FEATURE_SPARE_34_BIT 34 +#define FEATURE_SPARE_35_BIT 35 +#define FEATURE_SPARE_36_BIT 36 +#define FEATURE_SPARE_37_BIT 37 +#define FEATURE_SPARE_38_BIT 38 +#define FEATURE_SPARE_39_BIT 39 +#define FEATURE_SPARE_40_BIT 40 +#define FEATURE_SPARE_41_BIT 41 +#define FEATURE_SPARE_42_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 + +#define NUM_FEATURES 64 + + +#define FEATURE_DPM_PREFETCHER_MASK (1 << FEATURE_DPM_PREFETCHER_BIT ) +#define FEATURE_DPM_GFXCLK_MASK (1 << FEATURE_DPM_GFXCLK_BIT ) +#define FEATURE_DPM_UCLK_MASK (1 << FEATURE_DPM_UCLK_BIT ) +#define FEATURE_DPM_SOCCLK_MASK (1 << 
FEATURE_DPM_SOCCLK_BIT ) +#define FEATURE_DPM_FCLK_MASK (1 << FEATURE_DPM_FCLK_BIT ) +#define FEATURE_DPM_MP0CLK_MASK (1 << FEATURE_DPM_MP0CLK_BIT ) +#define FEATURE_DPM_XGMI_MASK (1 << FEATURE_DPM_XGMI_BIT ) + +#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT ) +#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT ) +#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT ) +#define FEATURE_DS_FCLK_MASK (1 << FEATURE_DS_FCLK_BIT ) +#define FEATURE_DS_UCLK_MASK (1 << FEATURE_DS_UCLK_BIT ) +#define FEATURE_GFX_ULV_MASK (1 << FEATURE_GFX_ULV_BIT ) +#define FEATURE_DPM_VCN_MASK (1 << FEATURE_DPM_VCN_BIT ) +#define FEATURE_RSMU_SMN_CG_MASK (1 << FEATURE_RSMU_SMN_CG_BIT ) +#define FEATURE_WAFL_CG_MASK (1 << FEATURE_WAFL_CG_BIT ) + +#define FEATURE_PPT_MASK (1 << FEATURE_PPT_BIT ) +#define FEATURE_TDC_MASK (1 << FEATURE_TDC_BIT ) +#define FEATURE_APCC_PLUS_MASK (1 << FEATURE_APCC_PLUS_BIT ) +#define FEATURE_VR0HOT_MASK (1 << FEATURE_VR0HOT_BIT ) +#define FEATURE_VR1HOT_MASK (1 << FEATURE_VR1HOT_BIT ) +#define FEATURE_FW_CTF_MASK (1 << FEATURE_FW_CTF_BIT ) +#define FEATURE_FAN_CONTROL_MASK (1 << FEATURE_FAN_CONTROL_BIT ) +#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT ) + +#define FEATURE_OUT_OF_BAND_MONITOR_MASK (1 << FEATURE_OUT_OF_BAND_MONITOR_BIT ) +#define FEATURE_TEMP_DEPENDENT_VMIN_MASK (1 << FEATURE_TEMP_DEPENDENT_VMIN_BIT ) +#define FEATURE_PER_PART_VMIN_MASK (1 << FEATURE_PER_PART_VMIN_BIT ) + + +//FIXME need updating +// Debug Overrides Bitmask +#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000001 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000002 + +// I2C Config Bit Defines +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +// VR Mapping Bit Defines +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + +// PSI Bit Defines +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + +// Throttler Control/Status Bits +#define THROTTLER_PADDING_BIT 0 +#define THROTTLER_TEMP_EDGE_BIT 1 +#define THROTTLER_TEMP_HOTSPOT_BIT 2 +#define THROTTLER_TEMP_MEM_BIT 3 +#define THROTTLER_TEMP_VR_GFX_BIT 4 +#define THROTTLER_TEMP_VR_MEM_BIT 5 +#define THROTTLER_TEMP_VR_SOC_BIT 6 +#define THROTTLER_TDC_GFX_BIT 7 +#define THROTTLER_TDC_SOC_BIT 8 +#define THROTTLER_PPT0_BIT 9 +#define THROTTLER_PPT1_BIT 10 +#define THROTTLER_PPT2_BIT 11 +#define THROTTLER_PPT3_BIT 12 +#define THROTTLER_PPM_BIT 13 +#define THROTTLER_FIT_BIT 14 +#define THROTTLER_APCC_BIT 15 +#define THROTTLER_VRHOT0_BIT 16 +#define THROTTLER_VRHOT1_BIT 17 + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF +#define TABLE_TRANSFER_PENDING 0xAB + +// Workload bits +#define WORKLOAD_PPLIB_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 1 +#define WORKLOAD_PPLIB_VIDEO_BIT 2 +#define WORKLOAD_PPLIB_COMPUTE_BIT 3 +#define WORKLOAD_PPLIB_CUSTOM_BIT 4 +#define WORKLOAD_PPLIB_COUNT 5 + +//XGMI performance states +#define XGMI_STATE_D0 1 +#define XGMI_STATE_D3 0 + +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 8 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 
+ I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_MEM, + I2C_CONTROLLER_NAME_SPARE, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_MEM, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_0, + I2C_CONTROLLER_PROTOCOL_VR_1, + I2C_CONTROLLER_PROTOCOL_TMP_0, + I2C_CONTROLLER_PROTOCOL_TMP_1, + I2C_CONTROLLER_PROTOCOL_SPARE_0, + I2C_CONTROLLER_PROTOCOL_SPARE_1, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t Padding[2]; + uint32_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ControllerName; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL = 0, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K = 0, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ = 0, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) + +typedef struct { + uint8_t RegisterAddr; ////only valid for write, ignored for read + uint8_t Cmd; //Read(0) or Write(1) + uint8_t Data; //Return data for read. 
Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Slow(0) or Fast(1) + uint16_t SlaveAddress; + uint8_t NumCmds; //Number of commands + uint8_t Padding[3]; + + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; + + uint32_t MmHubPadding[8]; // SMU internal use + +} SwI2cRequest_t; // SW I2C Request Table + +//D3HOT sequences +typedef enum { + BACO_SEQUENCE, + MSR_SEQUENCE, + BAMACO_SEQUENCE, + ULPS_SEQUENCE, + D3HOT_SEQUENCE_COUNT, +}D3HOTSequence_e; + +//THis is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_DYNAMIC_MODE = 0, + PG_STATIC_MODE, +} PowerGatingMode_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_POWER_DOWN = 0, + PG_POWER_UP, +} PowerGatingSettings_e; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} DroopInt_t; + +typedef enum { + GFXCLK_SOURCE_PLL = 0, + GFXCLK_SOURCE_AFLL, + GFXCLK_SOURCE_COUNT, +} GfxclkSrc_e; + +typedef enum { + PPCLK_GFXCLK, + PPCLK_VCLK, + PPCLK_DCLK, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + +typedef enum { + TEMP_EDGE, + TEMP_HOTSPOT, + TEMP_MEM, + TEMP_VR_GFX, + TEMP_VR_SOC, + TEMP_VR_MEM, + TEMP_COUNT +} TEMP_TYPE_e; + +typedef enum { + PPT_THROTTLER_PPT0, + PPT_THROTTLER_PPT1, + PPT_THROTTLER_PPT2, + PPT_THROTTLER_PPT3, + PPT_THROTTLER_COUNT +} PPT_THROTTLER_e; + +typedef enum { + VOLTAGE_MODE_AVFS = 0, + VOLTAGE_MODE_AVFS_SS, + VOLTAGE_MODE_SS, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW = 0, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +typedef enum { + MEMORY_TYPE_GDDR6 = 0, + MEMORY_TYPE_HBM, +} MemoryType_e; + +typedef enum { + PWR_CONFIG_TDP = 0, + PWR_CONFIG_TGP, + PWR_CONFIG_TCP_ESTIMATED, + PWR_CONFIG_TCP_MEASURED, +} PwrConfig_e; + +typedef enum { + XGMI_LINK_RATE_2 = 2, // 2Gbps + XGMI_LINK_RATE_4 = 4, // 4Gbps + XGMI_LINK_RATE_8 = 8, // 8Gbps + XGMI_LINK_RATE_12 = 12, // 12Gbps + XGMI_LINK_RATE_16 = 16, // 16Gbps + XGMI_LINK_RATE_17 = 17, // 17Gbps + XGMI_LINK_RATE_18 = 18, // 18Gbps + XGMI_LINK_RATE_19 = 19, // 19Gbps + XGMI_LINK_RATE_20 = 20, // 20Gbps + XGMI_LINK_RATE_21 = 21, // 21Gbps + XGMI_LINK_RATE_22 = 22, // 22Gbps + XGMI_LINK_RATE_23 = 23, // 23Gbps + XGMI_LINK_RATE_24 = 24, // 24Gbps + XGMI_LINK_RATE_25 = 25, // 25Gbps + XGMI_LINK_RATE_COUNT +} XGMI_LINK_RATE_e; + +typedef enum { + XGMI_LINK_WIDTH_1 = 1, // x1 + XGMI_LINK_WIDTH_2 = 2, // x2 + XGMI_LINK_WIDTH_4 = 4, // x4 + XGMI_LINK_WIDTH_8 = 8, // x8 + XGMI_LINK_WIDTH_9 = 9, // x9 + XGMI_LINK_WIDTH_16 = 16, // x16 + XGMI_LINK_WIDTH_COUNT +} XGMI_LINK_WIDTH_e; + +typedef struct { + uint8_t VoltageMode; // 0 - AVFS 
only, 1- min(AVFS,SS), 2-SS only + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint8_t padding; + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) + uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin + uint16_t Padding16; +} DpmDescriptor_t; + +typedef struct { + uint32_t Version; + + // SECTION: Feature Enablement + uint32_t FeaturesToRun[2]; + + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; + uint16_t TdcLimitSoc; // Amps + uint16_t TdcLimitSocTau; // Time constant of LPF in ms + uint16_t TdcLimitGfx; // Amps + uint16_t TdcLimitGfxTau; // Time constant of LPF in ms + + uint16_t TedgeLimit; // Celcius + uint16_t ThotspotLimit; // Celcius + uint16_t TmemLimit; // Celcius + uint16_t Tvr_gfxLimit; // Celcius + uint16_t Tvr_memLimit; // Celcius + uint16_t Tvr_socLimit; // Celcius + uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) + + uint16_t PpmPowerLimit; // Switch this this power limit when temperature is above PpmTempThreshold + uint16_t PpmTemperatureThreshold; + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See Throtter masks defines + + // SECTION: ULV Settings + uint16_t UlvVoltageOffsetGfx; // In mV(Q2) + uint16_t UlvPadding; // Padding + + uint8_t UlvGfxclkBypass; // 1 to turn off/bypass Gfxclk during ULV, 0 to leave Gfxclk on during ULV + uint8_t Padding234[3]; + + // SECTION: Voltage Control Parameters + uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX + uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC + uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX + uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC + + uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits + uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits + + //SECTION: DPM Config 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + + uint32_t Paddingclks[16]; + + // SECTION: DPM Config 2 + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) + + // GFXCLK DPM + uint16_t GfxclkFidle; // In MHz + uint16_t GfxclkSlewRate; // for PLL babystepping??? + uint8_t Padding567[4]; + uint16_t GfxclkDsMaxFreq; // In MHz + uint8_t GfxclkSource; // 0 = PLL, 1 = AFLL + uint8_t Padding456; + + // GFXCLK Thermal DPM (formerly 'Boost' Settings) + uint16_t EnableTdpm; + uint16_t TdpmHighHystTemperature; + uint16_t TdpmLowHystTemperature; + uint16_t GfxclkFreqHighTempLimit; // High limit on GFXCLK when temperature is high, for reliability. 
+ + // SECTION: Fan Control + uint16_t FanStopTemp; //Celcius + uint16_t FanStartTemp; //Celcius + + uint16_t FanGainEdge; + uint16_t FanGainHotspot; + uint16_t FanGainVrGfx; + uint16_t FanGainVrSoc; + uint16_t FanGainVrMem; + uint16_t FanGainHbm; + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + uint8_t FanTempInputSelect; + uint8_t padding8_Fan; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + + // SECTION: AVFS + // Overrides + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_Avfs[2]; + + QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve + DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb + DroopInt_t dBtcGbGfxAfll; // GHz->V BtcGb + DroopInt_t dBtcGbSoc; // GHz->V BtcGb + LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V + + QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V + + uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_GfxBtc[2]; + + uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 + uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2 + + // SECTION: XGMI + uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3. + uint8_t XgmiDpmSpare[2]; + + // Temperature Dependent Vmin + uint16_t VDDGFX_TVmin; //Celcius + uint16_t VDDSOC_TVmin; //Celcius + uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 + uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 + uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 + uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 + + uint16_t VDDGFX_TVminHystersis; // Celcius + uint16_t VDDSOC_TVminHystersis; // Celcius + + + // SECTION: Advanced Options + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode + uint16_t PaddingUlv; // Padding + + // Total Power configuration, use defines from PwrConfig_e + uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured + uint8_t TotalPowerSpare1; + uint16_t TotalPowerSpare2; + + // APCC Settings + uint16_t PccThresholdLow; + uint16_t PccThresholdHigh; + uint32_t PaddingAPCC[6]; //FIXME pending SPEC + + // OOB Settings + uint16_t BasePerformanceCardPower; + uint16_t MaxPerformanceCardPower; + uint16_t BasePerformanceFrequencyCap; //In Mhz + uint16_t MaxPerformanceFrequencyCap; //In Mhz + + // Per-Part Vmin + uint16_t VDDGFX_VminLow; // mv Q2 + uint16_t VDDGFX_TVminLow; //Celcius + uint16_t VDDGFX_VminLow_HiTemp; // mv Q2 + uint16_t VDDGFX_VminLow_LoTemp; // mv Q2 + + // SECTION: Reserved + uint32_t Reserved[7]; + + // SECTION: BOARD PARAMETERS + + // SVI2 Board Parameters + uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. + uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. 
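+
+  // Example (illustration only, not part of the PMFW interface): the
+  // QuadraticInt_t/LinearInt_t/DroopInt_t coefficients used by the AVFS
+  // section above (qAvfsGb, dBtcGb*, qAgingGb, ...) hold IEEE-754 single
+  // precision bit patterns in uint32_t fields.  A possible GHz->V curve
+  // evaluation, with a hypothetical helper:
+  //
+  //   static inline float q32_to_float(uint32_t raw)
+  //   {
+  //           float f;
+  //
+  //           memcpy(&f, &raw, sizeof(f));   /* reinterpret the stored bits */
+  //           return f;
+  //   }
+  //
+  //   /* volts = a * f_ghz^2 + b * f_ghz + c */
+  //   volts = q32_to_float(q->a) * f_ghz * f_ghz +
+  //           q32_to_float(q->b) * f_ghz +
+  //           q32_to_float(q->c);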
+ + uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMemVrMapping; // Use VR_MAPPING* bitfields + uint8_t BoardVrMapping; // Use VR_MAPPING* bitfields + + uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN) + uint8_t Padding8_V[2]; + + // Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + int8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; // in Amps + int8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t MemMaxCurrent; // in Amps + int8_t MemOffset; // in Amps + uint8_t Padding_TelemetryMem; + + uint16_t BoardMaxCurrent; // in Amps + int8_t BoardOffset; // in Amps + uint8_t Padding_TelemetryBoardInput; + + // GPIO Settings + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event + + // GFXCLK PLL Spread Spectrum + uint8_t PllGfxclkSpreadEnabled; // on or off + uint8_t PllGfxclkSpreadPercent; // Q4.4 + uint16_t PllGfxclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint8_t UclkSpreadEnabled; // on or off + uint8_t UclkSpreadPercent; // Q4.4 + uint16_t UclkSpreadFreq; // kHz + + // FCLK Spread Spectrum + uint8_t FclkSpreadEnabled; // on or off + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // GFXCLK Fll Spread Spectrum + uint8_t FllGfxclkSpreadEnabled; // on or off + uint8_t FllGfxclkSpreadPercent; // Q4.4 + uint16_t FllGfxclkSpreadFreq; // kHz + + // I2C Controller Structure + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + // Memory section + uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask. + + uint8_t DramBitWidth; // For DRAM use only. 
See Dram Bit width type defines + uint8_t PaddingMem[3]; + + // Total board power + uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power + uint16_t BoardPadding; + + // SECTION: XGMI Training + uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS]; + uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS]; + + uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS]; + uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS]; + + // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence + uint8_t GpioI2cScl; // Serial Clock + uint8_t GpioI2cSda; // Serial Data + uint16_t GpioPadding; + + // Platform input telemetry voltage coefficient + uint32_t BoardVoltageCoeffA; // decode by /1000 + uint32_t BoardVoltageCoeffB; // decode by /1000 + + uint32_t BoardReserved[7]; + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; // SMU internal use + +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t SocclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + + uint16_t SocketPowerLpfTau; + + uint16_t VcnClkAverageLpfTau; + uint16_t padding16; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfig_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureHBM ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureVrMem ; + uint32_t ThrottlerStatus ; + + uint16_t CurrFanSpeed ; + uint16_t AverageVclkFrequency ; + uint16_t AverageDclkFrequency ; + uint16_t VcnActivityPercentage ; + uint32_t EnergyAccumulator ; + + uint32_t Padding[2]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_t; + + +typedef struct { + uint16_t avgPsmCount[75]; + uint16_t minPsmCount[75]; + float avgPsmVoltage[75]; + float minPsmVoltage[75]; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTable_t; + +typedef struct { + uint8_t AvfsVersion; + uint8_t Padding; + uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; + + int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t 
AvfsTempCold[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; + + uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits + + + int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units + + uint32_t EnabledAvfsModules[3]; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsFuseOverride_t; + +typedef struct { + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t Gfx_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. + uint8_t Gfx_UseRlcBusy; + uint8_t PaddingGfx[3]; + uint16_t Gfx_MinActiveFreq; // MHz + uint16_t Gfx_BoosterFreq; // MHz + uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Gfx_PD_Data_limit_a; // Q16 + uint32_t Gfx_PD_Data_limit_b; // Q16 + uint32_t Gfx_PD_Data_limit_c; // Q16 + uint32_t Gfx_PD_Data_error_coeff; // Q16 + uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 + + uint8_t Mem_ActiveHystLimit; + uint8_t Mem_IdleHystLimit; + uint8_t Mem_FPS; + uint8_t Mem_MinActiveFreqType; + uint8_t Mem_BoosterFreqType; + uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. + uint8_t Mem_UseRlcBusy; + uint8_t PaddingMem[3]; + uint16_t Mem_MinActiveFreq; // MHz + uint16_t Mem_BoosterFreq; // MHz + uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Mem_PD_Data_limit_a; // Q16 + uint32_t Mem_PD_Data_limit_b; // Q16 + uint32_t Mem_PD_Data_limit_c; // Q16 + uint32_t Mem_PD_Data_error_coeff; // Q16 + uint32_t Mem_PD_Data_error_rate_coeff; // Q16 + + uint32_t Mem_UpThreshold_Limit; // Q16 + uint8_t Mem_UpHystLimit; + uint8_t Mem_DownHystLimit; + uint16_t Mem_Fps; + + uint32_t BusyThreshold; // Q16 + uint32_t BusyHyst; + uint32_t IdleHyst; + + uint32_t MmHubPadding[8]; // SMU internal use +} DpmActivityMonitorCoeffInt_t; + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram +#define TABLE_PPTABLE 0 +#define TABLE_AVFS 1 +#define TABLE_AVFS_PSM_DEBUG 2 +#define TABLE_AVFS_FUSE_OVERRIDE 3 +#define TABLE_PMSTATUSLOG 4 +#define TABLE_SMU_METRICS 5 +#define TABLE_DRIVER_SMU_CONFIG 6 +#define TABLE_OVERDRIVE 7 +#define TABLE_WAFL_XGMI_TOPOLOGY 8 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_ACTIVITY_MONITOR_COEFF 10 +#define TABLE_COUNT 11 + +// These defines are used with the SMC_MSG_SetUclkFastSwitch message. +typedef enum { + DF_SWITCH_TYPE_FAST = 0, + DF_SWITCH_TYPE_SLOW, + DF_SWITCH_TYPE_COUNT, +} DF_SWITCH_TYPE_e; + +typedef enum { + DRAM_BIT_WIDTH_DISABLED = 0, + DRAM_BIT_WIDTH_X_8, + DRAM_BIT_WIDTH_X_16, + DRAM_BIT_WIDTH_X_32, + DRAM_BIT_WIDTH_X_64, // NOT USED. + DRAM_BIT_WIDTH_X_128, + DRAM_BIT_WIDTH_COUNT, +} DRAM_BIT_WIDTH_TYPE_e; + +#define REMOVE_FMAX_MARGIN_BIT 0x0 +#define REMOVE_DCTOL_MARGIN_BIT 0x1 +#define REMOVE_PLATFORM_MARGIN_BIT 0x2 + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h new file mode 100644 index 000000000000..4884a4e1f261 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h @@ -0,0 +1,79 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMU11_DRIVER_IF_CYAN_SKILLFISH_H__ +#define __SMU11_DRIVER_IF_CYAN_SKILLFISH_H__ + +// *** IMPORTANT *** +// Always increment the interface version if +// any structure is changed in this file +#define MP1_DRIVER_IF_VERSION 0x8 + +#define TABLE_BIOS_IF 0 // Called by BIOS +#define TABLE_WATERMARKS 1 // Called by Driver; defined here, but not used, for backward compatible +#define TABLE_PMSTATUSLOG 3 // Called by Tools for Agm logging +#define TABLE_DPMCLOCKS 4 // Called by Driver; defined here, but not used, for backward compatible +#define TABLE_MOMENTARY_PM 5 // Called by Tools; defined here, but not used, for backward compatible +#define TABLE_SMU_METRICS 6 // Called by Driver +#define TABLE_COUNT 7 + +typedef struct SmuMetricsTable_t { + //CPU status + uint16_t CoreFrequency[6]; //[MHz] + uint32_t CorePower[6]; //[mW] + uint16_t CoreTemperature[6]; //[centi-Celsius] + uint16_t L3Frequency[2]; //[MHz] + uint16_t L3Temperature[2]; //[centi-Celsius] + uint16_t C0Residency[6]; //Percentage + + // GFX status + uint16_t GfxclkFrequency; //[MHz] + uint16_t GfxTemperature; //[centi-Celsius] + + // SOC IP info + uint16_t SocclkFrequency; //[MHz] + uint16_t VclkFrequency; //[MHz] + uint16_t DclkFrequency; //[MHz] + uint16_t MemclkFrequency; //[MHz] + + // power, VF info for CPU/GFX telemetry rails, and then socket power total + uint32_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_GFX + uint32_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_GFX + uint32_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_GFX + uint32_t CurrentSocketPower; //[mW] + + uint16_t SocTemperature; //[centi-Celsius] + uint16_t EdgeTemperature; + uint16_t ThrottlerStatus; + uint16_t Spare; + +} SmuMetricsTable_t; + +typedef struct SmuMetrics_t { + SmuMetricsTable_t Current; + SmuMetricsTable_t Average; + uint32_t SampleStartTime; + uint32_t SampleStopTime; + uint32_t Accnt; +} SmuMetrics_t; + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h new file mode 100644 index 000000000000..04752ade1016 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h @@ -0,0 +1,1220 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMU11_DRIVER_IF_NAVI10_H__ +#define __SMU11_DRIVER_IF_NAVI10_H__ + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +// Be aware of that the version should be updated in +// smu_v11_0.h, maybe rename is also needed. +// #define SMU11_DRIVER_IF_VERSION 0x33 + +#define PPTABLE_NV10_SMU_VERSION 8 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_SMNCLK_DPM_LEVELS 2 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCEFCLK_DPM_LEVELS 8 +#define NUM_PHYCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_PIXCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_MP1CLK_DPM_LEVELS 2 +#define NUM_LINK_LEVELS 2 + + +#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) +#define MAX_SMNCLK_DPM_LEVEL (NUM_SMNCLK_DPM_LEVELS - 1) +#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) +#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) +#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) +#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) +#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) +#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) +#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) +#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) +#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) +#define MAX_MP1CLK_DPM_LEVEL (NUM_MP1CLK_DPM_LEVELS - 1) +#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) + +//Gemini Modes +#define PPSMC_GeminiModeNone 0 //Single GPU board +#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board +#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board + +// Feature Control Defines +// DPM +#define FEATURE_DPM_PREFETCHER_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_GFX_PACE_BIT 2 +#define FEATURE_DPM_UCLK_BIT 3 +#define FEATURE_DPM_SOCCLK_BIT 4 +#define FEATURE_DPM_MP0CLK_BIT 5 +#define FEATURE_DPM_LINK_BIT 6 +#define FEATURE_DPM_DCEFCLK_BIT 7 +#define FEATURE_MEM_VDDCI_SCALING_BIT 8 +#define FEATURE_MEM_MVDD_SCALING_BIT 9 + +//Idle +#define FEATURE_DS_GFXCLK_BIT 10 +#define FEATURE_DS_SOCCLK_BIT 11 +#define FEATURE_DS_LCLK_BIT 12 +#define FEATURE_DS_DCEFCLK_BIT 13 +#define FEATURE_DS_UCLK_BIT 14 +#define FEATURE_GFX_ULV_BIT 15 +#define FEATURE_FW_DSTATE_BIT 16 +#define FEATURE_GFXOFF_BIT 17 +#define FEATURE_BACO_BIT 18 +#define 
FEATURE_VCN_PG_BIT 19 +#define FEATURE_JPEG_PG_BIT 20 +#define FEATURE_USB_PG_BIT 21 +#define FEATURE_RSMU_SMN_CG_BIT 22 +//Throttler/Response +#define FEATURE_PPT_BIT 23 +#define FEATURE_TDC_BIT 24 +#define FEATURE_GFX_EDC_BIT 25 +#define FEATURE_APCC_PLUS_BIT 26 +#define FEATURE_GTHR_BIT 27 +#define FEATURE_ACDC_BIT 28 +#define FEATURE_VR0HOT_BIT 29 +#define FEATURE_VR1HOT_BIT 30 +#define FEATURE_FW_CTF_BIT 31 +#define FEATURE_FAN_CONTROL_BIT 32 +#define FEATURE_THERMAL_BIT 33 +#define FEATURE_GFX_DCS_BIT 34 +//VF +#define FEATURE_RM_BIT 35 +#define FEATURE_LED_DISPLAY_BIT 36 +//Other +#define FEATURE_GFX_SS_BIT 37 +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 38 +#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 39 + +#define FEATURE_MMHUB_PG_BIT 40 +#define FEATURE_ATHUB_PG_BIT 41 +#define FEATURE_APCC_DFLL_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 +#define NUM_FEATURES 64 + +// Debug Overrides Bitmask +#define DPM_OVERRIDE_DISABLE_SOCCLK_PID 0x00000001 +#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_SOCCLK 0x00000004 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_SOCCLK 0x00000008 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_SOCCLK 0x00000010 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00000020 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00000040 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_DCE_SOCCLK 0x00000080 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_MP0_SOCCLK 0x00000100 +#define DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN 0x00000200 +#define DPM_OVERRIDE_DISABLE_MEMORY_TEMPERATURE_READ 0x00000400 + +// VR Mapping Bit Defines +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + +// PSI Bit Defines +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + +// Throttler Control/Status Bits +#define THROTTLER_PADDING_BIT 0 +#define THROTTLER_TEMP_EDGE_BIT 1 +#define THROTTLER_TEMP_HOTSPOT_BIT 2 +#define THROTTLER_TEMP_MEM_BIT 3 +#define THROTTLER_TEMP_VR_GFX_BIT 4 +#define THROTTLER_TEMP_VR_MEM0_BIT 5 +#define THROTTLER_TEMP_VR_MEM1_BIT 6 +#define THROTTLER_TEMP_VR_SOC_BIT 7 +#define THROTTLER_TEMP_LIQUID0_BIT 8 +#define THROTTLER_TEMP_LIQUID1_BIT 9 +#define THROTTLER_TEMP_PLX_BIT 10 +#define THROTTLER_TEMP_SKIN_BIT 11 +#define THROTTLER_TDC_GFX_BIT 12 +#define THROTTLER_TDC_SOC_BIT 13 +#define THROTTLER_PPT0_BIT 14 +#define THROTTLER_PPT1_BIT 15 +#define THROTTLER_PPT2_BIT 16 +#define THROTTLER_PPT3_BIT 17 +#define THROTTLER_FIT_BIT 18 +#define THROTTLER_PPM_BIT 19 +#define THROTTLER_APCC_BIT 20 + +// FW DState Features Control Bits +#define 
FW_DSTATE_SOC_ULV_BIT 0 +#define FW_DSTATE_G6_HSR_BIT 1 +#define FW_DSTATE_G6_PHY_VDDCI_OFF_BIT 2 +#define FW_DSTATE_MP0_DS_BIT 3 +#define FW_DSTATE_SMN_DS_BIT 4 +#define FW_DSTATE_MP1_DS_BIT 5 +#define FW_DSTATE_MP1_WHISPER_MODE_BIT 6 +#define FW_DSTATE_LIV_MIN_BIT 7 +#define FW_DSTATE_SOC_PLL_PWRDN_BIT 8 + +#define FW_DSTATE_SOC_ULV_MASK (1 << FW_DSTATE_SOC_ULV_BIT ) +#define FW_DSTATE_G6_HSR_MASK (1 << FW_DSTATE_G6_HSR_BIT ) +#define FW_DSTATE_G6_PHY_VDDCI_OFF_MASK (1 << FW_DSTATE_G6_PHY_VDDCI_OFF_BIT ) +#define FW_DSTATE_MP1_DS_MASK (1 << FW_DSTATE_MP1_DS_BIT ) +#define FW_DSTATE_MP0_DS_MASK (1 << FW_DSTATE_MP0_DS_BIT ) +#define FW_DSTATE_SMN_DS_MASK (1 << FW_DSTATE_SMN_DS_BIT ) +#define FW_DSTATE_MP1_WHISPER_MODE_MASK (1 << FW_DSTATE_MP1_WHISPER_MODE_BIT ) +#define FW_DSTATE_LIV_MIN_MASK (1 << FW_DSTATE_LIV_MIN_BIT ) +#define FW_DSTATE_SOC_PLL_PWRDN_MASK (1 << FW_DSTATE_SOC_PLL_PWRDN_BIT ) + +//I2C Interface + +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 8 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VDDCI, + I2C_CONTROLLER_NAME_VR_MVDD, + I2C_CONTROLLER_NAME_LIQUID0, + I2C_CONTROLLER_NAME_LIQUID1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_SPARE, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_VDDCI, + I2C_CONTROLLER_THROTTLER_VR_MVDD, + I2C_CONTROLLER_THROTTLER_LIQUID0, + I2C_CONTROLLER_THROTTLER_LIQUID1, + I2C_CONTROLLER_THROTTLER_PLX, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_0, + I2C_CONTROLLER_PROTOCOL_VR_1, + I2C_CONTROLLER_PROTOCOL_TMP_0, + I2C_CONTROLLER_PROTOCOL_TMP_1, + I2C_CONTROLLER_PROTOCOL_SPARE_0, + I2C_CONTROLLER_PROTOCOL_SPARE_1, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t Padding[2]; + uint32_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ControllerName; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL = 0, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K = 0, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ = 0, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) + +typedef struct { + uint8_t RegisterAddr; ////only valid for write, ignored for read + uint8_t Cmd; //Read(0) or Write(1) + uint8_t Data; //Return data for read. 
Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Slow(0) or Fast(1) + uint16_t SlaveAddress; + uint8_t NumCmds; //Number of commands + uint8_t Padding[3]; + + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; + + uint32_t MmHubPadding[8]; // SMU internal use + +} SwI2cRequest_t; // SW I2C Request Table + +//D3HOT sequences +typedef enum { + BACO_SEQUENCE, + MSR_SEQUENCE, + BAMACO_SEQUENCE, + ULPS_SEQUENCE, + D3HOT_SEQUENCE_COUNT, +}D3HOTSequence_e; + +//THis is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_DYNAMIC_MODE = 0, + PG_STATIC_MODE, +} PowerGatingMode_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_POWER_DOWN = 0, + PG_POWER_UP, +} PowerGatingSettings_e; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} DroopInt_t; + +typedef enum { + GFXCLK_SOURCE_PLL = 0, + GFXCLK_SOURCE_DFLL, + GFXCLK_SOURCE_COUNT, +} GfxclkSrc_e; + +//Only Clks that have DPM descriptors are listed here +typedef enum { + PPCLK_GFXCLK = 0, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_DCLK, + PPCLK_VCLK, + PPCLK_DCEFCLK, + PPCLK_DISPCLK, + PPCLK_PIXCLK, + PPCLK_PHYCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + POWER_SOURCE_AC, + POWER_SOURCE_DC, + POWER_SOURCE_COUNT, +} POWER_SOURCE_e; + +typedef enum { + PPT_THROTTLER_PPT0, + PPT_THROTTLER_PPT1, + PPT_THROTTLER_PPT2, + PPT_THROTTLER_PPT3, + PPT_THROTTLER_COUNT +} PPT_THROTTLER_e; + +typedef enum { + VOLTAGE_MODE_AVFS = 0, + VOLTAGE_MODE_AVFS_SS, + VOLTAGE_MODE_SS, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + +typedef enum { + UCLK_DIV_BY_1 = 0, + UCLK_DIV_BY_2, + UCLK_DIV_BY_4, + UCLK_DIV_BY_8, +} UCLK_DIV_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW = 0, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +typedef enum { + MEMORY_TYPE_GDDR6 = 0, + MEMORY_TYPE_HBM, +} MemoryType_e; + +typedef enum { + PWR_CONFIG_TDP = 0, + PWR_CONFIG_TGP, + PWR_CONFIG_TCP_ESTIMATED, + PWR_CONFIG_TCP_MEASURED, +} PwrConfig_e; + +typedef struct { + uint8_t VoltageMode; // 0 - AVFS only, 1- min(AVFS,SS), 2-SS only + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint8_t Padding; + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) +} DpmDescriptor_t; + +typedef enum { + TEMP_EDGE, + TEMP_HOTSPOT, + TEMP_MEM, + TEMP_VR_GFX, + TEMP_VR_MEM0, + TEMP_VR_MEM1, + TEMP_VR_SOC, + TEMP_LIQUID0, + TEMP_LIQUID1, + TEMP_PLX, + TEMP_COUNT +} TEMP_e; + +//Out of band monitor status defines +//see SPEC //gpu/doc/soc_arch/spec/feature/SMBUS/SMBUS.xlsx +#define 
POWER_MANAGER_CONTROLLER_NOT_RUNNING 0 +#define POWER_MANAGER_CONTROLLER_RUNNING 1 + +#define POWER_MANAGER_CONTROLLER_BIT 0 +#define MAXIMUM_DPM_STATE_GFX_ENGINE_RESTRICTED_BIT 8 +#define GPU_DIE_TEMPERATURE_THROTTLING_BIT 9 +#define HBM_DIE_TEMPERATURE_THROTTLING_BIT 10 +#define TGP_THROTTLING_BIT 11 +#define PCC_THROTTLING_BIT 12 +#define HBM_TEMPERATURE_EXCEEDING_TEMPERATURE_LIMIT_BIT 13 +#define HBM_TEMPERATURE_EXCEEDING_MAX_MEMORY_TEMPERATURE_BIT 14 + +#define POWER_MANAGER_CONTROLLER_MASK (1 << POWER_MANAGER_CONTROLLER_BIT ) +#define MAXIMUM_DPM_STATE_GFX_ENGINE_RESTRICTED_MASK (1 << MAXIMUM_DPM_STATE_GFX_ENGINE_RESTRICTED_BIT ) +#define GPU_DIE_TEMPERATURE_THROTTLING_MASK (1 << GPU_DIE_TEMPERATURE_THROTTLING_BIT ) +#define HBM_DIE_TEMPERATURE_THROTTLING_MASK (1 << HBM_DIE_TEMPERATURE_THROTTLING_BIT ) +#define TGP_THROTTLING_MASK (1 << TGP_THROTTLING_BIT ) +#define PCC_THROTTLING_MASK (1 << PCC_THROTTLING_BIT ) +#define HBM_TEMPERATURE_EXCEEDING_TEMPERATURE_LIMIT_MASK (1 << HBM_TEMPERATURE_EXCEEDING_TEMPERATURE_LIMIT_BIT ) +#define HBM_TEMPERATURE_EXCEEDING_MAX_MEMORY_TEMPERATURE_MASK (1 << HBM_TEMPERATURE_EXCEEDING_MAX_MEMORY_TEMPERATURE_BIT) + +//This structure to be DMA to SMBUS Config register space +typedef struct { + uint8_t MinorInfoVersion; + uint8_t MajorInfoVersion; + uint8_t TableSize; + uint8_t Reserved; + + uint8_t Reserved1; + uint8_t RevID; + uint16_t DeviceID; + + uint16_t DieTemperatureLimit; + uint16_t FanTargetTemperature; + + uint16_t MemoryTemperatureLimit; + uint16_t MemoryTemperatureLimit1; + + uint16_t TGP; + uint16_t CardPower; + + uint32_t DieTemperatureRegisterOffset; + + uint32_t Reserved2; + + uint32_t Reserved3; + + uint32_t Status; + + uint16_t DieTemperature; + uint16_t CurrentMemoryTemperature; + + uint16_t MemoryTemperature; + uint8_t MemoryHotspotPosition; + uint8_t Reserved4; + + uint32_t BoardLevelEnergyAccumulator; +} OutOfBandMonitor_t; + +typedef struct { + uint32_t Version; + + // SECTION: Feature Enablement + uint32_t FeaturesToRun[2]; + + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; + uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; + uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; + + uint16_t TdcLimitSoc; // Amps + uint16_t TdcLimitSocTau; // Time constant of LPF in ms + uint16_t TdcLimitGfx; // Amps + uint16_t TdcLimitGfxTau; // Time constant of LPF in ms + + uint16_t TedgeLimit; // Celcius + uint16_t ThotspotLimit; // Celcius + uint16_t TmemLimit; // Celcius + uint16_t Tvr_gfxLimit; // Celcius + uint16_t Tvr_mem0Limit; // Celcius + uint16_t Tvr_mem1Limit; // Celcius + uint16_t Tvr_socLimit; // Celcius + uint16_t Tliquid0Limit; // Celcius + uint16_t Tliquid1Limit; // Celcius + uint16_t TplxLimit; // Celcius + uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) + + uint16_t PpmPowerLimit; // Switch this this power limit when temperature is above PpmTempThreshold + uint16_t PpmTemperatureThreshold; + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See Throtter masks defines + + // SECTION: FW DSTATE Settings + uint32_t FwDStateMask; // See FW DState masks defines + + // SECTION: ULV Settings + uint16_t UlvVoltageOffsetSoc; // In mV(Q2) + uint16_t UlvVoltageOffsetGfx; // In mV(Q2) + + uint8_t GceaLinkMgrIdleThreshold; //Set by SMU FW during enablment of SOC_ULV. 
Controls delay for GFX SDP port disconnection during idle events + uint8_t paddingRlcUlvParams[3]; + + uint8_t UlvSmnclkDid; //DID for ULV mode. 0 means CLK will not be modified in ULV. + uint8_t UlvMp1clkDid; //DID for ULV mode. 0 means CLK will not be modified in ULV. + uint8_t UlvGfxclkBypass; // 1 to turn off/bypass Gfxclk during ULV, 0 to leave Gfxclk on during ULV + uint8_t Padding234; + + uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode + uint16_t MinVoltageUlvSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC in ULV mode + + + // SECTION: Voltage Control Parameters + uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX + uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC + uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX + uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC + + uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits + uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits + + //SECTION: DPM Config 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; // In MHz + uint32_t Paddingclks[16]; + + uint16_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + uint16_t Padding8_Clks; + + uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS ]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + + // SECTION: DPM Config 2 + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemVddciVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemMvddVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + // GFXCLK DPM + uint16_t GfxclkFgfxoffEntry; // in Mhz + uint16_t GfxclkFinit; // in Mhz + uint16_t GfxclkFidle; // in MHz + uint16_t GfxclkSlewRate; // for PLL babystepping??? + uint16_t GfxclkFopt; // in Mhz + uint8_t Padding567[2]; + uint16_t GfxclkDsMaxFreq; // in MHz + uint8_t GfxclkSource; // 0 = PLL, 1 = DFLL + uint8_t Padding456; + + // UCLK section + uint8_t LowestUclkReservedForUlv; // Set this to 1 if UCLK DPM0 is reserved for ULV-mode only + uint8_t paddingUclk[3]; + + uint8_t MemoryType; // 0-GDDR6, 1-HBM + uint8_t MemoryChannels; + uint8_t PaddingMem[2]; + + // Link DPM Settings + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + // GFXCLK Thermal DPM (formerly 'Boost' Settings) + uint16_t EnableTdpm; + uint16_t TdpmHighHystTemperature; + uint16_t TdpmLowHystTemperature; + uint16_t GfxclkFreqHighTempLimit; // High limit on GFXCLK when temperature is high, for reliability. 
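+
+  // Example (illustration only, not part of the PMFW interface): the 64
+  // FEATURE_*_BIT numbers defined earlier in this file index into the two
+  // 32-bit words of FeaturesToRun[] at the top of this table.  A sketch of
+  // how a driver might request a feature (helper name is hypothetical):
+  //
+  //   static inline void pptable_enable_feature(uint32_t *features_to_run, unsigned int bit)
+  //   {
+  //           features_to_run[bit / 32] |= 1u << (bit % 32);
+  //   }
+  //
+  //   pptable_enable_feature(pptable->FeaturesToRun, FEATURE_DPM_GFXCLK_BIT);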
+ + // SECTION: Fan Control + uint16_t FanStopTemp; //Celcius + uint16_t FanStartTemp; //Celcius + + uint16_t FanGainEdge; + uint16_t FanGainHotspot; + uint16_t FanGainLiquid0; + uint16_t FanGainLiquid1; + uint16_t FanGainVrGfx; + uint16_t FanGainVrSoc; + uint16_t FanGainVrMem0; + uint16_t FanGainVrMem1; + uint16_t FanGainPlx; + uint16_t FanGainMem; + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint8_t FanTempInputSelect; + uint8_t FanPadding; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + //uint8_t padding8_Fan[2]; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. + int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + + // SECTION: AVFS + // Overrides + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_Avfs[2]; + + QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve + DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb + DroopInt_t dBtcGbGfxDfll; // GHz->V BtcGb + DroopInt_t dBtcGbSoc; // GHz->V BtcGb + LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V + + QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V + + uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_GfxBtc[2]; + + uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 + uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 + + // SECTION: Advanced Options + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + // Total Power configuration, use defines from PwrConfig_e + uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured + uint8_t TotalPowerSpare1; + uint16_t TotalPowerSpare2; + + // APCC Settings + uint16_t PccThresholdLow; + uint16_t PccThresholdHigh; + uint32_t MGpuFanBoostLimitRpm; + uint32_t PaddingAPCC[5]; + + // Temperature Dependent Vmin + uint16_t VDDGFX_TVmin; //Celcius + uint16_t VDDSOC_TVmin; //Celcius + uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 + uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 + uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 + uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 + + uint16_t VDDGFX_TVminHystersis; // Celcius + uint16_t VDDSOC_TVminHystersis; // Celcius + + // BTC Setting + uint32_t BtcConfig; + + uint16_t SsFmin[10]; // PPtable value to function similar to VFTFmin for SS Curve; Size is PPCLK_COUNT rounded to nearest multiple of 2 + uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; + + // SECTION: Board Reserved + uint32_t Reserved[8]; + + // SECTION: BOARD PARAMETERS + // I2C Control + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + // SVI2 Board Parameters + uint16_t MaxVoltageStepGfx; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. + uint16_t MaxVoltageStepSoc; // In mV(Q2) Max voltage step that SMU will request. Multiple steps are taken if voltage change exceeds this value. 
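+
+  // Example (illustration only, not part of the PMFW interface): the
+  // Vdd*VrMapping fields below pack a regulator select and a plane select
+  // using the VR_MAPPING_* masks defined earlier in this file.  A possible
+  // decode:
+  //
+  //   vr    = (VddGfxVrMapping & VR_MAPPING_VR_SELECT_MASK)    >> VR_MAPPING_VR_SELECT_SHIFT;
+  //   plane = (VddGfxVrMapping & VR_MAPPING_PLANE_SELECT_MASK) >> VR_MAPPING_PLANE_SELECT_SHIFT;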
+ + uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields + + uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t ExternalSensorPresent; // External RDI connected to TMON (aka TEMP IN) + uint8_t Padding8_V; + + // Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + int8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; // in Amps + int8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t Mem0MaxCurrent; // in Amps + int8_t Mem0Offset; // in Amps + uint8_t Padding_TelemetryMem0; + + uint16_t Mem1MaxCurrent; // in Amps + int8_t Mem1Offset; // in Amps + uint8_t Padding_TelemetryMem1; + + // GPIO Settings + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t padding8_4; + + // GFXCLK PLL Spread Spectrum + uint8_t PllGfxclkSpreadEnabled; // on or off + uint8_t PllGfxclkSpreadPercent; // Q4.4 + uint16_t PllGfxclkSpreadFreq; // kHz + + // GFXCLK DFLL Spread Spectrum + uint8_t DfllGfxclkSpreadEnabled; // on or off + uint8_t DfllGfxclkSpreadPercent; // Q4.4 + uint16_t DfllGfxclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint8_t UclkSpreadEnabled; // on or off + uint8_t UclkSpreadPercent; // Q4.4 + uint16_t UclkSpreadFreq; // kHz + + // SOCCLK Spread Spectrum + uint8_t SoclkSpreadEnabled; // on or off + uint8_t SocclkSpreadPercent; // Q4.4 + uint16_t SocclkSpreadFreq; // kHz + + // Total board power + uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power + uint16_t BoardPadding; + + // Mvdd Svi2 Div Ratio Setting + uint32_t MvddRatio; // This is used for MVDD Vid workaround. 
It has 16 fractional bits (Q16.16) + + uint8_t RenesesLoadLineEnabled; + uint8_t GfxLoadlineResistance; + uint8_t SocLoadlineResistance; + uint8_t Padding8_Loadline; + + uint32_t BoardReserved[8]; + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; // SMU internal use + +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t SocclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + uint16_t SocketPowerLpfTau; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfig_t; + +typedef struct { + + uint16_t GfxclkFmin; // MHz + uint16_t GfxclkFmax; // MHz + uint16_t GfxclkFreq1; // MHz + uint16_t GfxclkVolt1; // mV (Q2) + uint16_t GfxclkFreq2; // MHz + uint16_t GfxclkVolt2; // mV (Q2) + uint16_t GfxclkFreq3; // MHz + uint16_t GfxclkVolt3; // mV (Q2) + uint16_t UclkFmax; // MHz + int16_t OverDrivePct; // % + uint16_t FanMaximumRpm; + uint16_t FanMinimumPwm; + uint16_t FanTargetTemperature; // Degree Celcius + uint16_t FanMode; + uint16_t FanMaxPwm; + uint16_t FanMinPwm; + uint16_t FanMaxTemp; // Degree Celcius + uint16_t FanMinTemp; // Degree Celcius + uint16_t MaxOpTemp; // Degree Celcius + uint16_t FanZeroRpmEnable; + + uint32_t MmHubPadding[6]; // SMU internal use + +} OverDriveTable_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding8_2; + uint16_t CurrFanSpeed; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_legacy_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequencyPostDs; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding8_2; + uint16_t CurrFanSpeed; + + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageUclkFrequencyPreDs; + uint8_t PcieRate; + uint8_t PcieWidth; + uint8_t Padding8_3[2]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; 
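+
+  // Example (illustration only, not part of the PMFW interface): as in the
+  // metrics tables above, CurrClock[] is indexed by PPCLK_e and the
+  // ThrottlerStatus word further below is a bitmask of the THROTTLER_*_BIT
+  // numbers, e.g.:
+  //
+  //   curr_gfxclk       = metrics->CurrClock[PPCLK_GFXCLK];
+  //   hotspot_throttled = metrics->ThrottlerStatus & (1u << THROTTLER_TEMP_HOTSPOT_BIT);
+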
+ uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding8_2; + uint16_t CurrFanSpeed; + + uint32_t EnergyAccumulator; + uint16_t AverageVclkFrequency ; + uint16_t AverageDclkFrequency ; + uint16_t VcnActivityPercentage ; + uint16_t padding16_2; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_NV12_legacy_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequencyPostDs; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t Padding8_2; + uint16_t CurrFanSpeed; + + uint16_t AverageVclkFrequency ; + uint16_t AverageDclkFrequency ; + uint16_t VcnActivityPercentage ; + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageUclkFrequencyPreDs; + uint8_t PcieRate; + uint8_t PcieWidth; + + uint32_t Padding32_1; + uint64_t EnergyAccumulator; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_NV12_t; + +typedef union SmuMetrics { + SmuMetrics_legacy_t nv10_legacy_metrics; + SmuMetrics_t nv10_metrics; + SmuMetrics_NV12_legacy_t nv12_legacy_metrics; + SmuMetrics_NV12_t nv12_metrics; +} SmuMetrics_NV1X_t; + +typedef struct { + uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MinUclk; + uint16_t MaxUclk; + + uint8_t WmSetting; + uint8_t Padding[3]; + + uint32_t MmHubPadding[8]; // SMU internal use +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WM_SOCCLK = 0, + WM_DCEFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; + + uint32_t MmHubPadding[8]; // SMU internal use +} Watermarks_t; + +typedef struct { + uint16_t avgPsmCount[28]; + uint16_t minPsmCount[28]; + float avgPsmVoltage[28]; + float minPsmVoltage[28]; + + uint32_t MmHubPadding[32]; // SMU internal use +} AvfsDebugTable_t_NV14; + +typedef struct { + uint16_t avgPsmCount[36]; + uint16_t minPsmCount[36]; + float avgPsmVoltage[36]; + float minPsmVoltage[36]; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTable_t_NV10; + +typedef struct { + uint8_t AvfsVersion; + uint8_t Padding; + + uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; + uint8_t 
OverrideP2V[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; + + int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; + + uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits + + + int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units + + uint32_t EnabledAvfsModules[2]; //NV10 - 36 AVFS modules + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsFuseOverride_t; + +typedef struct { + + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t Gfx_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. + uint16_t Gfx_MinActiveFreq; // MHz + uint16_t Gfx_BoosterFreq; // MHz + uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Gfx_PD_Data_limit_a; // Q16 + uint32_t Gfx_PD_Data_limit_b; // Q16 + uint32_t Gfx_PD_Data_limit_c; // Q16 + uint32_t Gfx_PD_Data_error_coeff; // Q16 + uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 + + uint8_t Soc_ActiveHystLimit; + uint8_t Soc_IdleHystLimit; + uint8_t Soc_FPS; + uint8_t Soc_MinActiveFreqType; + uint8_t Soc_BoosterFreqType; + uint8_t Soc_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. + uint16_t Soc_MinActiveFreq; // MHz + uint16_t Soc_BoosterFreq; // MHz + uint16_t Soc_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Soc_PD_Data_limit_a; // Q16 + uint32_t Soc_PD_Data_limit_b; // Q16 + uint32_t Soc_PD_Data_limit_c; // Q16 + uint32_t Soc_PD_Data_error_coeff; // Q16 + uint32_t Soc_PD_Data_error_rate_coeff; // Q16 + + uint8_t Mem_ActiveHystLimit; + uint8_t Mem_IdleHystLimit; + uint8_t Mem_FPS; + uint8_t Mem_MinActiveFreqType; + uint8_t Mem_BoosterFreqType; + uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. 
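+
+  // Example (illustration only, not part of the PMFW interface): the
+  // *_PD_Data_* controller coefficients in this table are tagged "Q16",
+  // i.e. 16.16 fixed point, so a decode divides by 2^16:
+  //
+  //   coeff = (double)Gfx_PD_Data_limit_a / 65536.0;   /* e.g. 0x00018000 -> 1.5 */
+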
+ uint16_t Mem_MinActiveFreq; // MHz + uint16_t Mem_BoosterFreq; // MHz + uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Mem_PD_Data_limit_a; // Q16 + uint32_t Mem_PD_Data_limit_b; // Q16 + uint32_t Mem_PD_Data_limit_c; // Q16 + uint32_t Mem_PD_Data_error_coeff; // Q16 + uint32_t Mem_PD_Data_error_rate_coeff; // Q16 + + uint32_t Mem_UpThreshold_Limit; // Q16 + uint8_t Mem_UpHystLimit; + uint8_t Mem_DownHystLimit; + uint16_t Mem_Fps; + + uint32_t MmHubPadding[8]; // SMU internal use + +} DpmActivityMonitorCoeffInt_t; + + +// Workload bits +#define WORKLOAD_PPLIB_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_COUNT 7 + + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF + +// Table types +#define TABLE_PPTABLE 0 +#define TABLE_WATERMARKS 1 +#define TABLE_AVFS 2 +#define TABLE_AVFS_PSM_DEBUG 3 +#define TABLE_AVFS_FUSE_OVERRIDE 4 +#define TABLE_PMSTATUSLOG 5 +#define TABLE_SMU_METRICS 6 +#define TABLE_DRIVER_SMU_CONFIG 7 +#define TABLE_ACTIVITY_MONITOR_COEFF 8 +#define TABLE_OVERDRIVE 9 +#define TABLE_I2C_COMMANDS 10 +#define TABLE_PACE 11 +#define TABLE_COUNT 12 + +//RLC Pace Table total number of levels +#define RLC_PACE_TABLE_NUM_LEVELS 16 + +typedef struct { + float FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; + + uint32_t MmHubPadding[8]; // SMU internal use +} RlcPaceFlopsPerByteOverride_t; + +// These defines are used with the SMC_MSG_SetUclkFastSwitch message. +#define UCLK_SWITCH_SLOW 0 +#define UCLK_SWITCH_FAST 1 +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h new file mode 100644 index 000000000000..63b8701fd466 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h @@ -0,0 +1,1691 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef __SMU11_DRIVER_IF_SIENNA_CICHLID_H__ +#define __SMU11_DRIVER_IF_SIENNA_CICHLID_H__ + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +#define SMU11_DRIVER_IF_VERSION 0x3B + +#define PPTABLE_Sienna_Cichlid_SMU_VERSION 7 + +#define NUM_GFXCLK_DPM_LEVELS 16 +#define NUM_SMNCLK_DPM_LEVELS 2 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_MP0CLK_DPM_LEVELS 2 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCEFCLK_DPM_LEVELS 8 +#define NUM_PHYCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_PIXCLK_DPM_LEVELS 8 +#define NUM_DTBCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_MP1CLK_DPM_LEVELS 2 +#define NUM_LINK_LEVELS 2 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_XGMI_LEVELS 2 +#define NUM_XGMI_PSTATE_LEVELS 4 +#define NUM_OD_FAN_MAX_POINTS 6 + +#define MAX_GFXCLK_DPM_LEVEL (NUM_GFXCLK_DPM_LEVELS - 1) +#define MAX_SMNCLK_DPM_LEVEL (NUM_SMNCLK_DPM_LEVELS - 1) +#define MAX_SOCCLK_DPM_LEVEL (NUM_SOCCLK_DPM_LEVELS - 1) +#define MAX_MP0CLK_DPM_LEVEL (NUM_MP0CLK_DPM_LEVELS - 1) +#define MAX_DCLK_DPM_LEVEL (NUM_DCLK_DPM_LEVELS - 1) +#define MAX_VCLK_DPM_LEVEL (NUM_VCLK_DPM_LEVELS - 1) +#define MAX_DCEFCLK_DPM_LEVEL (NUM_DCEFCLK_DPM_LEVELS - 1) +#define MAX_DISPCLK_DPM_LEVEL (NUM_DISPCLK_DPM_LEVELS - 1) +#define MAX_PIXCLK_DPM_LEVEL (NUM_PIXCLK_DPM_LEVELS - 1) +#define MAX_PHYCLK_DPM_LEVEL (NUM_PHYCLK_DPM_LEVELS - 1) +#define MAX_DTBCLK_DPM_LEVEL (NUM_DTBCLK_DPM_LEVELS - 1) +#define MAX_UCLK_DPM_LEVEL (NUM_UCLK_DPM_LEVELS - 1) +#define MAX_MP1CLK_DPM_LEVEL (NUM_MP1CLK_DPM_LEVELS - 1) +#define MAX_LINK_LEVEL (NUM_LINK_LEVELS - 1) +#define MAX_FCLK_DPM_LEVEL (NUM_FCLK_DPM_LEVELS - 1) + +//Gemini Modes +#define PPSMC_GeminiModeNone 0 //Single GPU board +#define PPSMC_GeminiModeMaster 1 //Master GPU on a Gemini board +#define PPSMC_GeminiModeSlave 2 //Slave GPU on a Gemini board + +// Feature Control Defines +// DPM +#define FEATURE_DPM_PREFETCHER_BIT 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_GFX_GPO_BIT 2 +#define FEATURE_DPM_UCLK_BIT 3 +#define FEATURE_DPM_FCLK_BIT 4 +#define FEATURE_DPM_SOCCLK_BIT 5 +#define FEATURE_DPM_MP0CLK_BIT 6 +#define FEATURE_DPM_LINK_BIT 7 +#define FEATURE_DPM_DCEFCLK_BIT 8 +#define FEATURE_DPM_XGMI_BIT 9 +#define FEATURE_MEM_VDDCI_SCALING_BIT 10 +#define FEATURE_MEM_MVDD_SCALING_BIT 11 + +//Idle +#define FEATURE_DS_GFXCLK_BIT 12 +#define FEATURE_DS_SOCCLK_BIT 13 +#define FEATURE_DS_FCLK_BIT 14 +#define FEATURE_DS_LCLK_BIT 15 +#define FEATURE_DS_DCEFCLK_BIT 16 +#define FEATURE_DS_UCLK_BIT 17 +#define FEATURE_GFX_ULV_BIT 18 +#define FEATURE_FW_DSTATE_BIT 19 +#define FEATURE_GFXOFF_BIT 20 +#define FEATURE_BACO_BIT 21 +#define FEATURE_MM_DPM_PG_BIT 22 +#define FEATURE_SPARE_23_BIT 23 +//Throttler/Response +#define FEATURE_PPT_BIT 24 +#define FEATURE_TDC_BIT 25 +#define FEATURE_APCC_PLUS_BIT 26 +#define FEATURE_GTHR_BIT 27 +#define FEATURE_ACDC_BIT 28 +#define FEATURE_VR0HOT_BIT 29 +#define FEATURE_VR1HOT_BIT 30 +#define FEATURE_FW_CTF_BIT 31 +#define FEATURE_FAN_CONTROL_BIT 32 +#define FEATURE_THERMAL_BIT 33 +#define FEATURE_GFX_DCS_BIT 34 +//VF +#define FEATURE_RM_BIT 35 +#define FEATURE_LED_DISPLAY_BIT 36 +//Other +#define FEATURE_GFX_SS_BIT 37 +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 38 +#define FEATURE_TEMP_DEPENDENT_VMIN_BIT 39 + +#define FEATURE_MMHUB_PG_BIT 40 +#define FEATURE_ATHUB_PG_BIT 41 +#define FEATURE_APCC_DFLL_BIT 42 +#define FEATURE_DF_SUPERV_BIT 43 +#define FEATURE_RSMU_SMN_CG_BIT 44 +#define 
FEATURE_DF_CSTATE_BIT 45 +#define FEATURE_2_STEP_PSTATE_BIT 46 +#define FEATURE_SMNCLK_DPM_BIT 47 +#define FEATURE_PERLINK_GMIDOWN_BIT 48 +#define FEATURE_GFX_EDC_BIT 49 +#define FEATURE_GFX_PER_PART_VMIN_BIT 50 +#define FEATURE_SMART_SHIFT_BIT 51 +#define FEATURE_APT_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 +#define NUM_FEATURES 64 + +//For use with feature control messages +typedef enum { + FEATURE_PWR_ALL, + FEATURE_PWR_S5, + FEATURE_PWR_BACO, + FEATURE_PWR_SOC, + FEATURE_PWR_GFX, + FEATURE_PWR_DOMAIN_COUNT, +} FEATURE_PWR_DOMAIN_e; + + +// Debug Overrides Bitmask +#define DPM_OVERRIDE_DISABLE_FCLK_PID 0x00000001 +#define DPM_OVERRIDE_DISABLE_UCLK_PID 0x00000002 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_FCLK 0x00000004 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_VCLK_FCLK 0x00000008 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_DCLK_FCLK 0x00000010 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_SOCCLK 0x00000020 +#define DPM_OVERRIDE_ENABLE_FREQ_LINK_GFXCLK_UCLK 0x00000040 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_DCE_FCLK 0x00000080 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_MP0_SOCCLK 0x00000100 +#define DPM_OVERRIDE_DISABLE_DFLL_PLL_SHUTDOWN 0x00000200 +#define DPM_OVERRIDE_DISABLE_MEMORY_TEMPERATURE_READ 0x00000400 +#define DPM_OVERRIDE_DISABLE_VOLT_LINK_VCN_DCEFCLK 0x00000800 +#define DPM_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00001000 +#define DPM_OVERRIDE_DISABLE_VCN_PG 0x00002000 +#define DPM_OVERRIDE_DISABLE_FMAX_VMAX 0x00004000 + +// VR Mapping Bit Defines +#define VR_MAPPING_VR_SELECT_MASK 0x01 +#define VR_MAPPING_VR_SELECT_SHIFT 0x00 + +#define VR_MAPPING_PLANE_SELECT_MASK 0x02 +#define VR_MAPPING_PLANE_SELECT_SHIFT 0x01 + +// PSI Bit Defines +#define PSI_SEL_VR0_PLANE0_PSI0 0x01 +#define PSI_SEL_VR0_PLANE0_PSI1 0x02 +#define PSI_SEL_VR0_PLANE1_PSI0 0x04 +#define PSI_SEL_VR0_PLANE1_PSI1 0x08 +#define PSI_SEL_VR1_PLANE0_PSI0 0x10 +#define PSI_SEL_VR1_PLANE0_PSI1 0x20 +#define PSI_SEL_VR1_PLANE1_PSI0 0x40 +#define PSI_SEL_VR1_PLANE1_PSI1 0x80 + +// Throttler Control/Status Bits +#define THROTTLER_PADDING_BIT 0 +#define THROTTLER_TEMP_EDGE_BIT 1 +#define THROTTLER_TEMP_HOTSPOT_BIT 2 +#define THROTTLER_TEMP_MEM_BIT 3 +#define THROTTLER_TEMP_VR_GFX_BIT 4 +#define THROTTLER_TEMP_VR_MEM0_BIT 5 +#define THROTTLER_TEMP_VR_MEM1_BIT 6 +#define THROTTLER_TEMP_VR_SOC_BIT 7 +#define THROTTLER_TEMP_LIQUID0_BIT 8 +#define THROTTLER_TEMP_LIQUID1_BIT 9 +#define THROTTLER_TEMP_PLX_BIT 10 +#define THROTTLER_TDC_GFX_BIT 11 +#define THROTTLER_TDC_SOC_BIT 12 +#define THROTTLER_PPT0_BIT 13 +#define THROTTLER_PPT1_BIT 14 +#define THROTTLER_PPT2_BIT 15 +#define THROTTLER_PPT3_BIT 16 +#define THROTTLER_FIT_BIT 17 +#define THROTTLER_PPM_BIT 18 +#define THROTTLER_APCC_BIT 19 +#define THROTTLER_COUNT 20 + +// FW DState Features Control Bits +// FW DState Features Control Bits +#define FW_DSTATE_SOC_ULV_BIT 0 +#define FW_DSTATE_G6_HSR_BIT 1 +#define FW_DSTATE_G6_PHY_VDDCI_OFF_BIT 2 +#define FW_DSTATE_MP0_DS_BIT 3 +#define FW_DSTATE_SMN_DS_BIT 4 +#define FW_DSTATE_MP1_DS_BIT 5 +#define FW_DSTATE_MP1_WHISPER_MODE_BIT 6 +#define FW_DSTATE_SOC_LIV_MIN_BIT 7 +#define FW_DSTATE_SOC_PLL_PWRDN_BIT 8 +#define FW_DSTATE_MEM_PLL_PWRDN_BIT 9 +#define FW_DSTATE_OPTIMIZE_MALL_REFRESH_BIT 10 +#define 
FW_DSTATE_MEM_PSI_BIT 11 +#define FW_DSTATE_HSR_NON_STROBE_BIT 12 +#define FW_DSTATE_MP0_ENTER_WFI_BIT 13 + +#define FW_DSTATE_SOC_ULV_MASK (1 << FW_DSTATE_SOC_ULV_BIT ) +#define FW_DSTATE_G6_HSR_MASK (1 << FW_DSTATE_G6_HSR_BIT ) +#define FW_DSTATE_G6_PHY_VDDCI_OFF_MASK (1 << FW_DSTATE_G6_PHY_VDDCI_OFF_BIT ) +#define FW_DSTATE_MP1_DS_MASK (1 << FW_DSTATE_MP1_DS_BIT ) +#define FW_DSTATE_MP0_DS_MASK (1 << FW_DSTATE_MP0_DS_BIT ) +#define FW_DSTATE_SMN_DS_MASK (1 << FW_DSTATE_SMN_DS_BIT ) +#define FW_DSTATE_MP1_WHISPER_MODE_MASK (1 << FW_DSTATE_MP1_WHISPER_MODE_BIT ) +#define FW_DSTATE_SOC_LIV_MIN_MASK (1 << FW_DSTATE_SOC_LIV_MIN_BIT ) +#define FW_DSTATE_SOC_PLL_PWRDN_MASK (1 << FW_DSTATE_SOC_PLL_PWRDN_BIT ) +#define FW_DSTATE_MEM_PLL_PWRDN_MASK (1 << FW_DSTATE_MEM_PLL_PWRDN_BIT ) +#define FW_DSTATE_OPTIMIZE_MALL_REFRESH_MASK (1 << FW_DSTATE_OPTIMIZE_MALL_REFRESH_BIT ) +#define FW_DSTATE_MEM_PSI_MASK (1 << FW_DSTATE_MEM_PSI_BIT ) +#define FW_DSTATE_HSR_NON_STROBE_MASK (1 << FW_DSTATE_HSR_NON_STROBE_BIT ) +#define FW_DSTATE_MP0_ENTER_WFI_MASK (1 << FW_DSTATE_MP0_ENTER_WFI_BIT ) + +// GFX GPO Feature Contains PACE and DEM sub features +#define GFX_GPO_PACE_BIT 0 +#define GFX_GPO_DEM_BIT 1 + +#define GFX_GPO_PACE_MASK (1 << GFX_GPO_PACE_BIT) +#define GFX_GPO_DEM_MASK (1 << GFX_GPO_DEM_BIT ) + +#define GPO_UPDATE_REQ_UCLKDPM_MASK 0x1 +#define GPO_UPDATE_REQ_FCLKDPM_MASK 0x2 +#define GPO_UPDATE_REQ_MALLHIT_MASK 0x4 + + +//LED Display Mask & Control Bits +#define LED_DISPLAY_GFX_DPM_BIT 0 +#define LED_DISPLAY_PCIE_BIT 1 +#define LED_DISPLAY_ERROR_BIT 2 + +//RLC Pace Table total number of levels +#define RLC_PACE_TABLE_NUM_LEVELS 16 + +typedef enum { + DRAM_BIT_WIDTH_DISABLED = 0, + DRAM_BIT_WIDTH_X_8, + DRAM_BIT_WIDTH_X_16, + DRAM_BIT_WIDTH_X_32, + DRAM_BIT_WIDTH_X_64, // NOT USED. 
+ DRAM_BIT_WIDTH_X_128, + DRAM_BIT_WIDTH_COUNT, +} DRAM_BIT_WIDTH_TYPE_e; + +//I2C Interface +#define NUM_I2C_CONTROLLERS 16 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 24 + +typedef enum { + I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_NAME_VR_GFX = 0, + I2C_CONTROLLER_NAME_VR_SOC, + I2C_CONTROLLER_NAME_VR_VDDCI, + I2C_CONTROLLER_NAME_VR_MVDD, + I2C_CONTROLLER_NAME_LIQUID0, + I2C_CONTROLLER_NAME_LIQUID1, + I2C_CONTROLLER_NAME_PLX, + I2C_CONTROLLER_NAME_OTHER, + I2C_CONTROLLER_NAME_COUNT, +} I2cControllerName_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE = 0, + I2C_CONTROLLER_THROTTLER_VR_GFX, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_VDDCI, + I2C_CONTROLLER_THROTTLER_VR_MVDD, + I2C_CONTROLLER_THROTTLER_LIQUID0, + I2C_CONTROLLER_THROTTLER_LIQUID1, + I2C_CONTROLLER_THROTTLER_PLX, + I2C_CONTROLLER_THROTTLER_INA3221, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_XPDE132G5, + I2C_CONTROLLER_PROTOCOL_VR_IR35217, + I2C_CONTROLLER_PROTOCOL_TMP_TMP102A, + I2C_CONTROLLER_PROTOCOL_INA3221, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ControllerName; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; + uint8_t PaddingConfig; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL = 0, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K = 0, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ = 0, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +typedef enum { + FAN_MODE_AUTO = 0, + FAN_MODE_MANUAL_LINEAR, +} FanMode_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 +#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) +#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) + +typedef struct { + uint8_t ReadWriteData; //Return data for read. 
Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select + uint8_t SlaveAddress; //Slave address of device + uint8_t NumCmds; //Number of commands + + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; +} SwI2cRequest_t; // SW I2C Request Table + +typedef struct { + SwI2cRequest_t SwI2cRequest; + + uint32_t Spare[8]; + uint32_t MmHubPadding[8]; // SMU internal use +} SwI2cRequestExternal_t; + +//D3HOT sequences +typedef enum { + BACO_SEQUENCE, + MSR_SEQUENCE, + BAMACO_SEQUENCE, + ULPS_SEQUENCE, + D3HOT_SEQUENCE_COUNT, +} D3HOTSequence_e; + +//THis is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_DYNAMIC_MODE = 0, + PG_STATIC_MODE, +} PowerGatingMode_e; + +//This is aligned with RSMU PGFSM Register Mapping +typedef enum { + PG_POWER_DOWN = 0, + PG_POWER_UP, +} PowerGatingSettings_e; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t a; // store in fixed point, [31:20] signed integer, [19:0] fractional bits + uint32_t b; // store in fixed point, [31:20] signed integer, [19:0] fractional bits + uint32_t c; // store in fixed point, [31:20] signed integer, [19:0] fractional bits +} QuadraticFixedPoint_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} DroopInt_t; + +//Piecewise linear droop model, Sienna_Cichlid currently used only for GFX DFLL +#define NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS 5 +typedef enum { + PIECEWISE_LINEAR_FUSED_MODEL = 0, + PIECEWISE_LINEAR_PP_MODEL, + QUADRATIC_PP_MODEL, + PERPART_PIECEWISE_LINEAR_PP_MODEL, +} DfllDroopModelSelect_e; + +typedef struct { + uint32_t Fset[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //in GHz, store in IEEE float format + uint32_t Vdroop[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //in V , store in IEEE float format +}PiecewiseLinearDroopInt_t; + +typedef enum { + GFXCLK_SOURCE_PLL = 0, + GFXCLK_SOURCE_DFLL, + GFXCLK_SOURCE_COUNT, +} GFXCLK_SOURCE_e; + +//Only Clks that have DPM descriptors are listed here +typedef enum { + PPCLK_GFXCLK = 0, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_DCLK_0, + PPCLK_VCLK_0, + PPCLK_DCLK_1, + PPCLK_VCLK_1, + PPCLK_DCEFCLK, + PPCLK_DISPCLK, + PPCLK_PIXCLK, + PPCLK_PHYCLK, + PPCLK_DTBCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + VOLTAGE_MODE_AVFS = 0, + VOLTAGE_MODE_AVFS_SS, + VOLTAGE_MODE_SS, + VOLTAGE_MODE_COUNT, +} VOLTAGE_MODE_e; + + +typedef enum { + AVFS_VOLTAGE_GFX = 0, + AVFS_VOLTAGE_SOC, + AVFS_VOLTAGE_COUNT, +} AVFS_VOLTAGE_TYPE_e; + +typedef enum { + UCLK_DIV_BY_1 = 0, + UCLK_DIV_BY_2, + UCLK_DIV_BY_4, + UCLK_DIV_BY_8, +} UCLK_DIV_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW = 0, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +typedef enum { + PWR_CONFIG_TDP = 0, + PWR_CONFIG_TGP, + PWR_CONFIG_TCP_ESTIMATED, + PWR_CONFIG_TCP_MEASURED, +} PwrConfig_e; + +typedef enum { + 
XGMI_LINK_RATE_2 = 2, // 2Gbps + XGMI_LINK_RATE_4 = 4, // 4Gbps + XGMI_LINK_RATE_8 = 8, // 8Gbps + XGMI_LINK_RATE_12 = 12, // 12Gbps + XGMI_LINK_RATE_16 = 16, // 16Gbps + XGMI_LINK_RATE_17 = 17, // 17Gbps + XGMI_LINK_RATE_18 = 18, // 18Gbps + XGMI_LINK_RATE_19 = 19, // 19Gbps + XGMI_LINK_RATE_20 = 20, // 20Gbps + XGMI_LINK_RATE_21 = 21, // 21Gbps + XGMI_LINK_RATE_22 = 22, // 22Gbps + XGMI_LINK_RATE_23 = 23, // 23Gbps + XGMI_LINK_RATE_24 = 24, // 24Gbps + XGMI_LINK_RATE_25 = 25, // 25Gbps + XGMI_LINK_RATE_COUNT +} XGMI_LINK_RATE_e; + +typedef enum { + XGMI_LINK_WIDTH_1 = 0, // x1 + XGMI_LINK_WIDTH_2, // x2 + XGMI_LINK_WIDTH_4, // x4 + XGMI_LINK_WIDTH_8, // x8 + XGMI_LINK_WIDTH_9, // x9 + XGMI_LINK_WIDTH_16, // x16 + XGMI_LINK_WIDTH_COUNT +} XGMI_LINK_WIDTH_e; + +typedef struct { + uint8_t VoltageMode; // 0 - AVFS only, 1- min(AVFS,SS), 2-SS only + uint8_t SnapToDiscrete; // 0 - Fine grained DPM, 1 - Discrete DPM + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint8_t Padding; + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) + uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin + uint16_t Padding16; +} DpmDescriptor_t; + +typedef enum { + PPT_THROTTLER_PPT0, + PPT_THROTTLER_PPT1, + PPT_THROTTLER_PPT2, + PPT_THROTTLER_PPT3, + PPT_THROTTLER_COUNT +} PPT_THROTTLER_e; + +typedef enum { + TEMP_EDGE, + TEMP_HOTSPOT, + TEMP_MEM, + TEMP_VR_GFX, + TEMP_VR_MEM0, + TEMP_VR_MEM1, + TEMP_VR_SOC, + TEMP_LIQUID0, + TEMP_LIQUID1, + TEMP_PLX, + TEMP_COUNT, +} TEMP_e; + +typedef enum { + TDC_THROTTLER_GFX, + TDC_THROTTLER_SOC, + TDC_THROTTLER_COUNT +} TDC_THROTTLER_e; + +typedef enum { + CUSTOMER_VARIANT_ROW, + CUSTOMER_VARIANT_FALCON, + CUSTOMER_VARIANT_COUNT, +} CUSTOMER_VARIANT_e; + +// Used for 2-step UCLK DPM change workaround +typedef struct { + uint16_t Fmin; + uint16_t Fmax; +} UclkDpmChangeRange_t; + +typedef struct { + // MAJOR SECTION: SKU PARAMETERS + + uint32_t Version; + + // SECTION: Feature Enablement + uint32_t FeaturesToRun[NUM_FEATURES / 32]; + + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // Watts + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // Watts + uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + + uint16_t TdcLimit[TDC_THROTTLER_COUNT]; // Amps + uint16_t TdcLimitTau[TDC_THROTTLER_COUNT]; // Time constant of LPF in ms + + uint16_t TemperatureLimit[TEMP_COUNT]; // Celcius + + uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) + + // SECTION: Power Configuration + uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured. 
Use defines from PwrConfig_e + uint8_t TotalPowerPadding[3]; + + // SECTION: APCC Settings + uint32_t ApccPlusResidencyLimit; + + //SECTION: SMNCLK DPM + uint16_t SmnclkDpmFreq [NUM_SMNCLK_DPM_LEVELS]; // in MHz + uint16_t SmnclkDpmVoltage [NUM_SMNCLK_DPM_LEVELS]; // mV(Q2) + + uint32_t PaddingAPCC; + uint16_t PerPartDroopVsetGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //In mV(Q2) + uint16_t PaddingPerPartDroop; + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See Throtter masks defines + + // SECTION: FW DSTATE Settings + uint32_t FwDStateMask; // See FW DState masks defines + + // SECTION: ULV Settings + uint16_t UlvVoltageOffsetSoc; // In mV(Q2) + uint16_t UlvVoltageOffsetGfx; // In mV(Q2) + + uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode + uint16_t MinVoltageUlvSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC in ULV mode + + uint16_t SocLIVmin; // In mV(Q2) Long Idle Vmin (deep ULV), for VDD_SOC + uint16_t PaddingLIVmin; + + uint8_t GceaLinkMgrIdleThreshold; //Set by SMU FW during enablment of GFXOFF. Controls delay for GFX SDP port disconnection during idle events + uint8_t paddingRlcUlvParams[3]; + + // SECTION: Voltage Control Parameters + uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX + uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC + uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX + uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC + + uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits + uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits + + // SECTION: Temperature Dependent Vmin + uint16_t VDDGFX_TVmin; //Celcius + uint16_t VDDSOC_TVmin; //Celcius + uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 + uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 + uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 + uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 + + uint16_t VDDGFX_TVminHystersis; // Celcius + uint16_t VDDSOC_TVminHystersis; // Celcius + + //SECTION: DPM Config 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + uint32_t Paddingclks; + + DroopInt_t PerPartDroopModelGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //GHz ->Vstore in IEEE float format + + uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS ]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + + // Used for MALL performance boost + uint16_t FclkBoostFreq; // In Mhz + uint16_t FclkParamPadding; + + // SECTION: DPM Config 2 + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemVddciVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemMvddVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + // GFXCLK DPM + uint16_t GfxclkFgfxoffEntry; // in Mhz + uint16_t GfxclkFinit; // in 
Mhz + uint16_t GfxclkFidle; // in MHz + uint8_t GfxclkSource; // 0 = PLL, 1 = DFLL + uint8_t GfxclkPadding; + + // GFX GPO + uint8_t GfxGpoSubFeatureMask; // bit 0 = PACE, bit 1 = DEM + uint8_t GfxGpoEnabledWorkPolicyMask; //Any policy that GPO can be enabled + uint8_t GfxGpoDisabledWorkPolicyMask; //Any policy that GPO can be disabled + uint8_t GfxGpoPadding[1]; + uint32_t GfxGpoVotingAllow; //For indicating which feature changes should result in a GPO table recalculation + + uint32_t GfxGpoPadding32[4]; + + uint16_t GfxDcsFopt; // Optimal GFXCLK for DCS in Mhz + uint16_t GfxDcsFclkFopt; // Optimal FCLK for DCS in Mhz + uint16_t GfxDcsUclkFopt; // Optimal UCLK for DCS in Mhz + + uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase + + uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase + uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. + + uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS. + + uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase. + uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. + + uint32_t DcsParamPadding[5]; + + uint16_t FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; // Q8.8 + + // UCLK section + uint8_t LowestUclkReservedForUlv; // Set this to 1 if UCLK DPM0 is reserved for ULV-mode only + uint8_t PaddingMem[3]; + + uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 4 DPM states, 0-P0, 1-P1, 2-P2, 3-P3. + + // Used for 2-Step UCLK change workaround + UclkDpmChangeRange_t UclkDpmSrcFreqRange; // In Mhz + UclkDpmChangeRange_t UclkDpmTargFreqRange; // In Mhz + uint16_t UclkDpmMidstepFreq; // In Mhz + uint16_t UclkMidstepPadding; + + // Link DPM Settings + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + // SECTION: Fan Control + uint16_t FanStopTemp; //Celcius + uint16_t FanStartTemp; //Celcius + + uint16_t FanGain[TEMP_COUNT]; + + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t MGpuFanBoostLimitRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint16_t FanPadding16; + uint8_t FanTempInputSelect; + uint8_t FanPadding; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. 
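+  // The FuzzyFan_* AFC overrides follow. Note: voltage fields tagged mV(Q2) in
+  // this table are fixed point with 2 fractional bits (millivolts = raw / 4.0).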
+ int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + // SECTION: AVFS + // Overrides + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + uint8_t dBtcGbGfxDfllModelSelect; //0 -> fused piece-wise model, 1 -> piece-wise linear(PPTable), 2 -> quadratic model(PPTable) + uint8_t Padding8_Avfs; + + QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve + DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb + DroopInt_t dBtcGbGfxDfll; // GHz->V BtcGb + DroopInt_t dBtcGbSoc; // GHz->V BtcGb + LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V + + PiecewiseLinearDroopInt_t PiecewiseLinearDroopIntGfxDfll; //GHz ->Vstore in IEEE float format + + QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V + + uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_GfxBtc[2]; + + uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 + uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2 + + // SECTION: XGMI + uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3. + uint8_t XgmiDpmSpare[2]; + + // SECTION: Advanced Options + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + // SECTION: Sku Reserved + uint8_t CustomerVariant; + + //VC BTC parameters are only applicable to VDD_GFX domain + uint8_t VcBtcEnabled; + uint16_t VcBtcVminT0; // T0_VMIN + uint16_t VcBtcFixedVminAgingOffset; // FIXED_VMIN_AGING_OFFSET + uint16_t VcBtcVmin2PsmDegrationGb; // VMIN_TO_PSM_DEGRADATION_GB + uint32_t VcBtcPsmA; // A_PSM + uint32_t VcBtcPsmB; // B_PSM + uint32_t VcBtcVminA; // A_VMIN + uint32_t VcBtcVminB; // B_VMIN + + //GPIO Board feature + uint16_t LedGpio; //GeneriA GPIO flag used to control the radeon LEDs + uint16_t GfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages + + uint32_t SkuReserved[8]; + + + // MAJOR SECTION: BOARD PARAMETERS + + //SECTION: Gaming Clocks + uint32_t GamingClk[6]; + + // SECTION: I2C Control + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + uint8_t GpioScl; // GPIO Number for SCL Line, used only for CKSVII2C1 + uint8_t GpioSda; // GPIO Number for SDA Line, used only for CKSVII2C1 + uint8_t FchUsbPdSlaveAddr; //For requesting USB PD controller S-states via FCH I2C when entering PME turn off + uint8_t I2cSpare[1]; + + // SECTION: SVI2 Board Parameters + uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields + + uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t VddciUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t MvddUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + + // SECTION: Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + int8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; // in Amps + int8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t Mem0MaxCurrent; // in Amps + int8_t Mem0Offset; // in Amps + uint8_t Padding_TelemetryMem0; + + uint16_t Mem1MaxCurrent; // in Amps + int8_t Mem1Offset; // 
in Amps + uint8_t Padding_TelemetryMem1; + + uint32_t MvddRatio; // This is used for MVDD Svi2 Div Ratio workaround. It has 16 fractional bits (Q16.16) + + // SECTION: GPIO Settings + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t LedEnableMask; + + uint8_t LedPcie; // GPIO number for PCIE results + uint8_t LedError; // GPIO number for Error Cases + uint8_t LedSpare1[2]; + + // SECTION: Clock Spread Spectrum + + // GFXCLK PLL Spread Spectrum + uint8_t PllGfxclkSpreadEnabled; // on or off + uint8_t PllGfxclkSpreadPercent; // Q4.4 + uint16_t PllGfxclkSpreadFreq; // kHz + + // GFXCLK DFLL Spread Spectrum + uint8_t DfllGfxclkSpreadEnabled; // on or off + uint8_t DfllGfxclkSpreadPercent; // Q4.4 + uint16_t DfllGfxclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint16_t UclkSpreadPadding; + uint16_t UclkSpreadFreq; // kHz + + // FCLK Spread Spectrum + uint8_t FclkSpreadEnabled; // on or off + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // Section: Memory Config + uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask. + + uint8_t DramBitWidth; // For DRAM use only. See Dram Bit width type defines + uint8_t PaddingMem1[3]; + + // Section: Total Board Power + uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power + uint16_t BoardPowerPadding; + + // SECTION: XGMI Training + uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS]; + uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS]; + + uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS]; + uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS]; + + // SECTION: UMC feature flags + uint8_t HsrEnabled; + uint8_t VddqOffEnabled; + uint8_t PaddingUmcFlags[2]; + + // UCLK Spread Spectrum + uint8_t UclkSpreadPercent[16]; + + // SECTION: Board Reserved + uint32_t BoardReserved[11]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; // SMU internal use + +} PPTable_t; + +typedef struct { + // MAJOR SECTION: SKU PARAMETERS + + uint32_t Version; + + // SECTION: Feature Enablement + uint32_t FeaturesToRun[NUM_FEATURES / 32]; + + // SECTION: Infrastructure Limits + uint16_t SocketPowerLimitAc[PPT_THROTTLER_COUNT]; // Watts + uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + uint16_t SocketPowerLimitDc[PPT_THROTTLER_COUNT]; // Watts + uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms + + uint16_t TdcLimit[TDC_THROTTLER_COUNT]; // Amps + uint16_t TdcLimitTau[TDC_THROTTLER_COUNT]; // Time constant of LPF in ms + + uint16_t TemperatureLimit[TEMP_COUNT]; // Celcius + + uint32_t FitLimit; // Failures in time (failures per million parts over the defined lifetime) + + // SECTION: Power Configuration + uint8_t TotalPowerConfig; //0-TDP, 1-TGP, 2-TCP Estimated, 3-TCP Measured. 
Use defines from PwrConfig_e + uint8_t TotalPowerPadding[3]; + + // SECTION: APCC Settings + uint32_t ApccPlusResidencyLimit; + + //SECTION: SMNCLK DPM + uint16_t SmnclkDpmFreq [NUM_SMNCLK_DPM_LEVELS]; // in MHz + uint16_t SmnclkDpmVoltage [NUM_SMNCLK_DPM_LEVELS]; // mV(Q2) + + uint32_t PaddingAPCC; + uint16_t PerPartDroopVsetGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //In mV(Q2) + uint16_t PaddingPerPartDroop; + + // SECTION: Throttler settings + uint32_t ThrottlerControlMask; // See Throtter masks defines + + // SECTION: FW DSTATE Settings + uint32_t FwDStateMask; // See FW DState masks defines + + // SECTION: ULV Settings + uint16_t UlvVoltageOffsetSoc; // In mV(Q2) + uint16_t UlvVoltageOffsetGfx; // In mV(Q2) + + uint16_t MinVoltageUlvGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX in ULV mode + uint16_t MinVoltageUlvSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC in ULV mode + + uint16_t SocLIVmin; + uint16_t SocLIVminoffset; + + uint8_t GceaLinkMgrIdleThreshold; //Set by SMU FW during enablment of GFXOFF. Controls delay for GFX SDP port disconnection during idle events + uint8_t paddingRlcUlvParams[3]; + + // SECTION: Voltage Control Parameters + uint16_t MinVoltageGfx; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_GFX + uint16_t MinVoltageSoc; // In mV(Q2) Minimum Voltage ("Vmin") of VDD_SOC + uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX + uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC + + uint16_t LoadLineResistanceGfx; // In mOhms with 8 fractional bits + uint16_t LoadLineResistanceSoc; // In mOhms with 8 fractional bits + + // SECTION: Temperature Dependent Vmin + uint16_t VDDGFX_TVmin; //Celcius + uint16_t VDDSOC_TVmin; //Celcius + uint16_t VDDGFX_Vmin_HiTemp; // mV Q2 + uint16_t VDDGFX_Vmin_LoTemp; // mV Q2 + uint16_t VDDSOC_Vmin_HiTemp; // mV Q2 + uint16_t VDDSOC_Vmin_LoTemp; // mV Q2 + + uint16_t VDDGFX_TVminHystersis; // Celcius + uint16_t VDDSOC_TVminHystersis; // Celcius + + //SECTION: DPM Config 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint16_t FreqTableGfx [NUM_GFXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableVclk [NUM_VCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDclk [NUM_DCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableSocclk [NUM_SOCCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableUclk [NUM_UCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDcefclk [NUM_DCEFCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDispclk [NUM_DISPCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePixclk [NUM_PIXCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTablePhyclk [NUM_PHYCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableDtbclk [NUM_DTBCLK_DPM_LEVELS ]; // In MHz + uint16_t FreqTableFclk [NUM_FCLK_DPM_LEVELS ]; // In MHz + uint32_t Paddingclks; + + DroopInt_t PerPartDroopModelGfxDfll[NUM_PIECE_WISE_LINEAR_DROOP_MODEL_VF_POINTS]; //GHz ->Vstore in IEEE float format + + uint32_t DcModeMaxFreq [PPCLK_COUNT ]; // In MHz + + uint8_t FreqTableUclkDiv [NUM_UCLK_DPM_LEVELS ]; // 0:Div-1, 1:Div-1/2, 2:Div-1/4, 3:Div-1/8 + + // Used for MALL performance boost + uint16_t FclkBoostFreq; // In Mhz + uint16_t FclkParamPadding; + + // SECTION: DPM Config 2 + uint16_t Mp0clkFreq [NUM_MP0CLK_DPM_LEVELS]; // in MHz + uint16_t Mp0DpmVoltage [NUM_MP0CLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemVddciVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + uint16_t MemMvddVoltage [NUM_UCLK_DPM_LEVELS]; // mV(Q2) + // GFXCLK DPM + uint16_t GfxclkFgfxoffEntry; // in Mhz + uint16_t GfxclkFinit; // in Mhz + uint16_t GfxclkFidle; // in MHz + uint8_t 
GfxclkSource; // 0 = PLL, 1 = DFLL + uint8_t GfxclkPadding; + + // GFX GPO + uint8_t GfxGpoSubFeatureMask; // bit 0 = PACE, bit 1 = DEM + uint8_t GfxGpoEnabledWorkPolicyMask; //Any policy that GPO can be enabled + uint8_t GfxGpoDisabledWorkPolicyMask; //Any policy that GPO can be disabled + uint8_t GfxGpoPadding[1]; + uint32_t GfxGpoVotingAllow; //For indicating which feature changes should result in a GPO table recalculation + + uint32_t GfxGpoPadding32[4]; + + uint16_t GfxDcsFopt; // Optimal GFXCLK for DCS in Mhz + uint16_t GfxDcsFclkFopt; // Optimal FCLK for DCS in Mhz + uint16_t GfxDcsUclkFopt; // Optimal UCLK for DCS in Mhz + + uint16_t DcsGfxOffVoltage; //Voltage in mV(Q2) applied to VDDGFX when entering DCS GFXOFF phase + + uint16_t DcsMinGfxOffTime; //Minimum amount of time PMFW shuts GFX OFF as part of GFX DCS phase + uint16_t DcsMaxGfxOffTime; //Maximum amount of time PMFW can shut GFX OFF as part of GFX DCS phase at a stretch. + + uint32_t DcsMinCreditAccum; //Min amount of positive credit accumulation before waking GFX up as part of DCS. + + uint16_t DcsExitHysteresis; //The min amount of time power credit accumulator should have a value > 0 before SMU exits the DCS throttling phase. + uint16_t DcsTimeout; //This is the amount of time SMU FW waits for RLC to put GFX into GFXOFF before reverting to the fallback mechanism of throttling GFXCLK to Fmin. + + uint32_t DcsParamPadding[5]; + + uint16_t FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; // Q8.8 + + // UCLK section + uint8_t LowestUclkReservedForUlv; // Set this to 1 if UCLK DPM0 is reserved for ULV-mode only + uint8_t PaddingMem[3]; + + uint8_t UclkDpmPstates [NUM_UCLK_DPM_LEVELS]; // 4 DPM states, 0-P0, 1-P1, 2-P2, 3-P3. + + // Used for 2-Step UCLK change workaround + UclkDpmChangeRange_t UclkDpmSrcFreqRange; // In Mhz + UclkDpmChangeRange_t UclkDpmTargFreqRange; // In Mhz + uint16_t UclkDpmMidstepFreq; // In Mhz + uint16_t UclkMidstepPadding; + + // Link DPM Settings + uint8_t PcieGenSpeed[NUM_LINK_LEVELS]; ///< 0:PciE-gen1 1:PciE-gen2 2:PciE-gen3 3:PciE-gen4 + uint8_t PcieLaneCount[NUM_LINK_LEVELS]; ///< 1=x1, 2=x2, 3=x4, 4=x8, 5=x12, 6=x16 + uint16_t LclkFreq[NUM_LINK_LEVELS]; + + // SECTION: Fan Control + uint16_t FanStopTemp; //Celcius + uint16_t FanStartTemp; //Celcius + + uint16_t FanGain[TEMP_COUNT]; + + uint16_t FanPwmMin; + uint16_t FanAcousticLimitRpm; + uint16_t FanThrottlingRpm; + uint16_t FanMaximumRpm; + uint16_t MGpuFanBoostLimitRpm; + uint16_t FanTargetTemperature; + uint16_t FanTargetGfxclk; + uint16_t FanPadding16; + uint8_t FanTempInputSelect; + uint8_t FanPadding; + uint8_t FanZeroRpmEnable; + uint8_t FanTachEdgePerRev; + + // The following are AFC override parameters. Leave at 0 to use FW defaults. 
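+  // The FuzzyFan_* AFC overrides follow, as in PPTable_t above; FanGain[] in the
+  // fan section holds one gain per TEMP_e temperature input (sized by TEMP_COUNT).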
+ int16_t FuzzyFan_ErrorSetDelta; + int16_t FuzzyFan_ErrorRateSetDelta; + int16_t FuzzyFan_PwmSetDelta; + uint16_t FuzzyFan_Reserved; + + // SECTION: AVFS + // Overrides + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + uint8_t dBtcGbGfxDfllModelSelect; //0 -> fused piece-wise model, 1 -> piece-wise linear(PPTable), 2 -> quadratic model(PPTable) + uint8_t Padding8_Avfs; + + QuadraticInt_t qAvfsGb[AVFS_VOLTAGE_COUNT]; // GHz->V Override of fused curve + DroopInt_t dBtcGbGfxPll; // GHz->V BtcGb + DroopInt_t dBtcGbGfxDfll; // GHz->V BtcGb + DroopInt_t dBtcGbSoc; // GHz->V BtcGb + LinearInt_t qAgingGb[AVFS_VOLTAGE_COUNT]; // GHz->V + + PiecewiseLinearDroopInt_t PiecewiseLinearDroopIntGfxDfll; //GHz ->Vstore in IEEE float format + + QuadraticInt_t qStaticVoltageOffset[AVFS_VOLTAGE_COUNT]; // GHz->V + + uint16_t DcTol[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint8_t DcBtcEnabled[AVFS_VOLTAGE_COUNT]; + uint8_t Padding8_GfxBtc[2]; + + uint16_t DcBtcMin[AVFS_VOLTAGE_COUNT]; // mV Q2 + uint16_t DcBtcMax[AVFS_VOLTAGE_COUNT]; // mV Q2 + + uint16_t DcBtcGb[AVFS_VOLTAGE_COUNT]; // mV Q2 + + // SECTION: XGMI + uint8_t XgmiDpmPstates[NUM_XGMI_LEVELS]; // 2 DPM states, high and low. 0-P0, 1-P1, 2-P2, 3-P3. + uint8_t XgmiDpmSpare[2]; + + // SECTION: Advanced Options + uint32_t DebugOverrides; + QuadraticInt_t ReservedEquation0; + QuadraticInt_t ReservedEquation1; + QuadraticInt_t ReservedEquation2; + QuadraticInt_t ReservedEquation3; + + // SECTION: Sku Reserved + uint8_t CustomerVariant; + + //VC BTC parameters are only applicable to VDD_GFX domain + uint8_t VcBtcEnabled; + uint16_t VcBtcVminT0; // T0_VMIN + uint16_t VcBtcFixedVminAgingOffset; // FIXED_VMIN_AGING_OFFSET + uint16_t VcBtcVmin2PsmDegrationGb; // VMIN_TO_PSM_DEGRADATION_GB + uint32_t VcBtcPsmA; // A_PSM + uint32_t VcBtcPsmB; // B_PSM + uint32_t VcBtcVminA; // A_VMIN + uint32_t VcBtcVminB; // B_VMIN + + //GPIO Board feature + uint16_t LedGpio; //GeneriA GPIO flag used to control the radeon LEDs + uint16_t GfxPowerStagesGpio; //Genlk_vsync GPIO flag used to control gfx power stages + + uint32_t SkuReserved[63]; + + + + // MAJOR SECTION: BOARD PARAMETERS + + //SECTION: Gaming Clocks + uint32_t GamingClk[6]; + + // SECTION: I2C Control + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + uint8_t GpioScl; // GPIO Number for SCL Line, used only for CKSVII2C1 + uint8_t GpioSda; // GPIO Number for SDA Line, used only for CKSVII2C1 + uint8_t FchUsbPdSlaveAddr; //For requesting USB PD controller S-states via FCH I2C when entering PME turn off + uint8_t I2cSpare[1]; + + // SECTION: SVI2 Board Parameters + uint8_t VddGfxVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddSocVrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem0VrMapping; // Use VR_MAPPING* bitfields + uint8_t VddMem1VrMapping; // Use VR_MAPPING* bitfields + + uint8_t GfxUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t SocUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t VddciUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + uint8_t MvddUlvPhaseSheddingMask; // set this to 1 to set PSI0/1 to 1 in ULV mode + + // SECTION: Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + int8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; // in Amps + int8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t Mem0MaxCurrent; // in Amps + int8_t Mem0Offset; // in Amps + uint8_t Padding_TelemetryMem0; + + uint16_t Mem1MaxCurrent; // in Amps + int8_t Mem1Offset; 
// in Amps + uint8_t Padding_TelemetryMem1; + + uint32_t MvddRatio; // This is used for MVDD Svi2 Div Ratio workaround. It has 16 fractional bits (Q16.16) + + // SECTION: GPIO Settings + uint8_t AcDcGpio; // GPIO pin configured for AC/DC switching + uint8_t AcDcPolarity; // GPIO polarity for AC/DC switching + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // GPIO polarity for VR1 HOT event + uint8_t GthrGpio; // GPIO pin configured for GTHR Event + uint8_t GthrPolarity; // replace GPIO polarity for GTHR + + // LED Display Settings + uint8_t LedPin0; // GPIO number for LedPin[0] + uint8_t LedPin1; // GPIO number for LedPin[1] + uint8_t LedPin2; // GPIO number for LedPin[2] + uint8_t LedEnableMask; + + uint8_t LedPcie; // GPIO number for PCIE results + uint8_t LedError; // GPIO number for Error Cases + uint8_t LedSpare1[2]; + + // SECTION: Clock Spread Spectrum + + // GFXCLK PLL Spread Spectrum + uint8_t PllGfxclkSpreadEnabled; // on or off + uint8_t PllGfxclkSpreadPercent; // Q4.4 + uint16_t PllGfxclkSpreadFreq; // kHz + + // GFXCLK DFLL Spread Spectrum + uint8_t DfllGfxclkSpreadEnabled; // on or off + uint8_t DfllGfxclkSpreadPercent; // Q4.4 + uint16_t DfllGfxclkSpreadFreq; // kHz + + // UCLK Spread Spectrum + uint16_t UclkSpreadPadding; + uint16_t UclkSpreadFreq; // kHz + + // FCLK Spread Spectrum + uint8_t FclkSpreadEnabled; // on or off + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // Section: Memory Config + uint32_t MemoryChannelEnabled; // For DRAM use only, Max 32 channels enabled bit mask. + + uint8_t DramBitWidth; // For DRAM use only. See Dram Bit width type defines + uint8_t PaddingMem1[3]; + + // Section: Total Board Power + uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power + uint16_t BoardPowerPadding; + + // SECTION: XGMI Training + uint8_t XgmiLinkSpeed [NUM_XGMI_PSTATE_LEVELS]; + uint8_t XgmiLinkWidth [NUM_XGMI_PSTATE_LEVELS]; + + uint16_t XgmiFclkFreq [NUM_XGMI_PSTATE_LEVELS]; + uint16_t XgmiSocVoltage [NUM_XGMI_PSTATE_LEVELS]; + + // SECTION: UMC feature flags + uint8_t HsrEnabled; + uint8_t VddqOffEnabled; + uint8_t PaddingUmcFlags[2]; + + // UCLK Spread Spectrum + uint8_t UclkSpreadPercent[16]; + + // SECTION: Board Reserved + uint32_t BoardReserved[11]; + + // SECTION: Structure Padding + + // Padding for MMHUB - do not modify this + uint32_t MmHubPadding[8]; // SMU internal use + + +} PPTable_beige_goby_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t FclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + uint16_t SocketPowerLpfTau; + uint16_t VcnClkAverageLpfTau; + uint16_t padding16; +} DriverSmuConfig_t; + +typedef struct { + DriverSmuConfig_t DriverSmuConfig; + + uint32_t Spare[7]; + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfigExternal_t; + +typedef struct { + uint16_t GfxclkFmin; // MHz + uint16_t GfxclkFmax; // MHz + QuadraticInt_t CustomGfxVfCurve; // a: mV/MHz^2, b: mv/MHz, c: mV + uint16_t CustomCurveFmin; // MHz + uint16_t UclkFmin; // MHz + uint16_t UclkFmax; // MHz + int16_t OverDrivePct; // % + uint16_t FanMaximumRpm; + uint16_t FanMinimumPwm; + uint16_t FanAcousticLimitRpm; + uint16_t FanTargetTemperature; // Degree Celcius + uint8_t 
FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS]; + uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS]; + uint16_t MaxOpTemp; // Degree Celcius + int16_t VddGfxOffset; // in mV + uint8_t FanZeroRpmEnable; + uint8_t FanZeroRpmStopTemp; + uint8_t FanMode; + uint8_t Padding[1]; +} OverDriveTable_t; + +typedef struct { + OverDriveTable_t OverDriveTable; + uint32_t Spare[8]; + + uint32_t MmHubPadding[8]; // SMU internal use +} OverDriveTableExternal_t; + +typedef struct { + uint32_t CurrClock[PPCLK_COUNT]; + + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageFclkFrequencyPreDs; + uint16_t AverageFclkFrequencyPostDs; + uint16_t AverageUclkFrequencyPreDs ; + uint16_t AverageUclkFrequencyPostDs ; + + + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t ThrottlerStatus ; + + uint8_t LinkDpmLevel; + uint8_t CurrFanPwm; + uint16_t CurrFanSpeed; + + //BACO metrics, PMFW-1721 + //metrics for D3hot entry/exit and driver ARM msgs + uint8_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; + uint8_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT]; + uint8_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT]; + + //PMFW-4362 + uint32_t EnergyAccumulator; + uint16_t AverageVclk0Frequency ; + uint16_t AverageDclk0Frequency ; + uint16_t AverageVclk1Frequency ; + uint16_t AverageDclk1Frequency ; + uint16_t VcnActivityPercentage ; //place holder, David N. to provide full sequence + uint8_t PcieRate ; + uint8_t PcieWidth ; + uint16_t AverageGfxclkFrequencyTarget; + uint16_t Padding16_2; + +} SmuMetrics_t; + +typedef struct { + uint32_t CurrClock[PPCLK_COUNT]; + + uint16_t AverageGfxclkFrequencyPreDs; + uint16_t AverageGfxclkFrequencyPostDs; + uint16_t AverageFclkFrequencyPreDs; + uint16_t AverageFclkFrequencyPostDs; + uint16_t AverageUclkFrequencyPreDs ; + uint16_t AverageUclkFrequencyPostDs ; + + + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureMem ; + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrMem0 ; + uint16_t TemperatureVrMem1 ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureLiquid0 ; + uint16_t TemperatureLiquid1 ; + uint16_t TemperaturePlx ; + uint16_t Padding16 ; + uint32_t AccCnt ; + uint8_t ThrottlingPercentage[THROTTLER_COUNT]; + + + uint8_t LinkDpmLevel; + uint8_t CurrFanPwm; + uint16_t CurrFanSpeed; + + //BACO metrics, PMFW-1721 + //metrics for D3hot entry/exit and driver ARM msgs + uint8_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT]; + uint8_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT]; + uint8_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT]; + + //PMFW-4362 + uint32_t EnergyAccumulator; + uint16_t AverageVclk0Frequency ; + uint16_t AverageDclk0Frequency ; + uint16_t AverageVclk1Frequency ; + uint16_t AverageDclk1Frequency ; + uint16_t VcnActivityPercentage ; //place holder, David N. 
to provide full sequence + uint8_t PcieRate ; + uint8_t PcieWidth ; + uint16_t AverageGfxclkFrequencyTarget; + uint16_t Padding16_2; + +} SmuMetrics_V2_t; + +typedef struct { + union { + SmuMetrics_t SmuMetrics; + SmuMetrics_V2_t SmuMetrics_V2; + }; + uint32_t Spare[1]; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetricsExternal_t; + +typedef struct { + uint16_t MinClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCEFCLK or SOCCLK (in MHz) + uint16_t MinUclk; + uint16_t MaxUclk; + + uint8_t WmSetting; + uint8_t Flags; + uint8_t Padding[2]; + +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 + +typedef enum { + WM_SOCCLK = 0, + WM_DCEFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef enum { + WATERMARKS_CLOCK_RANGE = 0, + WATERMARKS_DUMMY_PSTATE, + WATERMARKS_MALL, + WATERMARKS_COUNT, +} WATERMARKS_FLAGS_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; +} Watermarks_t; + +typedef struct { + Watermarks_t Watermarks; + + uint32_t MmHubPadding[8]; // SMU internal use +} WatermarksExternal_t; + +typedef struct { + uint16_t avgPsmCount[67]; + uint16_t minPsmCount[67]; + float avgPsmVoltage[67]; + float minPsmVoltage[67]; +} AvfsDebugTable_t; + +typedef struct { + AvfsDebugTable_t AvfsDebugTable; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTableExternal_t; + +typedef struct { + uint8_t AvfsVersion; + uint8_t Padding; + + uint8_t AvfsEn[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideVFT[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideAvfsGb[AVFS_VOLTAGE_COUNT]; + + uint8_t OverrideTemperatures[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideVInversion[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2V[AVFS_VOLTAGE_COUNT]; + uint8_t OverrideP2VCharzFreq[AVFS_VOLTAGE_COUNT]; + + int32_t VFT0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t VFT0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT1_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t VFT1_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT1_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t VFT2_m1[AVFS_VOLTAGE_COUNT]; // Q8.16 + int32_t VFT2_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t VFT2_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AvfsGb0_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AvfsGb0_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AvfsGb0_b[AVFS_VOLTAGE_COUNT]; // Q32 + + int32_t AcBtcGb_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t AcBtcGb_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t AcBtcGb_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t AvfsTempCold[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempMid[AVFS_VOLTAGE_COUNT]; + uint32_t AvfsTempHot[AVFS_VOLTAGE_COUNT]; + + uint32_t VInversion[AVFS_VOLTAGE_COUNT]; // in mV with 2 fractional bits + + + int32_t P2V_m1[AVFS_VOLTAGE_COUNT]; // Q8.24 + int32_t P2V_m2[AVFS_VOLTAGE_COUNT]; // Q12.12 + int32_t P2V_b[AVFS_VOLTAGE_COUNT]; // Q32 + + uint32_t P2VCharzFreq[AVFS_VOLTAGE_COUNT]; // in 10KHz units + + uint32_t EnabledAvfsModules[3]; //Sienna_Cichlid - 67 AVFS modules +} AvfsFuseOverride_t; + +typedef struct { + AvfsFuseOverride_t AvfsFuseOverride; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsFuseOverrideExternal_t; + +typedef struct { + uint8_t Gfx_ActiveHystLimit; + uint8_t Gfx_IdleHystLimit; + uint8_t Gfx_FPS; + uint8_t Gfx_MinActiveFreqType; + uint8_t Gfx_BoosterFreqType; + uint8_t Gfx_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. 
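+  // The *_PD_Data_* coefficients below are Q16 fixed point (16 fractional bits,
+  // real value = raw / 65536.0), matching the Q16.16 convention noted for MvddRatio.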
+ uint16_t Gfx_MinActiveFreq; // MHz + uint16_t Gfx_BoosterFreq; // MHz + uint16_t Gfx_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Gfx_PD_Data_limit_a; // Q16 + uint32_t Gfx_PD_Data_limit_b; // Q16 + uint32_t Gfx_PD_Data_limit_c; // Q16 + uint32_t Gfx_PD_Data_error_coeff; // Q16 + uint32_t Gfx_PD_Data_error_rate_coeff; // Q16 + + uint8_t Fclk_ActiveHystLimit; + uint8_t Fclk_IdleHystLimit; + uint8_t Fclk_FPS; + uint8_t Fclk_MinActiveFreqType; + uint8_t Fclk_BoosterFreqType; + uint8_t Fclk_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. + uint16_t Fclk_MinActiveFreq; // MHz + uint16_t Fclk_BoosterFreq; // MHz + uint16_t Fclk_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Fclk_PD_Data_limit_a; // Q16 + uint32_t Fclk_PD_Data_limit_b; // Q16 + uint32_t Fclk_PD_Data_limit_c; // Q16 + uint32_t Fclk_PD_Data_error_coeff; // Q16 + uint32_t Fclk_PD_Data_error_rate_coeff; // Q16 + + uint8_t Mem_ActiveHystLimit; + uint8_t Mem_IdleHystLimit; + uint8_t Mem_FPS; + uint8_t Mem_MinActiveFreqType; + uint8_t Mem_BoosterFreqType; + uint8_t Mem_MinFreqStep; // Minimum delta between current and target frequeny in order for FW to change clock. + uint16_t Mem_MinActiveFreq; // MHz + uint16_t Mem_BoosterFreq; // MHz + uint16_t Mem_PD_Data_time_constant; // Time constant of PD controller in ms + uint32_t Mem_PD_Data_limit_a; // Q16 + uint32_t Mem_PD_Data_limit_b; // Q16 + uint32_t Mem_PD_Data_limit_c; // Q16 + uint32_t Mem_PD_Data_error_coeff; // Q16 + uint32_t Mem_PD_Data_error_rate_coeff; // Q16 + + uint32_t Mem_UpThreshold_Limit; // Q16 + uint8_t Mem_UpHystLimit; + uint8_t Mem_DownHystLimit; + uint16_t Mem_Fps; + +} DpmActivityMonitorCoeffInt_t; + + +typedef struct { + DpmActivityMonitorCoeffInt_t DpmActivityMonitorCoeffInt; + uint32_t MmHubPadding[8]; // SMU internal use +} DpmActivityMonitorCoeffIntExternal_t; + +// Workload bits +#define WORKLOAD_PPLIB_DEFAULT_BIT 0 +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 1 +#define WORKLOAD_PPLIB_POWER_SAVING_BIT 2 +#define WORKLOAD_PPLIB_VIDEO_BIT 3 +#define WORKLOAD_PPLIB_VR_BIT 4 +#define WORKLOAD_PPLIB_COMPUTE_BIT 5 +#define WORKLOAD_PPLIB_CUSTOM_BIT 6 +#define WORKLOAD_PPLIB_W3D_BIT 7 +#define WORKLOAD_PPLIB_COUNT 8 + + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF + +// Table types +#define TABLE_PPTABLE 0 +#define TABLE_WATERMARKS 1 +#define TABLE_AVFS_PSM_DEBUG 2 +#define TABLE_AVFS_FUSE_OVERRIDE 3 +#define TABLE_PMSTATUSLOG 4 +#define TABLE_SMU_METRICS 5 +#define TABLE_DRIVER_SMU_CONFIG 6 +#define TABLE_ACTIVITY_MONITOR_COEFF 7 +#define TABLE_OVERDRIVE 8 +#define TABLE_I2C_COMMANDS 9 +#define TABLE_PACE 10 +#define TABLE_COUNT 11 + +typedef struct { + float FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; +} RlcPaceFlopsPerByteOverride_t; + +typedef struct { + RlcPaceFlopsPerByteOverride_t RlcPaceFlopsPerByteOverride; + + uint32_t MmHubPadding[8]; // SMU internal use +} RlcPaceFlopsPerByteOverrideExternal_t; + +// These defines are used with the SMC_MSG_SetUclkFastSwitch message. 
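+// (The chosen value is sent as the message argument to select the UCLK switching mode.)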
+#define UCLK_SWITCH_SLOW 0 +#define UCLK_SWITCH_FAST 1 +#define UCLK_SWITCH_DUMMY 2 +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h new file mode 100644 index 000000000000..8361ebd8d876 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h @@ -0,0 +1,282 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMU11_DRIVER_IF_VANGOGH_H__ +#define __SMU11_DRIVER_IF_VANGOGH_H__ + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +#define SMU13_DRIVER_IF_VERSION 3 + +typedef struct { + int32_t value; + uint32_t numFractionalBits; +} FloatInIntFormat_t; + +typedef enum { + DSPCLK_DCFCLK = 0, + DSPCLK_DISPCLK, + DSPCLK_PIXCLK, + DSPCLK_PHYCLK, + DSPCLK_COUNT, +} DSPCLK_e; + +typedef struct { + uint16_t Freq; // in MHz + uint16_t Vid; // min voltage in SVI2 VID +} DisplayClockTable_t; + +typedef struct { + uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MinMclk; + uint16_t MaxMclk; + + uint8_t WmSetting; + uint8_t WmType; // Used for normal pstate change or memory retraining + uint8_t Padding[2]; +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 +#define WM_PSTATE_CHG 0 +#define WM_RETRAINING 1 + +typedef enum { + WM_SOCCLK = 0, + WM_DCFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; + + uint32_t MmHubPadding[7]; // SMU internal use +} Watermarks_t; + +typedef enum { + CUSTOM_DPM_SETTING_GFXCLK, + CUSTOM_DPM_SETTING_CCLK, + CUSTOM_DPM_SETTING_FCLK_CCX, + CUSTOM_DPM_SETTING_FCLK_GFX, + CUSTOM_DPM_SETTING_FCLK_STALLS, + CUSTOM_DPM_SETTING_LCLK, + CUSTOM_DPM_SETTING_COUNT, +} CUSTOM_DPM_SETTING_e; + +typedef struct { + uint8_t ActiveHystLimit; + uint8_t IdleHystLimit; + uint8_t FPS; + uint8_t MinActiveFreqType; + FloatInIntFormat_t MinActiveFreq; + FloatInIntFormat_t PD_Data_limit; + FloatInIntFormat_t PD_Data_time_constant; + FloatInIntFormat_t PD_Data_error_coeff; + FloatInIntFormat_t PD_Data_error_rate_coeff; +} DpmActivityMonitorCoeffExt_t; + +typedef struct { + DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT]; +} CustomDpmSettings_t; + +#define NUM_DCFCLK_DPM_LEVELS 7 +#define 
NUM_DISPCLK_DPM_LEVELS 7 +#define NUM_DPPCLK_DPM_LEVELS 7 +#define NUM_SOCCLK_DPM_LEVELS 7 +#define NUM_ISPICLK_DPM_LEVELS 7 +#define NUM_ISPXCLK_DPM_LEVELS 7 +#define NUM_VCN_DPM_LEVELS 5 +#define NUM_FCLK_DPM_LEVELS 4 +#define NUM_SOC_VOLTAGE_LEVELS 8 + +typedef struct { + uint32_t fclk; + uint32_t memclk; + uint32_t voltage; +} df_pstate_t; + +typedef struct { + uint32_t vclk; + uint32_t dclk; +} vcn_clk_t; + +//Freq in MHz +//Voltage in milli volts with 2 fractional bits + +typedef struct { + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t IspiClocks[NUM_ISPICLK_DPM_LEVELS]; + uint32_t IspxClocks[NUM_ISPXCLK_DPM_LEVELS]; + vcn_clk_t VcnClocks[NUM_VCN_DPM_LEVELS]; + + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + + df_pstate_t DfPstateTable[NUM_FCLK_DPM_LEVELS]; + + uint32_t MinGfxClk; + uint32_t MaxGfxClk; + + uint8_t NumDfPstatesEnabled; + uint8_t NumDcfclkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //applies to both dispclk and dppclk + uint8_t NumSocClkLevelsEnabled; + + uint8_t IspClkLevelsEnabled; //applies to both ispiclk and ispxclk + uint8_t VcnClkLevelsEnabled; //applies to both vclk/dclk + uint8_t spare[2]; +} DpmClocks_t; + + +// Throttler Status Bitmask +#define THROTTLER_STATUS_BIT_SPL 0 +#define THROTTLER_STATUS_BIT_FPPT 1 +#define THROTTLER_STATUS_BIT_SPPT 2 +#define THROTTLER_STATUS_BIT_SPPT_APU 3 +#define THROTTLER_STATUS_BIT_THM_CORE 4 +#define THROTTLER_STATUS_BIT_THM_GFX 5 +#define THROTTLER_STATUS_BIT_THM_SOC 6 +#define THROTTLER_STATUS_BIT_TDC_VDD 7 +#define THROTTLER_STATUS_BIT_TDC_SOC 8 +#define THROTTLER_STATUS_BIT_TDC_GFX 9 +#define THROTTLER_STATUS_BIT_TDC_CVIP 10 + +typedef struct { + uint16_t GfxclkFrequency; //[MHz] + uint16_t SocclkFrequency; //[MHz] + uint16_t VclkFrequency; //[MHz] + uint16_t DclkFrequency; //[MHz] + uint16_t MemclkFrequency; //[MHz] + uint16_t spare; + + uint16_t GfxActivity; //[centi] + uint16_t UvdActivity; //[centi] + + uint16_t Voltage[3]; //[mV] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX + uint16_t Current[3]; //[mA] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX + uint16_t Power[3]; //[mW] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX + uint16_t CurrentSocketPower; //[mW] + + //3rd party tools in Windows need info in the case of APUs + uint16_t CoreFrequency[8]; //[MHz] + uint16_t CorePower[8]; //[mW] + uint16_t CoreTemperature[8]; //[centi-Celsius] + uint16_t L3Frequency[2]; //[MHz] + uint16_t L3Temperature[2]; //[centi-Celsius] + + uint16_t GfxTemperature; //[centi-Celsius] + uint16_t SocTemperature; //[centi-Celsius] + uint16_t EdgeTemperature; + uint16_t ThrottlerStatus; +} SmuMetrics_legacy_t; + +typedef struct { + uint16_t GfxclkFrequency; //[MHz] + uint16_t SocclkFrequency; //[MHz] + uint16_t VclkFrequency; //[MHz] + uint16_t DclkFrequency; //[MHz] + uint16_t MemclkFrequency; //[MHz] + uint16_t spare; + + uint16_t GfxActivity; //[centi] + uint16_t UvdActivity; //[centi] + uint16_t C0Residency[4]; //percentage + + uint16_t Voltage[3]; //[mV] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX + uint16_t Current[3]; //[mA] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX + uint16_t Power[3]; //[mW] indices: VDDCR_VDD, VDDCR_SOC, VDDCR_GFX + uint16_t CurrentSocketPower; //[mW] + + //3rd party tools in Windows need info in the case of APUs + uint16_t CoreFrequency[4]; //[MHz] + uint16_t CorePower[4]; //[mW] + uint16_t CoreTemperature[4]; //[centi-Celsius] + uint16_t L3Frequency[1]; //[MHz] + uint16_t 
L3Temperature[1]; //[centi-Celsius] + + uint16_t GfxTemperature; //[centi-Celsius] + uint16_t SocTemperature; //[centi-Celsius] + uint16_t EdgeTemperature; + uint16_t ThrottlerStatus; +} SmuMetricsTable_t; + +typedef struct { + SmuMetricsTable_t Current; + SmuMetricsTable_t Average; + //uint32_t AccCnt; + uint32_t SampleStartTime; + uint32_t SampleStopTime; +} SmuMetrics_t; + + +// Workload bits +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 +#define WORKLOAD_PPLIB_VIDEO_BIT 2 +#define WORKLOAD_PPLIB_VR_BIT 3 +#define WORKLOAD_PPLIB_COMPUTE_BIT 4 +#define WORKLOAD_PPLIB_CUSTOM_BIT 5 +#define WORKLOAD_PPLIB_COUNT 6 + +#define TABLE_BIOS_IF 0 // Called by BIOS +#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS +#define TABLE_CUSTOM_DPM 2 // Called by Driver +#define TABLE_SPARE1 3 +#define TABLE_DPMCLOCKS 4 // Called by Driver +#define TABLE_SPARE2 5 // Called by Tools +#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log +#define TABLE_SMU_METRICS 7 // Called by Driver +#define TABLE_COUNT 8 + +//ISP tile definitions +typedef enum { + TILE_ISPX = 0, // ISPX + TILE_ISPM, // ISPM + TILE_ISPC, // ISPCORE + TILE_ISPPRE, // ISPPRE + TILE_ISPPOST, // ISPPOST + TILE_MAX +} TILE_NUM_e; + +// Tile Selection (Based on arguments) +#define TILE_SEL_ISPX (1<<(TILE_ISPX)) +#define TILE_SEL_ISPM (1<<(TILE_ISPM)) +#define TILE_SEL_ISPC (1<<(TILE_ISPC)) +#define TILE_SEL_ISPPRE (1<<(TILE_ISPPRE)) +#define TILE_SEL_ISPPOST (1<<(TILE_ISPPOST)) + +// Mask for ISP tiles in PGFSM PWR Status Registers +//Bit[1:0] maps to ISPX, (ISPX) +//Bit[3:2] maps to ISPM, (ISPM) +//Bit[5:4] maps to ISPCORE, (ISPCORE) +//Bit[7:6] maps to ISPPRE, (ISPPRE) +//Bit[9:8] maps to POST, (ISPPOST +#define TILE_ISPX_MASK ((1<<0) | (1<<1)) +#define TILE_ISPM_MASK ((1<<2) | (1<<3)) +#define TILE_ISPC_MASK ((1<<4) | (1<<5)) +#define TILE_ISPPRE_MASK ((1<<6) | (1<<7)) +#define TILE_ISPPOST_MASK ((1<<8) | (1<<9)) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h new file mode 100644 index 000000000000..e9315eb5b48e --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h @@ -0,0 +1,232 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
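
/*
 * Usage sketch (not part of the patch): combining the TILE_SEL_* bits from
 * smu11_driver_if_vangogh.h above into a tile-selection argument, and testing
 * a PGFSM power status value against the per-tile two-bit masks. The helper
 * names and the "both mask bits set means powered" reading of the status
 * register are illustrative assumptions, not taken from the header.
 */
#include <stdbool.h>
#include <stdint.h>

static uint32_t isp_tiles_to_select_arg(bool ispx, bool ispm, bool ispc)
{
	uint32_t arg = 0;

	if (ispx)
		arg |= TILE_SEL_ISPX;
	if (ispm)
		arg |= TILE_SEL_ISPM;
	if (ispc)
		arg |= TILE_SEL_ISPC;

	return arg;
}

static bool isp_tile_ispx_powered(uint32_t pgfsm_pwr_status)
{
	/* Bit[1:0] of the PGFSM power status map to ISPX per the comment above. */
	return (pgfsm_pwr_status & TILE_ISPX_MASK) == TILE_ISPX_MASK;
}
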
+ * + */ + +#ifndef SMU12_DRIVER_IF_H +#define SMU12_DRIVER_IF_H + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +#define SMU12_DRIVER_IF_VERSION 14 + +typedef struct { + int32_t value; + uint32_t numFractionalBits; +} FloatInIntFormat_t; + +typedef enum { + DSPCLK_DCFCLK = 0, + DSPCLK_DISPCLK, + DSPCLK_PIXCLK, + DSPCLK_PHYCLK, + DSPCLK_COUNT, +} DSPCLK_e; + +typedef struct { + uint16_t Freq; // in MHz + uint16_t Vid; // min voltage in SVI2 VID +} DisplayClockTable_t; + +typedef struct { + uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MinMclk; + uint16_t MaxMclk; + + uint8_t WmSetting; + uint8_t WmType; // Used for normal pstate change or memory retraining + uint8_t Padding[2]; +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 +#define WM_PSTATE_CHG 0 +#define WM_RETRAINING 1 + +typedef enum { + WM_SOCCLK = 0, + WM_DCFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; + + uint32_t MmHubPadding[7]; // SMU internal use +} Watermarks_t; + +typedef enum { + CUSTOM_DPM_SETTING_GFXCLK, + CUSTOM_DPM_SETTING_CCLK, + CUSTOM_DPM_SETTING_FCLK_CCX, + CUSTOM_DPM_SETTING_FCLK_GFX, + CUSTOM_DPM_SETTING_FCLK_STALLS, + CUSTOM_DPM_SETTING_LCLK, + CUSTOM_DPM_SETTING_COUNT, +} CUSTOM_DPM_SETTING_e; + +typedef struct { + uint8_t ActiveHystLimit; + uint8_t IdleHystLimit; + uint8_t FPS; + uint8_t MinActiveFreqType; + FloatInIntFormat_t MinActiveFreq; + FloatInIntFormat_t PD_Data_limit; + FloatInIntFormat_t PD_Data_time_constant; + FloatInIntFormat_t PD_Data_error_coeff; + FloatInIntFormat_t PD_Data_error_rate_coeff; +} DpmActivityMonitorCoeffExt_t; + +typedef struct { + DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT]; +} CustomDpmSettings_t; + + +#define NUM_DCFCLK_DPM_LEVELS 8 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_FCLK_DPM_LEVELS 4 +#define NUM_MEMCLK_DPM_LEVELS 4 +#define NUM_VCN_DPM_LEVELS 8 + +typedef struct { + uint32_t Freq; // In MHz + uint32_t Vol; // Millivolts with 2 fractional bits +} DpmClock_t; + +typedef struct { + DpmClock_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + DpmClock_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + DpmClock_t FClocks[NUM_FCLK_DPM_LEVELS]; + DpmClock_t MemClocks[NUM_MEMCLK_DPM_LEVELS]; + DpmClock_t VClocks[NUM_VCN_DPM_LEVELS]; + DpmClock_t DClocks[NUM_VCN_DPM_LEVELS]; + + uint8_t NumDcfClkDpmEnabled; + uint8_t NumSocClkDpmEnabled; + uint8_t NumFClkDpmEnabled; + uint8_t NumMemClkDpmEnabled; + uint8_t NumVClkDpmEnabled; + uint8_t NumDClkDpmEnabled; + uint8_t spare[2]; +} DpmClocks_t; + + +typedef enum { + CLOCK_SMNCLK = 0, + CLOCK_SOCCLK, + CLOCK_MP0CLK, + CLOCK_MP1CLK, + CLOCK_MP2CLK, + CLOCK_VCLK, + CLOCK_LCLK, + CLOCK_DCLK, + CLOCK_ACLK, + CLOCK_ISPCLK, + CLOCK_SHUBCLK, + CLOCK_DISPCLK, + CLOCK_DPPCLK, + CLOCK_DPREFCLK, + CLOCK_DCFCLK, + CLOCK_FCLK, + CLOCK_UMCCLK, + CLOCK_GFXCLK, + CLOCK_COUNT, +} CLOCK_IDs_e; + +// Throttler Status Bitmask +#define THROTTLER_STATUS_BIT_SPL 0 +#define THROTTLER_STATUS_BIT_FPPT 1 +#define THROTTLER_STATUS_BIT_SPPT 2 +#define THROTTLER_STATUS_BIT_SPPT_APU 3 +#define THROTTLER_STATUS_BIT_THM_CORE 4 +#define THROTTLER_STATUS_BIT_THM_GFX 5 +#define THROTTLER_STATUS_BIT_THM_SOC 6 +#define THROTTLER_STATUS_BIT_TDC_VDD 7 +#define THROTTLER_STATUS_BIT_TDC_SOC 8 +#define THROTTLER_STATUS_BIT_PROCHOT_CPU 9 +#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10 +#define 
THROTTLER_STATUS_BIT_EDC_CPU 11 +#define THROTTLER_STATUS_BIT_EDC_GFX 12 + +typedef struct { + uint16_t ClockFrequency[CLOCK_COUNT]; //[MHz] + + uint16_t AverageGfxclkFrequency; //[MHz] + uint16_t AverageSocclkFrequency; //[MHz] + uint16_t AverageVclkFrequency; //[MHz] + uint16_t AverageFclkFrequency; //[MHz] + + uint16_t AverageGfxActivity; //[centi] + uint16_t AverageUvdActivity; //[centi] + + uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC + uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC + uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC + + uint16_t FanPwm; //[milli] + uint16_t CurrentSocketPower; //[W] + + uint16_t CoreFrequency[8]; //[MHz] + uint16_t CorePower[8]; //[mW] + uint16_t CoreTemperature[8]; //[centi-Celsius] + uint16_t L3Frequency[2]; //[MHz] + uint16_t L3Temperature[2]; //[centi-Celsius] + + uint16_t GfxTemperature; //[centi-Celsius] + uint16_t SocTemperature; //[centi-Celsius] + uint16_t ThrottlerStatus; + uint16_t spare; + + uint16_t StapmOriginalLimit; //[W] + uint16_t StapmCurrentLimit; //[W] + uint16_t ApuPower; //[W] + uint16_t dGpuPower; //[W] + + uint16_t VddTdcValue; //[mA] + uint16_t SocTdcValue; //[mA] + uint16_t VddEdcValue; //[mA] + uint16_t SocEdcValue; //[mA] + uint16_t reserve[2]; +} SmuMetrics_t; + + +// Workload bits +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 +#define WORKLOAD_PPLIB_VIDEO_BIT 2 +#define WORKLOAD_PPLIB_VR_BIT 3 +#define WORKLOAD_PPLIB_COMPUTE_BIT 4 +#define WORKLOAD_PPLIB_CUSTOM_BIT 5 +#define WORKLOAD_PPLIB_COUNT 6 + +#define TABLE_BIOS_IF 0 // Called by BIOS +#define TABLE_WATERMARKS 1 // Called by Driver +#define TABLE_CUSTOM_DPM 2 // Called by Driver +#define TABLE_SPARE1 3 +#define TABLE_DPMCLOCKS 4 // Called by Driver +#define TABLE_MOMENTARY_PM 5 // Called by Tools +#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log +#define TABLE_SMU_METRICS 7 // Called by Driver +#define TABLE_COUNT 8 + + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h new file mode 100644 index 000000000000..0f67c56c2863 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h @@ -0,0 +1,538 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
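
/*
 * Usage sketch (not part of the patch): decoding the ThrottlerStatus field of
 * the smu12 SmuMetrics_t above. Each THROTTLER_STATUS_BIT_* value is a bit
 * position, so testing a throttler is a shift-and-mask. The helper names are
 * illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static bool smu12_throttled_by(uint16_t throttler_status, unsigned int bit)
{
	return (throttler_status >> bit) & 0x1;
}

/* Example: was the package power (SPL) or CPU PROCHOT limit active? */
static bool smu12_power_or_prochot_active(uint16_t throttler_status)
{
	return smu12_throttled_by(throttler_status, THROTTLER_STATUS_BIT_SPL) ||
	       smu12_throttled_by(throttler_status, THROTTLER_STATUS_BIT_PROCHOT_CPU);
}
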
+ * + */ + +#ifndef SMU13_DRIVER_IF_ALDEBARAN_H +#define SMU13_DRIVER_IF_ALDEBARAN_H + +#define NUM_VCLK_DPM_LEVELS 8 +#define NUM_DCLK_DPM_LEVELS 8 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_LCLK_DPM_LEVELS 8 +#define NUM_UCLK_DPM_LEVELS 4 +#define NUM_FCLK_DPM_LEVELS 8 +#define NUM_XGMI_DPM_LEVELS 4 + +// Feature Control Defines +#define FEATURE_DATA_CALCULATIONS 0 +#define FEATURE_DPM_GFXCLK_BIT 1 +#define FEATURE_DPM_UCLK_BIT 2 +#define FEATURE_DPM_SOCCLK_BIT 3 +#define FEATURE_DPM_FCLK_BIT 4 +#define FEATURE_DPM_LCLK_BIT 5 +#define FEATURE_DPM_XGMI_BIT 6 +#define FEATURE_DS_GFXCLK_BIT 7 +#define FEATURE_DS_SOCCLK_BIT 8 +#define FEATURE_DS_LCLK_BIT 9 +#define FEATURE_DS_FCLK_BIT 10 +#define FEATURE_DS_UCLK_BIT 11 +#define FEATURE_GFX_SS_BIT 12 +#define FEATURE_DPM_VCN_BIT 13 +#define FEATURE_RSMU_SMN_CG_BIT 14 +#define FEATURE_WAFL_CG_BIT 15 +#define FEATURE_PPT_BIT 16 +#define FEATURE_TDC_BIT 17 +#define FEATURE_APCC_PLUS_BIT 18 +#define FEATURE_APCC_DFLL_BIT 19 +#define FEATURE_FW_CTF_BIT 20 +#define FEATURE_THERMAL_BIT 21 +#define FEATURE_OUT_OF_BAND_MONITOR_BIT 22 +#define FEATURE_SPARE_23_BIT 23 +#define FEATURE_XGMI_PER_LINK_PWR_DWN 24 +#define FEATURE_DF_CSTATE 25 +#define FEATURE_FUSE_CG_BIT 26 +#define FEATURE_MP1_CG_BIT 27 +#define FEATURE_SMUIO_CG_BIT 28 +#define FEATURE_THM_CG_BIT 29 +#define FEATURE_CLK_CG_BIT 30 +#define FEATURE_EDC_BIT 31 +#define FEATURE_SPARE_32_BIT 32 +#define FEATURE_SPARE_33_BIT 33 +#define FEATURE_SPARE_34_BIT 34 +#define FEATURE_SPARE_35_BIT 35 +#define FEATURE_SPARE_36_BIT 36 +#define FEATURE_SPARE_37_BIT 37 +#define FEATURE_SPARE_38_BIT 38 +#define FEATURE_SPARE_39_BIT 39 +#define FEATURE_SPARE_40_BIT 40 +#define FEATURE_SPARE_41_BIT 41 +#define FEATURE_SPARE_42_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 + +#define NUM_FEATURES 64 + +// I2C Config Bit Defines +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +// Throttler Status Bits. 
+// These are aligned with the out of band monitor alarm bits for common throttlers +#define THROTTLER_PPT0_BIT 0 +#define THROTTLER_PPT1_BIT 1 +#define THROTTLER_TDC_GFX_BIT 2 +#define THROTTLER_TDC_SOC_BIT 3 +#define THROTTLER_TDC_HBM_BIT 4 +#define THROTTLER_SPARE_5 5 +#define THROTTLER_TEMP_GPU_BIT 6 +#define THROTTLER_TEMP_MEM_BIT 7 +#define THORTTLER_SPARE_8 8 +#define THORTTLER_SPARE_9 9 +#define THORTTLER_SPARE_10 10 +#define THROTTLER_TEMP_VR_GFX_BIT 11 +#define THROTTLER_TEMP_VR_SOC_BIT 12 +#define THROTTLER_TEMP_VR_MEM_BIT 13 +#define THORTTLER_SPARE_14 14 +#define THORTTLER_SPARE_15 15 +#define THORTTLER_SPARE_16 16 +#define THORTTLER_SPARE_17 17 +#define THORTTLER_SPARE_18 18 +#define THROTTLER_APCC_BIT 19 + +// Table transfer status +#define TABLE_TRANSFER_OK 0x0 +#define TABLE_TRANSFER_FAILED 0xFF +#define TABLE_TRANSFER_PENDING 0xAB + +//I2C Interface +#define NUM_I2C_CONTROLLERS 8 + +#define I2C_CONTROLLER_ENABLED 1 +#define I2C_CONTROLLER_DISABLED 0 + +#define MAX_SW_I2C_COMMANDS 24 + +#define ALDEBARAN_UMC_CHANNEL_NUM 32 + +typedef enum { + I2C_CONTROLLER_PORT_0, //CKSVII2C0 + I2C_CONTROLLER_PORT_1, //CKSVII2C1 + I2C_CONTROLLER_PORT_COUNT, +} I2cControllerPort_e; + +typedef enum { + I2C_CONTROLLER_THROTTLER_TYPE_NONE, + I2C_CONTROLLER_THROTTLER_VR_GFX0, + I2C_CONTROLLER_THROTTLER_VR_GFX1, + I2C_CONTROLLER_THROTTLER_VR_SOC, + I2C_CONTROLLER_THROTTLER_VR_MEM, + I2C_CONTROLLER_THROTTLER_COUNT, +} I2cControllerThrottler_e; + +typedef enum { + I2C_CONTROLLER_PROTOCOL_VR_MP2855, + I2C_CONTROLLER_PROTOCOL_COUNT, +} I2cControllerProtocol_e; + +typedef struct { + uint8_t Enabled; + uint8_t Speed; + uint8_t SlaveAddress; + uint8_t ControllerPort; + uint8_t ThermalThrotter; + uint8_t I2cProtocol; + uint8_t PaddingConfig[2]; +} I2cControllerConfig_t; + +typedef enum { + I2C_PORT_SVD_SCL, + I2C_PORT_GPIO, +} I2cPort_e; + +typedef enum { + I2C_SPEED_FAST_50K, //50 Kbits/s + I2C_SPEED_FAST_100K, //100 Kbits/s + I2C_SPEED_FAST_400K, //400 Kbits/s + I2C_SPEED_FAST_PLUS_1M, //1 Mbits/s (in fast mode) + I2C_SPEED_HIGH_1M, //1 Mbits/s (in high speed mode) + I2C_SPEED_HIGH_2M, //2.3 Mbits/s + I2C_SPEED_COUNT, +} I2cSpeed_e; + +typedef enum { + I2C_CMD_READ, + I2C_CMD_WRITE, + I2C_CMD_COUNT, +} I2cCmdType_e; + +#define CMDCONFIG_STOP_BIT 0 +#define CMDCONFIG_RESTART_BIT 1 +#define CMDCONFIG_READWRITE_BIT 2 //bit should be 0 for read, 1 for write + +#define CMDCONFIG_STOP_MASK (1 << CMDCONFIG_STOP_BIT) +#define CMDCONFIG_RESTART_MASK (1 << CMDCONFIG_RESTART_BIT) +#define CMDCONFIG_READWRITE_MASK (1 << CMDCONFIG_READWRITE_BIT) + +typedef struct { + uint8_t ReadWriteData; //Return data for read. 
Data to send for write + uint8_t CmdConfig; //Includes whether associated command should have a stop or restart command, and is a read or write +} SwI2cCmd_t; //SW I2C Command Table + +typedef struct { + uint8_t I2CcontrollerPort; //CKSVII2C0(0) or //CKSVII2C1(1) + uint8_t I2CSpeed; //Use I2cSpeed_e to indicate speed to select + uint8_t SlaveAddress; //Slave address of device + uint8_t NumCmds; //Number of commands + SwI2cCmd_t SwI2cCmds[MAX_SW_I2C_COMMANDS]; +} SwI2cRequest_t; // SW I2C Request Table + +typedef struct { + SwI2cRequest_t SwI2cRequest; + uint32_t Spare[8]; + uint32_t MmHubPadding[8]; // SMU internal use +} SwI2cRequestExternal_t; + +typedef struct { + uint32_t a; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable + uint32_t c; // store in IEEE float format in this variable +} QuadraticInt_t; + +typedef struct { + uint32_t m; // store in IEEE float format in this variable + uint32_t b; // store in IEEE float format in this variable +} LinearInt_t; + +typedef enum { + GFXCLK_SOURCE_PLL, + GFXCLK_SOURCE_DFLL, + GFXCLK_SOURCE_COUNT, +} GfxclkSrc_e; + +typedef enum { + PPCLK_GFXCLK, + PPCLK_VCLK, + PPCLK_DCLK, + PPCLK_SOCCLK, + PPCLK_UCLK, + PPCLK_FCLK, + PPCLK_LCLK, + PPCLK_COUNT, +} PPCLK_e; + +typedef enum { + GPIO_INT_POLARITY_ACTIVE_LOW, + GPIO_INT_POLARITY_ACTIVE_HIGH, +} GpioIntPolarity_e; + +//PPSMC_MSG_SetUclkDpmMode +typedef enum { + UCLK_DPM_MODE_BANDWIDTH, + UCLK_DPM_MODE_LATENCY, +} UCLK_DPM_MODE_e; + +typedef struct { + uint8_t StartupLevel; + uint8_t NumDiscreteLevels; // Set to 2 (Fmin, Fmax) when using fine grained DPM, otherwise set to # discrete levels used + uint16_t SsFmin; // Fmin for SS curve. If SS curve is selected, will use V@SSFmin for F <= Fmin + LinearInt_t ConversionToAvfsClk; // Transfer function to AVFS Clock (GHz->GHz) + QuadraticInt_t SsCurve; // Slow-slow curve (GHz->V) +} DpmDescriptor_t; + +typedef struct { + uint32_t Version; + + // SECTION: Feature Enablement + uint32_t FeaturesToRun[2]; + + // SECTION: Infrastructure Limits + uint16_t PptLimit; // Watts + uint16_t TdcLimitGfx; // Amps + uint16_t TdcLimitSoc; // Amps + uint16_t TdcLimitHbm; // Amps + uint16_t ThotspotLimit; // Celcius + uint16_t TmemLimit; // Celcius + uint16_t Tvr_gfxLimit; // Celcius + uint16_t Tvr_memLimit; // Celcius + uint16_t Tvr_socLimit; // Celcius + uint16_t PaddingLimit; + + // SECTION: Voltage Control Parameters + uint16_t MaxVoltageGfx; // In mV(Q2) Maximum Voltage allowable of VDD_GFX + uint16_t MaxVoltageSoc; // In mV(Q2) Maximum Voltage allowable of VDD_SOC + + //SECTION: DPM Config 1 + DpmDescriptor_t DpmDescriptor[PPCLK_COUNT]; + + uint8_t DidTableVclk[NUM_VCLK_DPM_LEVELS]; //PPCLK_VCLK + uint8_t DidTableDclk[NUM_DCLK_DPM_LEVELS]; //PPCLK_DCLK + uint8_t DidTableSocclk[NUM_SOCCLK_DPM_LEVELS]; //PPCLK_SOCCLK + uint8_t DidTableLclk[NUM_LCLK_DPM_LEVELS]; //PPCLK_LCLK + uint32_t FidTableFclk[NUM_FCLK_DPM_LEVELS]; //PPCLK_FCLK + uint8_t DidTableFclk[NUM_FCLK_DPM_LEVELS]; //PPCLK_FCLK + uint32_t FidTableUclk[NUM_UCLK_DPM_LEVELS]; //PPCLK_UCLK + uint8_t DidTableUclk[NUM_UCLK_DPM_LEVELS]; //PPCLK_UCLK + + uint32_t StartupFidPll0; //GFXAVFSCLK, SOCCLK, MP0CLK, MPIOCLK, DXIOCLK + uint32_t StartupFidPll4; //VCLK, DCLK, WAFLCLK + uint32_t StartupFidPll5; //SMNCLK, MP1CLK, LCLK + + uint8_t StartupSmnclkDid; + uint8_t StartupMp0clkDid; + uint8_t StartupMp1clkDid; + uint8_t StartupWaflclkDid; + uint8_t StartupGfxavfsclkDid; + uint8_t StartupMpioclkDid; + uint8_t StartupDxioclkDid; + uint8_t spare123; + + uint8_t 
StartupVidGpu0Svi0Plane0; //VDDCR_GFX0 + uint8_t StartupVidGpu0Svi0Plane1; //VDDCR_SOC + uint8_t StartupVidGpu0Svi1Plane0; //VDDCR_HBM + uint8_t StartupVidGpu0Svi1Plane1; //UNUSED [0 = plane is not used and should not be programmed] + + uint8_t StartupVidGpu1Svi0Plane0; //VDDCR_GFX1 + uint8_t StartupVidGpu1Svi0Plane1; //UNUSED [0 = plane is not used and should not be programmed] + uint8_t StartupVidGpu1Svi1Plane0; //UNUSED [0 = plane is not used and should not be programmed] + uint8_t StartupVidGpu1Svi1Plane1; //UNUSED [0 = plane is not used and should not be programmed] + + // GFXCLK DPM + uint16_t GfxclkFmax; // In MHz + uint16_t GfxclkFmin; // In MHz + uint16_t GfxclkFidle; // In MHz + uint16_t GfxclkFinit; // In MHz + uint8_t GfxclkSource; // GfxclkSrc_e [0 = PLL, 1 = DFLL] + uint8_t spare1[2]; + uint8_t StartupGfxclkDid; + uint32_t StartupGfxclkFid; + + // SECTION: AVFS + uint16_t GFX_Guardband_Freq[8]; // MHz [unsigned] + int16_t GFX_Guardband_Voltage_Cold[8]; // mV [signed] + int16_t GFX_Guardband_Voltage_Mid[8]; // mV [signed] + int16_t GFX_Guardband_Voltage_Hot[8]; // mV [signed] + + uint16_t SOC_Guardband_Freq[8]; // MHz [unsigned] + int16_t SOC_Guardband_Voltage_Cold[8]; // mV [signed] + int16_t SOC_Guardband_Voltage_Mid[8]; // mV [signed] + int16_t SOC_Guardband_Voltage_Hot[8]; // mV [signed] + + // VDDCR_GFX BTC + uint16_t DcBtcEnabled; + int16_t DcBtcMin; // mV [signed] + int16_t DcBtcMax; // mV [signed] + int16_t DcBtcGb; // mV [signed] + + // SECTION: XGMI + uint8_t XgmiLinkSpeed[NUM_XGMI_DPM_LEVELS]; //Gbps [EX: 32 = 32Gbps] + uint8_t XgmiLinkWidth[NUM_XGMI_DPM_LEVELS]; //Width [EX: 16 = x16] + uint8_t XgmiStartupLevel; + uint8_t spare12[3]; + + // GFX Vmin + uint16_t GFX_PPVmin_Enabled; + uint16_t GFX_Vmin_Plat_Offset_Hot; // mV + uint16_t GFX_Vmin_Plat_Offset_Cold; // mV + uint16_t GFX_Vmin_Hot_T0; // mV + uint16_t GFX_Vmin_Cold_T0; // mV + uint16_t GFX_Vmin_Hot_Eol; // mV + uint16_t GFX_Vmin_Cold_Eol; // mV + uint16_t GFX_Vmin_Aging_Offset; // mV + uint16_t GFX_Vmin_Temperature_Hot; // 'C + uint16_t GFX_Vmin_Temperature_Cold; // 'C + + // SOC Vmin + uint16_t SOC_PPVmin_Enabled; + uint16_t SOC_Vmin_Plat_Offset_Hot; // mV + uint16_t SOC_Vmin_Plat_Offset_Cold; // mV + uint16_t SOC_Vmin_Hot_T0; // mV + uint16_t SOC_Vmin_Cold_T0; // mV + uint16_t SOC_Vmin_Hot_Eol; // mV + uint16_t SOC_Vmin_Cold_Eol; // mV + uint16_t SOC_Vmin_Aging_Offset; // mV + uint16_t SOC_Vmin_Temperature_Hot; // 'C + uint16_t SOC_Vmin_Temperature_Cold; // 'C + + // APCC Settings + uint32_t ApccPlusResidencyLimit; //PCC residency % (0-100) + + // Determinism + uint16_t DeterminismVoltageOffset; //mV + uint16_t spare22; + + // reserved + uint32_t spare3[14]; + + // SECTION: BOARD PARAMETERS + // Telemetry Settings + uint16_t GfxMaxCurrent; // in Amps + int8_t GfxOffset; // in Amps + uint8_t Padding_TelemetryGfx; + + uint16_t SocMaxCurrent; // in Amps + int8_t SocOffset; // in Amps + uint8_t Padding_TelemetrySoc; + + uint16_t MemMaxCurrent; // in Amps + int8_t MemOffset; // in Amps + uint8_t Padding_TelemetryMem; + + uint16_t BoardMaxCurrent; // in Amps + int8_t BoardOffset; // in Amps + uint8_t Padding_TelemetryBoardInput; + + // Platform input telemetry voltage coefficient + uint32_t BoardVoltageCoeffA; // decode by /1000 + uint32_t BoardVoltageCoeffB; // decode by /1000 + + // GPIO Settings + uint8_t VR0HotGpio; // GPIO pin configured for VR0 HOT event + uint8_t VR0HotPolarity; // GPIO polarity for VR0 HOT event + uint8_t VR1HotGpio; // GPIO pin configured for VR1 HOT event + uint8_t VR1HotPolarity; // 
GPIO polarity for VR1 HOT event + + // UCLK Spread Spectrum + uint8_t UclkSpreadEnabled; // on or off + uint8_t UclkSpreadPercent; // Q4.4 + uint16_t UclkSpreadFreq; // kHz + + // FCLK Spread Spectrum + uint8_t FclkSpreadEnabled; // on or off + uint8_t FclkSpreadPercent; // Q4.4 + uint16_t FclkSpreadFreq; // kHz + + // I2C Controller Structure + I2cControllerConfig_t I2cControllers[NUM_I2C_CONTROLLERS]; + + // GPIO pins for I2C communications with 2nd controller for Input Telemetry Sequence + uint8_t GpioI2cScl; // Serial Clock + uint8_t GpioI2cSda; // Serial Data + uint16_t spare5; + + uint16_t XgmiMaxCurrent; // in Amps + int8_t XgmiOffset; // in Amps + uint8_t Padding_TelemetryXgmi; + + uint16_t EdcPowerLimit; + uint16_t spare6; + + //reserved + uint32_t reserved[14]; + +} PPTable_t; + +typedef struct { + // Time constant parameters for clock averages in ms + uint16_t GfxclkAverageLpfTau; + uint16_t SocclkAverageLpfTau; + uint16_t UclkAverageLpfTau; + uint16_t GfxActivityLpfTau; + uint16_t UclkActivityLpfTau; + + uint16_t SocketPowerLpfTau; + + uint32_t Spare[8]; + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} DriverSmuConfig_t; + +typedef struct { + uint16_t CurrClock[PPCLK_COUNT]; + uint16_t Padding1 ; + uint16_t AverageGfxclkFrequency; + uint16_t AverageSocclkFrequency; + uint16_t AverageUclkFrequency ; + uint16_t AverageGfxActivity ; + uint16_t AverageUclkActivity ; + uint8_t CurrSocVoltageOffset ; + uint8_t CurrGfxVoltageOffset ; + uint8_t CurrMemVidOffset ; + uint8_t Padding8 ; + uint16_t AverageSocketPower ; + uint16_t TemperatureEdge ; + uint16_t TemperatureHotspot ; + uint16_t TemperatureHBM ; // Max + uint16_t TemperatureVrGfx ; + uint16_t TemperatureVrSoc ; + uint16_t TemperatureVrMem ; + uint32_t ThrottlerStatus ; + + uint32_t PublicSerialNumLower32; + uint32_t PublicSerialNumUpper32; + uint16_t TemperatureAllHBM[4] ; + uint32_t GfxBusyAcc ; + uint32_t DramBusyAcc ; + uint32_t EnergyAcc64bitLow ; //15.259uJ resolution + uint32_t EnergyAcc64bitHigh ; + uint32_t TimeStampLow ; //10ns resolution + uint32_t TimeStampHigh ; + + // Padding - ignore + uint32_t MmHubPadding[8]; // SMU internal use +} SmuMetrics_t; + + +typedef struct { + uint16_t avgPsmCount[76]; + uint16_t minPsmCount[76]; + float avgPsmVoltage[76]; + float minPsmVoltage[76]; + + uint32_t MmHubPadding[8]; // SMU internal use +} AvfsDebugTable_t; + +typedef struct { + uint64_t mca_umc_status; + uint64_t mca_umc_addr; + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + + uint32_t eccPadding; +} EccInfo_t; + +typedef struct { + EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM]; +} EccInfoTable_t; + +// These defines are used with the following messages: +// SMC_MSG_TransferTableDram2Smu +// SMC_MSG_TransferTableSmu2Dram +#define TABLE_PPTABLE 0 +#define TABLE_AVFS_PSM_DEBUG 1 +#define TABLE_AVFS_FUSE_OVERRIDE 2 +#define TABLE_PMSTATUSLOG 3 +#define TABLE_SMU_METRICS 4 +#define TABLE_DRIVER_SMU_CONFIG 5 +#define TABLE_I2C_COMMANDS 6 +#define TABLE_ECCINFO 7 +#define TABLE_COUNT 8 + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h new file mode 100644 index 000000000000..25540cb28208 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h @@ -0,0 +1,222 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
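
/*
 * Usage sketch (not part of the patch): combining the split 64-bit
 * accumulators of the aldebaran SmuMetrics_t above. Per the header comments,
 * EnergyAcc64bit* counts in ~15.259 uJ units (which matches 1/65536 J) and
 * TimeStamp* counts in 10 ns units, so average power over a window can be
 * derived from two snapshots. The helper names and the unit arithmetic below
 * are illustrative assumptions based only on those comments.
 */
#include <stdint.h>

static uint64_t smu_acc64(uint32_t lo, uint32_t hi)
{
	return ((uint64_t)hi << 32) | lo;
}

/* Approximate average power in milliwatts between two metrics snapshots. */
static uint64_t aldebaran_avg_power_mw(const SmuMetrics_t *prev,
				       const SmuMetrics_t *curr)
{
	uint64_t d_energy = smu_acc64(curr->EnergyAcc64bitLow, curr->EnergyAcc64bitHigh) -
			    smu_acc64(prev->EnergyAcc64bitLow, prev->EnergyAcc64bitHigh);
	uint64_t d_time = smu_acc64(curr->TimeStampLow, curr->TimeStampHigh) -
			  smu_acc64(prev->TimeStampLow, prev->TimeStampHigh);

	if (!d_time)
		return 0;

	/* 15.259 uJ units over 10 ns units: mW ~= d_energy * 1525900 / d_time */
	return (d_energy * 1525900ULL) / d_time;
}
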
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMU13_DRIVER_IF_YELLOW_CARP_H__ +#define __SMU13_DRIVER_IF_YELLOW_CARP_H__ + +// *** IMPORTANT *** +// SMU TEAM: Always increment the interface version if +// any structure is changed in this file +#define SMU13_DRIVER_IF_VERSION 4 + +typedef struct { + int32_t value; + uint32_t numFractionalBits; +} FloatInIntFormat_t; + +typedef enum { + DSPCLK_DCFCLK = 0, + DSPCLK_DISPCLK, + DSPCLK_PIXCLK, + DSPCLK_PHYCLK, + DSPCLK_COUNT, +} DSPCLK_e; + +typedef struct { + uint16_t Freq; // in MHz + uint16_t Vid; // min voltage in SVI3 VID +} DisplayClockTable_t; + +typedef struct { + uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz) + uint16_t MinMclk; + uint16_t MaxMclk; + + uint8_t WmSetting; + uint8_t WmType; // Used for normal pstate change or memory retraining + uint8_t Padding[2]; +} WatermarkRowGeneric_t; + +#define NUM_WM_RANGES 4 +#define WM_PSTATE_CHG 0 +#define WM_RETRAINING 1 + +typedef enum { + WM_SOCCLK = 0, + WM_DCFCLK, + WM_COUNT, +} WM_CLOCK_e; + +typedef struct { + // Watermarks + WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES]; + + uint32_t MmHubPadding[7]; // SMU internal use +} Watermarks_t; + +typedef enum { + CUSTOM_DPM_SETTING_GFXCLK, + CUSTOM_DPM_SETTING_CCLK, + CUSTOM_DPM_SETTING_FCLK_CCX, + CUSTOM_DPM_SETTING_FCLK_GFX, + CUSTOM_DPM_SETTING_FCLK_STALLS, + CUSTOM_DPM_SETTING_LCLK, + CUSTOM_DPM_SETTING_COUNT, +} CUSTOM_DPM_SETTING_e; + +typedef struct { + uint8_t ActiveHystLimit; + uint8_t IdleHystLimit; + uint8_t FPS; + uint8_t MinActiveFreqType; + FloatInIntFormat_t MinActiveFreq; + FloatInIntFormat_t PD_Data_limit; + FloatInIntFormat_t PD_Data_time_constant; + FloatInIntFormat_t PD_Data_error_coeff; + FloatInIntFormat_t PD_Data_error_rate_coeff; +} DpmActivityMonitorCoeffExt_t; + +typedef struct { + DpmActivityMonitorCoeffExt_t DpmActivityMonitorCoeff[CUSTOM_DPM_SETTING_COUNT]; +} CustomDpmSettings_t; + +#define NUM_DCFCLK_DPM_LEVELS 8 +#define NUM_DISPCLK_DPM_LEVELS 8 +#define NUM_DPPCLK_DPM_LEVELS 8 +#define NUM_SOCCLK_DPM_LEVELS 8 +#define NUM_VCN_DPM_LEVELS 8 +#define NUM_SOC_VOLTAGE_LEVELS 8 +#define NUM_DF_PSTATE_LEVELS 4 + +typedef struct { + uint32_t FClk; + uint32_t MemClk; + uint32_t Voltage; + uint8_t WckRatio; + uint8_t Spare[3]; +} DfPstateTable_t; + +//Freq in MHz +//Voltage in milli volts with 2 fractional bits +typedef struct { + uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS]; + 
uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS]; + uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS]; + uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS]; + uint32_t VClocks[NUM_VCN_DPM_LEVELS]; + uint32_t DClocks[NUM_VCN_DPM_LEVELS]; + uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS]; + DfPstateTable_t DfPstateTable[NUM_DF_PSTATE_LEVELS]; + + uint8_t NumDcfClkLevelsEnabled; + uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk + uint8_t NumSocClkLevelsEnabled; + uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk + uint8_t NumDfPstatesEnabled; + uint8_t spare[3]; + + uint32_t MinGfxClk; + uint32_t MaxGfxClk; +} DpmClocks_t; + + +// Throttler Status Bitmask +#define THROTTLER_STATUS_BIT_SPL 0 +#define THROTTLER_STATUS_BIT_FPPT 1 +#define THROTTLER_STATUS_BIT_SPPT 2 +#define THROTTLER_STATUS_BIT_SPPT_APU 3 +#define THROTTLER_STATUS_BIT_THM_CORE 4 +#define THROTTLER_STATUS_BIT_THM_GFX 5 +#define THROTTLER_STATUS_BIT_THM_SOC 6 +#define THROTTLER_STATUS_BIT_TDC_VDD 7 +#define THROTTLER_STATUS_BIT_TDC_SOC 8 +#define THROTTLER_STATUS_BIT_PROCHOT_CPU 9 +#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10 +#define THROTTLER_STATUS_BIT_EDC_CPU 11 +#define THROTTLER_STATUS_BIT_EDC_GFX 12 + +typedef struct { + uint16_t GfxclkFrequency; //[MHz] + uint16_t SocclkFrequency; //[MHz] + uint16_t VclkFrequency; //[MHz] + uint16_t DclkFrequency; //[MHz] + uint16_t MemclkFrequency; //[MHz] + uint16_t spare; + + uint16_t GfxActivity; //[centi] + uint16_t UvdActivity; //[centi] + + uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC + uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC + uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC + + //3rd party tools in Windows need this info in the case of APUs + uint16_t CoreFrequency[8]; //[MHz] + uint16_t CorePower[8]; //[mW] + uint16_t CoreTemperature[8]; //[centi-Celsius] + uint16_t L3Frequency; //[MHz] + uint16_t L3Temperature; //[centi-Celsius] + + uint16_t GfxTemperature; //[centi-Celsius] + uint16_t SocTemperature; //[centi-Celsius] + uint16_t ThrottlerStatus; + + uint16_t CurrentSocketPower; //[mW] + uint16_t StapmOpnLimit; //[W] + uint16_t StapmCurrentLimit; //[W] + uint32_t ApuPower; //[mW] + uint32_t dGpuPower; //[mW] + + uint16_t VddTdcValue; //[mA] + uint16_t SocTdcValue; //[mA] + uint16_t VddEdcValue; //[mA] + uint16_t SocEdcValue; //[mA] + + uint16_t InfrastructureCpuMaxFreq; //[MHz] + uint16_t InfrastructureGfxMaxFreq; //[MHz] + + uint16_t SkinTemp; + uint16_t DeviceState; +} SmuMetrics_t; + + +// Workload bits +#define WORKLOAD_PPLIB_FULL_SCREEN_3D_BIT 0 +#define WORKLOAD_PPLIB_VIDEO_BIT 2 +#define WORKLOAD_PPLIB_VR_BIT 3 +#define WORKLOAD_PPLIB_COMPUTE_BIT 4 +#define WORKLOAD_PPLIB_CUSTOM_BIT 5 +#define WORKLOAD_PPLIB_COUNT 6 + +#define TABLE_BIOS_IF 0 // Called by BIOS +#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS +#define TABLE_CUSTOM_DPM 2 // Called by Driver +#define TABLE_SPARE1 3 +#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS +#define TABLE_MOMENTARY_PM 5 // Called by Tools +#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log +#define TABLE_SMU_METRICS 7 // Called by Driver +#define TABLE_INFRASTRUCTURE_LIMITS 8 +#define TABLE_COUNT 9 + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h new file mode 100644 index 000000000000..d2e10a724560 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h @@ -0,0 +1,143 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. 
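
/*
 * Usage sketch (not part of the patch): walking the yellow carp DpmClocks_t
 * above. Per the header comment, voltages are millivolts with two fractional
 * bits, so the whole-millivolt value is the raw field divided by four. The
 * helper name and the "pick the fastest enabled DF pstate" policy are
 * illustrative.
 */
#include <stdint.h>

static uint32_t yc_max_fclk_voltage_mv(const DpmClocks_t *clk_table)
{
	uint32_t best_fclk = 0, best_volt_raw = 0;
	uint8_t i;

	for (i = 0; i < clk_table->NumDfPstatesEnabled && i < NUM_DF_PSTATE_LEVELS; i++) {
		const DfPstateTable_t *p = &clk_table->DfPstateTable[i];

		if (p->FClk > best_fclk) {
			best_fclk = p->FClk;
			best_volt_raw = p->Voltage;
		}
	}

	return best_volt_raw >> 2;	/* mV(Q2) -> whole millivolts */
}
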
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU_V11_0_7_PPSMC_H +#define SMU_V11_0_7_PPSMC_H + +#define PPSMC_VERSION 0x1 + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +// BASIC +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetRunningSmuFeaturesLow 0xC +#define PPSMC_MSG_GetRunningSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrLow 0xF +#define PPSMC_MSG_SetToolsDramAddrHigh 0x10 +#define PPSMC_MSG_SetToolsDramAddrLow 0x11 +#define PPSMC_MSG_TransferTableSmu2Dram 0x12 +#define PPSMC_MSG_TransferTableDram2Smu 0x13 +#define PPSMC_MSG_UseDefaultPPTable 0x14 + +//BACO/BAMACO/BOMACO +#define PPSMC_MSG_EnterBaco 0x15 +#define PPSMC_MSG_ExitBaco 0x16 +#define PPSMC_MSG_ArmD3 0x17 +#define PPSMC_MSG_BacoAudioD3PME 0x18 + +//DPM +#define PPSMC_MSG_SetSoftMinByFreq 0x19 +#define PPSMC_MSG_SetSoftMaxByFreq 0x1A +#define PPSMC_MSG_SetHardMinByFreq 0x1B +#define PPSMC_MSG_SetHardMaxByFreq 0x1C +#define PPSMC_MSG_GetMinDpmFreq 0x1D +#define PPSMC_MSG_GetMaxDpmFreq 0x1E +#define PPSMC_MSG_GetDpmFreqByIndex 0x1F +#define PPSMC_MSG_OverridePcieParameters 0x20 + +//DramLog Set DramAddrHigh +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x21 + +#define PPSMC_MSG_SetWorkloadMask 0x22 +#define PPSMC_MSG_SetUclkFastSwitch 0x23 +#define PPSMC_MSG_GetVoltageByDpm 0x24 +#define PPSMC_MSG_SetVideoFps 0x25 +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x26 + +//DramLog Set DramAddrLow +#define PPSMC_MSG_DramLogSetDramAddrLow 0x27 + +//Power Gating +#define PPSMC_MSG_AllowGfxOff 0x28 +#define PPSMC_MSG_DisallowGfxOff 0x29 +#define PPSMC_MSG_PowerUpVcn 0x2A +#define PPSMC_MSG_PowerDownVcn 0x2B +#define PPSMC_MSG_PowerUpJpeg 0x2C +#define PPSMC_MSG_PowerDownJpeg 0x2D + +//Resets +#define PPSMC_MSG_PrepareMp1ForUnload 0x2E + +//DramLog Set DramLog SetDramSize +#define 
PPSMC_MSG_DramLogSetDramSize 0x2F + +#define PPSMC_MSG_Mode1Reset 0x30 + +//Set SystemVirtual DramAddrHigh +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x31 + +//ACDC Power Source +#define PPSMC_MSG_SetPptLimit 0x32 +#define PPSMC_MSG_GetPptLimit 0x33 +#define PPSMC_MSG_ReenableAcDcInterrupt 0x34 +#define PPSMC_MSG_NotifyPowerSource 0x35 + +//BTC +#define PPSMC_MSG_RunDcBtc 0x36 + +//Set SystemVirtual DramAddrLow +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x38 + +//Others +#define PPSMC_MSG_SetMemoryChannelEnable 0x39 +#define PPSMC_MSG_SetDramBitWidth 0x3A +#define PPSMC_MSG_SetGeminiMode 0x3B +#define PPSMC_MSG_SetGeminiApertureHigh 0x3C +#define PPSMC_MSG_SetGeminiApertureLow 0x3D + +#define PPSMC_MSG_SetTemperatureInputSelect 0x3E +#define PPSMC_MSG_SetFwDstatesMask 0x3F +#define PPSMC_MSG_SetThrottlerMask 0x40 + +#define PPSMC_MSG_SetExternalClientDfCstateAllow 0x41 +#define PPSMC_MSG_EnableOutOfBandMonTesting 0x42 +#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x43 + +#define PPSMC_MSG_SetNumBadHbmPagesRetired 0x44 +#define PPSMC_MSG_SetGpoFeaturePMask 0x45 +#define PPSMC_MSG_SetSMBUSInterrupt 0x46 + +#define PPSMC_MSG_DisallowGpo 0x56 + +#define PPSMC_MSG_Enable2ndUSB20Port 0x57 + +#define PPSMC_Message_Count 0x58 + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h new file mode 100644 index 000000000000..26181b679098 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h @@ -0,0 +1,141 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
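
/*
 * Usage sketch (not part of the patch): mapping the PPSMC_Result_* response
 * codes shared by these PPSMC headers onto errno-style return values, as a
 * caller of the message interface might do. The particular mapping is an
 * illustrative choice, not something specified by the headers.
 */
#include <errno.h>
#include <stdint.h>

static int ppsmc_result_to_errno(uint32_t result)
{
	switch (result) {
	case PPSMC_Result_OK:
		return 0;
	case PPSMC_Result_CmdRejectedBusy:
		return -EBUSY;
	case PPSMC_Result_CmdRejectedPrereq:
		return -EINVAL;
	case PPSMC_Result_UnknownCmd:
		return -EOPNOTSUPP;
	case PPSMC_Result_Failed:
	default:
		return -EIO;
	}
}
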
+ * + */ + +#ifndef SMU_V11_0_PPSMC_H +#define SMU_V11_0_PPSMC_H + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +// BASIC +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetAllowedFeaturesMaskLow 0x4 +#define PPSMC_MSG_SetAllowedFeaturesMaskHigh 0x5 +#define PPSMC_MSG_EnableAllSmuFeatures 0x6 +#define PPSMC_MSG_DisableAllSmuFeatures 0x7 +#define PPSMC_MSG_EnableSmuFeaturesLow 0x8 +#define PPSMC_MSG_EnableSmuFeaturesHigh 0x9 +#define PPSMC_MSG_DisableSmuFeaturesLow 0xA +#define PPSMC_MSG_DisableSmuFeaturesHigh 0xB +#define PPSMC_MSG_GetEnabledSmuFeaturesLow 0xC +#define PPSMC_MSG_GetEnabledSmuFeaturesHigh 0xD +#define PPSMC_MSG_SetDriverDramAddrHigh 0xE +#define PPSMC_MSG_SetDriverDramAddrLow 0xF +#define PPSMC_MSG_SetToolsDramAddrHigh 0x10 +#define PPSMC_MSG_SetToolsDramAddrLow 0x11 +#define PPSMC_MSG_TransferTableSmu2Dram 0x12 +#define PPSMC_MSG_TransferTableDram2Smu 0x13 +#define PPSMC_MSG_UseDefaultPPTable 0x14 +#define PPSMC_MSG_UseBackupPPTable 0x15 +#define PPSMC_MSG_SetSystemVirtualDramAddrHigh 0x16 +#define PPSMC_MSG_SetSystemVirtualDramAddrLow 0x17 + +//BACO/BAMACO/BOMACO +#define PPSMC_MSG_EnterBaco 0x18 +#define PPSMC_MSG_ExitBaco 0x19 +#define PPSMC_MSG_ArmD3 0x46 + +//DPM +#define PPSMC_MSG_SetSoftMinByFreq 0x1A +#define PPSMC_MSG_SetSoftMaxByFreq 0x1B +#define PPSMC_MSG_SetHardMinByFreq 0x1C +#define PPSMC_MSG_SetHardMaxByFreq 0x1D +#define PPSMC_MSG_GetMinDpmFreq 0x1E +#define PPSMC_MSG_GetMaxDpmFreq 0x1F +#define PPSMC_MSG_GetDpmFreqByIndex 0x20 +#define PPSMC_MSG_OverridePcieParameters 0x21 +#define PPSMC_MSG_SetMinDeepSleepDcefclk 0x22 + +#define PPSMC_MSG_SetWorkloadMask 0x24 +#define PPSMC_MSG_SetUclkFastSwitch 0x25 +#define PPSMC_MSG_GetVoltageByDpm 0x26 +#define PPSMC_MSG_SetVideoFps 0x27 +#define PPSMC_MSG_GetDcModeMaxDpmFreq 0x28 + +//Power Gating +#define PPSMC_MSG_AllowGfxOff 0x29 +#define PPSMC_MSG_DisallowGfxOff 0x2A +#define PPSMC_MSG_PowerUpVcn 0x2B +#define PPSMC_MSG_PowerDownVcn 0x2C +#define PPSMC_MSG_PowerUpJpeg 0x2D +#define PPSMC_MSG_PowerDownJpeg 0x2E +//reserve 0x29 to 0x30 for PG harvesting TBD + +//Resets +#define PPSMC_MSG_PrepareMp1ForUnload 0x32 +#define PPSMC_MSG_PrepareMp1ForReset 0x33 +#define PPSMC_MSG_PrepareMp1ForShutdown 0x34 + +//ACDC Power Source +#define PPSMC_MSG_SetPptLimit 0x35 +#define PPSMC_MSG_GetPptLimit 0x36 +#define PPSMC_MSG_ReenableAcDcInterrupt 0x37 +#define PPSMC_MSG_NotifyPowerSource 0x38 + +//BTC +#define PPSMC_MSG_RunBtc 0x3A + +//Debug +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x3B +#define PPSMC_MSG_DramLogSetDramAddrLow 0x3C +#define PPSMC_MSG_DramLogSetDramSize 0x3D +#define PPSMC_MSG_GetDebugData 0x3E + +//Others +#define PPSMC_MSG_ConfigureGfxDidt 0x3F +#define PPSMC_MSG_NumOfDisplays 0x40 + +#define PPSMC_MSG_SetMemoryChannelConfig 0x41 +#define PPSMC_MSG_SetGeminiMode 0x42 +#define PPSMC_MSG_SetGeminiApertureHigh 0x43 +#define PPSMC_MSG_SetGeminiApertureLow 0x44 + +#define PPSMC_MSG_GetVoltageByDpmOverdrive 0x45 +#define PPSMC_MSG_BacoAudioD3PME 0x48 + +#define PPSMC_MSG_DALDisableDummyPstateChange 0x49 +#define PPSMC_MSG_DALEnableDummyPstateChange 0x4A + +#define PPSMC_MSG_SetMGpuFanBoostLimitRpm 0x4C + +#define PPSMC_MSG_SetDriverDummyTableDramAddrHigh 0x4E +#define PPSMC_MSG_SetDriverDummyTableDramAddrLow 0x4F + +#define 
PPSMC_MSG_GetUMCFWWA 0x50 + +#define PPSMC_Message_Count 0x51 + +typedef uint32_t PPSMC_Result; +typedef uint32_t PPSMC_Msg; + +//for use with PPSMC_MSG_GetVoltageByDpmOverdrive +#define PPSMC_GET_AVFS_CURVE 0 +#define PPSMC_GET_OVERDRIVE_CURVE 1 +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h new file mode 100644 index 000000000000..22edd88b8117 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h @@ -0,0 +1,123 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __SMU_V11_5_0_PMFW_H__ +#define __SMU_V11_5_0_PMFW_H__ + +#include "smu11_driver_if_vangogh.h" + +#pragma pack(push, 1) + +#define ENABLE_DEBUG_FEATURES + +// Feature Control Defines +#define FEATURE_CCLK_DPM_BIT 0 +#define FEATURE_FAN_CONTROLLER_BIT 1 +#define FEATURE_DATA_CALCULATION_BIT 2 +#define FEATURE_PPT_BIT 3 +#define FEATURE_TDC_BIT 4 +#define FEATURE_THERMAL_BIT 5 +#define FEATURE_FIT_BIT 6 +#define FEATURE_EDC_BIT 7 +#define FEATURE_PLL_POWER_DOWN_BIT 8 +#define FEATURE_ULV_BIT 9 +#define FEATURE_VDDOFF_BIT 10 +#define FEATURE_VCN_DPM_BIT 11 +#define FEATURE_CSTATE_BOOST_BIT 12 +#define FEATURE_FCLK_DPM_BIT 13 +#define FEATURE_SOCCLK_DPM_BIT 14 +#define FEATURE_MP0CLK_DPM_BIT 15 +#define FEATURE_LCLK_DPM_BIT 16 +#define FEATURE_SHUBCLK_DPM_BIT 17 +#define FEATURE_DCFCLK_DPM_BIT 18 +#define FEATURE_GFX_DPM_BIT 19 +#define FEATURE_DS_GFXCLK_BIT 20 +#define FEATURE_DS_SOCCLK_BIT 21 +#define FEATURE_DS_LCLK_BIT 22 +#define FEATURE_DS_DCFCLK_BIT 23 +#define FEATURE_DS_SHUBCLK_BIT 24 +#define FEATURE_GFX_TEMP_VMIN_BIT 25 +#define FEATURE_S0I2_BIT 26 +#define FEATURE_WHISPER_MODE_BIT 27 +#define FEATURE_DS_FCLK_BIT 28 +#define FEATURE_DS_SMNCLK_BIT 29 +#define FEATURE_DS_MP1CLK_BIT 30 +#define FEATURE_DS_MP0CLK_BIT 31 +#define FEATURE_SMU_LOW_POWER_BIT 32 +#define FEATURE_FUSE_PG_BIT 33 +#define FEATURE_GFX_DEM_BIT 34 +#define FEATURE_PSI_BIT 35 +#define FEATURE_PROCHOT_BIT 36 +#define FEATURE_CPUOFF_BIT 37 +#define FEATURE_STAPM_BIT 38 +#define FEATURE_S0I3_BIT 39 +#define FEATURE_DF_CSTATES_BIT 40 +#define FEATURE_PERF_LIMIT_BIT 41 +#define FEATURE_CORE_DLDO_BIT 42 +#define FEATURE_RSMU_LOW_POWER_BIT 43 +#define FEATURE_SMN_LOW_POWER_BIT 44 +#define FEATURE_THM_LOW_POWER_BIT 45 +#define FEATURE_SMUIO_LOW_POWER_BIT 46 +#define FEATURE_MP1_LOW_POWER_BIT 47 +#define FEATURE_DS_VCN_BIT 48 +#define FEATURE_CPPC_BIT 
49 +#define FEATURE_OS_CSTATES_BIT 50 +#define FEATURE_ISP_DPM_BIT 51 +#define FEATURE_A55_DPM_BIT 52 +#define FEATURE_CVIP_DSP_DPM_BIT 53 +#define FEATURE_MSMU_LOW_POWER_BIT 54 +#define FEATURE_SOC_VOLTAGE_MON_BIT 55 +#define FEATURE_ATHUB_PG_BIT 56 +#define FEATURE_ECO_DEEPCSTATE_BIT 57 +#define FEATURE_CC6_BIT 58 +#define FEATURE_GFX_EDC_BIT 59 +#define NUM_FEATURES 60 + +typedef struct { + // MP1_EXT_SCRATCH0 + uint32_t DpmHandlerID : 8; + uint32_t ActivityMonitorID : 8; + uint32_t DpmTimerID : 8; + uint32_t DpmHubID : 4; + uint32_t DpmHubTask : 4; + // MP1_EXT_SCRATCH1 + uint32_t GfxStatus : 2; + uint32_t GfxoffStatus : 8; + uint32_t CpuOff : 1; + uint32_t VddOff : 1; + uint32_t InUlv : 1; + uint32_t InS0i2 : 2; + uint32_t InWhisperMode : 1; + uint32_t spare1 : 16; + // MP1_EXT_SCRATCH2 + uint32_t P2JobHandler : 32; + // MP1_EXT_SCRATCH3: used for postcodes + + // MP1_EXT_SCRATCH4:6 are used by Kernel + // MP1_EXT_SCRATCH7: used by HW +} FwStatus_t; + + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h new file mode 100644 index 000000000000..fe130a497d6c --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h @@ -0,0 +1,119 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU_11_5_0_PPSMC_H +#define SMU_11_5_0_PPSMC_H + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_EnableGfxOff 0x4 +#define PPSMC_MSG_DisableGfxOff 0x5 +#define PPSMC_MSG_PowerDownIspByTile 0x6 // ISP is power gated by default +#define PPSMC_MSG_PowerUpIspByTile 0x7 +#define PPSMC_MSG_PowerDownVcn 0x8 // VCN is power gated by default +#define PPSMC_MSG_PowerUpVcn 0x9 +#define PPSMC_MSG_RlcPowerNotify 0xA +#define PPSMC_MSG_SetHardMinVcn 0xB // For wireless display +#define PPSMC_MSG_SetSoftMinGfxclk 0xC //Sets SoftMin for GFXCLK. 
Arg is in MHz +#define PPSMC_MSG_ActiveProcessNotify 0xD +#define PPSMC_MSG_SetHardMinIspiclkByFreq 0xE +#define PPSMC_MSG_SetHardMinIspxclkByFreq 0xF +#define PPSMC_MSG_SetDriverDramAddrHigh 0x10 +#define PPSMC_MSG_SetDriverDramAddrLow 0x11 +#define PPSMC_MSG_TransferTableSmu2Dram 0x12 +#define PPSMC_MSG_TransferTableDram2Smu 0x13 +#define PPSMC_MSG_GfxDeviceDriverReset 0x14 //mode 2 reset during TDR +#define PPSMC_MSG_GetEnabledSmuFeatures 0x15 +#define PPSMC_MSG_spare1 0x16 +#define PPSMC_MSG_SetHardMinSocclkByFreq 0x17 +#define PPSMC_MSG_SetSoftMinFclk 0x18 //Used to be PPSMC_MSG_SetMinVideoFclkFreq +#define PPSMC_MSG_SetSoftMinVcn 0x19 +#define PPSMC_MSG_EnablePostCode 0x1A +#define PPSMC_MSG_GetGfxclkFrequency 0x1B +#define PPSMC_MSG_GetFclkFrequency 0x1C +#define PPSMC_MSG_AllowGfxOff 0x1D +#define PPSMC_MSG_DisallowGfxOff 0x1E +#define PPSMC_MSG_SetSoftMaxGfxClk 0x1F +#define PPSMC_MSG_SetHardMinGfxClk 0x20 +#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x21 +#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x22 +#define PPSMC_MSG_SetSoftMaxVcn 0x23 +#define PPSMC_MSG_spare2 0x24 +#define PPSMC_MSG_SetPowerLimitPercentage 0x25 +#define PPSMC_MSG_PowerDownJpeg 0x26 +#define PPSMC_MSG_PowerUpJpeg 0x27 +#define PPSMC_MSG_SetHardMinFclkByFreq 0x28 +#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x29 +#define PPSMC_MSG_PowerUpCvip 0x2A +#define PPSMC_MSG_PowerDownCvip 0x2B +#define PPSMC_MSG_GetPptLimit 0x2C +#define PPSMC_MSG_GetThermalLimit 0x2D +#define PPSMC_MSG_GetCurrentTemperature 0x2E +#define PPSMC_MSG_GetCurrentPower 0x2F +#define PPSMC_MSG_GetCurrentVoltage 0x30 +#define PPSMC_MSG_GetCurrentCurrent 0x31 +#define PPSMC_MSG_GetAverageCpuActivity 0x32 +#define PPSMC_MSG_GetAverageGfxActivity 0x33 +#define PPSMC_MSG_GetAveragePower 0x34 +#define PPSMC_MSG_GetAverageTemperature 0x35 +#define PPSMC_MSG_SetAveragePowerTimeConstant 0x36 +#define PPSMC_MSG_SetAverageActivityTimeConstant 0x37 +#define PPSMC_MSG_SetAverageTemperatureTimeConstant 0x38 +#define PPSMC_MSG_SetMitigationEndHysteresis 0x39 +#define PPSMC_MSG_GetCurrentFreq 0x3A +#define PPSMC_MSG_SetReducedPptLimit 0x3B +#define PPSMC_MSG_SetReducedThermalLimit 0x3C +#define PPSMC_MSG_DramLogSetDramAddr 0x3D +#define PPSMC_MSG_StartDramLogging 0x3E +#define PPSMC_MSG_StopDramLogging 0x3F +#define PPSMC_MSG_SetSoftMinCclk 0x40 +#define PPSMC_MSG_SetSoftMaxCclk 0x41 +#define PPSMC_MSG_SetDfPstateActiveLevel 0x42 +#define PPSMC_MSG_SetDfPstateSoftMinLevel 0x43 +#define PPSMC_MSG_SetCclkPolicy 0x44 +#define PPSMC_MSG_DramLogSetDramAddrHigh 0x45 +#define PPSMC_MSG_DramLogSetDramBufferSize 0x46 +#define PPSMC_MSG_RequestActiveWgp 0x47 +#define PPSMC_MSG_QueryActiveWgp 0x48 +#define PPSMC_MSG_SetFastPPTLimit 0x49 +#define PPSMC_MSG_SetSlowPPTLimit 0x4A +#define PPSMC_MSG_GetFastPPTLimit 0x4B +#define PPSMC_MSG_GetSlowPPTLimit 0x4C +#define PPSMC_Message_Count 0x4D + +//Argument for PPSMC_MSG_GfxDeviceDriverReset +enum { + MODE1_RESET = 1, + MODE2_RESET = 2 +}; + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h new file mode 100644 index 000000000000..bd4fcb6b9610 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h @@ -0,0 +1,152 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. 
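
/*
 * Usage sketch (not part of the patch): reading the packed vangogh FwStatus_t
 * from smu_v11_5_pmfw.h above. The struct mirrors the MP1_EXT_SCRATCH
 * registers, so a caller that has copied those registers into an FwStatus_t
 * can test the status bitfields directly. The helper name and the notion of
 * "low power" used here are illustrative.
 */
#include <stdbool.h>

static bool vangogh_fw_reports_low_power(const FwStatus_t *status)
{
	/* CpuOff, VddOff and InUlv are single-bit fields in MP1_EXT_SCRATCH1. */
	return status->CpuOff || status->VddOff || status->InUlv;
}
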
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef __SMU_V11_8_0_PMFW_H__ +#define __SMU_V11_8_0_PMFW_H__ + +#pragma pack(push, 1) + +#define ENABLE_DEBUG_FEATURES + +// Feature Control Defines +#define FEATURE_CCLK_CONTROLLER_BIT 0 +#define FEATURE_GFXCLK_EFFT_FREQ_BIT 1 +#define FEATURE_DATA_CALCULATION_BIT 2 +#define FEATURE_THERMAL_BIT 3 +#define FEATURE_PLL_POWER_DOWN_BIT 4 +#define FEATURE_FCLK_DPM_BIT 5 +#define FEATURE_GFX_DPM_BIT 6 +#define FEATURE_DS_GFXCLK_BIT 7 +#define FEATURE_DS_SOCCLK_BIT 8 +#define FEATURE_DS_LCLK_BIT 9 +#define FEATURE_CORE_CSTATES_BIT 10 +#define FEATURE_G6_SSC_BIT 11 //G6 memory UCLK and UCLK_DIV SS +#define FEATURE_RM_BIT 12 +#define FEATURE_SOC_DPM_BIT 13 +#define FEATURE_DS_SMNCLK_BIT 14 +#define FEATURE_DS_MP1CLK_BIT 15 +#define FEATURE_DS_MP0CLK_BIT 16 +#define FEATURE_MGCG_BIT 17 +#define FEATURE_DS_FUSE_SRAM_BIT 18 +#define FEATURE_GFX_CKS_BIT 19 +#define FEATURE_FP_THROTTLING_BIT 20 +#define FEATURE_PROCHOT_BIT 21 +#define FEATURE_CPUOFF_BIT 22 +#define FEATURE_UMC_THROTTLE_BIT 23 +#define FEATURE_DF_THROTTLE_BIT 24 +#define FEATURE_DS_MP3CLK_BIT 25 +#define FEATURE_DS_SHUBCLK_BIT 26 +#define FEATURE_TDC_BIT 27 //Legacy APM_BIT +#define FEATURE_UMC_CAL_SHARING_BIT 28 +#define FEATURE_DFLL_BTC_CALIBRATION_BIT 29 +#define FEATURE_EDC_BIT 30 +#define FEATURE_DLDO_BIT 31 +#define FEATURE_MEAS_DRAM_BLACKOUT_BIT 32 +#define FEATURE_CC1_BIT 33 +#define FEATURE_PPT_BIT 34 +#define FEATURE_STAPM_BIT 35 +#define FEATURE_CSTATE_BOOST_BIT 36 +#define FEATURE_SPARE_37_BIT 37 +#define FEATURE_SPARE_38_BIT 38 +#define FEATURE_SPARE_39_BIT 39 +#define FEATURE_SPARE_40_BIT 40 +#define FEATURE_SPARE_41_BIT 41 +#define FEATURE_SPARE_42_BIT 42 +#define FEATURE_SPARE_43_BIT 43 +#define FEATURE_SPARE_44_BIT 44 +#define FEATURE_SPARE_45_BIT 45 +#define FEATURE_SPARE_46_BIT 46 +#define FEATURE_SPARE_47_BIT 47 +#define FEATURE_SPARE_48_BIT 48 +#define FEATURE_SPARE_49_BIT 49 +#define FEATURE_SPARE_50_BIT 50 +#define FEATURE_SPARE_51_BIT 51 +#define FEATURE_SPARE_52_BIT 52 +#define FEATURE_SPARE_53_BIT 53 +#define FEATURE_SPARE_54_BIT 54 +#define FEATURE_SPARE_55_BIT 55 +#define FEATURE_SPARE_56_BIT 56 +#define FEATURE_SPARE_57_BIT 57 +#define FEATURE_SPARE_58_BIT 58 +#define FEATURE_SPARE_59_BIT 59 +#define FEATURE_SPARE_60_BIT 60 +#define FEATURE_SPARE_61_BIT 61 +#define FEATURE_SPARE_62_BIT 62 +#define FEATURE_SPARE_63_BIT 63 + +#define NUM_FEATURES 64 + +#define FEATURE_CCLK_CONTROLLER_MASK (1 << 
FEATURE_CCLK_CONTROLLER_BIT) +#define FEATURE_DATA_CALCULATION_MASK (1 << FEATURE_DATA_CALCULATION_BIT) +#define FEATURE_THERMAL_MASK (1 << FEATURE_THERMAL_BIT) +#define FEATURE_PLL_POWER_DOWN_MASK (1 << FEATURE_PLL_POWER_DOWN_BIT) +#define FEATURE_FCLK_DPM_MASK (1 << FEATURE_FCLK_DPM_BIT) +#define FEATURE_GFX_DPM_MASK (1 << FEATURE_GFX_DPM_BIT) +#define FEATURE_DS_GFXCLK_MASK (1 << FEATURE_DS_GFXCLK_BIT) +#define FEATURE_DS_SOCCLK_MASK (1 << FEATURE_DS_SOCCLK_BIT) +#define FEATURE_DS_LCLK_MASK (1 << FEATURE_DS_LCLK_BIT) +#define FEATURE_RM_MASK (1 << FEATURE_RM_BIT) +#define FEATURE_DS_SMNCLK_MASK (1 << FEATURE_DS_SMNCLK_BIT) +#define FEATURE_DS_MP1CLK_MASK (1 << FEATURE_DS_MP1CLK_BIT) +#define FEATURE_DS_MP0CLK_MASK (1 << FEATURE_DS_MP0CLK_BIT) +#define FEATURE_MGCG_MASK (1 << FEATURE_MGCG_BIT) +#define FEATURE_DS_FUSE_SRAM_MASK (1 << FEATURE_DS_FUSE_SRAM_BIT) +#define FEATURE_PROCHOT_MASK (1 << FEATURE_PROCHOT_BIT) +#define FEATURE_CPUOFF_MASK (1 << FEATURE_CPUOFF_BIT) +#define FEATURE_GFX_CKS_MASK (1 << FEATURE_GFX_CKS_BIT) +#define FEATURE_UMC_THROTTLE_MASK (1 << FEATURE_UMC_THROTTLE_BIT) +#define FEATURE_DF_THROTTLE_MASK (1 << FEATURE_DF_THROTTLE_BIT) +#define FEATURE_SOC_DPM_MASK (1 << FEATURE_SOC_DPM_BIT) + +typedef struct { + // MP1_EXT_SCRATCH0 + uint32_t SPARE1 : 4; + uint32_t SPARE2 : 4; + uint32_t SPARE3 : 4; + uint32_t CurrLevel_LCLK : 4; + uint32_t CurrLevel_MP0CLK : 4; + uint32_t CurrLevel_FCLK : 4; + uint32_t CurrLevel_SOCCLK : 4; + uint32_t CurrLevel_DCEFCLK : 4; + // MP1_EXT_SCRATCH1 + uint32_t SPARE4 : 4; + uint32_t SPARE5 : 4; + uint32_t SPARE6 : 4; + uint32_t TargLevel_LCLK : 4; + uint32_t TargLevel_MP0CLK : 4; + uint32_t TargLevel_FCLK : 4; + uint32_t TargLevel_SOCCLK : 4; + uint32_t TargLevel_DCEFCLK : 4; + // MP1_EXT_SCRATCH2 + uint32_t CurrLevel_SHUBCLK : 4; + uint32_t TargLevel_SHUBCLK : 4; + uint32_t Reserved : 24; + // MP1_EXT_SCRATCH3-4 + uint32_t Reserved2[2]; + // MP1_EXT_SCRATCH5 + uint32_t FeatureStatus[NUM_FEATURES / 32]; +} FwStatus_t; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h new file mode 100644 index 000000000000..909a86aa60f3 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h @@ -0,0 +1,77 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
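
/*
 * Usage sketch (not part of the patch): testing a feature bit in the
 * FeatureStatus[NUM_FEATURES / 32] array of the smu_v11_8 FwStatus_t above.
 * Bits 32 and up live in the second word, hence the divide and modulo by 32.
 * The helper name is illustrative.
 */
#include <stdbool.h>
#include <stdint.h>

static bool smu_v11_8_feature_enabled(const FwStatus_t *status,
				      unsigned int feature_bit)
{
	if (feature_bit >= NUM_FEATURES)
		return false;

	return (status->FeatureStatus[feature_bit / 32] >> (feature_bit % 32)) & 0x1;
}

/* e.g. smu_v11_8_feature_enabled(status, FEATURE_GFX_DPM_BIT) */
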
+ * + */ + +#ifndef SMU_11_8_0_PPSMC_H +#define SMU_11_8_0_PPSMC_H + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_SetDriverTableDramAddrHigh 0x4 +#define PPSMC_MSG_SetDriverTableDramAddrLow 0x5 +#define PPSMC_MSG_TransferTableSmu2Dram 0x6 +#define PPSMC_MSG_TransferTableDram2Smu 0x7 +#define PPSMC_MSG_Rsvd1 0xA +#define PPSMC_MSG_RequestCorePstate 0xB +#define PPSMC_MSG_QueryCorePstate 0xC +#define PPSMC_MSG_Rsvd2 0xD +#define PPSMC_MSG_RequestGfxclk 0xE +#define PPSMC_MSG_QueryGfxclk 0xF +#define PPSMC_MSG_QueryVddcrSocClock 0x11 +#define PPSMC_MSG_QueryDfPstate 0x13 +#define PPSMC_MSG_Rsvd3 0x14 +#define PPSMC_MSG_ConfigureS3PwrOffRegisterAddressHigh 0x16 +#define PPSMC_MSG_ConfigureS3PwrOffRegisterAddressLow 0x17 +#define PPSMC_MSG_RequestActiveWgp 0x18 +#define PPSMC_MSG_SetMinDeepSleepGfxclkFreq 0x19 +#define PPSMC_MSG_SetMaxDeepSleepDfllGfxDiv 0x1A +#define PPSMC_MSG_StartTelemetryReporting 0x1B +#define PPSMC_MSG_StopTelemetryReporting 0x1C +#define PPSMC_MSG_ClearTelemetryMax 0x1D +#define PPSMC_MSG_QueryActiveWgp 0x1E +#define PPSMC_MSG_SetCoreEnableMask 0x2C +#define PPSMC_MSG_InitiateGcRsmuSoftReset 0x2E +#define PPSMC_MSG_GfxCacWeightOperation 0x2F +#define PPSMC_MSG_L3CacWeightOperation 0x30 +#define PPSMC_MSG_PackCoreCacWeight 0x31 +#define PPSMC_MSG_SetDriverTableVMID 0x34 +#define PPSMC_MSG_SetSoftMinCclk 0x35 +#define PPSMC_MSG_SetSoftMaxCclk 0x36 +#define PPSMC_MSG_GetGfxFrequency 0x37 +#define PPSMC_MSG_GetGfxVid 0x38 +#define PPSMC_MSG_ForceGfxFreq 0x39 +#define PPSMC_MSG_UnForceGfxFreq 0x3A +#define PPSMC_MSG_ForceGfxVid 0x3B +#define PPSMC_MSG_UnforceGfxVid 0x3C +#define PPSMC_MSG_GetEnabledSmuFeatures 0x3D +#define PPSMC_Message_Count 0x3E + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h new file mode 100644 index 000000000000..9ac9f3bd3664 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h @@ -0,0 +1,106 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ + +#ifndef SMU_12_0_PPSMC_H +#define SMU_12_0_PPSMC_H + +// SMU Response Codes: +#define PPSMC_Result_OK 0x1 +#define PPSMC_Result_Failed 0xFF +#define PPSMC_Result_UnknownCmd 0xFE +#define PPSMC_Result_CmdRejectedPrereq 0xFD +#define PPSMC_Result_CmdRejectedBusy 0xFC + + +// Message Definitions: +#define PPSMC_MSG_TestMessage 0x1 +#define PPSMC_MSG_GetSmuVersion 0x2 +#define PPSMC_MSG_GetDriverIfVersion 0x3 +#define PPSMC_MSG_PowerUpGfx 0x6 +#define PPSMC_MSG_EnableGfxOff 0x7 +#define PPSMC_MSG_DisableGfxOff 0x8 +#define PPSMC_MSG_PowerDownIspByTile 0x9 // ISP is power gated by default +#define PPSMC_MSG_PowerUpIspByTile 0xA +#define PPSMC_MSG_PowerDownVcn 0xB // VCN is power gated by default +#define PPSMC_MSG_PowerUpVcn 0xC +#define PPSMC_MSG_PowerDownSdma 0xD // SDMA is power gated by default +#define PPSMC_MSG_PowerUpSdma 0xE +#define PPSMC_MSG_SetHardMinIspclkByFreq 0xF +#define PPSMC_MSG_SetHardMinVcn 0x10 // For wireless display +#define PPSMC_MSG_spare1 0x11 +#define PPSMC_MSG_spare2 0x12 +#define PPSMC_MSG_SetAllowFclkSwitch 0x13 +#define PPSMC_MSG_SetMinVideoGfxclkFreq 0x14 +#define PPSMC_MSG_ActiveProcessNotify 0x15 +#define PPSMC_MSG_SetCustomPolicy 0x16 +#define PPSMC_MSG_SetVideoFps 0x17 +#define PPSMC_MSG_SetDisplayCount 0x18 // Moved to VBIOS +#define PPSMC_MSG_QueryPowerLimit 0x19 //Driver to look up sustainable clocks for VQ +#define PPSMC_MSG_SetDriverDramAddrHigh 0x1A +#define PPSMC_MSG_SetDriverDramAddrLow 0x1B +#define PPSMC_MSG_TransferTableSmu2Dram 0x1C +#define PPSMC_MSG_TransferTableDram2Smu 0x1D +#define PPSMC_MSG_GfxDeviceDriverReset 0x1E +#define PPSMC_MSG_SetGfxclkOverdriveByFreqVid 0x1F +#define PPSMC_MSG_SetHardMinDcfclkByFreq 0x20 // Moved to VBIOS +#define PPSMC_MSG_SetHardMinSocclkByFreq 0x21 +#define PPSMC_MSG_ControlIgpuATS 0x22 +#define PPSMC_MSG_SetMinVideoFclkFreq 0x23 +#define PPSMC_MSG_SetMinDeepSleepDcfclk 0x24 // Moved to VBIOS +#define PPSMC_MSG_ForcePowerDownGfx 0x25 +#define PPSMC_MSG_SetPhyclkVoltageByFreq 0x26 // Moved to VBIOS +#define PPSMC_MSG_SetDppclkVoltageByFreq 0x27 // Moved to VBIOS and is SetDppclkFreq +#define PPSMC_MSG_SetSoftMinVcn 0x28 +#define PPSMC_MSG_EnablePostCode 0x29 +#define PPSMC_MSG_GetGfxclkFrequency 0x2A +#define PPSMC_MSG_GetFclkFrequency 0x2B +#define PPSMC_MSG_GetMinGfxclkFrequency 0x2C +#define PPSMC_MSG_GetMaxGfxclkFrequency 0x2D +#define PPSMC_MSG_SoftReset 0x2E // Not supported +#define PPSMC_MSG_SetGfxCGPG 0x2F +#define PPSMC_MSG_SetSoftMaxGfxClk 0x30 +#define PPSMC_MSG_SetHardMinGfxClk 0x31 +#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x32 +#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x33 +#define PPSMC_MSG_SetSoftMaxVcn 0x34 +#define PPSMC_MSG_PowerGateMmHub 0x35 +#define PPSMC_MSG_UpdatePmeRestore 0x36 // Moved to VBIOS +#define PPSMC_MSG_GpuChangeState 0x37 +#define PPSMC_MSG_SetPowerLimitPercentage 0x38 +#define PPSMC_MSG_ForceGfxContentSave 0x39 +#define PPSMC_MSG_EnableTmdp48MHzRefclkPwrDown 0x3A // Moved to VBIOS +#define PPSMC_MSG_PowerDownJpeg 0x3B +#define PPSMC_MSG_PowerUpJpeg 0x3C +#define PPSMC_MSG_PowerGateAtHub 0x3D +#define PPSMC_MSG_SetSoftMinJpeg 0x3E +#define PPSMC_MSG_SetHardMinFclkByFreq 0x3F +#define PPSMC_Message_Count 0x40 + + +//Argument for PPSMC_MSG_GpuChangeState +enum { + eGpuChangeState_D0Entry = 1, + eGpuChangeState_D3Entry, +}; + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h new file mode 100644 index 000000000000..c5e26d619bf0 --- /dev/null +++ 
b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SMU_V13_0_1_PMFW_H__
+#define __SMU_V13_0_1_PMFW_H__
+
+#include "smu13_driver_if_yellow_carp.h"
+
+#pragma pack(push, 1)
+
+#define ENABLE_DEBUG_FEATURES
+
+// Firmware features
+// Feature Control Defines
+#define FEATURE_CCLK_DPM_BIT 0
+#define FEATURE_FAN_CONTROLLER_BIT 1
+#define FEATURE_DATA_CALCULATION_BIT 2
+#define FEATURE_PPT_BIT 3
+#define FEATURE_TDC_BIT 4
+#define FEATURE_THERMAL_BIT 5
+#define FEATURE_FIT_BIT 6
+#define FEATURE_EDC_BIT 7
+#define FEATURE_PLL_POWER_DOWN_BIT 8
+#define FEATURE_ULV_BIT 9
+#define FEATURE_VDDOFF_BIT 10
+#define FEATURE_VCN_DPM_BIT 11
+#define FEATURE_CSTATE_BOOST_BIT 12
+#define FEATURE_FCLK_DPM_BIT 13
+#define FEATURE_SOCCLK_DPM_BIT 14
+#define FEATURE_MP0CLK_DPM_BIT 15
+#define FEATURE_LCLK_DPM_BIT 16
+#define FEATURE_SHUBCLK_DPM_BIT 17
+#define FEATURE_DCFCLK_DPM_BIT 18
+#define FEATURE_GFX_DPM_BIT 19
+#define FEATURE_DS_GFXCLK_BIT 20
+#define FEATURE_DS_SOCCLK_BIT 21
+#define FEATURE_DS_LCLK_BIT 22
+#define FEATURE_DS_DCFCLK_BIT 23
+#define FEATURE_DS_SHUBCLK_BIT 24
+#define FEATURE_GFX_TEMP_VMIN_BIT 25
+#define FEATURE_ZSTATES_BIT 26
+#define FEATURE_WHISPER_MODE_BIT 27
+#define FEATURE_DS_FCLK_BIT 28
+#define FEATURE_DS_SMNCLK_BIT 29
+#define FEATURE_DS_MP1CLK_BIT 30
+#define FEATURE_DS_MP0CLK_BIT 31
+#define FEATURE_CLK_LOW_POWER_BIT 32
+#define FEATURE_FUSE_PG_BIT 33
+#define FEATURE_GFX_DEM_BIT 34
+#define FEATURE_PSI_BIT 35
+#define FEATURE_PROCHOT_BIT 36
+#define FEATURE_CPUOFF_BIT 37
+#define FEATURE_STAPM_BIT 38
+#define FEATURE_S0I3_BIT 39
+#define FEATURE_DF_LIGHT_CSTATE 40 // shift the order of DFCstate and DF light Cstate
+#define FEATURE_PERF_LIMIT_BIT 41
+#define FEATURE_CORE_DLDO_BIT 42
+#define FEATURE_RSMU_LOW_POWER_BIT 43
+#define FEATURE_SMN_LOW_POWER_BIT 44
+#define FEATURE_THM_LOW_POWER_BIT 45
+#define FEATURE_SMUIO_LOW_POWER_BIT 46
+#define FEATURE_MP1_LOW_POWER_BIT 47
+#define FEATURE_DS_VCN_BIT 48
+#define FEATURE_CPPC_BIT 49
+#define FEATURE_CPPC_PREFERRED_CORES 50
+#define FEATURE_SMART_SHIFT_BIT 51
+#define FEATURE_DF_CSTATES_BIT 52
+#define FEATURE_MSMU_LOW_POWER_BIT 53
+#define FEATURE_SOC_VOLTAGE_MON_BIT 54
+#define FEATURE_ATHUB_PG_BIT 55
+#define FEATURE_VDDOFF_ECO_BIT 56
+#define FEATURE_ZSTATES_ECO_BIT 57
+#define FEATURE_CC6_BIT 58
+#define FEATURE_DS_UMCCLK_BIT 59
+#define
FEATURE_DS_HSPCLK_BIT 60 +#define NUM_FEATURES 61 + +typedef struct { + // MP1_EXT_SCRATCH0 + uint32_t DpmHandlerID : 8; + uint32_t ActivityMonitorID : 8; + uint32_t DpmTimerID : 8; + uint32_t DpmHubID : 4; + uint32_t DpmHubTask : 4; + // MP1_EXT_SCRATCH1 + uint32_t GfxoffStatus : 8; + uint32_t GfxStatus : 2; + uint32_t CpuOff : 2; + uint32_t VddOff : 1; + uint32_t InUlv : 1; + uint32_t InWhisperMode : 1; + uint32_t spare0 : 1; + uint32_t ZstateStatus : 4; + uint32_t spare1 : 4; + uint32_t DstateFun : 4; + uint32_t DstateDev : 4; + // MP1_EXT_SCRATCH2 + uint32_t P2JobHandler :24; + uint32_t RsmuPmiP2FinishedCnt : 8; + // MP1_EXT_SCRATCH3 + uint32_t PostCode :32; + // MP1_EXT_SCRATCH4 + uint32_t MsgPortBusy :15; + uint32_t RsmuPmiP1Pending : 1; + uint32_t DfCstateExitPending : 1; + uint32_t Pc6EntryPending : 1; + uint32_t Pc6ExitPending : 1; + uint32_t WarmResetPending : 1; + uint32_t Mp0ClkPending : 1; + uint32_t spare2 : 3; + uint32_t RsmuPmiP2PendingCnt : 8; + // MP1_EXT_SCRATCH5 + uint32_t IdleMask :32; + // MP1_EXT_SCRATCH6 = RTOS threads' status + // MP1_EXT_SCRATCH7 = RTOS Current Job +} FwStatus_t; + + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h new file mode 100644 index 000000000000..fc9198846e70 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h @@ -0,0 +1,97 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ + +#ifndef SMU_13_0_1_PPSMC_H +#define SMU_13_0_1_PPSMC_H + +/** @def PPS_PMFW_IF_VER +* PPS (PPLib) to PMFW IF version 1.0 +*/ +#define PPS_PMFW_IF_VER "1.0" ///< Major.Minor + +/** @defgroup ResponseCodes PMFW Response Codes: +* @{ +*/ +#define PPSMC_Result_OK 0x1 ///< Message Response OK +#define PPSMC_Result_Failed 0xFF ///< Message Response Failed +#define PPSMC_Result_UnknownCmd 0xFE ///< Message Response Unknown Command +#define PPSMC_Result_CmdRejectedPrereq 0xFD ///< Message Response Command Failed Prerequisite +#define PPSMC_Result_CmdRejectedBusy 0xFC ///< Message Response Command Rejected due to PMFW is busy. Sender should retry sending this message +/** @}*/ + +/** @defgroup definitions Message definitions +* @{ +*/ +#define PPSMC_MSG_TestMessage 0x01 ///< To check if PMFW is alive and responding. 
Requirement specified by PMFW team
+#define PPSMC_MSG_GetSmuVersion 0x02 ///< Get PMFW version
+#define PPSMC_MSG_GetDriverIfVersion 0x03 ///< Get PMFW_DRIVER_IF version
+#define PPSMC_MSG_EnableGfxOff 0x04 ///< Enable GFXOFF
+#define PPSMC_MSG_DisableGfxOff 0x05 ///< Disable GFXOFF
+#define PPSMC_MSG_PowerDownVcn 0x06 ///< Power down VCN
+#define PPSMC_MSG_PowerUpVcn 0x07 ///< Power up VCN; VCN is power gated by default
+#define PPSMC_MSG_SetHardMinVcn 0x08 ///< For wireless display
+#define PPSMC_MSG_SetSoftMinGfxclk 0x09 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
+#define PPSMC_MSG_ActiveProcessNotify 0x0A ///< Deprecated (Not to be used)
+#define PPSMC_MSG_ForcePowerDownGfx 0x0B ///< Force power down GFX, i.e. enter GFXOFF
+#define PPSMC_MSG_PrepareMp1ForUnload 0x0C ///< Prepare PMFW for GFX driver unload
+#define PPSMC_MSG_SetDriverDramAddrHigh 0x0D ///< Set high 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_SetDriverDramAddrLow 0x0E ///< Set low 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_TransferTableSmu2Dram 0x0F ///< Transfer driver interface table from PMFW SRAM to DRAM
+#define PPSMC_MSG_TransferTableDram2Smu 0x10 ///< Transfer driver interface table from DRAM to PMFW SRAM
+#define PPSMC_MSG_GfxDeviceDriverReset 0x11 ///< Request GFX mode 2 reset
+#define PPSMC_MSG_GetEnabledSmuFeatures 0x12 ///< Get enabled features in PMFW
+#define PPSMC_MSG_SetHardMinSocclkByFreq 0x13 ///< Set hard min for SOC CLK
+#define PPSMC_MSG_SetSoftMinFclk 0x14 ///< Set hard min for FCLK
+#define PPSMC_MSG_SetSoftMinVcn 0x15 ///< Set soft min for VCN clocks (VCLK and DCLK)
+#define PPSMC_MSG_SPARE 0x16 ///< Spare
+#define PPSMC_MSG_GetGfxclkFrequency 0x17 ///< Get GFX clock frequency
+#define PPSMC_MSG_GetFclkFrequency 0x18 ///< Get FCLK frequency
+#define PPSMC_MSG_AllowGfxOff 0x19 ///< Inform PMFW of allowing GFXOFF entry
+#define PPSMC_MSG_DisallowGfxOff 0x1A ///< Inform PMFW of disallowing GFXOFF entry
+#define PPSMC_MSG_SetSoftMaxGfxClk 0x1B ///< Set soft max for GFX CLK
+#define PPSMC_MSG_SetHardMinGfxClk 0x1C ///< Set hard min for GFX CLK
+#define PPSMC_MSG_SetSoftMaxSocclkByFreq 0x1D ///< Set soft max for SOC CLK
+#define PPSMC_MSG_SetSoftMaxFclkByFreq 0x1E ///< Set soft max for FCLK
+#define PPSMC_MSG_SetSoftMaxVcn 0x1F ///< Set soft max for VCN clocks (VCLK and DCLK)
+#define PPSMC_MSG_SetPowerLimitPercentage 0x20 ///< Set power limit percentage
+#define PPSMC_MSG_PowerDownJpeg 0x21 ///< Power down Jpeg
+#define PPSMC_MSG_PowerUpJpeg 0x22 ///< Power up Jpeg; VCN is power gated by default
+#define PPSMC_MSG_SetHardMinFclkByFreq 0x23 ///< Set hard min for FCLK
+#define PPSMC_MSG_SetSoftMinSocclkByFreq 0x24 ///< Set soft min for SOC CLK
+#define PPSMC_MSG_AllowZstates 0x25 ///< Inform PMFW of allowing Zstate entry, i.e. no Miracast activity
+#define PPSMC_MSG_DisallowZstates 0x26 ///< Inform PMFW of disallowing Zstate entry, i.e. there is Miracast activity
+#define PPSMC_MSG_RequestActiveWgp 0x27 ///< Request GFX active WGP number
+#define PPSMC_MSG_QueryActiveWgp 0x28 ///< Query the number of active WGPs
+#define PPSMC_Message_Count 0x29 ///< Total number of PPS messages
+/** @}*/
+
+/** @enum Mode_Reset_e
+* Mode reset type, argument for PPSMC_MSG_GfxDeviceDriverReset
+*/
+typedef enum {
+ MODE1_RESET = 1, ///< Mode reset type 1
+ MODE2_RESET = 2 ///< Mode reset type 2
+} Mode_Reset_e;
+/** @}*/
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
new file mode 100644
index 000000000000..beab6d7b28b7
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ * + */ + + +#ifndef SMU_11_0_CDR_TABLE +#define SMU_11_0_CDR_TABLE + + +#pragma pack(push, 1) + +/// CDR table : PRBS sequence for DQ toggles + +/*static unsigned int NoDbiPrbs7[] = +{ +//256 bytes, 256 byte aligned +0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, +0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, +0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, +0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, +}; + + +static unsigned int DbiPrbs7[] = +{ +// 256 bytes, 256 byte aligned +0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, +0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, +0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, +0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, +}; +*/ + + +//4096 bytes, 256 byte aligned +static unsigned int NoDbiPrbs7[] = +{ + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 
0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 
0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 
0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, + 0x0f0f0f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0x0f0ff0f0, 0x0f0f0f0f, 0x0f0f0f0f, 0xf0f0f0f0, 0xf0f00f0f, + 0x0f0f0f0f, 0xf0f00f0f, 0x0f0ff0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0xf0f00f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0x0f0ff0f0, 0xf0f00f0f, + 0xf0f00f0f, 0x0f0ff0f0, 0x0f0ff0f0, 0xf0f0f0f0, 0x0f0ff0f0, 
0xf0f0f0f0, 0x0f0f0f0f, 0xf0f0f0f0, 0x0f0f0f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f00f0f, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0f0f0, 0xf0f0ffff, +}; + +// 4096 bytes, 256 byte aligned +static unsigned int DbiPrbs7[] = +{ + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 
0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 
0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 
0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, + 0xffffffff, 0xffffffff, 0xffffffff, 0x0000ffff, 0xffffffff, 0xffffffff, 0x00000000, 0xffffffff, 0xffffffff, 0x0000ffff, 0x0000ffff, 0xffffffff, 0x00000000, 0x00000000, 0xffffffff, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0x00000000, 0xffffffff, 0x00000000, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0xffff0000, 0x00000000, 0x00000000, 0xffff0000, 0xffffffff, 0xffffffff, 0x00000000, 0x0000ffff, + 0xffffffff, 0x0000ffff, 0xffff0000, 0xffffffff, 0x00000000, 0xffff0000, 0x0000ffff, 0x0000ffff, 0x00000000, 0xffff0000, 0x00000000, 0x0000ffff, 0x00000000, 0xffffffff, 0xffff0000, 0x0000ffff, + 0x0000ffff, 0xffff0000, 0xffff0000, 0x00000000, 0xffff0000, 0x00000000, 0xffffffff, 0x00000000, 0xffffffff, 0x0000ffff, 0x0000ffff, 0x0000ffff, 0x00000000, 0x00000000, 0x00000000, 0x0000ffff, +}; + +#pragma pack(pop) + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h new file mode 100644 index 000000000000..ff8a0bcbd290 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h @@ -0,0 +1,373 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#ifndef __SMU_TYPES_H__ +#define __SMU_TYPES_H__ + +#define SMU_MESSAGE_TYPES \ + __SMU_DUMMY_MAP(TestMessage), \ + __SMU_DUMMY_MAP(GetSmuVersion), \ + __SMU_DUMMY_MAP(GetDriverIfVersion), \ + __SMU_DUMMY_MAP(SetAllowedFeaturesMaskLow), \ + __SMU_DUMMY_MAP(SetAllowedFeaturesMaskHigh), \ + __SMU_DUMMY_MAP(EnableAllSmuFeatures), \ + __SMU_DUMMY_MAP(DisableAllSmuFeatures), \ + __SMU_DUMMY_MAP(EnableSmuFeaturesLow), \ + __SMU_DUMMY_MAP(EnableSmuFeaturesHigh), \ + __SMU_DUMMY_MAP(DisableSmuFeaturesLow), \ + __SMU_DUMMY_MAP(DisableSmuFeaturesHigh), \ + __SMU_DUMMY_MAP(GetEnabledSmuFeatures), \ + __SMU_DUMMY_MAP(GetEnabledSmuFeaturesLow), \ + __SMU_DUMMY_MAP(GetEnabledSmuFeaturesHigh), \ + __SMU_DUMMY_MAP(SetWorkloadMask), \ + __SMU_DUMMY_MAP(SetPptLimit), \ + __SMU_DUMMY_MAP(SetDriverDramAddrHigh), \ + __SMU_DUMMY_MAP(SetDriverDramAddrLow), \ + __SMU_DUMMY_MAP(SetToolsDramAddrHigh), \ + __SMU_DUMMY_MAP(SetToolsDramAddrLow), \ + __SMU_DUMMY_MAP(TransferTableSmu2Dram), \ + __SMU_DUMMY_MAP(TransferTableDram2Smu), \ + __SMU_DUMMY_MAP(UseDefaultPPTable), \ + __SMU_DUMMY_MAP(UseBackupPPTable), \ + __SMU_DUMMY_MAP(RunBtc), \ + __SMU_DUMMY_MAP(RequestI2CBus), \ + __SMU_DUMMY_MAP(ReleaseI2CBus), \ + __SMU_DUMMY_MAP(SetFloorSocVoltage), \ + __SMU_DUMMY_MAP(SoftReset), \ + __SMU_DUMMY_MAP(StartBacoMonitor), \ + __SMU_DUMMY_MAP(CancelBacoMonitor), \ + __SMU_DUMMY_MAP(EnterBaco), \ + __SMU_DUMMY_MAP(SetSoftMinByFreq), \ + __SMU_DUMMY_MAP(SetSoftMaxByFreq), \ + __SMU_DUMMY_MAP(SetHardMinByFreq), \ + __SMU_DUMMY_MAP(SetHardMaxByFreq), \ + __SMU_DUMMY_MAP(GetMinDpmFreq), \ + __SMU_DUMMY_MAP(GetMaxDpmFreq), \ + __SMU_DUMMY_MAP(GetDpmFreqByIndex), \ + __SMU_DUMMY_MAP(GetDpmClockFreq), \ + __SMU_DUMMY_MAP(GetSsVoltageByDpm), \ + __SMU_DUMMY_MAP(SetMemoryChannelConfig), \ + __SMU_DUMMY_MAP(SetGeminiMode), \ + __SMU_DUMMY_MAP(SetGeminiApertureHigh), \ + __SMU_DUMMY_MAP(SetGeminiApertureLow), \ + __SMU_DUMMY_MAP(SetMinLinkDpmByIndex), \ + __SMU_DUMMY_MAP(OverridePcieParameters), \ + __SMU_DUMMY_MAP(OverDriveSetPercentage), \ + __SMU_DUMMY_MAP(SetMinDeepSleepDcefclk), \ + __SMU_DUMMY_MAP(ReenableAcDcInterrupt), \ + __SMU_DUMMY_MAP(NotifyPowerSource), \ + __SMU_DUMMY_MAP(SetUclkFastSwitch), \ + __SMU_DUMMY_MAP(SetUclkDownHyst), \ + __SMU_DUMMY_MAP(GfxDeviceDriverReset), \ + __SMU_DUMMY_MAP(GetCurrentRpm), \ + __SMU_DUMMY_MAP(SetVideoFps), \ + __SMU_DUMMY_MAP(SetTjMax), \ + __SMU_DUMMY_MAP(SetFanTemperatureTarget), \ + __SMU_DUMMY_MAP(PrepareMp1ForUnload), \ + __SMU_DUMMY_MAP(DramLogSetDramAddrHigh), \ + __SMU_DUMMY_MAP(DramLogSetDramAddrLow), \ + __SMU_DUMMY_MAP(DramLogSetDramSize), \ + __SMU_DUMMY_MAP(SetFanMaxRpm), \ + __SMU_DUMMY_MAP(SetFanMinPwm), \ + __SMU_DUMMY_MAP(ConfigureGfxDidt), \ + __SMU_DUMMY_MAP(NumOfDisplays), \ + __SMU_DUMMY_MAP(RemoveMargins), \ + __SMU_DUMMY_MAP(ReadSerialNumTop32), \ + __SMU_DUMMY_MAP(ReadSerialNumBottom32), \ + __SMU_DUMMY_MAP(SetSystemVirtualDramAddrHigh), \ + __SMU_DUMMY_MAP(SetSystemVirtualDramAddrLow), \ + __SMU_DUMMY_MAP(WaflTest), \ + __SMU_DUMMY_MAP(SetFclkGfxClkRatio), \ + __SMU_DUMMY_MAP(AllowGfxOff), \ + __SMU_DUMMY_MAP(DisallowGfxOff), \ + __SMU_DUMMY_MAP(GetPptLimit), \ + __SMU_DUMMY_MAP(GetDcModeMaxDpmFreq), \ + __SMU_DUMMY_MAP(GetDebugData), \ + __SMU_DUMMY_MAP(SetXgmiMode), \ + __SMU_DUMMY_MAP(RunAfllBtc), \ + __SMU_DUMMY_MAP(ExitBaco), \ + __SMU_DUMMY_MAP(PrepareMp1ForReset), \ + __SMU_DUMMY_MAP(PrepareMp1ForShutdown), \ + __SMU_DUMMY_MAP(SetMGpuFanBoostLimitRpm), \ + __SMU_DUMMY_MAP(GetAVFSVoltageByDpm), \ + __SMU_DUMMY_MAP(PowerUpVcn), \ + 
__SMU_DUMMY_MAP(PowerDownVcn), \ + __SMU_DUMMY_MAP(PowerUpJpeg), \ + __SMU_DUMMY_MAP(PowerDownJpeg), \ + __SMU_DUMMY_MAP(BacoAudioD3PME), \ + __SMU_DUMMY_MAP(ArmD3), \ + __SMU_DUMMY_MAP(RunDcBtc), \ + __SMU_DUMMY_MAP(RunGfxDcBtc), \ + __SMU_DUMMY_MAP(RunSocDcBtc), \ + __SMU_DUMMY_MAP(SetMemoryChannelEnable), \ + __SMU_DUMMY_MAP(SetDfSwitchType), \ + __SMU_DUMMY_MAP(GetVoltageByDpm), \ + __SMU_DUMMY_MAP(GetVoltageByDpmOverdrive), \ + __SMU_DUMMY_MAP(PowerUpVcn0), \ + __SMU_DUMMY_MAP(PowerDownVcn0), \ + __SMU_DUMMY_MAP(PowerUpVcn1), \ + __SMU_DUMMY_MAP(PowerDownVcn1), \ + __SMU_DUMMY_MAP(PowerUpGfx), \ + __SMU_DUMMY_MAP(PowerDownIspByTile), \ + __SMU_DUMMY_MAP(PowerUpIspByTile), \ + __SMU_DUMMY_MAP(PowerDownSdma), \ + __SMU_DUMMY_MAP(PowerUpSdma), \ + __SMU_DUMMY_MAP(SetHardMinIspclkByFreq), \ + __SMU_DUMMY_MAP(SetHardMinVcn), \ + __SMU_DUMMY_MAP(SetAllowFclkSwitch), \ + __SMU_DUMMY_MAP(SetMinVideoGfxclkFreq), \ + __SMU_DUMMY_MAP(ActiveProcessNotify), \ + __SMU_DUMMY_MAP(SetCustomPolicy), \ + __SMU_DUMMY_MAP(QueryPowerLimit), \ + __SMU_DUMMY_MAP(SetGfxclkOverdriveByFreqVid), \ + __SMU_DUMMY_MAP(SetHardMinDcfclkByFreq), \ + __SMU_DUMMY_MAP(SetHardMinSocclkByFreq), \ + __SMU_DUMMY_MAP(ControlIgpuATS), \ + __SMU_DUMMY_MAP(SetMinVideoFclkFreq), \ + __SMU_DUMMY_MAP(SetMinDeepSleepDcfclk), \ + __SMU_DUMMY_MAP(ForcePowerDownGfx), \ + __SMU_DUMMY_MAP(SetPhyclkVoltageByFreq), \ + __SMU_DUMMY_MAP(SetDppclkVoltageByFreq), \ + __SMU_DUMMY_MAP(SetSoftMinVcn), \ + __SMU_DUMMY_MAP(EnablePostCode), \ + __SMU_DUMMY_MAP(GetGfxclkFrequency), \ + __SMU_DUMMY_MAP(GetFclkFrequency), \ + __SMU_DUMMY_MAP(GetMinGfxclkFrequency), \ + __SMU_DUMMY_MAP(GetMaxGfxclkFrequency), \ + __SMU_DUMMY_MAP(SetGfxCGPG), \ + __SMU_DUMMY_MAP(SetSoftMaxGfxClk), \ + __SMU_DUMMY_MAP(SetHardMinGfxClk), \ + __SMU_DUMMY_MAP(SetSoftMaxSocclkByFreq), \ + __SMU_DUMMY_MAP(SetSoftMaxFclkByFreq), \ + __SMU_DUMMY_MAP(SetSoftMaxVcn), \ + __SMU_DUMMY_MAP(PowerGateMmHub), \ + __SMU_DUMMY_MAP(UpdatePmeRestore), \ + __SMU_DUMMY_MAP(GpuChangeState), \ + __SMU_DUMMY_MAP(SetPowerLimitPercentage), \ + __SMU_DUMMY_MAP(ForceGfxContentSave), \ + __SMU_DUMMY_MAP(EnableTmdp48MHzRefclkPwrDown),\ + __SMU_DUMMY_MAP(PowerGateAtHub), \ + __SMU_DUMMY_MAP(SetSoftMinJpeg), \ + __SMU_DUMMY_MAP(SetHardMinFclkByFreq), \ + __SMU_DUMMY_MAP(DFCstateControl), \ + __SMU_DUMMY_MAP(GmiPwrDnControl), \ + __SMU_DUMMY_MAP(spare), \ + __SMU_DUMMY_MAP(SetNumBadHbmPagesRetired), \ + __SMU_DUMMY_MAP(GetGmiPwrDnHyst), \ + __SMU_DUMMY_MAP(SetGmiPwrDnHyst), \ + __SMU_DUMMY_MAP(EnterGfxoff), \ + __SMU_DUMMY_MAP(ExitGfxoff), \ + __SMU_DUMMY_MAP(SetExecuteDMATest), \ + __SMU_DUMMY_MAP(DAL_DISABLE_DUMMY_PSTATE_CHANGE), \ + __SMU_DUMMY_MAP(DAL_ENABLE_DUMMY_PSTATE_CHANGE), \ + __SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_HIGH), \ + __SMU_DUMMY_MAP(SET_DRIVER_DUMMY_TABLE_DRAM_ADDR_LOW), \ + __SMU_DUMMY_MAP(GET_UMC_FW_WA), \ + __SMU_DUMMY_MAP(Mode1Reset), \ + __SMU_DUMMY_MAP(RlcPowerNotify), \ + __SMU_DUMMY_MAP(SetHardMinIspiclkByFreq), \ + __SMU_DUMMY_MAP(SetHardMinIspxclkByFreq), \ + __SMU_DUMMY_MAP(SetSoftMinSocclkByFreq), \ + __SMU_DUMMY_MAP(PowerUpCvip), \ + __SMU_DUMMY_MAP(PowerDownCvip), \ + __SMU_DUMMY_MAP(EnableGfxOff), \ + __SMU_DUMMY_MAP(SetSoftMinGfxclk), \ + __SMU_DUMMY_MAP(SetSoftMinFclk), \ + __SMU_DUMMY_MAP(GetThermalLimit), \ + __SMU_DUMMY_MAP(GetCurrentTemperature), \ + __SMU_DUMMY_MAP(GetCurrentPower), \ + __SMU_DUMMY_MAP(GetCurrentVoltage), \ + __SMU_DUMMY_MAP(GetCurrentCurrent), \ + __SMU_DUMMY_MAP(GetAverageCpuActivity), \ + __SMU_DUMMY_MAP(GetAverageGfxActivity), \ + 
__SMU_DUMMY_MAP(GetAveragePower), \ + __SMU_DUMMY_MAP(GetAverageTemperature), \ + __SMU_DUMMY_MAP(SetAveragePowerTimeConstant), \ + __SMU_DUMMY_MAP(SetAverageActivityTimeConstant), \ + __SMU_DUMMY_MAP(SetAverageTemperatureTimeConstant), \ + __SMU_DUMMY_MAP(SetMitigationEndHysteresis), \ + __SMU_DUMMY_MAP(GetCurrentFreq), \ + __SMU_DUMMY_MAP(SetReducedPptLimit), \ + __SMU_DUMMY_MAP(SetReducedThermalLimit), \ + __SMU_DUMMY_MAP(DramLogSetDramAddr), \ + __SMU_DUMMY_MAP(StartDramLogging), \ + __SMU_DUMMY_MAP(StopDramLogging), \ + __SMU_DUMMY_MAP(SetSoftMinCclk), \ + __SMU_DUMMY_MAP(SetSoftMaxCclk), \ + __SMU_DUMMY_MAP(SetGpoFeaturePMask), \ + __SMU_DUMMY_MAP(DisallowGpo), \ + __SMU_DUMMY_MAP(Enable2ndUSB20Port), \ + __SMU_DUMMY_MAP(RequestActiveWgp), \ + __SMU_DUMMY_MAP(SetFastPPTLimit), \ + __SMU_DUMMY_MAP(SetSlowPPTLimit), \ + __SMU_DUMMY_MAP(GetFastPPTLimit), \ + __SMU_DUMMY_MAP(GetSlowPPTLimit), \ + __SMU_DUMMY_MAP(EnableDeterminism), \ + __SMU_DUMMY_MAP(DisableDeterminism), \ + __SMU_DUMMY_MAP(SetUclkDpmMode), \ + __SMU_DUMMY_MAP(LightSBR), \ + __SMU_DUMMY_MAP(GfxDriverResetRecovery), \ + __SMU_DUMMY_MAP(BoardPowerCalibration), \ + __SMU_DUMMY_MAP(RequestGfxclk), \ + __SMU_DUMMY_MAP(ForceGfxVid), \ + __SMU_DUMMY_MAP(UnforceGfxVid), \ + __SMU_DUMMY_MAP(HeavySBR), + +#undef __SMU_DUMMY_MAP +#define __SMU_DUMMY_MAP(type) SMU_MSG_##type +enum smu_message_type { + SMU_MESSAGE_TYPES + SMU_MSG_MAX_COUNT, +}; + +enum smu_clk_type { + SMU_GFXCLK, + SMU_VCLK, + SMU_DCLK, + SMU_VCLK1, + SMU_DCLK1, + SMU_ECLK, + SMU_SOCCLK, + SMU_UCLK, + SMU_DCEFCLK, + SMU_DISPCLK, + SMU_PIXCLK, + SMU_PHYCLK, + SMU_FCLK, + SMU_SCLK, + SMU_MCLK, + SMU_PCIE, + SMU_LCLK, + SMU_OD_CCLK, + SMU_OD_SCLK, + SMU_OD_MCLK, + SMU_OD_VDDC_CURVE, + SMU_OD_RANGE, + SMU_OD_VDDGFX_OFFSET, + SMU_CLK_COUNT, +}; + +#define SMU_FEATURE_MASKS \ + __SMU_DUMMY_MAP(DPM_PREFETCHER), \ + __SMU_DUMMY_MAP(DPM_GFXCLK), \ + __SMU_DUMMY_MAP(DPM_UCLK), \ + __SMU_DUMMY_MAP(DPM_SOCCLK), \ + __SMU_DUMMY_MAP(DPM_UVD), \ + __SMU_DUMMY_MAP(DPM_VCE), \ + __SMU_DUMMY_MAP(DPM_LCLK), \ + __SMU_DUMMY_MAP(ULV), \ + __SMU_DUMMY_MAP(DPM_MP0CLK), \ + __SMU_DUMMY_MAP(DPM_LINK), \ + __SMU_DUMMY_MAP(DPM_DCEFCLK), \ + __SMU_DUMMY_MAP(DPM_XGMI), \ + __SMU_DUMMY_MAP(DS_GFXCLK), \ + __SMU_DUMMY_MAP(DS_SOCCLK), \ + __SMU_DUMMY_MAP(DS_LCLK), \ + __SMU_DUMMY_MAP(PPT), \ + __SMU_DUMMY_MAP(TDC), \ + __SMU_DUMMY_MAP(THERMAL), \ + __SMU_DUMMY_MAP(GFX_PER_CU_CG), \ + __SMU_DUMMY_MAP(DATA_CALCULATIONS), \ + __SMU_DUMMY_MAP(RM), \ + __SMU_DUMMY_MAP(DS_DCEFCLK), \ + __SMU_DUMMY_MAP(ACDC), \ + __SMU_DUMMY_MAP(VR0HOT), \ + __SMU_DUMMY_MAP(VR1HOT), \ + __SMU_DUMMY_MAP(FW_CTF), \ + __SMU_DUMMY_MAP(LED_DISPLAY), \ + __SMU_DUMMY_MAP(FAN_CONTROL), \ + __SMU_DUMMY_MAP(GFX_EDC), \ + __SMU_DUMMY_MAP(GFXOFF), \ + __SMU_DUMMY_MAP(CG), \ + __SMU_DUMMY_MAP(DPM_FCLK), \ + __SMU_DUMMY_MAP(DS_FCLK), \ + __SMU_DUMMY_MAP(DS_MP1CLK), \ + __SMU_DUMMY_MAP(DS_MP0CLK), \ + __SMU_DUMMY_MAP(XGMI_PER_LINK_PWR_DWN), \ + __SMU_DUMMY_MAP(DPM_GFX_PACE), \ + __SMU_DUMMY_MAP(MEM_VDDCI_SCALING), \ + __SMU_DUMMY_MAP(MEM_MVDD_SCALING), \ + __SMU_DUMMY_MAP(DS_UCLK), \ + __SMU_DUMMY_MAP(GFX_ULV), \ + __SMU_DUMMY_MAP(FW_DSTATE), \ + __SMU_DUMMY_MAP(BACO), \ + __SMU_DUMMY_MAP(VCN_PG), \ + __SMU_DUMMY_MAP(MM_DPM_PG), \ + __SMU_DUMMY_MAP(JPEG_PG), \ + __SMU_DUMMY_MAP(USB_PG), \ + __SMU_DUMMY_MAP(RSMU_SMN_CG), \ + __SMU_DUMMY_MAP(APCC_PLUS), \ + __SMU_DUMMY_MAP(GTHR), \ + __SMU_DUMMY_MAP(GFX_DCS), \ + __SMU_DUMMY_MAP(GFX_SS), \ + __SMU_DUMMY_MAP(OUT_OF_BAND_MONITOR), \ + __SMU_DUMMY_MAP(TEMP_DEPENDENT_VMIN), \ + 
__SMU_DUMMY_MAP(MMHUB_PG), \ + __SMU_DUMMY_MAP(ATHUB_PG), \ + __SMU_DUMMY_MAP(APCC_DFLL), \ + __SMU_DUMMY_MAP(DF_CSTATE), \ + __SMU_DUMMY_MAP(DPM_GFX_GPO), \ + __SMU_DUMMY_MAP(WAFL_CG), \ + __SMU_DUMMY_MAP(CCLK_DPM), \ + __SMU_DUMMY_MAP(FAN_CONTROLLER), \ + __SMU_DUMMY_MAP(VCN_DPM), \ + __SMU_DUMMY_MAP(LCLK_DPM), \ + __SMU_DUMMY_MAP(SHUBCLK_DPM), \ + __SMU_DUMMY_MAP(DCFCLK_DPM), \ + __SMU_DUMMY_MAP(DS_DCFCLK), \ + __SMU_DUMMY_MAP(S0I2), \ + __SMU_DUMMY_MAP(SMU_LOW_POWER), \ + __SMU_DUMMY_MAP(GFX_DEM), \ + __SMU_DUMMY_MAP(PSI), \ + __SMU_DUMMY_MAP(PROCHOT), \ + __SMU_DUMMY_MAP(CPUOFF), \ + __SMU_DUMMY_MAP(STAPM), \ + __SMU_DUMMY_MAP(S0I3), \ + __SMU_DUMMY_MAP(DF_CSTATES), \ + __SMU_DUMMY_MAP(PERF_LIMIT), \ + __SMU_DUMMY_MAP(CORE_DLDO), \ + __SMU_DUMMY_MAP(RSMU_LOW_POWER), \ + __SMU_DUMMY_MAP(SMN_LOW_POWER), \ + __SMU_DUMMY_MAP(THM_LOW_POWER), \ + __SMU_DUMMY_MAP(SMUIO_LOW_POWER), \ + __SMU_DUMMY_MAP(MP1_LOW_POWER), \ + __SMU_DUMMY_MAP(DS_VCN), \ + __SMU_DUMMY_MAP(CPPC), \ + __SMU_DUMMY_MAP(OS_CSTATES), \ + __SMU_DUMMY_MAP(ISP_DPM), \ + __SMU_DUMMY_MAP(A55_DPM), \ + __SMU_DUMMY_MAP(CVIP_DSP_DPM), \ + __SMU_DUMMY_MAP(MSMU_LOW_POWER), \ + __SMU_DUMMY_MAP(FUSE_CG), \ + __SMU_DUMMY_MAP(MP1_CG), \ + __SMU_DUMMY_MAP(SMUIO_CG), \ + __SMU_DUMMY_MAP(THM_CG), \ + __SMU_DUMMY_MAP(CLK_CG), \ + +#undef __SMU_DUMMY_MAP +#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT +enum smu_feature_mask { + SMU_FEATURE_MASKS + SMU_FEATURE_COUNT, +}; + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h new file mode 100644 index 000000000000..acb3be292096 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h @@ -0,0 +1,320 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMU_V11_0_H__ +#define __SMU_V11_0_H__ + +#include "amdgpu_smu.h" + +#define SMU11_DRIVER_IF_VERSION_INV 0xFFFFFFFF +#define SMU11_DRIVER_IF_VERSION_ARCT 0x17 +#define SMU11_DRIVER_IF_VERSION_NV10 0x37 +#define SMU11_DRIVER_IF_VERSION_NV12 0x38 +#define SMU11_DRIVER_IF_VERSION_NV14 0x38 +#define SMU11_DRIVER_IF_VERSION_Sienna_Cichlid 0x40 +#define SMU11_DRIVER_IF_VERSION_Navy_Flounder 0xE +#define SMU11_DRIVER_IF_VERSION_VANGOGH 0x03 +#define SMU11_DRIVER_IF_VERSION_Dimgrey_Cavefish 0xF +#define SMU11_DRIVER_IF_VERSION_Beige_Goby 0xD +#define SMU11_DRIVER_IF_VERSION_Cyan_Skillfish 0x8 + +/* MP Apertures */ +#define MP0_Public 0x03800000 +#define MP0_SRAM 0x03900000 +#define MP1_Public 0x03b00000 +#define MP1_SRAM 0x03c00004 + +/* address block */ +#define smnMP1_FIRMWARE_FLAGS 0x3010024 +#define smnMP0_FW_INTF 0x30101c0 +#define smnMP1_PUB_CTRL 0x3010b14 + +#define TEMP_RANGE_MIN (0) +#define TEMP_RANGE_MAX (80 * 1000) + +#define SMU11_TOOL_SIZE 0x19000 + +#define MAX_DPM_LEVELS 16 +#define MAX_PCIE_CONF 2 + +#define CTF_OFFSET_EDGE 5 +#define CTF_OFFSET_HOTSPOT 5 +#define CTF_OFFSET_MEM 5 + +#define LINK_WIDTH_MAX 6 +#define LINK_SPEED_MAX 3 + +static const __maybe_unused uint16_t link_width[] = {0, 1, 2, 4, 8, 12, 16}; +static const __maybe_unused uint16_t link_speed[] = {25, 50, 80, 160}; + +static const +struct smu_temperature_range __maybe_unused smu11_thermal_policy[] = +{ + {-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000}, + { 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000, 120000}, +}; + +struct smu_11_0_max_sustainable_clocks { + uint32_t display_clock; + uint32_t phy_clock; + uint32_t pixel_clock; + uint32_t uclock; + uint32_t dcef_clock; + uint32_t soc_clock; +}; + +struct smu_11_0_dpm_clk_level { + bool enabled; + uint32_t value; +}; + +struct smu_11_0_dpm_table { + uint32_t min; /* MHz */ + uint32_t max; /* MHz */ + uint32_t count; + bool is_fine_grained; + struct smu_11_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS]; +}; + +struct smu_11_0_pcie_table { + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; +}; + +struct smu_11_0_dpm_tables { + struct smu_11_0_dpm_table soc_table; + struct smu_11_0_dpm_table gfx_table; + struct smu_11_0_dpm_table uclk_table; + struct smu_11_0_dpm_table eclk_table; + struct smu_11_0_dpm_table vclk_table; + struct smu_11_0_dpm_table vclk1_table; + struct smu_11_0_dpm_table dclk_table; + struct smu_11_0_dpm_table dclk1_table; + struct smu_11_0_dpm_table dcef_table; + struct smu_11_0_dpm_table pixel_table; + struct smu_11_0_dpm_table display_table; + struct smu_11_0_dpm_table phy_table; + struct smu_11_0_dpm_table fclk_table; + struct smu_11_0_pcie_table pcie_table; +}; + +struct smu_11_0_dpm_context { + struct smu_11_0_dpm_tables dpm_tables; + uint32_t workload_policy_mask; + uint32_t dcef_min_ds_clk; +}; + +enum smu_11_0_power_state { + SMU_11_0_POWER_STATE__D0 = 0, + SMU_11_0_POWER_STATE__D1, + SMU_11_0_POWER_STATE__D3, /* Sleep*/ + SMU_11_0_POWER_STATE__D4, /* Hibernate*/ + SMU_11_0_POWER_STATE__D5, /* Power off*/ +}; + +struct smu_11_0_power_context { + uint32_t power_source; + uint8_t in_power_limit_boost_mode; + enum smu_11_0_power_state power_state; +}; + +struct smu_11_5_power_context { + uint32_t power_source; + uint8_t in_power_limit_boost_mode; + enum smu_11_0_power_state power_state; + + uint32_t current_fast_ppt_limit; + uint32_t default_fast_ppt_limit; + uint32_t max_fast_ppt_limit; +}; + +enum smu_v11_0_baco_seq { + BACO_SEQ_BACO = 0, + BACO_SEQ_MSR, + 
BACO_SEQ_BAMACO, + BACO_SEQ_ULPS, + BACO_SEQ_COUNT, +}; + +#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) + +int smu_v11_0_init_microcode(struct smu_context *smu); + +void smu_v11_0_fini_microcode(struct smu_context *smu); + +int smu_v11_0_load_microcode(struct smu_context *smu); + +int smu_v11_0_init_smc_tables(struct smu_context *smu); + +int smu_v11_0_fini_smc_tables(struct smu_context *smu); + +int smu_v11_0_init_power(struct smu_context *smu); + +int smu_v11_0_fini_power(struct smu_context *smu); + +int smu_v11_0_check_fw_status(struct smu_context *smu); + +int smu_v11_0_setup_pptable(struct smu_context *smu); + +int smu_v11_0_get_vbios_bootup_values(struct smu_context *smu); + +int smu_v11_0_check_fw_version(struct smu_context *smu); + +int smu_v11_0_set_driver_table_location(struct smu_context *smu); + +int smu_v11_0_set_tool_table_location(struct smu_context *smu); + +int smu_v11_0_notify_memory_pool_location(struct smu_context *smu); + +int smu_v11_0_system_features_control(struct smu_context *smu, + bool en); + +int smu_v11_0_init_display_count(struct smu_context *smu, uint32_t count); + +int smu_v11_0_set_allowed_mask(struct smu_context *smu); + +int smu_v11_0_notify_display_change(struct smu_context *smu); + +int smu_v11_0_get_current_power_limit(struct smu_context *smu, + uint32_t *power_limit); + +int smu_v11_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); + +int smu_v11_0_init_max_sustainable_clocks(struct smu_context *smu); + +int smu_v11_0_enable_thermal_alert(struct smu_context *smu); + +int smu_v11_0_disable_thermal_alert(struct smu_context *smu); + +int smu_v11_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value); + +int smu_v11_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk); + +int +smu_v11_0_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request + *clock_req); + +uint32_t +smu_v11_0_get_fan_control_mode(struct smu_context *smu); + +int +smu_v11_0_set_fan_control_mode(struct smu_context *smu, + uint32_t mode); + +int smu_v11_0_set_fan_speed_pwm(struct smu_context *smu, + uint32_t speed); + +int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu, + uint32_t speed); + +int smu_v11_0_get_fan_speed_pwm(struct smu_context *smu, + uint32_t *speed); + +int smu_v11_0_get_fan_speed_rpm(struct smu_context *smu, + uint32_t *speed); + +int smu_v11_0_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate); + +int smu_v11_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v11_0_register_irq_handler(struct smu_context *smu); + +int smu_v11_0_set_azalia_d3_pme(struct smu_context *smu); + +int smu_v11_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks); + +bool smu_v11_0_baco_is_support(struct smu_context *smu); + +enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu); + +int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); + +int smu_v11_0_baco_enter(struct smu_context *smu); +int smu_v11_0_baco_exit(struct smu_context *smu); + +int smu_v11_0_baco_set_armd3_sequence(struct smu_context *smu, + enum smu_v11_0_baco_seq baco_seq); + +int smu_v11_0_mode1_reset(struct smu_context *smu); + +int smu_v11_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); + +int 
smu_v11_0_set_hard_freq_limited_range(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max); + +int smu_v11_0_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level); + +int smu_v11_0_set_power_source(struct smu_context *smu, + enum smu_power_src_type power_src); + +int smu_v11_0_get_dpm_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint16_t level, + uint32_t *value); + +int smu_v11_0_get_dpm_level_count(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value); + +int smu_v11_0_set_single_dpm_table(struct smu_context *smu, + enum smu_clk_type clk_type, + struct smu_11_0_dpm_table *single_dpm_table); + +int smu_v11_0_get_dpm_level_range(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min_value, + uint32_t *max_value); + +int smu_v11_0_get_current_pcie_link_width_level(struct smu_context *smu); + +uint16_t smu_v11_0_get_current_pcie_link_width(struct smu_context *smu); + +int smu_v11_0_get_current_pcie_link_speed_level(struct smu_context *smu); + +uint16_t smu_v11_0_get_current_pcie_link_speed(struct smu_context *smu); + +int smu_v11_0_gfx_ulv_control(struct smu_context *smu, + bool enablement); + +int smu_v11_0_deep_sleep_control(struct smu_context *smu, + bool enablement); + +void smu_v11_0_interrupt_work(struct smu_context *smu); + +int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable); + +int smu_v11_0_restore_user_od_settings(struct smu_context *smu); + +#endif +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h new file mode 100644 index 000000000000..247c6e9632ba --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h @@ -0,0 +1,196 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef SMU_11_0_7_PPTABLE_H +#define SMU_11_0_7_PPTABLE_H + + +#define SMU_11_0_7_TABLE_FORMAT_REVISION 15 + +//// POWERPLAYTABLE::ulPlatformCaps +#define SMU_11_0_7_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page. +#define SMU_11_0_7_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 // This cap indicates whether power source notificaiton is done by SBIOS instead of OS. +#define SMU_11_0_7_PP_PLATFORM_CAP_HARDWAREDC 0x4 // This cap indicates whether DC mode notificaiton is done by GPIO pin directly. 
+#define SMU_11_0_7_PP_PLATFORM_CAP_BACO 0x8 // This cap indicates whether board supports the BACO circuitry. +#define SMU_11_0_7_PP_PLATFORM_CAP_MACO 0x10 // This cap indicates whether board supports the MACO circuitry. +#define SMU_11_0_7_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 // This cap indicates whether board supports the Shadow Pstate. + +// SMU_11_0_7_PP_THERMALCONTROLLER - Thermal Controller Type +#define SMU_11_0_7_PP_THERMALCONTROLLER_NONE 0 +#define SMU_11_0_7_PP_THERMALCONTROLLER_SIENNA_CICHLID 28 + +#define SMU_11_0_7_PP_OVERDRIVE_VERSION 0x81 // OverDrive 8 Table Version 0.2 +#define SMU_11_0_7_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00 + +enum SMU_11_0_7_ODFEATURE_CAP { + SMU_11_0_7_ODCAP_GFXCLK_LIMITS = 0, + SMU_11_0_7_ODCAP_GFXCLK_CURVE, + SMU_11_0_7_ODCAP_UCLK_LIMITS, + SMU_11_0_7_ODCAP_POWER_LIMIT, + SMU_11_0_7_ODCAP_FAN_ACOUSTIC_LIMIT, + SMU_11_0_7_ODCAP_FAN_SPEED_MIN, + SMU_11_0_7_ODCAP_TEMPERATURE_FAN, + SMU_11_0_7_ODCAP_TEMPERATURE_SYSTEM, + SMU_11_0_7_ODCAP_MEMORY_TIMING_TUNE, + SMU_11_0_7_ODCAP_FAN_ZERO_RPM_CONTROL, + SMU_11_0_7_ODCAP_AUTO_UV_ENGINE, + SMU_11_0_7_ODCAP_AUTO_OC_ENGINE, + SMU_11_0_7_ODCAP_AUTO_OC_MEMORY, + SMU_11_0_7_ODCAP_FAN_CURVE, + SMU_11_0_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, + SMU_11_0_7_ODCAP_POWER_MODE, + SMU_11_0_7_ODCAP_COUNT, +}; + +enum SMU_11_0_7_ODFEATURE_ID { + SMU_11_0_7_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_11_0_7_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature + SMU_11_0_7_ODFEATURE_GFXCLK_CURVE = 1 << SMU_11_0_7_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature + SMU_11_0_7_ODFEATURE_UCLK_LIMITS = 1 << SMU_11_0_7_ODCAP_UCLK_LIMITS, //UCLK Limit feature + SMU_11_0_7_ODFEATURE_POWER_LIMIT = 1 << SMU_11_0_7_ODCAP_POWER_LIMIT, //Power Limit feature + SMU_11_0_7_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_7_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature + SMU_11_0_7_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_11_0_7_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature + SMU_11_0_7_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_11_0_7_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature + SMU_11_0_7_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_11_0_7_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature + SMU_11_0_7_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_11_0_7_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature + SMU_11_0_7_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_7_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature + SMU_11_0_7_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_11_0_7_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature + SMU_11_0_7_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_11_0_7_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature + SMU_11_0_7_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_11_0_7_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature + SMU_11_0_7_ODFEATURE_FAN_CURVE = 1 << SMU_11_0_7_ODCAP_FAN_CURVE, //Fan Curve feature + SMU_11_0_ODFEATURE_AUTO_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_ODCAP_AUTO_FAN_ACOUSTIC_LIMIT, //Auto Fan Acoustic RPM feature + SMU_11_0_7_ODFEATURE_POWER_MODE = 1 << SMU_11_0_7_ODCAP_POWER_MODE, //Optimized GPU Power Mode feature + SMU_11_0_7_ODFEATURE_COUNT = 16, +}; + +#define SMU_11_0_7_MAX_ODFEATURE 32 //Maximum Number of OD Features + +enum SMU_11_0_7_ODSETTING_ID { + SMU_11_0_7_ODSETTING_GFXCLKFMAX = 0, + SMU_11_0_7_ODSETTING_GFXCLKFMIN, + SMU_11_0_7_ODSETTING_CUSTOM_GFX_VF_CURVE_A, + SMU_11_0_7_ODSETTING_CUSTOM_GFX_VF_CURVE_B, + SMU_11_0_7_ODSETTING_CUSTOM_GFX_VF_CURVE_C, + SMU_11_0_7_ODSETTING_CUSTOM_CURVE_VFT_FMIN, + SMU_11_0_7_ODSETTING_UCLKFMIN, + SMU_11_0_7_ODSETTING_UCLKFMAX, + 
SMU_11_0_7_ODSETTING_POWERPERCENTAGE, + SMU_11_0_7_ODSETTING_FANRPMMIN, + SMU_11_0_7_ODSETTING_FANRPMACOUSTICLIMIT, + SMU_11_0_7_ODSETTING_FANTARGETTEMPERATURE, + SMU_11_0_7_ODSETTING_OPERATINGTEMPMAX, + SMU_11_0_7_ODSETTING_ACTIMING, + SMU_11_0_7_ODSETTING_FAN_ZERO_RPM_CONTROL, + SMU_11_0_7_ODSETTING_AUTOUVENGINE, + SMU_11_0_7_ODSETTING_AUTOOCENGINE, + SMU_11_0_7_ODSETTING_AUTOOCMEMORY, + SMU_11_0_7_ODSETTING_FAN_CURVE_TEMPERATURE_1, + SMU_11_0_7_ODSETTING_FAN_CURVE_SPEED_1, + SMU_11_0_7_ODSETTING_FAN_CURVE_TEMPERATURE_2, + SMU_11_0_7_ODSETTING_FAN_CURVE_SPEED_2, + SMU_11_0_7_ODSETTING_FAN_CURVE_TEMPERATURE_3, + SMU_11_0_7_ODSETTING_FAN_CURVE_SPEED_3, + SMU_11_0_7_ODSETTING_FAN_CURVE_TEMPERATURE_4, + SMU_11_0_7_ODSETTING_FAN_CURVE_SPEED_4, + SMU_11_0_7_ODSETTING_FAN_CURVE_TEMPERATURE_5, + SMU_11_0_7_ODSETTING_FAN_CURVE_SPEED_5, + SMU_11_0_7_ODSETTING_AUTO_FAN_ACOUSTIC_LIMIT, + SMU_11_0_7_ODSETTING_POWER_MODE, + SMU_11_0_7_ODSETTING_COUNT, +}; +#define SMU_11_0_7_MAX_ODSETTING 64 //Maximum Number of ODSettings + +enum SMU_11_0_7_PWRMODE_SETTING { + SMU_11_0_7_PMSETTING_POWER_LIMIT_QUIET = 0, + SMU_11_0_7_PMSETTING_POWER_LIMIT_BALANCE, + SMU_11_0_7_PMSETTING_POWER_LIMIT_TURBO, + SMU_11_0_7_PMSETTING_POWER_LIMIT_RAGE, + SMU_11_0_7_PMSETTING_ACOUSTIC_TEMP_QUIET, + SMU_11_0_7_PMSETTING_ACOUSTIC_TEMP_BALANCE, + SMU_11_0_7_PMSETTING_ACOUSTIC_TEMP_TURBO, + SMU_11_0_7_PMSETTING_ACOUSTIC_TEMP_RAGE, +}; +#define SMU_11_0_7_MAX_PMSETTING 32 //Maximum Number of PowerMode Settings + +struct smu_11_0_7_overdrive_table +{ + uint8_t revision; //Revision = SMU_11_0_7_PP_OVERDRIVE_VERSION + uint8_t reserve[3]; //Zero filled field reserved for future use + uint32_t feature_count; //Total number of supported features + uint32_t setting_count; //Total number of supported settings + uint8_t cap[SMU_11_0_7_MAX_ODFEATURE]; //OD feature support flags + uint32_t max[SMU_11_0_7_MAX_ODSETTING]; //default maximum settings + uint32_t min[SMU_11_0_7_MAX_ODSETTING]; //default minimum settings + int16_t pm_setting[SMU_11_0_7_MAX_PMSETTING]; //Optimized power mode feature settings +} __attribute__((packed)); + +enum SMU_11_0_7_PPCLOCK_ID { + SMU_11_0_7_PPCLOCK_GFXCLK = 0, + SMU_11_0_7_PPCLOCK_SOCCLK, + SMU_11_0_7_PPCLOCK_UCLK, + SMU_11_0_7_PPCLOCK_FCLK, + SMU_11_0_7_PPCLOCK_DCLK_0, + SMU_11_0_7_PPCLOCK_VCLK_0, + SMU_11_0_7_PPCLOCK_DCLK_1, + SMU_11_0_7_PPCLOCK_VCLK_1, + SMU_11_0_7_PPCLOCK_DCEFCLK, + SMU_11_0_7_PPCLOCK_DISPCLK, + SMU_11_0_7_PPCLOCK_PIXCLK, + SMU_11_0_7_PPCLOCK_PHYCLK, + SMU_11_0_7_PPCLOCK_DTBCLK, + SMU_11_0_7_PPCLOCK_COUNT, +}; +#define SMU_11_0_7_MAX_PPCLOCK 16 //Maximum Number of PP Clocks + +struct smu_11_0_7_power_saving_clock_table +{ + uint8_t revision; //Revision = SMU_11_0_7_PP_POWERSAVINGCLOCK_VERSION + uint8_t reserve[3]; //Zero filled field reserved for future use + uint32_t count; //power_saving_clock_count = SMU_11_0_7_PPCLOCK_COUNT + uint32_t max[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz + uint32_t min[SMU_11_0_7_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz +} __attribute__((packed)); + +struct smu_11_0_7_powerplay_table +{ + struct atom_common_table_header header; //For sienna_cichlid, header.format_revision = 15, header.content_revision = 0 + uint8_t table_revision; //For sienna_cichlid, table_revision = 2 + uint16_t table_size; //Driver portion table size. 
The offset to smc_pptable including header size + uint32_t golden_pp_id; //PPGen use only: PP Table ID on the Golden Data Base + uint32_t golden_revision; //PPGen use only: PP Table Revision on the Golden Data Base + uint16_t format_id; //PPGen use only: PPTable for different ASICs. For sienna_cichlid this should be 0x80 + uint32_t platform_caps; //POWERPLAYABLE::ulPlatformCaps + + uint8_t thermal_controller_type; //one of SMU_11_0_7_PP_THERMALCONTROLLER + + uint16_t small_power_limit1; + uint16_t small_power_limit2; + uint16_t boost_power_limit; //For Gemini Board, when the slave adapter is in BACO mode, the master adapter will use this boost power limit instead of the default power limit to boost the power limit. + uint16_t software_shutdown_temp; + + uint16_t reserve[8]; //Zero filled field reserved for future use + + struct smu_11_0_7_power_saving_clock_table power_saving_clock; + struct smu_11_0_7_overdrive_table overdrive_table; + + PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h +} __attribute__((packed)); + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h new file mode 100644 index 000000000000..7a63cf8e85ed --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h @@ -0,0 +1,167 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ +#ifndef SMU_11_0_PPTABLE_H +#define SMU_11_0_PPTABLE_H + + +#define SMU_11_0_TABLE_FORMAT_REVISION 12 + +//// POWERPLAYTABLE::ulPlatformCaps +#define SMU_11_0_PP_PLATFORM_CAP_POWERPLAY 0x1 +#define SMU_11_0_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 +#define SMU_11_0_PP_PLATFORM_CAP_HARDWAREDC 0x4 +#define SMU_11_0_PP_PLATFORM_CAP_BACO 0x8 +#define SMU_11_0_PP_PLATFORM_CAP_MACO 0x10 +#define SMU_11_0_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 + +// SMU_11_0_PP_THERMALCONTROLLER - Thermal Controller Type +#define SMU_11_0_PP_THERMALCONTROLLER_NONE 0 + +#define SMU_11_0_PP_OVERDRIVE_VERSION 0x0800 +#define SMU_11_0_PP_POWERSAVINGCLOCK_VERSION 0x0100 + +enum SMU_11_0_ODFEATURE_CAP { + SMU_11_0_ODCAP_GFXCLK_LIMITS = 0, + SMU_11_0_ODCAP_GFXCLK_CURVE, + SMU_11_0_ODCAP_UCLK_MAX, + SMU_11_0_ODCAP_POWER_LIMIT, + SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT, + SMU_11_0_ODCAP_FAN_SPEED_MIN, + SMU_11_0_ODCAP_TEMPERATURE_FAN, + SMU_11_0_ODCAP_TEMPERATURE_SYSTEM, + SMU_11_0_ODCAP_MEMORY_TIMING_TUNE, + SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL, + SMU_11_0_ODCAP_AUTO_UV_ENGINE, + SMU_11_0_ODCAP_AUTO_OC_ENGINE, + SMU_11_0_ODCAP_AUTO_OC_MEMORY, + SMU_11_0_ODCAP_FAN_CURVE, + SMU_11_0_ODCAP_COUNT, +}; + +enum SMU_11_0_ODFEATURE_ID { + SMU_11_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_11_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature + SMU_11_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_11_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature + SMU_11_0_ODFEATURE_UCLK_MAX = 1 << SMU_11_0_ODCAP_UCLK_MAX, //UCLK Limit feature + SMU_11_0_ODFEATURE_POWER_LIMIT = 1 << SMU_11_0_ODCAP_POWER_LIMIT, //Power Limit feature + SMU_11_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_11_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature + SMU_11_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_11_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature + SMU_11_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_11_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature + SMU_11_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_11_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature + SMU_11_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_11_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature + SMU_11_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_11_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature + SMU_11_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature + SMU_11_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_11_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature + SMU_11_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_11_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature + SMU_11_0_ODFEATURE_FAN_CURVE = 1 << SMU_11_0_ODCAP_FAN_CURVE, //Fan Curve feature + SMU_11_0_ODFEATURE_COUNT = 14, +}; +#define SMU_11_0_MAX_ODFEATURE 32 //Maximum Number of OD Features + +enum SMU_11_0_ODSETTING_ID { + SMU_11_0_ODSETTING_GFXCLKFMAX = 0, + SMU_11_0_ODSETTING_GFXCLKFMIN, + SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1, + SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1, + SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2, + SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2, + SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3, + SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3, + SMU_11_0_ODSETTING_UCLKFMAX, + SMU_11_0_ODSETTING_POWERPERCENTAGE, + SMU_11_0_ODSETTING_FANRPMMIN, + SMU_11_0_ODSETTING_FANRPMACOUSTICLIMIT, + SMU_11_0_ODSETTING_FANTARGETTEMPERATURE, + SMU_11_0_ODSETTING_OPERATINGTEMPMAX, + SMU_11_0_ODSETTING_ACTIMING, + SMU_11_0_ODSETTING_FAN_ZERO_RPM_CONTROL, + SMU_11_0_ODSETTING_AUTOUVENGINE, + SMU_11_0_ODSETTING_AUTOOCENGINE, + SMU_11_0_ODSETTING_AUTOOCMEMORY, + SMU_11_0_ODSETTING_COUNT, +}; +#define 
SMU_11_0_MAX_ODSETTING 32 //Maximum Number of ODSettings + +struct smu_11_0_overdrive_table +{ + uint8_t revision; //Revision = SMU_11_0_PP_OVERDRIVE_VERSION + uint8_t reserve[3]; //Zero filled field reserved for future use + uint32_t feature_count; //Total number of supported features + uint32_t setting_count; //Total number of supported settings + uint8_t cap[SMU_11_0_MAX_ODFEATURE]; //OD feature support flags + uint32_t max[SMU_11_0_MAX_ODSETTING]; //default maximum settings + uint32_t min[SMU_11_0_MAX_ODSETTING]; //default minimum settings +} __attribute__((packed)); + +enum SMU_11_0_PPCLOCK_ID { + SMU_11_0_PPCLOCK_GFXCLK = 0, + SMU_11_0_PPCLOCK_VCLK, + SMU_11_0_PPCLOCK_DCLK, + SMU_11_0_PPCLOCK_ECLK, + SMU_11_0_PPCLOCK_SOCCLK, + SMU_11_0_PPCLOCK_UCLK, + SMU_11_0_PPCLOCK_DCEFCLK, + SMU_11_0_PPCLOCK_DISPCLK, + SMU_11_0_PPCLOCK_PIXCLK, + SMU_11_0_PPCLOCK_PHYCLK, + SMU_11_0_PPCLOCK_COUNT, +}; +#define SMU_11_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks + +struct smu_11_0_power_saving_clock_table +{ + uint8_t revision; //Revision = SMU_11_0_PP_POWERSAVINGCLOCK_VERSION + uint8_t reserve[3]; //Zero filled field reserved for future use + uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT + uint32_t max[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz + uint32_t min[SMU_11_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz +} __attribute__((packed)); + +struct smu_11_0_powerplay_table +{ + struct atom_common_table_header header; + uint8_t table_revision; + uint16_t table_size; //Driver portion table size. The offset to smc_pptable including header size + uint32_t golden_pp_id; + uint32_t golden_revision; + uint16_t format_id; + uint32_t platform_caps; //POWERPLAYABLE::ulPlatformCaps + + uint8_t thermal_controller_type; //one of SMU_11_0_PP_THERMALCONTROLLER + + uint16_t small_power_limit1; + uint16_t small_power_limit2; + uint16_t boost_power_limit; + uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning. + uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning. + uint16_t software_shutdown_temp; + + uint16_t reserve[6]; //Zero filled field reserved for future use + + struct smu_11_0_power_saving_clock_table power_saving_clock; + struct smu_11_0_overdrive_table overdrive_table; + +#ifndef SMU_11_0_PARTIAL_PPTABLE + PPTable_t smc_pptable; //PPTable_t in smu11_driver_if.h +#endif +} __attribute__((packed)); + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h new file mode 100644 index 000000000000..1ad2dff71090 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h @@ -0,0 +1,66 @@ +/* + * Copyright 2019 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + */ +#ifndef __SMU_V12_0_H__ +#define __SMU_V12_0_H__ + +#include "amdgpu_smu.h" + +/* MP Apertures */ +#define MP0_Public 0x03800000 +#define MP0_SRAM 0x03900000 +#define MP1_Public 0x03b00000 +#define MP1_SRAM 0x03c00004 + +#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) + +int smu_v12_0_check_fw_status(struct smu_context *smu); + +int smu_v12_0_check_fw_version(struct smu_context *smu); + +int smu_v12_0_powergate_sdma(struct smu_context *smu, bool gate); + +int smu_v12_0_powergate_vcn(struct smu_context *smu, bool gate); + +int smu_v12_0_powergate_jpeg(struct smu_context *smu, bool gate); + +int smu_v12_0_set_gfx_cgpg(struct smu_context *smu, bool enable); + +uint32_t smu_v12_0_get_gfxoff_status(struct smu_context *smu); + +int smu_v12_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v12_0_fini_smc_tables(struct smu_context *smu); + +int smu_v12_0_set_default_dpm_tables(struct smu_context *smu); + +int smu_v12_0_mode2_reset(struct smu_context *smu); + +int smu_v12_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); + +int smu_v12_0_set_driver_table_location(struct smu_context *smu); + +int smu_v12_0_get_vbios_bootup_values(struct smu_context *smu); + +#endif +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h new file mode 100644 index 000000000000..44af23ae059e --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h @@ -0,0 +1,273 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + */ +#ifndef __SMU_V13_0_H__ +#define __SMU_V13_0_H__ + +#include "amdgpu_smu.h" + +#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF +#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04 +#define SMU13_DRIVER_IF_VERSION_ALDE 0x08 + +#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms + +/* MP Apertures */ +#define MP0_Public 0x03800000 +#define MP0_SRAM 0x03900000 +#define MP1_Public 0x03b00000 +#define MP1_SRAM 0x03c00004 + +/* address block */ +#define smnMP1_FIRMWARE_FLAGS 0x3010024 +#define smnMP0_FW_INTF 0x30101c0 +#define smnMP1_PUB_CTRL 0x3010b14 + +#define TEMP_RANGE_MIN (0) +#define TEMP_RANGE_MAX (80 * 1000) + +#define SMU13_TOOL_SIZE 0x19000 + +#define MAX_DPM_LEVELS 16 +#define MAX_PCIE_CONF 2 + +#define CTF_OFFSET_EDGE 5 +#define CTF_OFFSET_HOTSPOT 5 +#define CTF_OFFSET_MEM 5 + +struct smu_13_0_max_sustainable_clocks { + uint32_t display_clock; + uint32_t phy_clock; + uint32_t pixel_clock; + uint32_t uclock; + uint32_t dcef_clock; + uint32_t soc_clock; +}; + +struct smu_13_0_dpm_clk_level { + bool enabled; + uint32_t value; +}; + +struct smu_13_0_dpm_table { + uint32_t min; /* MHz */ + uint32_t max; /* MHz */ + uint32_t count; + struct smu_13_0_dpm_clk_level dpm_levels[MAX_DPM_LEVELS]; +}; + +struct smu_13_0_pcie_table { + uint8_t pcie_gen[MAX_PCIE_CONF]; + uint8_t pcie_lane[MAX_PCIE_CONF]; +}; + +struct smu_13_0_dpm_tables { + struct smu_13_0_dpm_table soc_table; + struct smu_13_0_dpm_table gfx_table; + struct smu_13_0_dpm_table uclk_table; + struct smu_13_0_dpm_table eclk_table; + struct smu_13_0_dpm_table vclk_table; + struct smu_13_0_dpm_table dclk_table; + struct smu_13_0_dpm_table dcef_table; + struct smu_13_0_dpm_table pixel_table; + struct smu_13_0_dpm_table display_table; + struct smu_13_0_dpm_table phy_table; + struct smu_13_0_dpm_table fclk_table; + struct smu_13_0_pcie_table pcie_table; +}; + +struct smu_13_0_dpm_context { + struct smu_13_0_dpm_tables dpm_tables; + uint32_t workload_policy_mask; + uint32_t dcef_min_ds_clk; +}; + +enum smu_13_0_power_state { + SMU_13_0_POWER_STATE__D0 = 0, + SMU_13_0_POWER_STATE__D1, + SMU_13_0_POWER_STATE__D3, /* Sleep*/ + SMU_13_0_POWER_STATE__D4, /* Hibernate*/ + SMU_13_0_POWER_STATE__D5, /* Power off*/ +}; + +struct smu_13_0_power_context { + uint32_t power_source; + uint8_t in_power_limit_boost_mode; + enum smu_13_0_power_state power_state; +}; + +enum smu_v13_0_baco_seq { + BACO_SEQ_BACO = 0, + BACO_SEQ_MSR, + BACO_SEQ_BAMACO, + BACO_SEQ_ULPS, + BACO_SEQ_COUNT, +}; + +#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) + +int smu_v13_0_init_microcode(struct smu_context *smu); + +void smu_v13_0_fini_microcode(struct smu_context *smu); + +int smu_v13_0_load_microcode(struct smu_context *smu); + +int smu_v13_0_init_smc_tables(struct smu_context *smu); + +int smu_v13_0_fini_smc_tables(struct smu_context *smu); + +int smu_v13_0_init_power(struct smu_context *smu); + +int smu_v13_0_fini_power(struct smu_context *smu); + +int smu_v13_0_check_fw_status(struct smu_context *smu); + +int smu_v13_0_setup_pptable(struct smu_context *smu); + +int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu); + +int smu_v13_0_check_fw_version(struct smu_context *smu); + +int smu_v13_0_set_driver_table_location(struct smu_context *smu); + +int smu_v13_0_set_tool_table_location(struct smu_context *smu); + +int smu_v13_0_notify_memory_pool_location(struct smu_context *smu); + +int smu_v13_0_system_features_control(struct smu_context *smu, + bool en); + +int smu_v13_0_init_display_count(struct smu_context *smu, uint32_t count); + +int 
smu_v13_0_set_allowed_mask(struct smu_context *smu); + +int smu_v13_0_notify_display_change(struct smu_context *smu); + +int smu_v13_0_get_current_power_limit(struct smu_context *smu, + uint32_t *power_limit); + +int smu_v13_0_set_power_limit(struct smu_context *smu, + enum smu_ppt_limit_type limit_type, + uint32_t limit); + +int smu_v13_0_init_max_sustainable_clocks(struct smu_context *smu); + +int smu_v13_0_enable_thermal_alert(struct smu_context *smu); + +int smu_v13_0_disable_thermal_alert(struct smu_context *smu); + +int smu_v13_0_get_gfx_vdd(struct smu_context *smu, uint32_t *value); + +int smu_v13_0_set_min_deep_sleep_dcefclk(struct smu_context *smu, uint32_t clk); + +int +smu_v13_0_display_clock_voltage_request(struct smu_context *smu, + struct pp_display_clock_request + *clock_req); + +uint32_t +smu_v13_0_get_fan_control_mode(struct smu_context *smu); + +int +smu_v13_0_set_fan_control_mode(struct smu_context *smu, + uint32_t mode); + +int +smu_v13_0_set_fan_speed_percent(struct smu_context *smu, uint32_t speed); + +int smu_v13_0_set_fan_speed_rpm(struct smu_context *smu, + uint32_t speed); + +int smu_v13_0_set_xgmi_pstate(struct smu_context *smu, + uint32_t pstate); + +int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable); + +int smu_v13_0_register_irq_handler(struct smu_context *smu); + +int smu_v13_0_set_azalia_d3_pme(struct smu_context *smu); + +int smu_v13_0_get_max_sustainable_clocks_by_dc(struct smu_context *smu, + struct pp_smu_nv_clock_table *max_clocks); + +bool smu_v13_0_baco_is_support(struct smu_context *smu); + +enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu); + +int smu_v13_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state); + +int smu_v13_0_baco_enter(struct smu_context *smu); +int smu_v13_0_baco_exit(struct smu_context *smu); + +int smu_v13_0_mode2_reset(struct smu_context *smu); + +int smu_v13_0_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t *min, uint32_t *max); + +int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, enum smu_clk_type clk_type, + uint32_t min, uint32_t max); + +int smu_v13_0_set_hard_freq_limited_range(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t min, + uint32_t max); + +int smu_v13_0_set_performance_level(struct smu_context *smu, + enum amd_dpm_forced_level level); + +int smu_v13_0_set_power_source(struct smu_context *smu, + enum smu_power_src_type power_src); + +int smu_v13_0_get_dpm_freq_by_index(struct smu_context *smu, + enum smu_clk_type clk_type, + uint16_t level, + uint32_t *value); + +int smu_v13_0_get_dpm_level_count(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *value); + +int smu_v13_0_set_single_dpm_table(struct smu_context *smu, + enum smu_clk_type clk_type, + struct smu_13_0_dpm_table *single_dpm_table); + +int smu_v13_0_get_dpm_level_range(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min_value, + uint32_t *max_value); + +int smu_v13_0_get_current_pcie_link_width_level(struct smu_context *smu); + +int smu_v13_0_get_current_pcie_link_width(struct smu_context *smu); + +int smu_v13_0_get_current_pcie_link_speed_level(struct smu_context *smu); + +int smu_v13_0_get_current_pcie_link_speed(struct smu_context *smu); + +int smu_v13_0_gfx_ulv_control(struct smu_context *smu, + bool enablement); + +int smu_v13_0_wait_for_event(struct smu_context *smu, enum smu_event_type event, + uint64_t event_arg); + +#endif +#endif diff --git 
a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h new file mode 100644 index 000000000000..1f311396b706 --- /dev/null +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h @@ -0,0 +1,165 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ +#ifndef SMU_13_0_PPTABLE_H +#define SMU_13_0_PPTABLE_H + +#define SMU_13_0_TABLE_FORMAT_REVISION 1 + +//// POWERPLAYTABLE::ulPlatformCaps +#define SMU_13_0_PP_PLATFORM_CAP_POWERPLAY 0x1 +#define SMU_13_0_PP_PLATFORM_CAP_SBIOSPOWERSOURCE 0x2 +#define SMU_13_0_PP_PLATFORM_CAP_HARDWAREDC 0x4 +#define SMU_13_0_PP_PLATFORM_CAP_BACO 0x8 +#define SMU_13_0_PP_PLATFORM_CAP_MACO 0x10 +#define SMU_13_0_PP_PLATFORM_CAP_SHADOWPSTATE 0x20 + +// SMU_13_0_PP_THERMALCONTROLLER - Thermal Controller Type +#define SMU_13_0_PP_THERMALCONTROLLER_NONE 0 + +#define SMU_13_0_PP_OVERDRIVE_VERSION 0x0800 +#define SMU_13_0_PP_POWERSAVINGCLOCK_VERSION 0x0100 + +enum SMU_13_0_ODFEATURE_CAP { + SMU_13_0_ODCAP_GFXCLK_LIMITS = 0, + SMU_13_0_ODCAP_GFXCLK_CURVE, + SMU_13_0_ODCAP_UCLK_MAX, + SMU_13_0_ODCAP_POWER_LIMIT, + SMU_13_0_ODCAP_FAN_ACOUSTIC_LIMIT, + SMU_13_0_ODCAP_FAN_SPEED_MIN, + SMU_13_0_ODCAP_TEMPERATURE_FAN, + SMU_13_0_ODCAP_TEMPERATURE_SYSTEM, + SMU_13_0_ODCAP_MEMORY_TIMING_TUNE, + SMU_13_0_ODCAP_FAN_ZERO_RPM_CONTROL, + SMU_13_0_ODCAP_AUTO_UV_ENGINE, + SMU_13_0_ODCAP_AUTO_OC_ENGINE, + SMU_13_0_ODCAP_AUTO_OC_MEMORY, + SMU_13_0_ODCAP_FAN_CURVE, + SMU_13_0_ODCAP_COUNT, +}; + +enum SMU_13_0_ODFEATURE_ID { + SMU_13_0_ODFEATURE_GFXCLK_LIMITS = 1 << SMU_13_0_ODCAP_GFXCLK_LIMITS, //GFXCLK Limit feature + SMU_13_0_ODFEATURE_GFXCLK_CURVE = 1 << SMU_13_0_ODCAP_GFXCLK_CURVE, //GFXCLK Curve feature + SMU_13_0_ODFEATURE_UCLK_MAX = 1 << SMU_13_0_ODCAP_UCLK_MAX, //UCLK Limit feature + SMU_13_0_ODFEATURE_POWER_LIMIT = 1 << SMU_13_0_ODCAP_POWER_LIMIT, //Power Limit feature + SMU_13_0_ODFEATURE_FAN_ACOUSTIC_LIMIT = 1 << SMU_13_0_ODCAP_FAN_ACOUSTIC_LIMIT, //Fan Acoustic RPM feature + SMU_13_0_ODFEATURE_FAN_SPEED_MIN = 1 << SMU_13_0_ODCAP_FAN_SPEED_MIN, //Minimum Fan Speed feature + SMU_13_0_ODFEATURE_TEMPERATURE_FAN = 1 << SMU_13_0_ODCAP_TEMPERATURE_FAN, //Fan Target Temperature Limit feature + SMU_13_0_ODFEATURE_TEMPERATURE_SYSTEM = 1 << SMU_13_0_ODCAP_TEMPERATURE_SYSTEM, //Operating Temperature Limit feature + SMU_13_0_ODFEATURE_MEMORY_TIMING_TUNE = 1 << SMU_13_0_ODCAP_MEMORY_TIMING_TUNE, //AC Timing Tuning feature + 
SMU_13_0_ODFEATURE_FAN_ZERO_RPM_CONTROL = 1 << SMU_13_0_ODCAP_FAN_ZERO_RPM_CONTROL, //Zero RPM feature + SMU_13_0_ODFEATURE_AUTO_UV_ENGINE = 1 << SMU_13_0_ODCAP_AUTO_UV_ENGINE, //Auto Under Volt GFXCLK feature + SMU_13_0_ODFEATURE_AUTO_OC_ENGINE = 1 << SMU_13_0_ODCAP_AUTO_OC_ENGINE, //Auto Over Clock GFXCLK feature + SMU_13_0_ODFEATURE_AUTO_OC_MEMORY = 1 << SMU_13_0_ODCAP_AUTO_OC_MEMORY, //Auto Over Clock MCLK feature + SMU_13_0_ODFEATURE_FAN_CURVE = 1 << SMU_13_0_ODCAP_FAN_CURVE, //Fan Curve feature + SMU_13_0_ODFEATURE_COUNT = 14, +}; + +#define SMU_13_0_MAX_ODFEATURE 32 //Maximum Number of OD Features + +enum SMU_13_0_ODSETTING_ID { + SMU_13_0_ODSETTING_GFXCLKFMAX = 0, + SMU_13_0_ODSETTING_GFXCLKFMIN, + SMU_13_0_ODSETTING_VDDGFXCURVEFREQ_P1, + SMU_13_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1, + SMU_13_0_ODSETTING_VDDGFXCURVEFREQ_P2, + SMU_13_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2, + SMU_13_0_ODSETTING_VDDGFXCURVEFREQ_P3, + SMU_13_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3, + SMU_13_0_ODSETTING_UCLKFMAX, + SMU_13_0_ODSETTING_POWERPERCENTAGE, + SMU_13_0_ODSETTING_FANRPMMIN, + SMU_13_0_ODSETTING_FANRPMACOUSTICLIMIT, + SMU_13_0_ODSETTING_FANTARGETTEMPERATURE, + SMU_13_0_ODSETTING_OPERATINGTEMPMAX, + SMU_13_0_ODSETTING_ACTIMING, + SMU_13_0_ODSETTING_FAN_ZERO_RPM_CONTROL, + SMU_13_0_ODSETTING_AUTOUVENGINE, + SMU_13_0_ODSETTING_AUTOOCENGINE, + SMU_13_0_ODSETTING_AUTOOCMEMORY, + SMU_13_0_ODSETTING_COUNT, +}; + +#define SMU_13_0_MAX_ODSETTING 32 //Maximum Number of ODSettings + +struct smu_13_0_overdrive_table { + uint8_t revision; //Revision = SMU_11_0_PP_OVERDRIVE_VERSION + uint8_t reserve[3]; //Zero filled field reserved for future use + uint32_t feature_count; //Total number of supported features + uint32_t setting_count; //Total number of supported settings + uint8_t cap[SMU_13_0_MAX_ODFEATURE]; //OD feature support flags + uint32_t max[SMU_13_0_MAX_ODSETTING]; //default maximum settings + uint32_t min[SMU_13_0_MAX_ODSETTING]; //default minimum settings +} __attribute__((packed)); + +enum SMU_13_0_PPCLOCK_ID { + SMU_13_0_PPCLOCK_GFXCLK = 0, + SMU_13_0_PPCLOCK_VCLK, + SMU_13_0_PPCLOCK_DCLK, + SMU_13_0_PPCLOCK_ECLK, + SMU_13_0_PPCLOCK_SOCCLK, + SMU_13_0_PPCLOCK_UCLK, + SMU_13_0_PPCLOCK_DCEFCLK, + SMU_13_0_PPCLOCK_DISPCLK, + SMU_13_0_PPCLOCK_PIXCLK, + SMU_13_0_PPCLOCK_PHYCLK, + SMU_13_0_PPCLOCK_COUNT, +}; +#define SMU_13_0_MAX_PPCLOCK 16 //Maximum Number of PP Clocks + +struct smu_13_0_power_saving_clock_table { + uint8_t revision; //Revision = SMU_11_0_PP_POWERSAVINGCLOCK_VERSION + uint8_t reserve[3]; //Zero filled field reserved for future use + uint32_t count; //power_saving_clock_count = SMU_11_0_PPCLOCK_COUNT + uint32_t max[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Maximum array In MHz + uint32_t min[SMU_13_0_MAX_PPCLOCK]; //PowerSavingClock Mode Clock Minimum array In MHz +} __attribute__((packed)); + +struct smu_13_0_powerplay_table { + struct atom_common_table_header header; + uint8_t table_revision; + uint16_t table_size; //Driver portion table size. The offset to smc_pptable including header size + uint32_t golden_pp_id; + uint32_t golden_revision; + uint16_t format_id; + uint32_t platform_caps; //POWERPLAYABLE::ulPlatformCaps + + uint8_t thermal_controller_type; //one of SMU_13_0_PP_THERMALCONTROLLER + + uint16_t small_power_limit1; + uint16_t small_power_limit2; + uint16_t boost_power_limit; + uint16_t od_turbo_power_limit; //Power limit setting for Turbo mode in Performance UI Tuning. 
+ uint16_t od_power_save_power_limit; //Power limit setting for PowerSave/Optimal mode in Performance UI Tuning. + uint16_t software_shutdown_temp; + + uint16_t reserve[6]; //Zero filled field reserved for future use + + struct smu_13_0_power_saving_clock_table power_saving_clock; + struct smu_13_0_overdrive_table overdrive_table; + +#ifndef SMU_13_0_PARTIAL_PPTABLE + PPTable_t smc_pptable; //PPTable_t in driver_if.h +#endif +} __attribute__((packed)); + +#endif diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 0a1547c27305..d3963bfe5c89 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -33,7 +33,6 @@ #include "smu11_driver_if_arcturus.h" #include "soc15_common.h" #include "atom.h" -#include "power_state.h" #include "arcturus_ppt.h" #include "smu_v11_0_pptable.h" #include "arcturus_ppsmc.h" diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index a4d46f7069bc..ac8ba5e0e697 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -33,7 +33,6 @@ #include "smu13_driver_if_aldebaran.h" #include "soc15_common.h" #include "atom.h" -#include "power_state.h" #include "aldebaran_ppt.h" #include "smu_v13_0_pptable.h" #include "aldebaran_ppsmc.h" -- cgit From 3bce90bfbaa8de63bc500bc5a4dd262ed8e548ca Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Wed, 24 Nov 2021 11:12:31 +0800 Subject: drm/amd/pm: drop unnecessary gfxoff controls Those gfxoff controls added for some specific ASICs are unnecessary. The functionalities are not affected without them. Also to align with other ASICs, they should also be dropped. 
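For context, the "gfxoff controls" being dropped are the explicit amdgpu_gfx_off_ctrl() disable/enable pairs that bracketed GFXCLK queries and soft-limit writes on these ASICs. A simplified sketch of the old call shape that the hunks below remove (the helper name here is hypothetical; the calls are taken from the sienna_cichlid hunk):

    /* Old pattern (simplified): park gfxoff around a GFXCLK range query. */
    static int get_gfxclk_range_with_gfxoff_parked(struct smu_context *smu,
                                                   uint32_t *min, uint32_t *max)
    {
            struct amdgpu_device *adev = smu->adev;
            int ret;

            amdgpu_gfx_off_ctrl(adev, false);   /* temporarily disable gfxoff */
            ret = smu_v11_0_get_dpm_ultimate_freq(smu, SMU_GFXCLK, min, max);
            amdgpu_gfx_off_ctrl(adev, true);    /* re-enable gfxoff */

            return ret;
    }

The patch keeps only the inner query/set call and deletes the two amdgpu_gfx_off_ctrl() toggles, as seen in the sienna_cichlid_ppt.c, smu_v11_0.c and smu_v13_0.c hunks that follow.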
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 4 ---- .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 25 +--------------------- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 7 ------ drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 7 ------ 4 files changed, 1 insertion(+), 42 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index c898ea67354a..8895b2b595a4 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1542,8 +1542,6 @@ static int smu_reset(struct smu_context *smu) struct amdgpu_device *adev = smu->adev; int ret; - amdgpu_gfx_off_ctrl(smu->adev, false); - ret = smu_hw_fini(adev); if (ret) return ret; @@ -1556,8 +1554,6 @@ static int smu_reset(struct smu_context *smu) if (ret) return ret; - amdgpu_gfx_off_ctrl(smu->adev, true); - return 0; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index efe6b2eff6a0..4e37cd8025ed 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1036,10 +1036,6 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, if (ret) goto print_clk_out; - /* no need to disable gfxoff when retrieving the current gfxclk */ - if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK)) - amdgpu_gfx_off_ctrl(adev, false); - ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count); if (ret) goto print_clk_out; @@ -1168,25 +1164,18 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu, } print_clk_out: - if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK)) - amdgpu_gfx_off_ctrl(adev, true); - return size; } static int sienna_cichlid_force_clk_levels(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t mask) { - struct amdgpu_device *adev = smu->adev; int ret = 0; uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0; soft_min_level = mask ? (ffs(mask) - 1) : 0; soft_max_level = mask ? 
(fls(mask) - 1) : 0; - if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK)) - amdgpu_gfx_off_ctrl(adev, false); - switch (clk_type) { case SMU_GFXCLK: case SMU_SCLK: @@ -1220,9 +1209,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu, } forec_level_out: - if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK)) - amdgpu_gfx_off_ctrl(adev, true); - return 0; } @@ -1865,16 +1851,7 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu, enum smu_clk_type clk_type, uint32_t *min, uint32_t *max) { - struct amdgpu_device *adev = smu->adev; - int ret; - - if (clk_type == SMU_GFXCLK) - amdgpu_gfx_off_ctrl(adev, false); - ret = smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, min, max); - if (clk_type == SMU_GFXCLK) - amdgpu_gfx_off_ctrl(adev, true); - - return ret; + return smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, min, max); } static void sienna_cichlid_dump_od_table(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index e176e6a74f44..9acf2c045a97 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1798,7 +1798,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, uint32_t min, uint32_t max) { - struct amdgpu_device *adev = smu->adev; int ret = 0, clk_id = 0; uint32_t param; @@ -1811,9 +1810,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, if (clk_id < 0) return clk_id; - if (clk_type == SMU_GFXCLK) - amdgpu_gfx_off_ctrl(adev, false); - if (max > 0) { param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, @@ -1831,9 +1827,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu, } out: - if (clk_type == SMU_GFXCLK) - amdgpu_gfx_off_ctrl(adev, true); - return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index f1d7b4900aa1..f66d8b9135ca 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -1533,7 +1533,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, uint32_t min, uint32_t max) { - struct amdgpu_device *adev = smu->adev; int ret = 0, clk_id = 0; uint32_t param; @@ -1546,9 +1545,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, if (clk_id < 0) return clk_id; - if (clk_type == SMU_GFXCLK) - amdgpu_gfx_off_ctrl(adev, false); - if (max > 0) { param = (uint32_t)((clk_id << 16) | (max & 0xffff)); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq, @@ -1566,9 +1562,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu, } out: - if (clk_type == SMU_GFXCLK) - amdgpu_gfx_off_ctrl(adev, true); - return ret; } -- cgit From 61d7d0d5adc705f833d8a5dbb596253842486220 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 25 Nov 2021 11:15:46 +0800 Subject: drm/amd/pm: revise the performance level setting APIs Avoid cross callings which make lock protection enforcement on amdgpu_dpm_force_performance_level() impossible. 
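Concretely, the UMD-pstate gating that used to live in the per-backend enable_umd_pstate hooks now happens once in the sysfs handler, before amdgpu_dpm_force_performance_level() is called. A condensed view of the transition logic added to amdgpu_pm.c below (same calls as the hunk, trimmed of error handling):

    /* Condensed from the amdgpu_pm.c hunk: toggle GFX CG/PG when entering
     * or leaving a profiling level, then force the requested level. */
    uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
                                 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
                                 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
                                 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;

    if (!(current_level & profile_mode_mask) && (level & profile_mode_mask)) {
            /* enter UMD pstate: ungate powergating, then clockgating */
            amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
                                                   AMD_PG_STATE_UNGATE);
            amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
                                                   AMD_CG_STATE_UNGATE);
    } else if ((current_level & profile_mode_mask) &&
               !(level & profile_mode_mask)) {
            /* exit UMD pstate: gate clockgating, then powergating */
            amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
                                                   AMD_CG_STATE_GATE);
            amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_GFX,
                                                   AMD_PG_STATE_GATE);
    }

    amdgpu_dpm_force_performance_level(adev, level);

With this, the swsmu and powerplay backends no longer touch GFX CG/PG themselves, which is what removes the cross calls mentioned above.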
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/amd_shared.h | 2 -- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 29 ++++++++++++++++++++---- drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 17 +++++++------- drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 12 ---------- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 15 ------------ drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 - 6 files changed, 34 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index f57a1478f0fe..fb6ad56ad6f1 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -268,7 +268,6 @@ enum amd_dpm_forced_level; * @set_clockgating_state: enable/disable cg for the IP block * @set_powergating_state: enable/disable pg for the IP block * @get_clockgating_state: get current clockgating status - * @enable_umd_pstate: enable UMD powerstate * * These hooks provide an interface for controlling the operational state * of IP blocks. After acquiring a list of IP blocks for the GPU in use, @@ -299,7 +298,6 @@ struct amd_ip_funcs { int (*set_powergating_state)(void *handle, enum amd_powergating_state state); void (*get_clockgating_state)(void *handle, u32 *flags); - int (*enable_umd_pstate)(void *handle, enum amd_dpm_forced_level *level); }; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index b14b004577e5..d3eab245e0fe 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -300,6 +300,10 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, struct amdgpu_device *adev = drm_to_adev(ddev); enum amd_dpm_forced_level level; enum amd_dpm_forced_level current_level; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; int ret = 0; if (amdgpu_in_reset(adev)) @@ -354,10 +358,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, } /* profile_exit setting is valid only when current mode is in profile mode */ - if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && + if (!(current_level & profile_mode_mask) && (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { pr_err("Currently not in any profile mode!\n"); pm_runtime_mark_last_busy(ddev->dev); @@ -365,6 +366,26 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return -EINVAL; } + if (!(current_level & profile_mode_mask) && + (level & profile_mode_mask)) { + /* enter UMD Pstate */ + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_clockgating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); + } else if ((current_level & profile_mode_mask) && + !(level & profile_mode_mask)) { + /* exit UMD Pstate */ + amdgpu_device_ip_set_clockgating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_GATE); + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_GATE); + } + if (amdgpu_dpm_force_performance_level(adev, level)) { pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c 
b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c index 3c6ee493e410..9613c6181c17 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c @@ -953,6 +953,7 @@ restart_search: static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) { + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; struct amdgpu_ps *ps; enum amd_pm_state_type dpm_state; int ret; @@ -976,7 +977,7 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) else return -EINVAL; - if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { + if (amdgpu_dpm == 1 && pp_funcs->print_power_state) { printk("switching from power state:\n"); amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); printk("switching to power state:\n"); @@ -985,14 +986,14 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) /* update whether vce is active */ ps->vce_active = adev->pm.dpm.vce_active; - if (adev->powerplay.pp_funcs->display_configuration_changed) + if (pp_funcs->display_configuration_changed) amdgpu_dpm_display_configuration_changed(adev); ret = amdgpu_dpm_pre_set_power_state(adev); if (ret) return ret; - if (adev->powerplay.pp_funcs->check_state_equal) { + if (pp_funcs->check_state_equal) { if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) equal = false; } @@ -1000,24 +1001,24 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) if (equal) return 0; - if (adev->powerplay.pp_funcs->set_power_state) - adev->powerplay.pp_funcs->set_power_state(adev->powerplay.pp_handle); + if (pp_funcs->set_power_state) + pp_funcs->set_power_state(adev->powerplay.pp_handle); amdgpu_dpm_post_set_power_state(adev); adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; - if (adev->powerplay.pp_funcs->force_performance_level) { + if (pp_funcs->force_performance_level) { if (adev->pm.dpm.thermal_active) { enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; /* force low perf level for thermal */ - amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); + pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); /* save the user's level */ adev->pm.dpm.forced_level = level; } else { /* otherwise, user selected level */ - amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); + pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level); } } diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 6a06a1f0b79b..89341729744d 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -323,12 +323,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, if (*level & profile_mode_mask) { hwmgr->saved_dpm_level = hwmgr->dpm_level; hwmgr->en_umd_pstate = true; - amdgpu_device_ip_set_powergating_state(hwmgr->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_UNGATE); - amdgpu_device_ip_set_clockgating_state(hwmgr->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); } } else { /* exit umd pstate, restore level, enable gfx cg*/ @@ -336,12 +330,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr, if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) *level = hwmgr->saved_dpm_level; hwmgr->en_umd_pstate = false; - amdgpu_device_ip_set_clockgating_state(hwmgr->adev, - 
AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_GATE); - amdgpu_device_ip_set_powergating_state(hwmgr->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_GATE); } } } diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 8895b2b595a4..c374c3067496 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -1677,14 +1677,7 @@ static int smu_enable_umd_pstate(void *handle, /* enter umd pstate, save current level, disable gfx cg*/ if (*level & profile_mode_mask) { smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level; - smu_dpm_ctx->enable_umd_pstate = true; smu_gpo_control(smu, false); - amdgpu_device_ip_set_powergating_state(smu->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_UNGATE); - amdgpu_device_ip_set_clockgating_state(smu->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); smu_gfx_ulv_control(smu, false); smu_deep_sleep_control(smu, false); amdgpu_asic_update_umd_stable_pstate(smu->adev, true); @@ -1694,16 +1687,9 @@ static int smu_enable_umd_pstate(void *handle, if (!(*level & profile_mode_mask)) { if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT) *level = smu_dpm_ctx->saved_dpm_level; - smu_dpm_ctx->enable_umd_pstate = false; amdgpu_asic_update_umd_stable_pstate(smu->adev, false); smu_deep_sleep_control(smu, true); smu_gfx_ulv_control(smu, true); - amdgpu_device_ip_set_clockgating_state(smu->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_GATE); - amdgpu_device_ip_set_powergating_state(smu->adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_GATE); smu_gpo_control(smu, true); } } @@ -2149,7 +2135,6 @@ const struct amd_ip_funcs smu_ip_funcs = { .soft_reset = NULL, .set_clockgating_state = smu_set_clockgating_state, .set_powergating_state = smu_set_powergating_state, - .enable_umd_pstate = smu_enable_umd_pstate, }; const struct amdgpu_ip_block_version smu_v11_0_ip_block = diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 8a689baeaf82..3fdab6a44901 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -363,7 +363,6 @@ struct smu_dpm_context { uint32_t dpm_context_size; void *dpm_context; void *golden_dpm_context; - bool enable_umd_pstate; enum amd_dpm_forced_level dpm_level; enum amd_dpm_forced_level saved_dpm_level; enum amd_dpm_forced_level requested_dpm_level; -- cgit From 3712e7a494596b26861f4dc9b81676d1d0272eaf Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 16 Nov 2021 14:30:20 +0800 Subject: drm/amd/pm: unified lock protections in amdgpu_dpm.c As the only entry point, it's now safe and reasonable to enforce the lock protections in amdgpu_dpm.c. And with this, we can drop other internal used power locks. 
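The wrapper shape now enforced throughout amdgpu_dpm.c is roughly the following. This is a simplified sketch rather than a hunk from the patch: example_callback stands in for whichever pp_funcs member a given wrapper dispatches to, and the usual amdgpu headers are assumed.

/*
 * Sketch of the locking pattern applied to the amdgpu_dpm.c wrappers:
 * validate the callback, hold adev->pm.mutex for the duration of the
 * backend call, and hand back its return value.
 */
int amdgpu_dpm_example_wrapper(struct amdgpu_device *adev, uint32_t arg)
{
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret;

        if (!pp_funcs->example_callback)
                return -EOPNOTSUPP;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->example_callback(adev->powerplay.pp_handle, arg);
        mutex_unlock(&adev->pm.mutex);

        return ret;
}

Since every external caller now funnels through wrappers of this shape, the adev->pm.mutex lock/unlock pairs previously taken inside kv_dpm.c and si_dpm.c are redundant and are dropped below.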
Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 707 +++++++++++++++++++++-------- drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 16 +- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 16 +- 3 files changed, 526 insertions(+), 213 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 2756f52b74c1..68d2e80a673b 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -39,15 +39,33 @@ int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; + + if (!pp_funcs->get_sclk) + return 0; - return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low)); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle, + low); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; + + if (!pp_funcs->get_mclk) + return 0; + + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle, + low); + mutex_unlock(&adev->pm.mutex); - return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low)); + return ret; } int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate) @@ -62,52 +80,20 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block return 0; } + mutex_lock(&adev->pm.mutex); + switch (block_type) { case AMD_IP_BLOCK_TYPE_UVD: case AMD_IP_BLOCK_TYPE_VCE: - if (pp_funcs && pp_funcs->set_powergating_by_smu) { - /* - * TODO: need a better lock mechanism - * - * Here adev->pm.mutex lock protection is enforced on - * UVD and VCE cases only. Since for other cases, there - * may be already lock protection in amdgpu_pm.c. - * This is a quick fix for the deadlock issue below. - * NFO: task ocltst:2028 blocked for more than 120 seconds. - * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu - * echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. - * cltst D 0 2028 2026 0x00000000 - * all Trace: - * __schedule+0x2c0/0x870 - * schedule+0x2c/0x70 - * schedule_preempt_disabled+0xe/0x10 - * __mutex_lock.isra.9+0x26d/0x4e0 - * __mutex_lock_slowpath+0x13/0x20 - * ? 
__mutex_lock_slowpath+0x13/0x20 - * mutex_lock+0x2f/0x40 - * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu] - * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu] - * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu] - * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu] - * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu] - * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu] - */ - mutex_lock(&adev->pm.mutex); - ret = (pp_funcs->set_powergating_by_smu( - (adev)->powerplay.pp_handle, block_type, gate)); - mutex_unlock(&adev->pm.mutex); - } - break; case AMD_IP_BLOCK_TYPE_GFX: case AMD_IP_BLOCK_TYPE_VCN: case AMD_IP_BLOCK_TYPE_SDMA: case AMD_IP_BLOCK_TYPE_JPEG: case AMD_IP_BLOCK_TYPE_GMC: case AMD_IP_BLOCK_TYPE_ACP: - if (pp_funcs && pp_funcs->set_powergating_by_smu) { + if (pp_funcs && pp_funcs->set_powergating_by_smu) ret = (pp_funcs->set_powergating_by_smu( (adev)->powerplay.pp_handle, block_type, gate)); - } break; default: break; @@ -116,6 +102,8 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block if (!ret) atomic_set(&adev->pm.pwr_state[block_type], pwr_state); + mutex_unlock(&adev->pm.mutex); + return ret; } @@ -128,9 +116,13 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev) if (!pp_funcs || !pp_funcs->set_asic_baco_state) return -ENOENT; + mutex_lock(&adev->pm.mutex); + /* enter BACO state */ ret = pp_funcs->set_asic_baco_state(pp_handle, 1); + mutex_unlock(&adev->pm.mutex); + return ret; } @@ -143,9 +135,13 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev) if (!pp_funcs || !pp_funcs->set_asic_baco_state) return -ENOENT; + mutex_lock(&adev->pm.mutex); + /* exit BACO state */ ret = pp_funcs->set_asic_baco_state(pp_handle, 0); + mutex_unlock(&adev->pm.mutex); + return ret; } @@ -156,9 +152,13 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev, const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; if (pp_funcs && pp_funcs->set_mp1_state) { + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_mp1_state( adev->powerplay.pp_handle, mp1_state); + + mutex_unlock(&adev->pm.mutex); } return ret; @@ -169,25 +169,37 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev) const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; void *pp_handle = adev->powerplay.pp_handle; bool baco_cap; + int ret = 0; if (!pp_funcs || !pp_funcs->get_asic_baco_capability) return false; - if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap)) - return false; + mutex_lock(&adev->pm.mutex); + + ret = pp_funcs->get_asic_baco_capability(pp_handle, + &baco_cap); - return baco_cap; + mutex_unlock(&adev->pm.mutex); + + return ret ? 
false : baco_cap; } int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; void *pp_handle = adev->powerplay.pp_handle; + int ret = 0; if (!pp_funcs || !pp_funcs->asic_reset_mode_2) return -ENOENT; - return pp_funcs->asic_reset_mode_2(pp_handle); + mutex_lock(&adev->pm.mutex); + + ret = pp_funcs->asic_reset_mode_2(pp_handle); + + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) @@ -199,37 +211,47 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev) if (!pp_funcs || !pp_funcs->set_asic_baco_state) return -ENOENT; + mutex_lock(&adev->pm.mutex); + /* enter BACO state */ ret = pp_funcs->set_asic_baco_state(pp_handle, 1); if (ret) - return ret; + goto out; /* exit BACO state */ ret = pp_funcs->set_asic_baco_state(pp_handle, 0); - if (ret) - return ret; - return 0; +out: + mutex_unlock(&adev->pm.mutex); + return ret; } bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev) { struct smu_context *smu = adev->powerplay.pp_handle; + bool support_mode1_reset = false; - if (is_support_sw_smu(adev)) - return smu_mode1_reset_is_support(smu); + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + support_mode1_reset = smu_mode1_reset_is_support(smu); + mutex_unlock(&adev->pm.mutex); + } - return false; + return support_mode1_reset; } int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev) { struct smu_context *smu = adev->powerplay.pp_handle; + int ret = -EOPNOTSUPP; - if (is_support_sw_smu(adev)) - return smu_mode1_reset(smu); + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + ret = smu_mode1_reset(smu); + mutex_unlock(&adev->pm.mutex); + } - return -EOPNOTSUPP; + return ret; } int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, @@ -242,9 +264,12 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - if (pp_funcs && pp_funcs->switch_power_profile) + if (pp_funcs && pp_funcs->switch_power_profile) { + mutex_lock(&adev->pm.mutex); ret = pp_funcs->switch_power_profile( adev->powerplay.pp_handle, type, en); + mutex_unlock(&adev->pm.mutex); + } return ret; } @@ -255,9 +280,12 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev, const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int ret = 0; - if (pp_funcs && pp_funcs->set_xgmi_pstate) + if (pp_funcs && pp_funcs->set_xgmi_pstate) { + mutex_lock(&adev->pm.mutex); ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle, pstate); + mutex_unlock(&adev->pm.mutex); + } return ret; } @@ -269,8 +297,11 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; void *pp_handle = adev->powerplay.pp_handle; - if (pp_funcs && pp_funcs->set_df_cstate) + if (pp_funcs && pp_funcs->set_df_cstate) { + mutex_lock(&adev->pm.mutex); ret = pp_funcs->set_df_cstate(pp_handle, cstate); + mutex_unlock(&adev->pm.mutex); + } return ret; } @@ -278,11 +309,15 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev, int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en) { struct smu_context *smu = adev->powerplay.pp_handle; + int ret = 0; - if (is_support_sw_smu(adev)) - return smu_allow_xgmi_power_down(smu, en); + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + ret = smu_allow_xgmi_power_down(smu, en); + mutex_unlock(&adev->pm.mutex); + } - return 0; + return ret; } int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) @@ -292,8 
+327,11 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev) adev->powerplay.pp_funcs; int ret = 0; - if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) + if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) { + mutex_lock(&adev->pm.mutex); ret = pp_funcs->enable_mgpu_fan_boost(pp_handle); + mutex_unlock(&adev->pm.mutex); + } return ret; } @@ -306,9 +344,12 @@ int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev, adev->powerplay.pp_funcs; int ret = 0; - if (pp_funcs && pp_funcs->set_clockgating_by_smu) + if (pp_funcs && pp_funcs->set_clockgating_by_smu) { + mutex_lock(&adev->pm.mutex); ret = pp_funcs->set_clockgating_by_smu(pp_handle, msg_id); + mutex_unlock(&adev->pm.mutex); + } return ret; } @@ -321,9 +362,12 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev, adev->powerplay.pp_funcs; int ret = -EOPNOTSUPP; - if (pp_funcs && pp_funcs->smu_i2c_bus_access) + if (pp_funcs && pp_funcs->smu_i2c_bus_access) { + mutex_lock(&adev->pm.mutex); ret = pp_funcs->smu_i2c_bus_access(pp_handle, acquire); + mutex_unlock(&adev->pm.mutex); + } return ret; } @@ -336,13 +380,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) adev->pm.ac_power = true; else adev->pm.ac_power = false; + if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->enable_bapm) amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); - mutex_unlock(&adev->pm.mutex); if (is_support_sw_smu(adev)) smu_set_ac_dc(adev->powerplay.pp_handle); + + mutex_unlock(&adev->pm.mutex); } } @@ -350,16 +396,19 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso void *data, uint32_t *size) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; - int ret = 0; + int ret = -EINVAL; if (!data || !size) return -EINVAL; - if (pp_funcs && pp_funcs->read_sensor) - ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle, - sensor, data, size); - else - ret = -EINVAL; + if (pp_funcs && pp_funcs->read_sensor) { + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->read_sensor(adev->powerplay.pp_handle, + sensor, + data, + size); + mutex_unlock(&adev->pm.mutex); + } return ret; } @@ -374,7 +423,9 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev) if (!pp_funcs->pm_compute_clocks) return; + mutex_lock(&adev->pm.mutex); pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); } void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) @@ -409,32 +460,51 @@ void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) { - int r; + const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int r = 0; - if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { - r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); - if (r) { - pr_err("smu firmware loading failed\n"); - return r; - } + if (!pp_funcs->load_firmware) + return 0; - if (smu_version) - *smu_version = adev->pm.fw_version; + mutex_lock(&adev->pm.mutex); + r = pp_funcs->load_firmware(adev->powerplay.pp_handle); + if (r) { + pr_err("smu firmware loading failed\n"); + goto out; } - return 0; + if (smu_version) + *smu_version = adev->pm.fw_version; + +out: + mutex_unlock(&adev->pm.mutex); + return r; } int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable) { - return smu_handle_passthrough_sbr(adev->powerplay.pp_handle, enable); + int ret = 0; + + if (is_support_sw_smu(adev)) { + mutex_lock(&adev->pm.mutex); + ret = 
smu_handle_passthrough_sbr(adev->powerplay.pp_handle, + enable); + mutex_unlock(&adev->pm.mutex); + } + + return ret; } int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size) { struct smu_context *smu = adev->powerplay.pp_handle; + int ret = 0; + + mutex_lock(&adev->pm.mutex); + ret = smu_send_hbm_bad_pages_num(smu, size); + mutex_unlock(&adev->pm.mutex); - return smu_send_hbm_bad_pages_num(smu, size); + return ret; } int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, @@ -442,15 +512,22 @@ int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev, uint32_t *min, uint32_t *max) { + int ret = 0; + + if (type != PP_SCLK) + return -EINVAL; + if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - switch (type) { - case PP_SCLK: - return smu_get_dpm_freq_range(adev->powerplay.pp_handle, SMU_SCLK, min, max); - default: - return -EINVAL; - } + mutex_lock(&adev->pm.mutex); + ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle, + SMU_SCLK, + min, + max); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, @@ -459,26 +536,37 @@ int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev, uint32_t max) { struct smu_context *smu = adev->powerplay.pp_handle; + int ret = 0; + + if (type != PP_SCLK) + return -EINVAL; if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - switch (type) { - case PP_SCLK: - return smu_set_soft_freq_range(smu, SMU_SCLK, min, max); - default: - return -EINVAL; - } + mutex_lock(&adev->pm.mutex); + ret = smu_set_soft_freq_range(smu, + SMU_SCLK, + min, + max); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev) { struct smu_context *smu = adev->powerplay.pp_handle; + int ret = 0; if (!is_support_sw_smu(adev)) return 0; - return smu_write_watermarks_table(smu); + mutex_lock(&adev->pm.mutex); + ret = smu_write_watermarks_table(smu); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, @@ -486,27 +574,40 @@ int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, uint64_t event_arg) { struct smu_context *smu = adev->powerplay.pp_handle; + int ret = 0; if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - return smu_wait_for_event(smu, event, event_arg); + mutex_lock(&adev->pm.mutex); + ret = smu_wait_for_event(smu, event, event_arg); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value) { struct smu_context *smu = adev->powerplay.pp_handle; + int ret = 0; if (!is_support_sw_smu(adev)) return -EOPNOTSUPP; - return smu_get_status_gfxoff(smu, value); + mutex_lock(&adev->pm.mutex); + ret = smu_get_status_gfxoff(smu, value); + mutex_unlock(&adev->pm.mutex); + + return ret; } uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev) { struct smu_context *smu = adev->powerplay.pp_handle; + if (!is_support_sw_smu(adev)) + return 0; + return atomic64_read(&smu->throttle_int_counter); } @@ -541,12 +642,17 @@ struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev, uint32_t idx) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + struct amd_vce_state *vstate = NULL; if (!pp_funcs->get_vce_clock_state) return NULL; - return pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, - idx); + mutex_lock(&adev->pm.mutex); + vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle, + idx); + mutex_unlock(&adev->pm.mutex); + + return vstate; 
} void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, @@ -554,21 +660,28 @@ void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + mutex_lock(&adev->pm.mutex); + if (!pp_funcs->get_current_power_state) { *state = adev->pm.dpm.user_state; - return; + goto out; } *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle); if (*state < POWER_STATE_TYPE_DEFAULT || *state > POWER_STATE_TYPE_INTERNAL_3DPERF) *state = adev->pm.dpm.user_state; + +out: + mutex_unlock(&adev->pm.mutex); } void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, enum amd_pm_state_type state) { + mutex_lock(&adev->pm.mutex); adev->pm.dpm.user_state = state; + mutex_unlock(&adev->pm.mutex); if (is_support_sw_smu(adev)) return; @@ -584,10 +697,12 @@ enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; enum amd_dpm_forced_level level; + mutex_lock(&adev->pm.mutex); if (pp_funcs->get_performance_level) level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); else level = adev->pm.dpm.forced_level; + mutex_unlock(&adev->pm.mutex); return level; } @@ -596,30 +711,46 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, enum amd_dpm_forced_level level) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; - if (pp_funcs->force_performance_level) { - if (adev->pm.dpm.thermal_active) - return -EINVAL; + if (!pp_funcs->force_performance_level) + return 0; - if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, - level)) - return -EINVAL; + mutex_lock(&adev->pm.mutex); - adev->pm.dpm.forced_level = level; + if (adev->pm.dpm.thermal_active) { + ret = -EINVAL; + goto out; } - return 0; + if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, + level)) + ret = -EINVAL; + + if (!ret) + adev->pm.dpm.forced_level = level; + +out: + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, struct pp_states_info *states) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_pp_num_states) return -EOPNOTSUPP; - return pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, states); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle, + states); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, @@ -627,21 +758,34 @@ int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev, enum amd_pm_state_type *user_state) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->dispatch_tasks) return -EOPNOTSUPP; - return pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, task_id, user_state); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle, + task_id, + user_state); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_pp_table) return 0; - return pp_funcs->get_pp_table(adev->powerplay.pp_handle, table); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle, + table); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, @@ -650,14 +794,19 @@ int 
amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev, uint32_t size) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_fine_grain_clk_vol) return 0; - return pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, - type, - input, - size); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle, + type, + input, + size); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, @@ -666,14 +815,19 @@ int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev, uint32_t size) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->odn_edit_dpm_table) return 0; - return pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, - type, - input, - size); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle, + type, + input, + size); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, @@ -681,36 +835,51 @@ int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev, char *buf) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->print_clock_levels) return 0; - return pp_funcs->print_clock_levels(adev->powerplay.pp_handle, - type, - buf); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle, + type, + buf); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev, uint64_t ppfeature_masks) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_ppfeature_status) return 0; - return pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, - ppfeature_masks); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle, + ppfeature_masks); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_ppfeature_status) return 0; - return pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, - buf); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, + buf); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, @@ -718,23 +887,33 @@ int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev, uint32_t mask) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->force_clock_level) return 0; - return pp_funcs->force_clock_level(adev->powerplay.pp_handle, - type, - mask); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle, + type, + mask); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_sclk_od) return 0; - return pp_funcs->get_sclk_od(adev->powerplay.pp_handle); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) @@ -744,8 +923,10 @@ int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) if (is_support_sw_smu(adev)) 
return 0; + mutex_lock(&adev->pm.mutex); if (pp_funcs->set_sclk_od) pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value); + mutex_unlock(&adev->pm.mutex); if (amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, @@ -760,11 +941,16 @@ int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value) int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_mclk_od) return 0; - return pp_funcs->get_mclk_od(adev->powerplay.pp_handle); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) @@ -774,8 +960,10 @@ int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value) if (is_support_sw_smu(adev)) return 0; + mutex_lock(&adev->pm.mutex); if (pp_funcs->set_mclk_od) pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value); + mutex_unlock(&adev->pm.mutex); if (amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, @@ -791,35 +979,51 @@ int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev, char *buf) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_power_profile_mode) return -EOPNOTSUPP; - return pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, - buf); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle, + buf); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev, long *input, uint32_t size) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_power_profile_mode) return 0; - return pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, - input, - size); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle, + input, + size); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_gpu_metrics) return 0; - return pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, table); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle, + table); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, @@ -830,7 +1034,9 @@ int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, if (!pp_funcs->get_fan_control_mode) return -EOPNOTSUPP; + mutex_lock(&adev->pm.mutex); *fan_mode = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); return 0; } @@ -839,44 +1045,68 @@ int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, uint32_t speed) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_fan_speed_pwm) return -EINVAL; - return pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, speed); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, + speed); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, uint32_t *speed) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_fan_speed_pwm) return -EINVAL; - return 
pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, speed); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, + speed); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, uint32_t *speed) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_fan_speed_rpm) return -EINVAL; - return pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, speed); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, + speed); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, uint32_t speed) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_fan_speed_rpm) return -EINVAL; - return pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, speed); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, + speed); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, @@ -887,7 +1117,10 @@ int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, if (!pp_funcs->set_fan_control_mode) return -EOPNOTSUPP; - pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, mode); + mutex_lock(&adev->pm.mutex); + pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, + mode); + mutex_unlock(&adev->pm.mutex); return 0; } @@ -898,33 +1131,50 @@ int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, enum pp_power_type power_type) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_power_limit) return -ENODATA; - return pp_funcs->get_power_limit(adev->powerplay.pp_handle, - limit, - pp_limit_level, - power_type); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle, + limit, + pp_limit_level, + power_type); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev, uint32_t limit) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_power_limit) return -EINVAL; - return pp_funcs->set_power_limit(adev->powerplay.pp_handle, limit); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle, + limit); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev) { + bool cclk_dpm_supported = false; + if (!is_support_sw_smu(adev)) return false; - return is_support_cclk_dpm(adev); + mutex_lock(&adev->pm.mutex); + cclk_dpm_supported = is_support_cclk_dpm(adev); + mutex_unlock(&adev->pm.mutex); + + return (int)cclk_dpm_supported; } int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev, @@ -935,8 +1185,10 @@ int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *ade if (!pp_funcs->debugfs_print_current_performance_level) return -EOPNOTSUPP; + mutex_lock(&adev->pm.mutex); pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle, m); + mutex_unlock(&adev->pm.mutex); return 0; } @@ -946,13 +1198,18 @@ int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev, size_t *size) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_smu_prv_buf_details) return -ENOSYS; - return pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, - addr, - size); + 
mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle, + addr, + size); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev) @@ -973,19 +1230,27 @@ int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev, size_t size) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_pp_table) return -EOPNOTSUPP; - return pp_funcs->set_pp_table(adev->powerplay.pp_handle, - buf, - size); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle, + buf, + size); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev) { struct smu_context *smu = adev->powerplay.pp_handle; + if (!is_support_sw_smu(adev)) + return INT_MAX; + return smu->cpu_core_num; } @@ -1001,12 +1266,17 @@ int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev, const struct amd_pp_display_configuration *input) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->display_configuration_change) return 0; - return pp_funcs->display_configuration_change(adev->powerplay.pp_handle, - input); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle, + input); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, @@ -1014,25 +1284,35 @@ int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev, struct amd_pp_clocks *clocks) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_clock_by_type) return 0; - return pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, - type, - clocks); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle, + type, + clocks); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev, struct amd_pp_simple_clock_info *clocks) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_display_mode_validation_clocks) return 0; - return pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, - clocks); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle, + clocks); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, @@ -1040,13 +1320,18 @@ int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev, struct pp_clock_levels_with_latency *clocks) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_clock_by_type_with_latency) return 0; - return pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, - type, - clocks); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle, + type, + clocks); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, @@ -1054,49 +1339,69 @@ int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev, struct pp_clock_levels_with_voltage *clocks) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_clock_by_type_with_voltage) return 0; - return pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, - type, 
- clocks); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle, + type, + clocks); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev, void *clock_ranges) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_watermarks_for_clocks_ranges) return -EOPNOTSUPP; - return pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, - clock_ranges); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle, + clock_ranges); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev, struct pp_display_clock_request *clock) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->display_clock_voltage_request) return -EOPNOTSUPP; - return pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, - clock); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle, + clock); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev, struct amd_pp_clock_info *clocks) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_current_clocks) return -EOPNOTSUPP; - return pp_funcs->get_current_clocks(adev->powerplay.pp_handle, - clocks); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle, + clocks); + mutex_unlock(&adev->pm.mutex); + + return ret; } void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) @@ -1106,31 +1411,43 @@ void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev) if (!pp_funcs->notify_smu_enable_pwe) return; + mutex_lock(&adev->pm.mutex); pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle); + mutex_unlock(&adev->pm.mutex); } int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev, uint32_t count) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_active_display_count) return -EOPNOTSUPP; - return pp_funcs->set_active_display_count(adev->powerplay.pp_handle, - count); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle, + count); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev, uint32_t clock) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_min_deep_sleep_dcefclk) return -EOPNOTSUPP; - return pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, - clock); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle, + clock); + mutex_unlock(&adev->pm.mutex); + + return ret; } void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, @@ -1141,8 +1458,10 @@ void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev, if (!pp_funcs->set_hard_min_dcefclk_by_freq) return; + mutex_lock(&adev->pm.mutex); pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle, clock); + mutex_unlock(&adev->pm.mutex); } void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, @@ -1153,32 +1472,44 @@ void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev, if (!pp_funcs->set_hard_min_fclk_by_freq) return; + 
mutex_lock(&adev->pm.mutex); pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle, clock); + mutex_unlock(&adev->pm.mutex); } int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev, bool disable_memory_clock_switch) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->display_disable_memory_clock_switch) return 0; - return pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, - disable_memory_clock_switch); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle, + disable_memory_clock_switch); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev, struct pp_smu_nv_clock_table *max_clocks) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_max_sustainable_clocks_by_dc) return -EOPNOTSUPP; - return pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, - max_clocks); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle, + max_clocks); + mutex_unlock(&adev->pm.mutex); + + return ret; } enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, @@ -1186,23 +1517,33 @@ enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev, unsigned int *num_states) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_uclk_dpm_states) return -EOPNOTSUPP; - return pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, - clock_values_in_khz, - num_states); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle, + clock_values_in_khz, + num_states); + mutex_unlock(&adev->pm.mutex); + + return ret; } int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev, struct dpm_clocks *clock_table) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_dpm_clock_table) return -EOPNOTSUPP; - return pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, - clock_table); + mutex_lock(&adev->pm.mutex); + ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle, + clock_table); + mutex_unlock(&adev->pm.mutex); + + return ret; } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index 72824ef61edd..b37662c4a413 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -3040,21 +3040,18 @@ static int kv_dpm_sw_init(void *handle) return 0; INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); - mutex_lock(&adev->pm.mutex); ret = kv_dpm_init(adev); if (ret) goto dpm_failed; adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); return 0; dpm_failed: kv_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); DRM_ERROR("amdgpu: dpm initialization failed\n"); return ret; } @@ -3065,9 +3062,7 @@ static int kv_dpm_sw_fini(void *handle) flush_work(&adev->pm.dpm.thermal.work); - mutex_lock(&adev->pm.mutex); kv_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); return 0; } @@ -3080,14 +3075,12 @@ static int kv_dpm_hw_init(void *handle) if (!amdgpu_dpm) return 0; - mutex_lock(&adev->pm.mutex); kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); if (ret) 
adev->pm.dpm_enabled = false; else adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); amdgpu_legacy_dpm_compute_clocks(adev); return ret; } @@ -3096,11 +3089,8 @@ static int kv_dpm_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); + if (adev->pm.dpm_enabled) kv_dpm_disable(adev); - mutex_unlock(&adev->pm.mutex); - } return 0; } @@ -3110,12 +3100,10 @@ static int kv_dpm_suspend(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); /* disable dpm */ kv_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - mutex_unlock(&adev->pm.mutex); } return 0; } @@ -3127,14 +3115,12 @@ static int kv_dpm_resume(void *handle) if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ - mutex_lock(&adev->pm.mutex); kv_dpm_setup_asic(adev); ret = kv_dpm_enable(adev); if (ret) adev->pm.dpm_enabled = false; else adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); if (adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); } diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 9f8cc81cb7ca..92b987fb31d4 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7786,21 +7786,18 @@ static int si_dpm_sw_init(void *handle) return ret; INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler); - mutex_lock(&adev->pm.mutex); ret = si_dpm_init(adev); if (ret) goto dpm_failed; adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; if (amdgpu_dpm == 1) amdgpu_pm_print_power_states(adev); - mutex_unlock(&adev->pm.mutex); DRM_INFO("amdgpu: dpm initialized\n"); return 0; dpm_failed: si_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); DRM_ERROR("amdgpu: dpm initialization failed\n"); return ret; } @@ -7811,9 +7808,7 @@ static int si_dpm_sw_fini(void *handle) flush_work(&adev->pm.dpm.thermal.work); - mutex_lock(&adev->pm.mutex); si_dpm_fini(adev); - mutex_unlock(&adev->pm.mutex); return 0; } @@ -7827,14 +7822,12 @@ static int si_dpm_hw_init(void *handle) if (!amdgpu_dpm) return 0; - mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); if (ret) adev->pm.dpm_enabled = false; else adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); amdgpu_legacy_dpm_compute_clocks(adev); return ret; } @@ -7843,11 +7836,8 @@ static int si_dpm_hw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); + if (adev->pm.dpm_enabled) si_dpm_disable(adev); - mutex_unlock(&adev->pm.mutex); - } return 0; } @@ -7857,12 +7847,10 @@ static int si_dpm_suspend(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; if (adev->pm.dpm_enabled) { - mutex_lock(&adev->pm.mutex); /* disable dpm */ si_dpm_disable(adev); /* reset the power state */ adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps; - mutex_unlock(&adev->pm.mutex); } return 0; } @@ -7874,14 +7862,12 @@ static int si_dpm_resume(void *handle) if (adev->pm.dpm_enabled) { /* asic init will reset to the boot state */ - mutex_lock(&adev->pm.mutex); si_dpm_setup_asic(adev); ret = si_dpm_enable(adev); if (ret) adev->pm.dpm_enabled = false; else adev->pm.dpm_enabled = true; - mutex_unlock(&adev->pm.mutex); if 
(adev->pm.dpm_enabled) amdgpu_legacy_dpm_compute_clocks(adev); } -- cgit From 1a408c710d9e273a22050b0b7b0c131f92847918 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 2 Dec 2021 09:50:34 +0800 Subject: drm/amdgpu: wrap those atombios APIs used by SI under CONFIG_DRM_AMDGPU_SI No need to compile them on CONFIG_DRM_AMDGPU_SI disabled. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h | 22 ++++++++++++---------- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c index 12a6b1c99c93..9ba4817a9148 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.c @@ -1083,6 +1083,7 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, return 0; } +#ifdef CONFIG_DRM_AMDGPU_SI int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, u32 clock, bool strobe_mode, @@ -1503,6 +1504,7 @@ int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, } return -EINVAL; } +#endif bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h index 27e74b1fc260..4153d520e2a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atombios.h @@ -160,6 +160,7 @@ int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, bool strobe_mode, struct atom_clock_dividers *dividers); +#ifdef CONFIG_DRM_AMDGPU_SI int amdgpu_atombios_get_memory_pll_dividers(struct amdgpu_device *adev, u32 clock, bool strobe_mode, @@ -179,6 +180,17 @@ int amdgpu_atombios_get_voltage_table(struct amdgpu_device *adev, int amdgpu_atombios_init_mc_reg_table(struct amdgpu_device *adev, u8 module_index, struct atom_mc_reg_table *reg_table); +int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, + u16 voltage_id, u16 *voltage); +int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev, + u16 *voltage, + u16 leakage_idx); +void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev, + u16 *vddc, u16 *vddci, u16 *mvdd); +int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev, + u8 voltage_type, + u8 *svd_gpio_id, u8 *svc_gpio_id); +#endif bool amdgpu_atombios_has_gpu_virtualization_table(struct amdgpu_device *adev); @@ -190,21 +202,11 @@ void amdgpu_atombios_scratch_regs_set_backlight_level(struct amdgpu_device *adev bool amdgpu_atombios_scratch_need_asic_init(struct amdgpu_device *adev); void amdgpu_atombios_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); -int amdgpu_atombios_get_max_vddc(struct amdgpu_device *adev, u8 voltage_type, - u16 voltage_id, u16 *voltage); -int amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(struct amdgpu_device *adev, - u16 *voltage, - u16 leakage_idx); -void amdgpu_atombios_get_default_voltages(struct amdgpu_device *adev, - u16 *vddc, u16 *vddci, u16 *mvdd); int amdgpu_atombios_get_clock_dividers(struct amdgpu_device *adev, u8 clock_type, u32 clock, bool strobe_mode, struct atom_clock_dividers *dividers); -int amdgpu_atombios_get_svi2_info(struct amdgpu_device *adev, - u8 voltage_type, - u8 *svd_gpio_id, u8 *svc_gpio_id); int amdgpu_atombios_get_data_table(struct amdgpu_device *adev, uint32_t table, -- cgit From bcf19fdd507fb679bb6e1b8a119961f32b6cbb95 Mon Sep 
17 00:00:00 2001 From: Evan Quan Date: Thu, 2 Dec 2021 10:13:23 +0800 Subject: drm/amd/pm: drop those unrealistic thermal_type checks As it's impossible the thermal sensor of KV is one of them. Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c index b37662c4a413..8b23cc9f098a 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c @@ -1260,18 +1260,8 @@ static void kv_dpm_enable_bapm(void *handle, bool enable) static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor) { switch (sensor) { - case THERMAL_TYPE_RV6XX: - case THERMAL_TYPE_RV770: - case THERMAL_TYPE_EVERGREEN: - case THERMAL_TYPE_SUMO: - case THERMAL_TYPE_NI: - case THERMAL_TYPE_SI: - case THERMAL_TYPE_CI: case THERMAL_TYPE_KV: return true; - case THERMAL_TYPE_ADT7473_WITH_INTERNAL: - case THERMAL_TYPE_EMC2103_WITH_INTERNAL: - return false; /* need special handling */ case THERMAL_TYPE_NONE: case THERMAL_TYPE_EXTERNAL: case THERMAL_TYPE_EXTERNAL_GPIO: -- cgit From 54c73b51df2958f564b144ce86f15a85e227db90 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 7 Jan 2022 18:14:22 -0500 Subject: drm/amdgpu/pm: move additional logic into amdgpu_dpm_force_performance_level This is part of the forced performance level. Move it from the sysfs handler into amdgpu_dpm_force_performance_level. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 61 +++++++++++++++++++++++++++++++++++-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 50 ------------------------------ 2 files changed, 59 insertions(+), 52 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 68d2e80a673b..728b6e10f302 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -692,16 +692,25 @@ void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, amdgpu_dpm_compute_clocks(adev); } -enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) +static enum amd_dpm_forced_level amdgpu_dpm_get_performance_level_locked(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; enum amd_dpm_forced_level level; - mutex_lock(&adev->pm.mutex); if (pp_funcs->get_performance_level) level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); else level = adev->pm.dpm.forced_level; + + return level; +} + +enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) +{ + enum amd_dpm_forced_level level; + + mutex_lock(&adev->pm.mutex); + level = amdgpu_dpm_get_performance_level_locked(adev); mutex_unlock(&adev->pm.mutex); return level; @@ -711,6 +720,11 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, enum amd_dpm_forced_level level) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + enum amd_dpm_forced_level current_level; + uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | + AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; int ret = 0; if (!pp_funcs->force_performance_level) @@ -723,6 +737,49 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, goto out; } + current_level = amdgpu_dpm_get_performance_level_locked(adev); + if (current_level == 
level) { + ret = 0; + goto out; + } + + if (adev->asic_type == CHIP_RAVEN) { + if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { + if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && + level == AMD_DPM_FORCED_LEVEL_MANUAL) + amdgpu_gfx_off_ctrl(adev, false); + else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && + level != AMD_DPM_FORCED_LEVEL_MANUAL) + amdgpu_gfx_off_ctrl(adev, true); + } + } + + if (!(current_level & profile_mode_mask) && + (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { + ret = -EINVAL; + goto out; + } + + if (!(current_level & profile_mode_mask) && + (level & profile_mode_mask)) { + /* enter UMD Pstate */ + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_UNGATE); + amdgpu_device_ip_set_clockgating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_UNGATE); + } else if ((current_level & profile_mode_mask) && + !(level & profile_mode_mask)) { + /* exit UMD Pstate */ + amdgpu_device_ip_set_clockgating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_CG_STATE_GATE); + amdgpu_device_ip_set_powergating_state(adev, + AMD_IP_BLOCK_TYPE_GFX, + AMD_PG_STATE_GATE); + } + if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, level)) ret = -EINVAL; diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index d3eab245e0fe..d2823aaeca09 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -299,11 +299,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, struct drm_device *ddev = dev_get_drvdata(dev); struct amdgpu_device *adev = drm_to_adev(ddev); enum amd_dpm_forced_level level; - enum amd_dpm_forced_level current_level; - uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | - AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; int ret = 0; if (amdgpu_in_reset(adev)) @@ -341,51 +336,6 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return ret; } - current_level = amdgpu_dpm_get_performance_level(adev); - if (current_level == level) { - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return count; - } - - if (adev->asic_type == CHIP_RAVEN) { - if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { - if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL) - amdgpu_gfx_off_ctrl(adev, false); - else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL) - amdgpu_gfx_off_ctrl(adev, true); - } - } - - /* profile_exit setting is valid only when current mode is in profile mode */ - if (!(current_level & profile_mode_mask) && - (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { - pr_err("Currently not in any profile mode!\n"); - pm_runtime_mark_last_busy(ddev->dev); - pm_runtime_put_autosuspend(ddev->dev); - return -EINVAL; - } - - if (!(current_level & profile_mode_mask) && - (level & profile_mode_mask)) { - /* enter UMD Pstate */ - amdgpu_device_ip_set_powergating_state(adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_UNGATE); - amdgpu_device_ip_set_clockgating_state(adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_UNGATE); - } else if ((current_level & profile_mode_mask) && - !(level & profile_mode_mask)) { - /* exit UMD Pstate */ - amdgpu_device_ip_set_clockgating_state(adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_CG_STATE_GATE); - amdgpu_device_ip_set_powergating_state(adev, - AMD_IP_BLOCK_TYPE_GFX, - AMD_PG_STATE_GATE); - } - if 
(amdgpu_dpm_force_performance_level(adev, level)) { pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); -- cgit From 685fae24d94fd615b7058832fcb437eb588f4860 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 11 Jan 2022 15:02:19 +0800 Subject: drm/amd/pm: correct the checks for fan attributes support On functionality unsupported, -EOPNOTSUPP will be returned. And we rely on that to determine the fan attributes support. Fixes: 79c65f3fcbb128 ("drm/amd/pm: do not expose power implementation details to amdgpu_pm.c") Signed-off-by: Evan Quan Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/kgd_pp_interface.h | 4 +- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 21 ++-- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 21 ++-- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 19 +++- drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 71 +++++++----- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 133 +++++++++++++---------- 6 files changed, 160 insertions(+), 109 deletions(-) diff --git a/drivers/gpu/drm/amd/include/kgd_pp_interface.h b/drivers/gpu/drm/amd/include/kgd_pp_interface.h index a8eec91c0995..387120099493 100644 --- a/drivers/gpu/drm/amd/include/kgd_pp_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_pp_interface.h @@ -315,8 +315,8 @@ struct amd_pm_funcs { void *rps, bool *equal); /* export for sysfs */ - void (*set_fan_control_mode)(void *handle, u32 mode); - u32 (*get_fan_control_mode)(void *handle); + int (*set_fan_control_mode)(void *handle, u32 mode); + int (*get_fan_control_mode)(void *handle, u32 *fan_mode); int (*set_fan_speed_pwm)(void *handle, u32 speed); int (*get_fan_speed_pwm)(void *handle, u32 *speed); int (*force_clock_level)(void *handle, enum pp_clock_type type, uint32_t mask); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 728b6e10f302..f0daa66f5b3d 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -1087,15 +1087,17 @@ int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev, uint32_t *fan_mode) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->get_fan_control_mode) return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); - *fan_mode = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle); + ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle, + fan_mode); mutex_unlock(&adev->pm.mutex); - return 0; + return ret; } int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, @@ -1105,7 +1107,7 @@ int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, int ret = 0; if (!pp_funcs->set_fan_speed_pwm) - return -EINVAL; + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, @@ -1122,7 +1124,7 @@ int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev, int ret = 0; if (!pp_funcs->get_fan_speed_pwm) - return -EINVAL; + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle, @@ -1139,7 +1141,7 @@ int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev, int ret = 0; if (!pp_funcs->get_fan_speed_rpm) - return -EINVAL; + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle, @@ -1156,7 +1158,7 @@ int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev, int ret = 0; if (!pp_funcs->set_fan_speed_rpm) - return -EINVAL; + return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); ret = 
pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle, @@ -1170,16 +1172,17 @@ int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev, uint32_t mode) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; + int ret = 0; if (!pp_funcs->set_fan_control_mode) return -EOPNOTSUPP; mutex_lock(&adev->pm.mutex); - pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, - mode); + ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle, + mode); mutex_unlock(&adev->pm.mutex); - return 0; + return ret; } int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index d2823aaeca09..1b03ad7a21ad 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3147,7 +3147,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, struct device *dev = kobj_to_dev(kobj); struct amdgpu_device *adev = dev_get_drvdata(dev); umode_t effective_mode = attr->mode; - uint32_t speed = 0; /* under multi-vf mode, the hwmon attributes are all not supported */ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) @@ -3213,15 +3212,15 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; /* mask fan attributes if we have no bindings for this asic to expose */ - if (((amdgpu_dpm_get_fan_speed_pwm(adev, &speed) == -EINVAL) && + if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) && attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ - ((amdgpu_dpm_get_fan_control_mode(adev, &speed) == -EOPNOTSUPP) && + ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) && attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ effective_mode &= ~S_IRUGO; - if (((amdgpu_dpm_set_fan_speed_pwm(adev, speed) == -EINVAL) && + if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) && attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ - ((amdgpu_dpm_set_fan_control_mode(adev, speed) == -EOPNOTSUPP) && + ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) && attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ effective_mode &= ~S_IWUSR; @@ -3241,16 +3240,16 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, return 0; /* hide max/min values if we can't both query and manage the fan */ - if (((amdgpu_dpm_set_fan_speed_pwm(adev, speed) == -EINVAL) && - (amdgpu_dpm_get_fan_speed_pwm(adev, &speed) == -EINVAL) && - (amdgpu_dpm_set_fan_speed_rpm(adev, speed) == -EINVAL) && - (amdgpu_dpm_get_fan_speed_rpm(adev, &speed) == -EINVAL)) && + if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) && + (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) && + (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) && + (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) && (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) return 0; - if ((amdgpu_dpm_set_fan_speed_rpm(adev, speed) == -EINVAL) && - (amdgpu_dpm_get_fan_speed_rpm(adev, &speed) == -EINVAL) && + if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) && + (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) && (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) return 0; diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 92b987fb31d4..23ff0d812e4b 100644 --- 
a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -6619,6 +6619,9 @@ static int si_dpm_get_fan_speed_pwm(void *handle, u64 tmp64; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (!speed) + return -EINVAL; + if (adev->pm.no_fan) return -ENOENT; @@ -6669,10 +6672,13 @@ static int si_dpm_set_fan_speed_pwm(void *handle, return 0; } -static void si_dpm_set_fan_control_mode(void *handle, u32 mode) +static int si_dpm_set_fan_control_mode(void *handle, u32 mode) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; + if (mode == U32_MAX) + return -EINVAL; + if (mode) { /* stop auto-manage */ if (adev->pm.dpm.fan.ucode_fan_control) @@ -6685,19 +6691,26 @@ static void si_dpm_set_fan_control_mode(void *handle, u32 mode) else si_fan_ctrl_set_default_mode(adev); } + + return 0; } -static u32 si_dpm_get_fan_control_mode(void *handle) +static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; struct si_power_info *si_pi = si_get_pi(adev); u32 tmp; + if (!fan_mode) + return -EINVAL; + if (si_pi->fan_is_controlled_by_smc) return 0; tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK; - return (tmp >> FDO_PWM_MODE_SHIFT); + *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT); + + return 0; } #if 0 diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 89341729744d..76c26ae368f9 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -488,38 +488,43 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) return pm_type; } -static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) +static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) { struct pp_hwmgr *hwmgr = handle; if (!hwmgr || !hwmgr->pm_en) - return; + return -EOPNOTSUPP; + + if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) + return -EOPNOTSUPP; + + if (mode == U32_MAX) + return -EINVAL; - if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return; - } mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); mutex_unlock(&hwmgr->smu_lock); + + return 0; } -static uint32_t pp_dpm_get_fan_control_mode(void *handle) +static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode) { struct pp_hwmgr *hwmgr = handle; - uint32_t mode = 0; if (!hwmgr || !hwmgr->pm_en) - return 0; + return -EOPNOTSUPP; + + if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) + return -EOPNOTSUPP; + + if (!fan_mode) + return -EINVAL; - if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return 0; - } mutex_lock(&hwmgr->smu_lock); - mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); + *fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); mutex_unlock(&hwmgr->smu_lock); - return mode; + return 0; } static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed) @@ -528,12 +533,14 @@ static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed) int ret = 0; if (!hwmgr || !hwmgr->pm_en) + return -EOPNOTSUPP; + + if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) + return -EOPNOTSUPP; + + if (speed == U32_MAX) return -EINVAL; - if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return 0; - } mutex_lock(&hwmgr->smu_lock); ret = 
hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed); mutex_unlock(&hwmgr->smu_lock); @@ -546,12 +553,13 @@ static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed) int ret = 0; if (!hwmgr || !hwmgr->pm_en) - return -EINVAL; + return -EOPNOTSUPP; - if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return 0; - } + if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) + return -EOPNOTSUPP; + + if (!speed) + return -EINVAL; mutex_lock(&hwmgr->smu_lock); ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed); @@ -565,9 +573,12 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) int ret = 0; if (!hwmgr || !hwmgr->pm_en) - return -EINVAL; + return -EOPNOTSUPP; if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL) + return -EOPNOTSUPP; + + if (!rpm) return -EINVAL; mutex_lock(&hwmgr->smu_lock); @@ -582,12 +593,14 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm) int ret = 0; if (!hwmgr || !hwmgr->pm_en) + return -EOPNOTSUPP; + + if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) + return -EOPNOTSUPP; + + if (rpm == U32_MAX) return -EINVAL; - if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) { - pr_info_ratelimited("%s was not implemented.\n", __func__); - return 0; - } mutex_lock(&hwmgr->smu_lock); ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm); mutex_unlock(&hwmgr->smu_lock); diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index c374c3067496..828cb932f6a9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -59,7 +59,7 @@ static int smu_handle_task(struct smu_context *smu, bool lock_needed); static int smu_reset(struct smu_context *smu); static int smu_set_fan_speed_pwm(void *handle, u32 speed); -static int smu_set_fan_control_mode(struct smu_context *smu, int value); +static int smu_set_fan_control_mode(void *handle, u32 value); static int smu_set_power_limit(void *handle, uint32_t limit); static int smu_set_fan_speed_rpm(void *handle, uint32_t speed); static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled); @@ -407,7 +407,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu) if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL || smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) { ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode); - if (ret) { + if (ret != -EOPNOTSUPP) { smu->user_dpm_profile.fan_speed_pwm = 0; smu->user_dpm_profile.fan_speed_rpm = 0; smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO; @@ -416,13 +416,13 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu) if (smu->user_dpm_profile.fan_speed_pwm) { ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm); - if (ret) + if (ret != -EOPNOTSUPP) dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n"); } if (smu->user_dpm_profile.fan_speed_rpm) { ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm); - if (ret) + if (ret != -EOPNOTSUPP) dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n"); } } @@ -2218,18 +2218,22 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; + if (!smu->ppt_funcs->set_fan_speed_rpm) + return -EOPNOTSUPP; + + if (speed == U32_MAX) + return -EINVAL; + mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_fan_speed_rpm) { - ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); - if (!ret && 
!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { - smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; - smu->user_dpm_profile.fan_speed_rpm = speed; + ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); + if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { + smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; + smu->user_dpm_profile.fan_speed_rpm = speed; - /* Override custom PWM setting as they cannot co-exist */ - smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; - smu->user_dpm_profile.fan_speed_pwm = 0; - } + /* Override custom PWM setting as they cannot co-exist */ + smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM; + smu->user_dpm_profile.fan_speed_pwm = 0; } mutex_unlock(&smu->mutex); @@ -2562,60 +2566,65 @@ static int smu_set_power_profile_mode(void *handle, } -static u32 smu_get_fan_control_mode(void *handle) +static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) { struct smu_context *smu = handle; - u32 ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) - return AMD_FAN_CTRL_NONE; + return -EOPNOTSUPP; + + if (!smu->ppt_funcs->get_fan_control_mode) + return -EOPNOTSUPP; + + if (!fan_mode) + return -EINVAL; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_fan_control_mode) - ret = smu->ppt_funcs->get_fan_control_mode(smu); + *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); mutex_unlock(&smu->mutex); - return ret; + return 0; } -static int smu_set_fan_control_mode(struct smu_context *smu, int value) +static int smu_set_fan_control_mode(void *handle, u32 value) { + struct smu_context *smu = handle; int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) - return -EOPNOTSUPP; + return -EOPNOTSUPP; + + if (!smu->ppt_funcs->set_fan_control_mode) + return -EOPNOTSUPP; + + if (value == U32_MAX) + return -EINVAL; mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_fan_control_mode) { - ret = smu->ppt_funcs->set_fan_control_mode(smu, value); - if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) - smu->user_dpm_profile.fan_mode = value; - } + ret = smu->ppt_funcs->set_fan_control_mode(smu, value); + if (ret) + goto out; - mutex_unlock(&smu->mutex); + if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { + smu->user_dpm_profile.fan_mode = value; - /* reset user dpm fan speed */ - if (!ret && value != AMD_FAN_CTRL_MANUAL && - !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { - smu->user_dpm_profile.fan_speed_pwm = 0; - smu->user_dpm_profile.fan_speed_rpm = 0; - smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); + /* reset user dpm fan speed */ + if (value != AMD_FAN_CTRL_MANUAL) { + smu->user_dpm_profile.fan_speed_pwm = 0; + smu->user_dpm_profile.fan_speed_rpm = 0; + smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM); + } } - return ret; -} - -static void smu_pp_set_fan_control_mode(void *handle, u32 value) -{ - struct smu_context *smu = handle; +out: + mutex_unlock(&smu->mutex); - smu_set_fan_control_mode(smu, value); + return ret; } - static int smu_get_fan_speed_pwm(void *handle, u32 *speed) { struct smu_context *smu = handle; @@ -2624,10 +2633,15 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed) if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; + if (!smu->ppt_funcs->get_fan_speed_pwm) + return -EOPNOTSUPP; + + if (!speed) + return -EINVAL; + mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_fan_speed_pwm) - ret = 
smu->ppt_funcs->get_fan_speed_pwm(smu, speed); + ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); mutex_unlock(&smu->mutex); @@ -2642,18 +2656,22 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed) if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; + if (!smu->ppt_funcs->set_fan_speed_pwm) + return -EOPNOTSUPP; + + if (speed == U32_MAX) + return -EINVAL; + mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_fan_speed_pwm) { - ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); - if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { - smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; - smu->user_dpm_profile.fan_speed_pwm = speed; + ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); + if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { + smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; + smu->user_dpm_profile.fan_speed_pwm = speed; - /* Override custom RPM setting as they cannot co-exist */ - smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; - smu->user_dpm_profile.fan_speed_rpm = 0; - } + /* Override custom RPM setting as they cannot co-exist */ + smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM; + smu->user_dpm_profile.fan_speed_rpm = 0; } mutex_unlock(&smu->mutex); @@ -2669,10 +2687,15 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; + if (!smu->ppt_funcs->get_fan_speed_rpm) + return -EOPNOTSUPP; + + if (!speed) + return -EINVAL; + mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_fan_speed_rpm) - ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); + ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); mutex_unlock(&smu->mutex); @@ -3101,7 +3124,7 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) static const struct amd_pm_funcs swsmu_pm_funcs = { /* export for sysfs */ - .set_fan_control_mode = smu_pp_set_fan_control_mode, + .set_fan_control_mode = smu_set_fan_control_mode, .get_fan_control_mode = smu_get_fan_control_mode, .set_fan_speed_pwm = smu_set_fan_speed_pwm, .get_fan_speed_pwm = smu_get_fan_speed_pwm, -- cgit From 6492e1b07c03397f85bd6dc0e230ea6cd9394635 Mon Sep 17 00:00:00 2001 From: yipechai Date: Tue, 4 Jan 2022 13:35:37 +0800 Subject: drm/amdgpu: Unify ras block interface for each ras block 1. Define unified ops interface for each block. 2. Add ras_block_match function pointer in ops interface, each ras block can customize specail match function to identify itself. 3. Add amdgpu_ras_block_match_default new function. If a ras block doesn't define .ras_block_match, default execute amdgpu_ras_block_match_default to identify this ras block. 4. Define unified basic ras block data for each ras block. 5. Create dedicated amdgpu device ras block link list to manage all of the ras blocks. 6. Add amdgpu_ras_register_ras_block new function interface for each ras block to register itself to ras controlling block. 
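A minimal illustrative sketch (not part of this series) of how an IP block could plug into the unified interface described above, assuming a hypothetical "my_block" driver: only amdgpu_ras_block_object, amdgpu_ras_block_hw_ops and amdgpu_ras_register_ras_block() come from this patch; everything named my_block_* is made up for illustration.

#include "amdgpu.h"
#include "amdgpu_ras.h"

/* hypothetical per-block error counter read-out */
static void my_block_query_ras_error_count(struct amdgpu_device *adev,
					   void *ras_error_status)
{
	/* read the block's ECC/EDC registers into ras_error_status here */
}

static const struct amdgpu_ras_block_hw_ops my_block_ras_hw_ops = {
	.query_ras_error_count = my_block_query_ras_error_count,
};

static struct amdgpu_ras_block_object my_block_ras = {
	.name   = "my_block",
	.block  = AMDGPU_RAS_BLOCK__GFX,  /* whichever amdgpu_ras_block the IP maps to */
	.hw_ops = &my_block_ras_hw_ops,
	/* .ras_block_match left NULL, so amdgpu_ras_block_match_default() applies */
};

/* called once from the IP block's init path */
static int my_block_ras_register(struct amdgpu_device *adev)
{
	return amdgpu_ras_register_ras_block(adev, &my_block_ras);
}

With this in place, generic RAS code can look the block up from adev->ras_list by its amdgpu_ras_block value instead of reaching into per-IP function pointer tables.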
Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 46 ++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 28 ++++++++++++++++++ 4 files changed, 78 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index f0132a5cc58d..c349337939dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1091,6 +1091,8 @@ struct amdgpu_device { uint32_t ip_versions[MAX_HWIP][HWIP_MAX_INSTANCE]; bool ram_is_direct_mapped; + + struct list_head ras_list; }; static inline struct amdgpu_device *drm_to_adev(struct drm_device *ddev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 596bb2fdb8a2..9230e4476d91 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3519,6 +3519,8 @@ int amdgpu_device_init(struct amdgpu_device *adev, INIT_LIST_HEAD(&adev->reset_list); + INIT_LIST_HEAD(&adev->ras_list); + INIT_DELAYED_WORK(&adev->delayed_init_work, amdgpu_device_delayed_init_work_handler); INIT_DELAYED_WORK(&adev->gfx.gfx_off_delay_work, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 4a9970423e7d..bf2983fe5d9d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -866,6 +866,40 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, } /* feature ctl end */ +int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object* block_obj, enum amdgpu_ras_block block) +{ + if(!block_obj) + return -EINVAL; + + if (block_obj->block == block) + return 0; + + return -EINVAL; +} + +static struct amdgpu_ras_block_object* amdgpu_ras_get_ras_block(struct amdgpu_device *adev, + enum amdgpu_ras_block block, uint32_t sub_block_index) +{ + struct amdgpu_ras_block_object *obj, *tmp; + + if (block >= AMDGPU_RAS_BLOCK__LAST) + return NULL; + + if (!amdgpu_ras_is_supported(adev, block)) + return NULL; + + list_for_each_entry_safe(obj, tmp, &adev->ras_list, node) { + if (obj->ras_block_match) { + if (obj->ras_block_match(obj, block, sub_block_index) == 0) + return obj; + } else { + if (amdgpu_ras_block_match_default(obj, block) == 0) + return obj; + } + } + + return NULL; +} static void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, struct ras_common_if *ras_block, @@ -2776,3 +2810,15 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) } } #endif +/* Register each ip ras block into amdgpu ras */ +int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, + struct amdgpu_ras_block_object* ras_block_obj) +{ + if (!adev || !ras_block_obj) + return -EINVAL; + + INIT_LIST_HEAD(&ras_block_obj->node); + list_add_tail(&ras_block_obj->node, &adev->ras_list); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 1c708122d492..f66122fdf477 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -484,6 +484,33 @@ struct ras_debug_if { }; int op; }; + +struct amdgpu_ras_block_object { + /* block name */ + char name[32]; + + enum amdgpu_ras_block block; + + uint32_t sub_block_index; + + /* ras block link */ + struct list_head node; + + int 
(*ras_block_match)(struct amdgpu_ras_block_object *block_obj, enum amdgpu_ras_block block, uint32_t sub_block_index); + int (*ras_late_init)(struct amdgpu_device *adev, void *ras_info); + void (*ras_fini)(struct amdgpu_device *adev); + const struct amdgpu_ras_block_hw_ops *hw_ops; +}; + +struct amdgpu_ras_block_hw_ops { + int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); + void (*query_ras_error_count)(struct amdgpu_device *adev,void *ras_error_status); + void (*query_ras_error_status)(struct amdgpu_device *adev); + void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); + void (*reset_ras_error_count)(struct amdgpu_device *adev); + void (*reset_ras_error_status)(struct amdgpu_device *adev); +}; + /* work flow * vbios * 1: ras feature enable (enabled by default) @@ -667,4 +694,5 @@ const char *get_ras_block_str(struct ras_common_if *ras_block); bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev); +int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, struct amdgpu_ras_block_object* ras_block_obj); #endif -- cgit From 7cab2124058d2f5f048f435a4631e176dcd1430d Mon Sep 17 00:00:00 2001 From: yipechai Date: Tue, 4 Jan 2022 13:39:15 +0800 Subject: drm/amdgpu: Modify the compilation failed problem when other ras blocks' .h include amdgpu_ras.h Modify the compilation failed problem when other ras blocks' .h include amdgpu_ras.h. v2: squash in forward declaration warning fix (Alex) Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 39 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 34 +++++++++------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 2 +- 4 files changed, 51 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index c349337939dd..445a0d077c1f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -108,6 +108,7 @@ #include "amdgpu_smuio.h" #include "amdgpu_fdinfo.h" #include "amdgpu_mca.h" +#include "amdgpu_ras.h" #define MAX_GPU_INSTANCE 16 diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index bf2983fe5d9d..5b3f4beb2149 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2810,6 +2810,45 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) } } #endif + +struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev) +{ + if (!adev) + return NULL; + + return adev->psp.ras_context.ras; +} + +int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras* ras_con) +{ + if (!adev) + return -EINVAL;; + + adev->psp.ras_context.ras = ras_con; + return 0; +} + +/* check if ras is supported on block, say, sdma, gfx */ +int amdgpu_ras_is_supported(struct amdgpu_device *adev, + unsigned int block) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if (block >= AMDGPU_RAS_BLOCK_COUNT) + return 0; + return ras && (adev->ras_enabled & (1 << block)); +} + +int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) +{ + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) + schedule_work(&ras->recovery_work); + return 0; +} + + /* Register each ip ras block into amdgpu ras */ int amdgpu_ras_register_ras_block(struct 
amdgpu_device *adev, struct amdgpu_ras_block_object* ras_block_obj) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index f66122fdf477..7a4d82378205 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -26,11 +26,11 @@ #include #include -#include "amdgpu.h" -#include "amdgpu_psp.h" #include "ta_ras_if.h" #include "amdgpu_ras_eeprom.h" +struct amdgpu_iv_entry; + #define AMDGPU_RAS_FLAG_INIT_BY_VBIOS (0x1 << 0) enum amdgpu_ras_block { @@ -525,19 +525,6 @@ struct amdgpu_ras_block_hw_ops { * 8: feature disable */ -#define amdgpu_ras_get_context(adev) ((adev)->psp.ras_context.ras) -#define amdgpu_ras_set_context(adev, ras_con) ((adev)->psp.ras_context.ras = (ras_con)) - -/* check if ras is supported on block, say, sdma, gfx */ -static inline int amdgpu_ras_is_supported(struct amdgpu_device *adev, - unsigned int block) -{ - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - - if (block >= AMDGPU_RAS_BLOCK_COUNT) - return 0; - return ras && (adev->ras_enabled & (1 << block)); -} int amdgpu_ras_recovery_init(struct amdgpu_device *adev); @@ -554,15 +541,6 @@ int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev); -static inline int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) -{ - struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - - if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0) - schedule_work(&ras->recovery_work); - return 0; -} - static inline enum ta_ras_block amdgpu_ras_block_to_ta(enum amdgpu_ras_block block) { switch (block) { @@ -694,5 +672,13 @@ const char *get_ras_block_str(struct ras_common_if *ras_block); bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev); +int amdgpu_ras_is_supported(struct amdgpu_device *adev, unsigned int block); + +int amdgpu_ras_reset_gpu(struct amdgpu_device *adev); + +struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev); + +int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras* ras_con); + int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, struct amdgpu_ras_block_object* ras_block_obj); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index 25951b2d83b6..e81ce465ff3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -21,7 +21,7 @@ * */ -#include "amdgpu_ras.h" +#include "amdgpu.h" static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, void *ras_error_status, -- cgit From 8b0fb0e967c1700bd729ae54b6f229501b8587ec Mon Sep 17 00:00:00 2001 From: yipechai Date: Tue, 4 Jan 2022 13:52:46 +0800 Subject: drm/amdgpu: Modify gfx block to fit for the unified ras block data and ops 1.Modify gfx block to fit for the unified ras block data and ops. 2.Change amdgpu_gfx_ras_funcs to amdgpu_gfx_ras, and the corresponding variable name remove _funcs suffix. 3.Remove the const flag of gfx ras variable so that gfx ras block can be able to be inserted into amdgpu device ras block link list. 4.Invoke amdgpu_ras_register_ras_block function to register gfx ras block into amdgpu device ras block link list. 5.Remove the redundant code about gfx in amdgpu_ras.c after using the unified ras block. 6.Fill unified ras block .name .block .ras_late_init and .ras_fini for all of gfx versions. 
If .ras_late_init and .ras_fini had been defined by the selected gfx version, the defined functions will take effect; if not defined, default fill with amdgpu_gfx_ras_late_init and amdgpu_gfx_ras_fini. Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h | 17 +++------ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 63 +++++++++++++++++++++----------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 65 +++++++++++++++++++++------------ drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c | 24 +++++++----- drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 25 +++++++------ drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h | 2 +- 8 files changed, 123 insertions(+), 83 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 3d8f82dc8c97..43004822ec6f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -622,7 +622,7 @@ int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value) return r; } -int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev) +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info) { int r; struct ras_fs_if fs_info = { @@ -695,9 +695,9 @@ int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, */ if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) { kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->query_ras_error_count) - adev->gfx.ras_funcs->query_ras_error_count(adev, err_data); + if (adev->gfx.ras && adev->gfx.ras->ras_block.hw_ops && + adev->gfx.ras->ras_block.hw_ops->query_ras_error_count) + adev->gfx.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); amdgpu_ras_reset_gpu(adev); } return AMDGPU_RAS_SUCCESS; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h index 776c886fd94a..f99eac544f6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.h @@ -31,6 +31,7 @@ #include "amdgpu_ring.h" #include "amdgpu_rlc.h" #include "soc15.h" +#include "amdgpu_ras.h" /* GFX current status */ #define AMDGPU_GFX_NORMAL_MODE 0x00000000L @@ -198,16 +199,8 @@ struct amdgpu_cu_info { uint32_t bitmap[4][4]; }; -struct amdgpu_gfx_ras_funcs { - int (*ras_late_init)(struct amdgpu_device *adev); - void (*ras_fini)(struct amdgpu_device *adev); - int (*ras_error_inject)(struct amdgpu_device *adev, - void *inject_if); - int (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); - void (*reset_ras_error_count)(struct amdgpu_device *adev); - void (*query_ras_error_status)(struct amdgpu_device *adev); - void (*reset_ras_error_status)(struct amdgpu_device *adev); +struct amdgpu_gfx_ras { + struct amdgpu_ras_block_object ras_block; void (*enable_watchdog_timer)(struct amdgpu_device *adev); }; @@ -331,7 +324,7 @@ struct amdgpu_gfx { /*ras */ struct ras_common_if *ras_if; - const struct amdgpu_gfx_ras_funcs *ras_funcs; + struct amdgpu_gfx_ras *ras; }; #define amdgpu_gfx_get_gpu_clock_counter(adev) (adev)->gfx.funcs->get_gpu_clock_counter((adev)) @@ -393,7 +386,7 @@ bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev, int me, int pipe, int queue); void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable); int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value); -int 
amdgpu_gfx_ras_late_init(struct amdgpu_device *adev); +int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev, void *ras_info); void amdgpu_gfx_ras_fini(struct amdgpu_device *adev); int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev, void *err_data, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 5b3f4beb2149..a5812c21177e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -89,6 +89,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block) return ras_block_string[ras_block->block]; } +#define ras_block_str(_BLOCK_) (((_BLOCK_) < (sizeof(*ras_block_string)/sizeof(const char*))) ? ras_block_string[_BLOCK_] : "Out Of Range") + #define ras_err_str(i) (ras_error_string[ffs(i)]) #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) @@ -962,6 +964,7 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) { + struct amdgpu_ras_block_object* block_obj = NULL; struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; int i; @@ -969,6 +972,8 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, if (!obj) return -EINVAL; + block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); + switch (info->head.block) { case AMDGPU_RAS_BLOCK__UMC: amdgpu_ras_get_ecc_info(adev, &err_data); @@ -981,13 +986,16 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, } break; case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->query_ras_error_count) - adev->gfx.ras_funcs->query_ras_error_count(adev, &err_data); + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", + get_ras_block_str(&info->head)); + return -EINVAL; + } + if (block_obj->hw_ops->query_ras_error_count) + block_obj->hw_ops->query_ras_error_count(adev, &err_data); - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->query_ras_error_status) - adev->gfx.ras_funcs->query_ras_error_status(adev); + if (block_obj->hw_ops->query_ras_error_status) + block_obj->hw_ops->query_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__MMHUB: if (adev->mmhub.ras_funcs && @@ -1074,18 +1082,23 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, enum amdgpu_ras_block block) { + struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, block, 0); + if (!amdgpu_ras_is_supported(adev, block)) return -EINVAL; switch (block) { case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->reset_ras_error_count) - adev->gfx.ras_funcs->reset_ras_error_count(adev); + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); + return -EINVAL; + } + + if (block_obj->hw_ops->reset_ras_error_count) + block_obj->hw_ops->reset_ras_error_count(adev); - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->reset_ras_error_status) - adev->gfx.ras_funcs->reset_ras_error_status(adev); + if (block_obj->hw_ops->reset_ras_error_status) + block_obj->hw_ops->reset_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__MMHUB: if (adev->mmhub.ras_funcs && @@ -1150,7 +1163,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, .address = info->address, .value = info->value, }; - int ret = 0; + int ret = -EINVAL; + struct 
amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); if (!obj) return -EINVAL; @@ -1164,11 +1178,13 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, switch (info->head.block) { case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->ras_error_inject) - ret = adev->gfx.ras_funcs->ras_error_inject(adev, info); - else - ret = -EINVAL; + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); + return -EINVAL; + } + + if (block_obj->hw_ops->ras_error_inject) + ret = block_obj->hw_ops->ras_error_inject(adev, info); break; case AMDGPU_RAS_BLOCK__UMC: case AMDGPU_RAS_BLOCK__SDMA: @@ -1800,15 +1816,20 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, struct ras_query_if *info) { + struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); /* * Only two block need to query read/write * RspStatus at current state */ switch (info->head.block) { case AMDGPU_RAS_BLOCK__GFX: - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->query_ras_error_status) - adev->gfx.ras_funcs->query_ras_error_status(adev); + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); + return ; + } + + if (block_obj->hw_ops->query_ras_error_status) + block_obj->hw_ops->query_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__MMHUB: if (adev->mmhub.ras_funcs && diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 9189fb85a4dd..d36a6bc62560 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -882,7 +882,7 @@ static int gfx_v9_0_get_cu_info(struct amdgpu_device *adev, static uint64_t gfx_v9_0_get_gpu_clock_counter(struct amdgpu_device *adev); static void gfx_v9_0_ring_emit_de_meta(struct amdgpu_ring *ring); static u64 gfx_v9_0_ring_get_rptr_compute(struct amdgpu_ring *ring); -static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, +static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status); static int gfx_v9_0_ras_error_inject(struct amdgpu_device *adev, void *inject_if); @@ -2197,12 +2197,16 @@ static const struct amdgpu_gfx_funcs gfx_v9_0_gfx_funcs = { .select_me_pipe_q = &gfx_v9_0_select_me_pipe_q, }; -static const struct amdgpu_gfx_ras_funcs gfx_v9_0_ras_funcs = { - .ras_late_init = amdgpu_gfx_ras_late_init, - .ras_fini = amdgpu_gfx_ras_fini, - .ras_error_inject = &gfx_v9_0_ras_error_inject, - .query_ras_error_count = &gfx_v9_0_query_ras_error_count, - .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, +const struct amdgpu_ras_block_hw_ops gfx_v9_0_ras_ops = { + .ras_error_inject = &gfx_v9_0_ras_error_inject, + .query_ras_error_count = &gfx_v9_0_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_0_reset_ras_error_count, +}; + +static struct amdgpu_gfx_ras gfx_v9_0_ras = { + .ras_block = { + .hw_ops = &gfx_v9_0_ras_ops, + }, }; static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) @@ -2231,7 +2235,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) DRM_INFO("fix gfx.config for vega12\n"); break; case IP_VERSION(9, 4, 0): - adev->gfx.ras_funcs = &gfx_v9_0_ras_funcs; + adev->gfx.ras = &gfx_v9_0_ras; adev->gfx.config.max_hw_contexts = 8; 
adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2258,7 +2262,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config = RAVEN_GB_ADDR_CONFIG_GOLDEN; break; case IP_VERSION(9, 4, 1): - adev->gfx.ras_funcs = &gfx_v9_4_ras_funcs; + adev->gfx.ras = &gfx_v9_4_ras; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2279,7 +2283,7 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) gb_addr_config |= 0x22010042; break; case IP_VERSION(9, 4, 2): - adev->gfx.ras_funcs = &gfx_v9_4_2_ras_funcs; + adev->gfx.ras = &gfx_v9_4_2_ras; adev->gfx.config.max_hw_contexts = 8; adev->gfx.config.sc_prim_fifo_size_frontend = 0x20; adev->gfx.config.sc_prim_fifo_size_backend = 0x100; @@ -2298,6 +2302,25 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) break; } + if (adev->gfx.ras) { + err = amdgpu_ras_register_ras_block(adev, &adev->gfx.ras->ras_block); + if (err) { + DRM_ERROR("Failed to register gfx ras block!\n"); + return err; + } + + strcpy(adev->gfx.ras->ras_block.name,"gfx"); + adev->gfx.ras->ras_block.block = AMDGPU_RAS_BLOCK__GFX; + + /* If not define special ras_late_init function, use gfx default ras_late_init */ + if (!adev->gfx.ras->ras_block.ras_late_init) + adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init; + + /* If not define special ras_fini function, use gfx default ras_fini */ + if (!adev->gfx.ras->ras_block.ras_fini) + adev->gfx.ras->ras_block.ras_fini = amdgpu_gfx_ras_fini; + } + adev->gfx.config.gb_addr_config = gb_addr_config; adev->gfx.config.gb_addr_config_fields.num_pipes = 1 << @@ -2513,9 +2536,8 @@ static int gfx_v9_0_sw_fini(void *handle) int i; struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->ras_fini) - adev->gfx.ras_funcs->ras_fini(adev); + if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_fini) + adev->gfx.ras->ras_block.ras_fini(adev); for (i = 0; i < adev->gfx.num_gfx_rings; i++) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); @@ -4870,16 +4892,15 @@ static int gfx_v9_0_ecc_late_init(void *handle) if (r) return r; - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->ras_late_init) { - r = adev->gfx.ras_funcs->ras_late_init(adev); + if (adev->gfx.ras && adev->gfx.ras->ras_block.ras_late_init) { + r = adev->gfx.ras->ras_block.ras_late_init(adev, NULL); if (r) return r; } - if (adev->gfx.ras_funcs && - adev->gfx.ras_funcs->enable_watchdog_timer) - adev->gfx.ras_funcs->enable_watchdog_timer(adev); + if (adev->gfx.ras && + adev->gfx.ras->enable_watchdog_timer) + adev->gfx.ras->enable_watchdog_timer(adev); return 0; } @@ -6819,7 +6840,7 @@ static void gfx_v9_0_reset_ras_error_count(struct amdgpu_device *adev) WREG32_SOC15(GC, 0, mmATC_L2_CACHE_4K_EDC_INDEX, 255); } -static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, +static void gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; @@ -6828,7 +6849,7 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, uint32_t reg_value; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; + return; err_data->ue_count = 0; err_data->ce_count = 0; @@ -6857,8 +6878,6 @@ static int gfx_v9_0_query_ras_error_count(struct amdgpu_device *adev, mutex_unlock(&adev->grbm_idx_mutex); gfx_v9_0_query_utc_edc_status(adev, 
err_data); - - return 0; } static void gfx_v9_0_emit_mem_sync(struct amdgpu_ring *ring) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c index b4789dfc2bb9..c67e387a97f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.c @@ -863,7 +863,7 @@ static int gfx_v9_4_ras_error_count(struct amdgpu_device *adev, return 0; } -static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, +static void gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; @@ -872,7 +872,7 @@ static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, uint32_t reg_value; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; + return; err_data->ue_count = 0; err_data->ce_count = 0; @@ -903,7 +903,6 @@ static int gfx_v9_4_query_ras_error_count(struct amdgpu_device *adev, gfx_v9_4_query_utc_edc_status(adev, err_data); - return 0; } static void gfx_v9_4_reset_ras_error_count(struct amdgpu_device *adev) @@ -1029,11 +1028,16 @@ static void gfx_v9_4_query_ras_error_status(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } -const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs = { - .ras_late_init = amdgpu_gfx_ras_late_init, - .ras_fini = amdgpu_gfx_ras_fini, - .ras_error_inject = &gfx_v9_4_ras_error_inject, - .query_ras_error_count = &gfx_v9_4_query_ras_error_count, - .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, - .query_ras_error_status = &gfx_v9_4_query_ras_error_status, + +const struct amdgpu_ras_block_hw_ops gfx_v9_4_ras_ops = { + .ras_error_inject = &gfx_v9_4_ras_error_inject, + .query_ras_error_count = &gfx_v9_4_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_4_reset_ras_error_count, + .query_ras_error_status = &gfx_v9_4_query_ras_error_status, +}; + +struct amdgpu_gfx_ras gfx_v9_4_ras = { + .ras_block = { + .hw_ops = &gfx_v9_4_ras_ops, + }, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h index bdd16b568021..ca520a767267 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4.h @@ -24,6 +24,6 @@ #ifndef __GFX_V9_4_H__ #define __GFX_V9_4_H__ -extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_ras_funcs; +extern struct amdgpu_gfx_ras gfx_v9_4_ras; #endif /* __GFX_V9_4_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index c4f37a161875..7ec6243e015e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1641,14 +1641,14 @@ static int gfx_v9_4_2_query_utc_edc_count(struct amdgpu_device *adev, return 0; } -static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, +static void gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; uint32_t sec_count = 0, ded_count = 0; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) - return -EINVAL; + return; err_data->ue_count = 0; err_data->ce_count = 0; @@ -1661,7 +1661,6 @@ static int gfx_v9_4_2_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count += sec_count; err_data->ue_count += ded_count; - return 0; } static void gfx_v9_4_2_reset_utc_err_status(struct amdgpu_device *adev) @@ -1931,13 +1930,17 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev) 
mutex_unlock(&adev->grbm_idx_mutex); } -const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs = { - .ras_late_init = amdgpu_gfx_ras_late_init, - .ras_fini = amdgpu_gfx_ras_fini, - .ras_error_inject = &gfx_v9_4_2_ras_error_inject, - .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, - .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, - .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status, - .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status, +struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops ={ + .ras_error_inject = &gfx_v9_4_2_ras_error_inject, + .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, + .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, + .query_ras_error_status = &gfx_v9_4_2_query_ras_error_status, + .reset_ras_error_status = &gfx_v9_4_2_reset_ras_error_status, +}; + +struct amdgpu_gfx_ras gfx_v9_4_2_ras = { + .ras_block = { + .hw_ops = &gfx_v9_4_2_ras_ops, + }, .enable_watchdog_timer = &gfx_v9_4_2_enable_watchdog_timer, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h index 6db1f88509af..7584624b641c 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.h @@ -31,6 +31,6 @@ void gfx_v9_4_2_init_golden_registers(struct amdgpu_device *adev, void gfx_v9_4_2_set_power_brake_sequence(struct amdgpu_device *adev); int gfx_v9_4_2_do_edc_gpr_workarounds(struct amdgpu_device *adev); -extern const struct amdgpu_gfx_ras_funcs gfx_v9_4_2_ras_funcs; +extern struct amdgpu_gfx_ras gfx_v9_4_2_ras; #endif /* __GFX_V9_4_2_H__ */ -- cgit From 6c2453861f48e4e779cafa01c09e78ddc2c23c6b Mon Sep 17 00:00:00 2001 From: yipechai Date: Tue, 4 Jan 2022 18:56:20 +0800 Subject: drm/amdgpu: Modify xgmi block to fit for the unified ras block data and ops 1.Modify gmc block to fit for the unified ras block data and ops. 2.Change amdgpu_xgmi_ras_funcs to amdgpu_xgmi_ras, and the corresponding variable name remove _funcs suffix. 3.Remove the const flag of gmc ras variable so that gmc ras block can be able to be inserted into amdgpu device ras block link list. 4.Invoke amdgpu_ras_register_ras_block function to register gmc ras block into amdgpu device ras block link list. 5.Remove the redundant code about gmc in amdgpu_ras.c after using the unified ras block. 
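For reference, a minimal sketch (not part of the patch) of how common RAS code reaches the XGMI callbacks after this change, i.e. through the embedded ras_block/hw_ops rather than the removed adev->gmc.xgmi.ras_funcs pointer; the helper name example_query_xgmi_errors is hypothetical.

#include "amdgpu.h"
#include "amdgpu_ras.h"

/* hypothetical caller: query XGMI error counters via the unified ops */
static void example_query_xgmi_errors(struct amdgpu_device *adev,
				      struct ras_err_data *err_data)
{
	struct amdgpu_xgmi_ras *xgmi_ras_obj = adev->gmc.xgmi.ras;

	if (xgmi_ras_obj && xgmi_ras_obj->ras_block.hw_ops &&
	    xgmi_ras_obj->ras_block.hw_ops->query_ras_error_count)
		xgmi_ras_obj->ras_block.hw_ops->query_ras_error_count(adev, err_data);
}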
Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 16 ++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 11 ++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 10 +++++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 26 ++++++++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h | 4 ++-- 5 files changed, 37 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 2430d6223c2d..d86ee530e0a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -454,12 +454,13 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) return r; } - if (!adev->gmc.xgmi.connected_to_cpu) - adev->gmc.xgmi.ras_funcs = &xgmi_ras_funcs; + if (!adev->gmc.xgmi.connected_to_cpu) { + adev->gmc.xgmi.ras = &xgmi_ras; + amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block); + } - if (adev->gmc.xgmi.ras_funcs && - adev->gmc.xgmi.ras_funcs->ras_late_init) { - r = adev->gmc.xgmi.ras_funcs->ras_late_init(adev); + if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_late_init) { + r = adev->gmc.xgmi.ras->ras_block.ras_late_init(adev, NULL); if (r) return r; } @@ -505,9 +506,8 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) adev->mmhub.ras_funcs->ras_fini) adev->mmhub.ras_funcs->ras_fini(adev); - if (adev->gmc.xgmi.ras_funcs && - adev->gmc.xgmi.ras_funcs->ras_fini) - adev->gmc.xgmi.ras_funcs->ras_fini(adev); + if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_fini) + adev->gmc.xgmi.ras->ras_block.ras_fini(adev); if (adev->hdp.ras_funcs && adev->hdp.ras_funcs->ras_fini) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 8458cebc6d5b..0001631cfedb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -29,6 +29,7 @@ #include #include "amdgpu_irq.h" +#include "amdgpu_ras.h" /* VA hole for 48bit addresses on Vega10 */ #define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL @@ -135,12 +136,8 @@ struct amdgpu_gmc_funcs { unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev); }; -struct amdgpu_xgmi_ras_funcs { - int (*ras_late_init)(struct amdgpu_device *adev); - void (*ras_fini)(struct amdgpu_device *adev); - int (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); - void (*reset_ras_error_count)(struct amdgpu_device *adev); +struct amdgpu_xgmi_ras { + struct amdgpu_ras_block_object ras_block; }; struct amdgpu_xgmi { @@ -159,7 +156,7 @@ struct amdgpu_xgmi { struct ras_common_if *ras_if; bool connected_to_cpu; bool pending_reset; - const struct amdgpu_xgmi_ras_funcs *ras_funcs; + struct amdgpu_xgmi_ras *ras; }; struct amdgpu_gmc { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index a5812c21177e..28997b7f7c95 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1012,9 +1012,13 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data); break; case AMDGPU_RAS_BLOCK__XGMI_WAFL: - if (adev->gmc.xgmi.ras_funcs && - adev->gmc.xgmi.ras_funcs->query_ras_error_count) - adev->gmc.xgmi.ras_funcs->query_ras_error_count(adev, &err_data); + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", + 
get_ras_block_str(&info->head)); + return -EINVAL; + } + if (block_obj->hw_ops->query_ras_error_count) + block_obj->hw_ops->query_ras_error_count(adev, &err_data); break; case AMDGPU_RAS_BLOCK__HDP: if (adev->hdp.ras_funcs && diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index e8b8f28c2f72..d29acd33eb11 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -732,7 +732,7 @@ int amdgpu_xgmi_remove_device(struct amdgpu_device *adev) return psp_xgmi_terminate(&adev->psp); } -static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) +static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev, void *ras_info) { int r; struct ras_ih_if ih_info = { @@ -746,7 +746,7 @@ static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev) adev->gmc.xgmi.num_physical_nodes == 0) return 0; - adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev); + adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); if (!adev->gmc.xgmi.ras_if) { adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); @@ -865,7 +865,7 @@ static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev, return 0; } -static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, +static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; @@ -874,7 +874,7 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, uint32_t ue_cnt = 0, ce_cnt = 0; if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL)) - return -EINVAL; + return ; err_data->ue_count = 0; err_data->ce_count = 0; @@ -940,17 +940,23 @@ static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, break; } - adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev); + adev->gmc.xgmi.ras->ras_block.hw_ops->reset_ras_error_count(adev); err_data->ue_count += ue_cnt; err_data->ce_count += ce_cnt; - - return 0; } -const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = { - .ras_late_init = amdgpu_xgmi_ras_late_init, - .ras_fini = amdgpu_xgmi_ras_fini, +struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = { .query_ras_error_count = amdgpu_xgmi_query_ras_error_count, .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count, }; + +struct amdgpu_xgmi_ras xgmi_ras = { + .ras_block = { + .name = "xgmi", + .block = AMDGPU_RAS_BLOCK__XGMI_WAFL, + .hw_ops = &xgmi_ras_hw_ops, + .ras_late_init = amdgpu_xgmi_ras_late_init, + .ras_fini = amdgpu_xgmi_ras_fini, + }, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h index d2189bf7d428..0afca51c3c0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.h @@ -24,7 +24,7 @@ #include #include "amdgpu_psp.h" - +#include "amdgpu_ras.h" struct amdgpu_hive_info { struct kobject kobj; @@ -50,7 +50,7 @@ struct amdgpu_pcs_ras_field { uint32_t pcs_err_shift; }; -extern const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs; +extern struct amdgpu_xgmi_ras xgmi_ras; struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev); void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive); int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev); -- cgit From 6d76e9049ad92be32704106668c34493c3e4c0d4 Mon Sep 17 00:00:00 2001 From: yipechai Date: Tue, 4 Jan 2022 18:57:58 +0800 Subject: drm/amdgpu: Modify hdp block to fit for the unified ras 
block data and ops 1.Modify hdp block to fit for the unified ras block data and ops. 2.Change amdgpu_hdp_ras_funcs to amdgpu_hdp_ras, and the corresponding variable name remove _funcs suffix. 3.Remove the const flag of hdp ras variable so that hdp ras block can be able to be inserted into amdgpu device ras block link list. 4.Invoke amdgpu_ras_register_ras_block function to register hdp ras block into amdgpu device ras block link list. 5.Remove the redundant code about hdp in amdgpu_ras.c after using the unified ras block. Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 10 ++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 13 +++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 16 ++++++++-------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 9 +++++---- drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c | 14 +++++++++++--- drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h | 2 +- 7 files changed, 35 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index d86ee530e0a4..58cc4dae1246 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -465,9 +465,8 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) return r; } - if (adev->hdp.ras_funcs && - adev->hdp.ras_funcs->ras_late_init) { - r = adev->hdp.ras_funcs->ras_late_init(adev); + if (adev->hdp.ras && adev->hdp.ras->ras_block.ras_late_init) { + r = adev->hdp.ras->ras_block.ras_late_init(adev, NULL); if (r) return r; } @@ -509,9 +508,8 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_fini) adev->gmc.xgmi.ras->ras_block.ras_fini(adev); - if (adev->hdp.ras_funcs && - adev->hdp.ras_funcs->ras_fini) - adev->hdp.ras_funcs->ras_fini(adev); + if (adev->hdp.ras && adev->hdp.ras->ras_block.ras_fini) + adev->hdp.ras->ras_block.ras_fini(adev); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c index a766e1aad2b9..518966a26130 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.c @@ -24,7 +24,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" -int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev) +int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, void *ras_info) { int r; struct ras_ih_if ih_info = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h index 7ec99d591584..c94a4b3c8d6d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -22,13 +22,10 @@ */ #ifndef __AMDGPU_HDP_H__ #define __AMDGPU_HDP_H__ +#include "amdgpu_ras.h" -struct amdgpu_hdp_ras_funcs { - int (*ras_late_init)(struct amdgpu_device *adev); - void (*ras_fini)(struct amdgpu_device *adev); - void (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); - void (*reset_ras_error_count)(struct amdgpu_device *adev); +struct amdgpu_hdp_ras{ + struct amdgpu_ras_block_object ras_block; }; struct amdgpu_hdp_funcs { @@ -43,9 +40,9 @@ struct amdgpu_hdp_funcs { struct amdgpu_hdp { struct ras_common_if *ras_if; const struct amdgpu_hdp_funcs *funcs; - const struct amdgpu_hdp_ras_funcs *ras_funcs; + struct amdgpu_hdp_ras *ras; }; -int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev); +int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, void 
*ras_info); void amdgpu_hdp_ras_fini(struct amdgpu_device *adev); #endif /* __AMDGPU_HDP_H__ */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 28997b7f7c95..c47a03252ec8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1012,6 +1012,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data); break; case AMDGPU_RAS_BLOCK__XGMI_WAFL: + case AMDGPU_RAS_BLOCK__HDP: if (!block_obj || !block_obj->hw_ops) { dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); @@ -1020,11 +1021,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, if (block_obj->hw_ops->query_ras_error_count) block_obj->hw_ops->query_ras_error_count(adev, &err_data); break; - case AMDGPU_RAS_BLOCK__HDP: - if (adev->hdp.ras_funcs && - adev->hdp.ras_funcs->query_ras_error_count) - adev->hdp.ras_funcs->query_ras_error_count(adev, &err_data); - break; case AMDGPU_RAS_BLOCK__MCA: amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data); break; @@ -1118,9 +1114,13 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, adev->sdma.funcs->reset_ras_error_count(adev); break; case AMDGPU_RAS_BLOCK__HDP: - if (adev->hdp.ras_funcs && - adev->hdp.ras_funcs->reset_ras_error_count) - adev->hdp.ras_funcs->reset_ras_error_count(adev); + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); + return -EINVAL; + } + + if (block_obj->hw_ops->reset_ras_error_count) + block_obj->hw_ops->reset_ras_error_count(adev); break; default: break; diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 88c1eb9ad068..16ab572219ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1269,7 +1269,8 @@ static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) static void gmc_v9_0_set_hdp_ras_funcs(struct amdgpu_device *adev) { - adev->hdp.ras_funcs = &hdp_v4_0_ras_funcs; + adev->hdp.ras = &hdp_v4_0_ras; + amdgpu_ras_register_ras_block(adev, &adev->hdp.ras->ras_block); } static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev) @@ -1346,9 +1347,9 @@ static int gmc_v9_0_late_init(void *handle) adev->mmhub.ras_funcs->reset_ras_error_count) adev->mmhub.ras_funcs->reset_ras_error_count(adev); - if (adev->hdp.ras_funcs && - adev->hdp.ras_funcs->reset_ras_error_count) - adev->hdp.ras_funcs->reset_ras_error_count(adev); + if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops && + adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count) + adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count(adev); } r = amdgpu_gmc_ras_late_init(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c index eecfb1545c1e..6b41fcbf4875 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.c @@ -150,13 +150,21 @@ static void hdp_v4_0_init_registers(struct amdgpu_device *adev) WREG32_SOC15(HDP, 0, mmHDP_NONSURFACE_BASE_HI, (adev->gmc.vram_start >> 40)); } -const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs = { - .ras_late_init = amdgpu_hdp_ras_late_init, - .ras_fini = amdgpu_hdp_ras_fini, +struct amdgpu_ras_block_hw_ops hdp_v4_0_ras_hw_ops = { .query_ras_error_count = hdp_v4_0_query_ras_error_count, .reset_ras_error_count = hdp_v4_0_reset_ras_error_count, }; +struct amdgpu_hdp_ras hdp_v4_0_ras = { + .ras_block = { 
+ .name = "hdp", + .block = AMDGPU_RAS_BLOCK__HDP, + .hw_ops = &hdp_v4_0_ras_hw_ops, + .ras_late_init = amdgpu_hdp_ras_late_init, + .ras_fini = amdgpu_hdp_ras_fini, + }, +}; + const struct amdgpu_hdp_funcs hdp_v4_0_funcs = { .flush_hdp = hdp_v4_0_flush_hdp, .invalidate_hdp = hdp_v4_0_invalidate_hdp, diff --git a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h index dc3a1b81dd62..c44eee9282ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h +++ b/drivers/gpu/drm/amd/amdgpu/hdp_v4_0.h @@ -27,6 +27,6 @@ #include "soc15_common.h" extern const struct amdgpu_hdp_funcs hdp_v4_0_funcs; -extern const struct amdgpu_hdp_ras_funcs hdp_v4_0_ras_funcs; +extern struct amdgpu_hdp_ras hdp_v4_0_ras; #endif -- cgit From 5e67bba301156c85251f49df19f5c695875814d1 Mon Sep 17 00:00:00 2001 From: yipechai Date: Tue, 4 Jan 2022 19:04:41 +0800 Subject: drm/amdgpu: Modify mmhub block to fit for the unified ras block data and ops 1.Modify mmhub block to fit for the unified ras block data and ops. 2.Change amdgpu_mmhub_ras_funcs to amdgpu_mmhub_ras, and the corresponding variable name remove _funcs suffix. 3.Remove the const flag of mmhub ras variable so that mmhub ras block can be able to be inserted into amdgpu device ras block link list. 4.Invoke amdgpu_ras_register_ras_block function to register mmhub ras block into amdgpu device ras block link list. 5.Remove the redundant code about mmhub in amdgpu_ras.c after using the unified ras block. 5.Remove the redundant code about mmhub in amdgpu_ras.c after using the unified ras block. 6.Fill unified ras block .name .block .ras_late_init and .ras_fini for all of mmhub versions. If .ras_late_init and .ras_fini had been defined by the selected mmhub version, the defined functions will take effect; if not defined, default fill them with amdgpu_mmhub_ras_late_init and amdgpu_mmhub_ras_fini. 
Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 12 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 10 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h | 14 +++------ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 47 +++++++++--------------------- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 27 +++++++++++++---- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c | 10 +++++-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h | 2 +- drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c | 10 +++++-- drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h | 2 +- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c | 10 +++++-- drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h | 2 +- 12 files changed, 73 insertions(+), 75 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 9230e4476d91..71b814fe15f9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3307,9 +3307,9 @@ static void amdgpu_device_xgmi_reset_func(struct work_struct *__work) if (adev->asic_reset_res) goto fail; - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->reset_ras_error_count) - adev->mmhub.ras_funcs->reset_ras_error_count(adev); + if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && + adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) + adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); } else { task_barrier_full(&hive->tb); @@ -4656,9 +4656,9 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle, if (!r && amdgpu_ras_intr_triggered()) { list_for_each_entry(tmp_adev, device_list_handle, reset_list) { - if (tmp_adev->mmhub.ras_funcs && - tmp_adev->mmhub.ras_funcs->reset_ras_error_count) - tmp_adev->mmhub.ras_funcs->reset_ras_error_count(tmp_adev); + if (tmp_adev->mmhub.ras && tmp_adev->mmhub.ras->ras_block.hw_ops && + tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) + tmp_adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(tmp_adev); } amdgpu_ras_intr_cleared(); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 58cc4dae1246..acf806c87673 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -447,9 +447,8 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) return r; } - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->ras_late_init) { - r = adev->mmhub.ras_funcs->ras_late_init(adev); + if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_late_init) { + r = adev->mmhub.ras->ras_block.ras_late_init(adev, NULL); if (r) return r; } @@ -501,9 +500,8 @@ void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) adev->umc.ras_funcs->ras_fini) adev->umc.ras_funcs->ras_fini(adev); - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->ras_fini) - adev->mmhub.ras_funcs->ras_fini(adev); + if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_fini) + adev->mmhub.ras->ras_block.ras_fini(adev); if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_fini) adev->gmc.xgmi.ras->ras_block.ras_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c index 24297dc51434..f9b5472a75d7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.c @@ -24,7 +24,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" -int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev) 
+int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev, void *ras_info) { int r; struct ras_ih_if ih_info = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h index b27fcbccce2b..7deda9a3b81e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mmhub.h @@ -21,14 +21,8 @@ #ifndef __AMDGPU_MMHUB_H__ #define __AMDGPU_MMHUB_H__ -struct amdgpu_mmhub_ras_funcs { - int (*ras_late_init)(struct amdgpu_device *adev); - void (*ras_fini)(struct amdgpu_device *adev); - void (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); - void (*query_ras_error_status)(struct amdgpu_device *adev); - void (*reset_ras_error_count)(struct amdgpu_device *adev); - void (*reset_ras_error_status)(struct amdgpu_device *adev); +struct amdgpu_mmhub_ras { + struct amdgpu_ras_block_object ras_block; }; struct amdgpu_mmhub_funcs { @@ -50,10 +44,10 @@ struct amdgpu_mmhub_funcs { struct amdgpu_mmhub { struct ras_common_if *ras_if; const struct amdgpu_mmhub_funcs *funcs; - const struct amdgpu_mmhub_ras_funcs *ras_funcs; + struct amdgpu_mmhub_ras *ras; }; -int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev); +int amdgpu_mmhub_ras_late_init(struct amdgpu_device *adev, void *ras_info); void amdgpu_mmhub_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index c47a03252ec8..0ce26fd6abbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -986,6 +986,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, } break; case AMDGPU_RAS_BLOCK__GFX: + case AMDGPU_RAS_BLOCK__MMHUB: if (!block_obj || !block_obj->hw_ops) { dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); @@ -997,15 +998,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, if (block_obj->hw_ops->query_ras_error_status) block_obj->hw_ops->query_ras_error_status(adev); break; - case AMDGPU_RAS_BLOCK__MMHUB: - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->query_ras_error_count) - adev->mmhub.ras_funcs->query_ras_error_count(adev, &err_data); - - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->query_ras_error_status) - adev->mmhub.ras_funcs->query_ras_error_status(adev); - break; case AMDGPU_RAS_BLOCK__PCIE_BIF: if (adev->nbio.ras_funcs && adev->nbio.ras_funcs->query_ras_error_count) @@ -1089,6 +1081,7 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, switch (block) { case AMDGPU_RAS_BLOCK__GFX: + case AMDGPU_RAS_BLOCK__MMHUB: if (!block_obj || !block_obj->hw_ops) { dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); return -EINVAL; @@ -1100,15 +1093,6 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, if (block_obj->hw_ops->reset_ras_error_status) block_obj->hw_ops->reset_ras_error_status(adev); break; - case AMDGPU_RAS_BLOCK__MMHUB: - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->reset_ras_error_count) - adev->mmhub.ras_funcs->reset_ras_error_count(adev); - - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->reset_ras_error_status) - adev->mmhub.ras_funcs->reset_ras_error_status(adev); - break; case AMDGPU_RAS_BLOCK__SDMA: if (adev->sdma.funcs->reset_ras_error_count) adev->sdma.funcs->reset_ras_error_count(adev); @@ -1825,24 +1809,19 @@ static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, * Only two block need to query read/write * RspStatus at current 
state */ - switch (info->head.block) { - case AMDGPU_RAS_BLOCK__GFX: - if (!block_obj || !block_obj->hw_ops) { - dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); - return ; - } + if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) && + (info->head.block != AMDGPU_RAS_BLOCK__MMHUB)) + return ; - if (block_obj->hw_ops->query_ras_error_status) - block_obj->hw_ops->query_ras_error_status(adev); - break; - case AMDGPU_RAS_BLOCK__MMHUB: - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->query_ras_error_status) - adev->mmhub.ras_funcs->query_ras_error_status(adev); - break; - default: - break; + block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); + return ; } + + if (block_obj->hw_ops->query_ras_error_status) + block_obj->hw_ops->query_ras_error_status(adev); + } static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 16ab572219ed..7506e198e6e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1248,18 +1248,33 @@ static void gmc_v9_0_set_mmhub_ras_funcs(struct amdgpu_device *adev) { switch (adev->ip_versions[MMHUB_HWIP][0]) { case IP_VERSION(9, 4, 0): - adev->mmhub.ras_funcs = &mmhub_v1_0_ras_funcs; + adev->mmhub.ras = &mmhub_v1_0_ras; break; case IP_VERSION(9, 4, 1): - adev->mmhub.ras_funcs = &mmhub_v9_4_ras_funcs; + adev->mmhub.ras = &mmhub_v9_4_ras; break; case IP_VERSION(9, 4, 2): - adev->mmhub.ras_funcs = &mmhub_v1_7_ras_funcs; + adev->mmhub.ras = &mmhub_v1_7_ras; break; default: /* mmhub ras is not available */ break; } + + if (adev->mmhub.ras) { + amdgpu_ras_register_ras_block(adev, &adev->mmhub.ras->ras_block); + + strcpy(adev->mmhub.ras->ras_block.name,"mmhub"); + adev->mmhub.ras->ras_block.block = AMDGPU_RAS_BLOCK__MMHUB; + + /* If don't define special ras_late_init function, use default ras_late_init */ + if (!adev->mmhub.ras->ras_block.ras_late_init) + adev->mmhub.ras->ras_block.ras_late_init = amdgpu_mmhub_ras_late_init; + + /* If don't define special ras_fini function, use default ras_fini */ + if (!adev->mmhub.ras->ras_block.ras_fini) + adev->mmhub.ras->ras_block.ras_fini = amdgpu_mmhub_ras_fini; + } } static void gmc_v9_0_set_gfxhub_funcs(struct amdgpu_device *adev) @@ -1343,9 +1358,9 @@ static int gmc_v9_0_late_init(void *handle) } if (!amdgpu_persistent_edc_harvesting_supported(adev)) { - if (adev->mmhub.ras_funcs && - adev->mmhub.ras_funcs->reset_ras_error_count) - adev->mmhub.ras_funcs->reset_ras_error_count(adev); + if (adev->mmhub.ras && adev->mmhub.ras->ras_block.hw_ops && + adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count) + adev->mmhub.ras->ras_block.hw_ops->reset_ras_error_count(adev); if (adev->hdp.ras && adev->hdp.ras->ras_block.hw_ops && adev->hdp.ras->ras_block.hw_ops->reset_ras_error_count) diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c index 1da2ec692057..4c9f0c0f3116 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.c @@ -774,13 +774,17 @@ static void mmhub_v1_0_reset_ras_error_count(struct amdgpu_device *adev) } } -const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs = { - .ras_late_init = amdgpu_mmhub_ras_late_init, - .ras_fini = amdgpu_mmhub_ras_fini, +struct amdgpu_ras_block_hw_ops 
mmhub_v1_0_ras_hw_ops = { .query_ras_error_count = mmhub_v1_0_query_ras_error_count, .reset_ras_error_count = mmhub_v1_0_reset_ras_error_count, }; +struct amdgpu_mmhub_ras mmhub_v1_0_ras = { + .ras_block = { + .hw_ops = &mmhub_v1_0_ras_hw_ops, + }, +}; + const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs = { .get_fb_location = mmhub_v1_0_get_fb_location, .init = mmhub_v1_0_init, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h index 4661b094e007..dae7ca48bd8b 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_0.h @@ -24,6 +24,6 @@ #define __MMHUB_V1_0_H__ extern const struct amdgpu_mmhub_funcs mmhub_v1_0_funcs; -extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_0_ras_funcs; +extern struct amdgpu_mmhub_ras mmhub_v1_0_ras; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c index f5f7181f9af5..3b901f941627 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.c @@ -1321,15 +1321,19 @@ static void mmhub_v1_7_reset_ras_error_status(struct amdgpu_device *adev) } } -const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs = { - .ras_late_init = amdgpu_mmhub_ras_late_init, - .ras_fini = amdgpu_mmhub_ras_fini, +struct amdgpu_ras_block_hw_ops mmhub_v1_7_ras_hw_ops = { .query_ras_error_count = mmhub_v1_7_query_ras_error_count, .reset_ras_error_count = mmhub_v1_7_reset_ras_error_count, .query_ras_error_status = mmhub_v1_7_query_ras_error_status, .reset_ras_error_status = mmhub_v1_7_reset_ras_error_status, }; +struct amdgpu_mmhub_ras mmhub_v1_7_ras = { + .ras_block = { + .hw_ops = &mmhub_v1_7_ras_hw_ops, + }, +}; + const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs = { .get_fb_location = mmhub_v1_7_get_fb_location, .init = mmhub_v1_7_init, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h index a7f9dfc24697..629f49052137 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v1_7.h @@ -24,6 +24,6 @@ #define __MMHUB_V1_7_H__ extern const struct amdgpu_mmhub_funcs mmhub_v1_7_funcs; -extern const struct amdgpu_mmhub_ras_funcs mmhub_v1_7_ras_funcs; +extern struct amdgpu_mmhub_ras mmhub_v1_7_ras; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c index ff49eeaf7882..619106f7d23d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.c @@ -1655,14 +1655,18 @@ static void mmhub_v9_4_query_ras_error_status(struct amdgpu_device *adev) } } -const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs = { - .ras_late_init = amdgpu_mmhub_ras_late_init, - .ras_fini = amdgpu_mmhub_ras_fini, +const struct amdgpu_ras_block_hw_ops mmhub_v9_4_ras_hw_ops = { .query_ras_error_count = mmhub_v9_4_query_ras_error_count, .reset_ras_error_count = mmhub_v9_4_reset_ras_error_count, .query_ras_error_status = mmhub_v9_4_query_ras_error_status, }; +struct amdgpu_mmhub_ras mmhub_v9_4_ras = { + .ras_block = { + .hw_ops = &mmhub_v9_4_ras_hw_ops, + }, +}; + const struct amdgpu_mmhub_funcs mmhub_v9_4_funcs = { .get_fb_location = mmhub_v9_4_get_fb_location, .init = mmhub_v9_4_init, diff --git a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h index 90436efa92ef..a48329d95f71 100644 --- a/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h +++ b/drivers/gpu/drm/amd/amdgpu/mmhub_v9_4.h @@ -24,6 +24,6 @@ #define __MMHUB_V9_4_H__ extern const struct amdgpu_mmhub_funcs 
mmhub_v9_4_funcs; -extern const struct amdgpu_mmhub_ras_funcs mmhub_v9_4_ras_funcs; +extern struct amdgpu_mmhub_ras mmhub_v9_4_ras; #endif -- cgit From 2e54fe5d056e7dc82988ef64ded3dca0ef954f0a Mon Sep 17 00:00:00 2001 From: yipechai Date: Wed, 5 Jan 2022 14:28:10 +0800 Subject: drm/amdgpu: Modify nbio block to fit for the unified ras block data and ops 1.Modify nbio block to fit for the unified ras block data and ops. 2.Change amdgpu_nbio_ras_funcs to amdgpu_nbio_ras, and the corresponding variable name remove _funcs suffix. 3.Remove the const flag of mmhub ras variable so that nbio ras block can be able to be inserted into amdgpu device ras block link list. 4.Invoke amdgpu_ras_register_ras_block function to register nbio ras block into amdgpu device ras block link list. 5.Remove the redundant code about nbio in amdgpu_ras.c after using the unified ras block. Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c | 12 ++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h | 11 ++++------- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 22 ++++++++++------------ drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c | 17 +++++++++++++---- drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h | 2 +- drivers/gpu/drm/amd/amdgpu/soc15.c | 18 ++++++++---------- 7 files changed, 43 insertions(+), 41 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c index f5cbc2747ac6..ea3e8c66211f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_irq.c @@ -199,13 +199,13 @@ static irqreturn_t amdgpu_irq_handler(int irq, void *arg) * ack the interrupt if it is there */ if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__PCIE_BIF)) { - if (adev->nbio.ras_funcs && - adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring) - adev->nbio.ras_funcs->handle_ras_controller_intr_no_bifring(adev); + if (adev->nbio.ras && + adev->nbio.ras->handle_ras_controller_intr_no_bifring) + adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); - if (adev->nbio.ras_funcs && - adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring) - adev->nbio.ras_funcs->handle_ras_err_event_athub_intr_no_bifring(adev); + if (adev->nbio.ras && + adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring) + adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev); } return ret; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c index 6afb02fef8cf..6ace2e390e77 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.c @@ -22,7 +22,7 @@ #include "amdgpu.h" #include "amdgpu_ras.h" -int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev) +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, void *ras_info) { int r; struct ras_ih_if ih_info = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h index 843052205bd5..4afb76d3cd97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_nbio.h @@ -47,15 +47,12 @@ struct nbio_hdp_flush_reg { u32 ref_and_mask_sdma7; }; -struct amdgpu_nbio_ras_funcs { +struct amdgpu_nbio_ras { + struct amdgpu_ras_block_object ras_block; void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev); void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev); int 
(*init_ras_controller_interrupt)(struct amdgpu_device *adev); int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev); - void (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); - int (*ras_late_init)(struct amdgpu_device *adev); - void (*ras_fini)(struct amdgpu_device *adev); }; struct amdgpu_nbio_funcs { @@ -104,9 +101,9 @@ struct amdgpu_nbio { struct amdgpu_irq_src ras_err_event_athub_irq; struct ras_common_if *ras_if; const struct amdgpu_nbio_funcs *funcs; - const struct amdgpu_nbio_ras_funcs *ras_funcs; + struct amdgpu_nbio_ras *ras; }; -int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev); +int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev, void *ras_info); void amdgpu_nbio_ras_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 0ce26fd6abbd..6d1ca9e9795b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -999,10 +999,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, block_obj->hw_ops->query_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__PCIE_BIF: - if (adev->nbio.ras_funcs && - adev->nbio.ras_funcs->query_ras_error_count) - adev->nbio.ras_funcs->query_ras_error_count(adev, &err_data); - break; case AMDGPU_RAS_BLOCK__XGMI_WAFL: case AMDGPU_RAS_BLOCK__HDP: if (!block_obj || !block_obj->hw_ops) { @@ -2385,24 +2381,26 @@ int amdgpu_ras_init(struct amdgpu_device *adev) case CHIP_VEGA20: case CHIP_ARCTURUS: case CHIP_ALDEBARAN: - if (!adev->gmc.xgmi.connected_to_cpu) - adev->nbio.ras_funcs = &nbio_v7_4_ras_funcs; + if (!adev->gmc.xgmi.connected_to_cpu) { + adev->nbio.ras = &nbio_v7_4_ras; + amdgpu_ras_register_ras_block(adev, &adev->nbio.ras->ras_block); + } break; default: /* nbio ras is not available */ break; } - if (adev->nbio.ras_funcs && - adev->nbio.ras_funcs->init_ras_controller_interrupt) { - r = adev->nbio.ras_funcs->init_ras_controller_interrupt(adev); + if (adev->nbio.ras && + adev->nbio.ras->init_ras_controller_interrupt) { + r = adev->nbio.ras->init_ras_controller_interrupt(adev); if (r) goto release_con; } - if (adev->nbio.ras_funcs && - adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) { - r = adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt(adev); + if (adev->nbio.ras && + adev->nbio.ras->init_ras_err_event_athub_interrupt) { + r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev); if (r) goto release_con; } diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c index dc5e93756fea..39974b449341 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.c @@ -658,16 +658,25 @@ static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev, DOORBELL_INTERRUPT_DISABLE, enable ? 
0 : 1); } -const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs = { +const struct amdgpu_ras_block_hw_ops nbio_v7_4_ras_hw_ops = { + .query_ras_error_count = nbio_v7_4_query_ras_error_count, +}; + +struct amdgpu_nbio_ras nbio_v7_4_ras = { + .ras_block = { + .name = "pcie_bif", + .block = AMDGPU_RAS_BLOCK__PCIE_BIF, + .hw_ops = &nbio_v7_4_ras_hw_ops, + .ras_late_init = amdgpu_nbio_ras_late_init, + .ras_fini = amdgpu_nbio_ras_fini, + }, .handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring, .handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring, .init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt, .init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt, - .query_ras_error_count = nbio_v7_4_query_ras_error_count, - .ras_late_init = amdgpu_nbio_ras_late_init, - .ras_fini = amdgpu_nbio_ras_fini, }; + static void nbio_v7_4_program_ltr(struct amdgpu_device *adev) { uint32_t def, data; diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h index cc5692db6f98..7490022d79d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_4.h @@ -29,6 +29,6 @@ extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg; extern const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg_ald; extern const struct amdgpu_nbio_funcs nbio_v7_4_funcs; -extern const struct amdgpu_nbio_ras_funcs nbio_v7_4_ras_funcs; +extern struct amdgpu_nbio_ras nbio_v7_4_ras; #endif diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 0fc1747e4a70..6c8fcc4e29f4 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -1224,9 +1224,8 @@ static int soc15_common_late_init(void *handle) if (amdgpu_sriov_vf(adev)) xgpu_ai_mailbox_get_irq(adev); - if (adev->nbio.ras_funcs && - adev->nbio.ras_funcs->ras_late_init) - r = adev->nbio.ras_funcs->ras_late_init(adev); + if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_late_init) + r = adev->nbio.ras->ras_block.ras_late_init(adev, NULL); return r; } @@ -1249,9 +1248,8 @@ static int soc15_common_sw_fini(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->nbio.ras_funcs && - adev->nbio.ras_funcs->ras_fini) - adev->nbio.ras_funcs->ras_fini(adev); + if (adev->nbio.ras && adev->nbio.ras->ras_block.ras_fini) + adev->nbio.ras->ras_block.ras_fini(adev); if (adev->df.funcs && adev->df.funcs->sw_fini) @@ -1318,11 +1316,11 @@ static int soc15_common_hw_fini(void *handle) if (adev->nbio.ras_if && amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { - if (adev->nbio.ras_funcs && - adev->nbio.ras_funcs->init_ras_controller_interrupt) + if (adev->nbio.ras && + adev->nbio.ras->init_ras_controller_interrupt) amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0); - if (adev->nbio.ras_funcs && - adev->nbio.ras_funcs->init_ras_err_event_athub_interrupt) + if (adev->nbio.ras && + adev->nbio.ras->init_ras_err_event_athub_interrupt) amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0); } -- cgit From efe17d5a217e6b7dfd16c80dab522abcf2edf1bc Mon Sep 17 00:00:00 2001 From: yipechai Date: Thu, 6 Jan 2022 14:07:44 +0800 Subject: drm/amdgpu: Modify umc block to fit for the unified ras block data and ops 1.Modify umc block to fit for the unified ras block data and ops. 2.Change amdgpu_umc_ras_funcs to amdgpu_umc_ras, and the corresponding variable name remove _funcs suffix. 
3.Remove the const flag of umc ras variable so that umc ras block can be able to be inserted into amdgpu device ras block link list. 4.Invoke amdgpu_ras_register_ras_block function to register umc ras block into amdgpu device ras block link list. 5.Remove the redundant code about umc in amdgpu_ras.c after using the unified ras block. 6.Fill unified ras block .name .block .ras_late_init and .ras_fini for all of umc versions. If .ras_late_init and .ras_fini had been defined by the selected umc version, the defined functions will take effect; if not defined, default fill them with amdgpu_umc_ras_late_init and amdgpu_umc_ras_fini. Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 10 ++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 30 +++++++++++++++--------------- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 32 ++++++++++++++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 14 +++++--------- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 16 +++++++++++++++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 21 ++++++++++++++++++--- drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 12 ++++++++---- drivers/gpu/drm/amd/amdgpu/umc_v6_1.h | 2 +- drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 10 +++++++--- drivers/gpu/drm/amd/amdgpu/umc_v6_7.h | 2 +- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 12 ++++++++---- drivers/gpu/drm/amd/amdgpu/umc_v8_7.h | 2 +- 12 files changed, 99 insertions(+), 64 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index acf806c87673..de0ef21e1501 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -440,9 +440,8 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) { int r; - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->ras_late_init) { - r = adev->umc.ras_funcs->ras_late_init(adev); + if (adev->umc.ras && adev->umc.ras->ras_block.ras_late_init) { + r = adev->umc.ras->ras_block.ras_late_init(adev, NULL); if (r) return r; } @@ -496,9 +495,8 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) void amdgpu_gmc_ras_fini(struct amdgpu_device *adev) { - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->ras_fini) - adev->umc.ras_funcs->ras_fini(adev); + if (adev->umc.ras && adev->umc.ras->ras_block.ras_fini) + adev->umc.ras->ras_block.ras_fini(adev); if (adev->mmhub.ras && adev->mmhub.ras->ras_block.ras_fini) adev->mmhub.ras->ras_block.ras_fini(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 6d1ca9e9795b..fba1c415a2a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -939,24 +939,24 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d */ ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc)); if (ret == -EOPNOTSUPP) { - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_count) - adev->umc.ras_funcs->query_ras_error_count(adev, err_data); + if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && + adev->umc.ras->ras_block.hw_ops->query_ras_error_count) + adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); /* umc query_ras_error_address is also responsible for clearing * error status */ - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_address) - adev->umc.ras_funcs->query_ras_error_address(adev, err_data); + if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && 
+ adev->umc.ras->ras_block.hw_ops->query_ras_error_address) + adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data); } else if (!ret) { - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->ecc_info_query_ras_error_count) - adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, err_data); + if (adev->umc.ras && + adev->umc.ras->ecc_info_query_ras_error_count) + adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data); - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->ecc_info_query_ras_error_address) - adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, err_data); + if (adev->umc.ras && + adev->umc.ras->ecc_info_query_ras_error_address) + adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data); } } @@ -2412,12 +2412,12 @@ int amdgpu_ras_init(struct amdgpu_device *adev) } else if (adev->df.funcs && adev->df.funcs->query_ras_poison_mode && - adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_poison_mode) { + adev->umc.ras && + adev->umc.ras->query_ras_poison_mode) { df_poison = adev->df.funcs->query_ras_poison_mode(adev); umc_poison = - adev->umc.ras_funcs->query_ras_poison_mode(adev); + adev->umc.ras->query_ras_poison_mode(adev); /* Only poison is set in both DF and UMC, we can support it */ if (df_poison && umc_poison) con->poison_supported = true; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index e81ce465ff3a..b4c68c09e071 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -35,12 +35,12 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, kgd2kfd_set_sram_ecc_flag(adev->kfd.dev); ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(con->umc_ecc)); if (ret == -EOPNOTSUPP) { - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_count) - adev->umc.ras_funcs->query_ras_error_count(adev, ras_error_status); + if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && + adev->umc.ras->ras_block.hw_ops->query_ras_error_count) + adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, ras_error_status); - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->query_ras_error_address && + if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && + adev->umc.ras->ras_block.hw_ops->query_ras_error_address && adev->umc.max_ras_err_cnt_per_query) { err_data->err_addr = kcalloc(adev->umc.max_ras_err_cnt_per_query, @@ -56,15 +56,15 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, /* umc query_ras_error_address is also responsible for clearing * error status */ - adev->umc.ras_funcs->query_ras_error_address(adev, ras_error_status); + adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, ras_error_status); } } else if (!ret) { - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->ecc_info_query_ras_error_count) - adev->umc.ras_funcs->ecc_info_query_ras_error_count(adev, ras_error_status); + if (adev->umc.ras && + adev->umc.ras->ecc_info_query_ras_error_count) + adev->umc.ras->ecc_info_query_ras_error_count(adev, ras_error_status); - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->ecc_info_query_ras_error_address && + if (adev->umc.ras && + adev->umc.ras->ecc_info_query_ras_error_address && adev->umc.max_ras_err_cnt_per_query) { err_data->err_addr = kcalloc(adev->umc.max_ras_err_cnt_per_query, @@ -80,7 +80,7 @@ static int amdgpu_umc_do_page_retirement(struct amdgpu_device *adev, /* umc query_ras_error_address is also responsible for clearing * error status */ - 
adev->umc.ras_funcs->ecc_info_query_ras_error_address(adev, ras_error_status); + adev->umc.ras->ecc_info_query_ras_error_address(adev, ras_error_status); } } @@ -136,7 +136,7 @@ static int amdgpu_umc_process_ras_data_cb(struct amdgpu_device *adev, return amdgpu_umc_do_page_retirement(adev, ras_error_status, entry, true); } -int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_info) { int r; struct ras_fs_if fs_info = { @@ -172,9 +172,9 @@ int amdgpu_umc_ras_late_init(struct amdgpu_device *adev) } /* ras init of specific umc version */ - if (adev->umc.ras_funcs && - adev->umc.ras_funcs->err_cnt_init) - adev->umc.ras_funcs->err_cnt_init(adev); + if (adev->umc.ras && + adev->umc.ras->err_cnt_init) + adev->umc.ras->err_cnt_init(adev); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index b72194e8bfe5..195740a6d97d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -20,6 +20,7 @@ */ #ifndef __AMDGPU_UMC_H__ #define __AMDGPU_UMC_H__ +#include "amdgpu_ras.h" /* * (addr / 256) * 4096, the higher 26 bits in ErrorAddr @@ -40,14 +41,9 @@ #define LOOP_UMC_CH_INST(ch_inst) for ((ch_inst) = 0; (ch_inst) < adev->umc.channel_inst_num; (ch_inst)++) #define LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) LOOP_UMC_INST((umc_inst)) LOOP_UMC_CH_INST((ch_inst)) -struct amdgpu_umc_ras_funcs { +struct amdgpu_umc_ras { + struct amdgpu_ras_block_object ras_block; void (*err_cnt_init)(struct amdgpu_device *adev); - int (*ras_late_init)(struct amdgpu_device *adev); - void (*ras_fini)(struct amdgpu_device *adev); - void (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); - void (*query_ras_error_address)(struct amdgpu_device *adev, - void *ras_error_status); bool (*query_ras_poison_mode)(struct amdgpu_device *adev); void (*ecc_info_query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); @@ -73,10 +69,10 @@ struct amdgpu_umc { struct ras_common_if *ras_if; const struct amdgpu_umc_funcs *funcs; - const struct amdgpu_umc_ras_funcs *ras_funcs; + struct amdgpu_umc_ras *ras; }; -int amdgpu_umc_ras_late_init(struct amdgpu_device *adev); +int amdgpu_umc_ras_late_init(struct amdgpu_device *adev, void *ras_info); void amdgpu_umc_ras_fini(struct amdgpu_device *adev); int amdgpu_umc_poison_handler(struct amdgpu_device *adev, void *ras_error_status, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 38bb42727715..5ef4ad28ab26 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -664,11 +664,25 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.umc_inst_num = UMC_V8_7_UMC_INSTANCE_NUM; adev->umc.channel_offs = UMC_V8_7_PER_CHANNEL_OFFSET_SIENNA; adev->umc.channel_idx_tbl = &umc_v8_7_channel_idx_tbl[0][0]; - adev->umc.ras_funcs = &umc_v8_7_ras_funcs; + adev->umc.ras = &umc_v8_7_ras; break; default: break; } + if (adev->umc.ras) { + amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); + + strcpy(adev->umc.ras->ras_block.name,"umc"); + adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC; + + /* If don't define special ras_late_init function, use default ras_late_init */ + if (!adev->umc.ras->ras_block.ras_late_init) + adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; + + /* If don't define special ras_fini function, use default ras_fini */ + if 
(!adev->umc.ras->ras_block.ras_fini) + adev->umc.ras->ras_block.ras_fini = amdgpu_umc_ras_fini; + } } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 7506e198e6e1..3965aae435f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1202,7 +1202,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_VG20; adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; - adev->umc.ras_funcs = &umc_v6_1_ras_funcs; + adev->umc.ras = &umc_v6_1_ras; break; case IP_VERSION(6, 1, 2): adev->umc.max_ras_err_cnt_per_query = UMC_V6_1_TOTAL_CHANNEL_NUM; @@ -1210,7 +1210,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.umc_inst_num = UMC_V6_1_UMC_INSTANCE_NUM; adev->umc.channel_offs = UMC_V6_1_PER_CHANNEL_OFFSET_ARCT; adev->umc.channel_idx_tbl = &umc_v6_1_channel_idx_tbl[0][0]; - adev->umc.ras_funcs = &umc_v6_1_ras_funcs; + adev->umc.ras = &umc_v6_1_ras; break; case IP_VERSION(6, 7, 0): adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM; @@ -1218,7 +1218,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET; if (!adev->gmc.xgmi.connected_to_cpu) - adev->umc.ras_funcs = &umc_v6_7_ras_funcs; + adev->umc.ras = &umc_v6_7_ras; if (1 & adev->smuio.funcs->get_die_id(adev)) adev->umc.channel_idx_tbl = &umc_v6_7_channel_idx_tbl_first[0][0]; else @@ -1227,6 +1227,21 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) default: break; } + + if (adev->umc.ras) { + amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); + + strcpy(adev->umc.ras->ras_block.name,"umc"); + adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC; + + /* If don't define special ras_late_init function, use default ras_late_init */ + if (!adev->umc.ras->ras_block.ras_late_init) + adev->umc.ras->ras_block.ras_late_init = amdgpu_umc_ras_late_init; + + /* If don't define special ras_fini function, use default ras_fini */ + if (!adev->umc.ras->ras_block.ras_fini) + adev->umc.ras->ras_block.ras_fini = amdgpu_umc_ras_fini; + } } static void gmc_v9_0_set_mmhub_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 20b44983ac94..4776301972d4 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -465,10 +465,14 @@ static void umc_v6_1_err_cnt_init(struct amdgpu_device *adev) umc_v6_1_enable_umc_index_mode(adev); } -const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs = { - .err_cnt_init = umc_v6_1_err_cnt_init, - .ras_late_init = amdgpu_umc_ras_late_init, - .ras_fini = amdgpu_umc_ras_fini, +const struct amdgpu_ras_block_hw_ops umc_v6_1_ras_hw_ops = { .query_ras_error_count = umc_v6_1_query_ras_error_count, .query_ras_error_address = umc_v6_1_query_ras_error_address, }; + +struct amdgpu_umc_ras umc_v6_1_ras = { + .ras_block = { + .hw_ops = &umc_v6_1_ras_hw_ops, + }, + .err_cnt_init = umc_v6_1_err_cnt_init, +}; \ No newline at end of file diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h index 5dc36c730bb2..50c632eb4cc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.h @@ -45,7 +45,7 @@ /* umc ce count initial value */ #define UMC_V6_1_CE_CNT_INIT 
(UMC_V6_1_CE_CNT_MAX - UMC_V6_1_CE_INT_THRESHOLD) -extern const struct amdgpu_umc_ras_funcs umc_v6_1_ras_funcs; +extern struct amdgpu_umc_ras umc_v6_1_ras; extern const uint32_t umc_v6_1_channel_idx_tbl[UMC_V6_1_UMC_INSTANCE_NUM][UMC_V6_1_CHANNEL_INSTANCE_NUM]; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index 6dd1e19e8d43..6953426f0bed 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -480,11 +480,15 @@ static bool umc_v6_7_query_ras_poison_mode(struct amdgpu_device *adev) return true; } -const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs = { - .ras_late_init = amdgpu_umc_ras_late_init, - .ras_fini = amdgpu_umc_ras_fini, +const struct amdgpu_ras_block_hw_ops umc_v6_7_ras_hw_ops = { .query_ras_error_count = umc_v6_7_query_ras_error_count, .query_ras_error_address = umc_v6_7_query_ras_error_address, +}; + +struct amdgpu_umc_ras umc_v6_7_ras = { + .ras_block = { + .hw_ops = &umc_v6_7_ras_hw_ops, + }, .query_ras_poison_mode = umc_v6_7_query_ras_poison_mode, .ecc_info_query_ras_error_count = umc_v6_7_ecc_info_query_ras_error_count, .ecc_info_query_ras_error_address = umc_v6_7_ecc_info_query_ras_error_address, diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h index 57f2557e7aca..1f2edf625370 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h @@ -43,7 +43,7 @@ #define UMC_V6_7_TOTAL_CHANNEL_NUM (UMC_V6_7_CHANNEL_INSTANCE_NUM * UMC_V6_7_UMC_INSTANCE_NUM) /* UMC regiser per channel offset */ #define UMC_V6_7_PER_CHANNEL_OFFSET 0x400 -extern const struct amdgpu_umc_ras_funcs umc_v6_7_ras_funcs; +extern struct amdgpu_umc_ras umc_v6_7_ras; extern const uint32_t umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM]; extern const uint32_t diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c index af59a35788e3..ff9e1fac616d 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c @@ -324,10 +324,14 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev) } } -const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs = { - .err_cnt_init = umc_v8_7_err_cnt_init, - .ras_late_init = amdgpu_umc_ras_late_init, - .ras_fini = amdgpu_umc_ras_fini, +const struct amdgpu_ras_block_hw_ops umc_v8_7_ras_hw_ops = { .query_ras_error_count = umc_v8_7_query_ras_error_count, .query_ras_error_address = umc_v8_7_query_ras_error_address, }; + +struct amdgpu_umc_ras umc_v8_7_ras = { + .ras_block = { + .hw_ops = &umc_v8_7_ras_hw_ops, + }, + .err_cnt_init = umc_v8_7_err_cnt_init, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h index 37e6dc7c28e0..dd4993f5f78f 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.h @@ -44,7 +44,7 @@ /* umc ce count initial value */ #define UMC_V8_7_CE_CNT_INIT (UMC_V8_7_CE_CNT_MAX - UMC_V8_7_CE_INT_THRESHOLD) -extern const struct amdgpu_umc_ras_funcs umc_v8_7_ras_funcs; +extern struct amdgpu_umc_ras umc_v8_7_ras; extern const uint32_t umc_v8_7_channel_idx_tbl[UMC_V8_7_UMC_INSTANCE_NUM][UMC_V8_7_CHANNEL_INSTANCE_NUM]; -- cgit From bdc4292bd3b4337985f716be789b08eef921f7a6 Mon Sep 17 00:00:00 2001 From: yipechai Date: Wed, 5 Jan 2022 15:30:37 +0800 Subject: drm/amdgpu: Modify sdma block to fit for the unified ras block data and ops 1.Modify sdma block to fit for the unified ras block data and ops. 
2.Change amdgpu_sdma_ras_funcs to amdgpu_sdma_ras, and the corresponding variable name remove _funcs suffix. 3.Remove the const flag of sdma ras variable so that sdma ras block can be able to be inserted into amdgpu device ras block link list. 4.Invoke amdgpu_ras_register_ras_block function to register sdma ras block into amdgpu device ras block link list. 5.Remove the redundant code about sdma in amdgpu_ras.c after using the unified ras block. 6.Fill unified ras block .name .block .ras_late_init and .ras_fini for all of sdma versions. If .ras_late_init and .ras_fini had been defined by the selected sdma version, the defined functions will take effect; if not defined, default fill them with amdgpu_sdma_ras_late_init and amdgpu_sdma_ras_fini. v2: squash in warning fix (Alex) Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 10 ------ drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h | 12 +++---- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 58 +++++++++++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c | 25 +++++++++++--- drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h | 2 +- 5 files changed, 71 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index fba1c415a2a8..7c21eab95fc8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -967,7 +967,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct amdgpu_ras_block_object* block_obj = NULL; struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; - int i; if (!obj) return -EINVAL; @@ -979,12 +978,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, amdgpu_ras_get_ecc_info(adev, &err_data); break; case AMDGPU_RAS_BLOCK__SDMA: - if (adev->sdma.funcs->query_ras_error_count) { - for (i = 0; i < adev->sdma.num_instances; i++) - adev->sdma.funcs->query_ras_error_count(adev, i, - &err_data); - } - break; case AMDGPU_RAS_BLOCK__GFX: case AMDGPU_RAS_BLOCK__MMHUB: if (!block_obj || !block_obj->hw_ops) { @@ -1090,9 +1083,6 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, block_obj->hw_ops->reset_ras_error_status(adev); break; case AMDGPU_RAS_BLOCK__SDMA: - if (adev->sdma.funcs->reset_ras_error_count) - adev->sdma.funcs->reset_ras_error_count(adev); - break; case AMDGPU_RAS_BLOCK__HDP: if (!block_obj || !block_obj->hw_ops) { dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h index f8fb755e3aa6..eaee12ab6518 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sdma.h @@ -23,6 +23,7 @@ #ifndef __AMDGPU_SDMA_H__ #define __AMDGPU_SDMA_H__ +#include "amdgpu_ras.h" /* max number of IP instances */ #define AMDGPU_MAX_SDMA_INSTANCES 8 @@ -50,13 +51,8 @@ struct amdgpu_sdma_instance { bool burst_nop; }; -struct amdgpu_sdma_ras_funcs { - int (*ras_late_init)(struct amdgpu_device *adev, - void *ras_ih_info); - void (*ras_fini)(struct amdgpu_device *adev); - int (*query_ras_error_count)(struct amdgpu_device *adev, - uint32_t instance, void *ras_error_status); - void (*reset_ras_error_count)(struct amdgpu_device *adev); +struct amdgpu_sdma_ras { + struct amdgpu_ras_block_object ras_block; }; struct amdgpu_sdma { @@ -73,7 +69,7 @@ struct amdgpu_sdma { uint32_t 
srbm_soft_reset; bool has_page_queue; struct ras_common_if *ras_if; - const struct amdgpu_sdma_ras_funcs *funcs; + struct amdgpu_sdma_ras *ras; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index e8e4749e9c79..3c1483dc113e 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -1892,13 +1892,13 @@ static int sdma_v4_0_late_init(void *handle) sdma_v4_0_setup_ulv(adev); if (!amdgpu_persistent_edc_harvesting_supported(adev)) { - if (adev->sdma.funcs && - adev->sdma.funcs->reset_ras_error_count) - adev->sdma.funcs->reset_ras_error_count(adev); + if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops && + adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count) + adev->sdma.ras->ras_block.hw_ops->reset_ras_error_count(adev); } - if (adev->sdma.funcs && adev->sdma.funcs->ras_late_init) - return adev->sdma.funcs->ras_late_init(adev, &ih_info); + if (adev->sdma.ras && adev->sdma.ras->ras_block.ras_late_init) + return adev->sdma.ras->ras_block.ras_late_init(adev, &ih_info); else return 0; } @@ -2001,8 +2001,9 @@ static int sdma_v4_0_sw_fini(void *handle) struct amdgpu_device *adev = (struct amdgpu_device *)handle; int i; - if (adev->sdma.funcs && adev->sdma.funcs->ras_fini) - adev->sdma.funcs->ras_fini(adev); + if (adev->sdma.ras && adev->sdma.ras->ras_block.hw_ops && + adev->sdma.ras->ras_block.ras_fini) + adev->sdma.ras->ras_block.ras_fini(adev); for (i = 0; i < adev->sdma.num_instances; i++) { amdgpu_ring_fini(&adev->sdma.instance[i].ring); @@ -2740,7 +2741,7 @@ static void sdma_v4_0_get_ras_error_count(uint32_t value, } } -static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, +static int sdma_v4_0_query_ras_error_count_by_instance(struct amdgpu_device *adev, uint32_t instance, void *ras_error_status) { struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; @@ -2762,6 +2763,18 @@ static int sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, return 0; }; +static void sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) +{ + int i = 0; + for (i = 0; i < adev->sdma.num_instances; i++) { + if (sdma_v4_0_query_ras_error_count_by_instance(adev, i, ras_error_status)) + { + dev_err(adev->dev, "Query ras error count failed in SDMA%d \n", i); + return; + } + } +} + static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev) { int i; @@ -2773,26 +2786,45 @@ static void sdma_v4_0_reset_ras_error_count(struct amdgpu_device *adev) } } -static const struct amdgpu_sdma_ras_funcs sdma_v4_0_ras_funcs = { - .ras_late_init = amdgpu_sdma_ras_late_init, - .ras_fini = amdgpu_sdma_ras_fini, +const struct amdgpu_ras_block_hw_ops sdma_v4_0_ras_hw_ops = { .query_ras_error_count = sdma_v4_0_query_ras_error_count, .reset_ras_error_count = sdma_v4_0_reset_ras_error_count, }; +static struct amdgpu_sdma_ras sdma_v4_0_ras = { + .ras_block = { + .hw_ops = &sdma_v4_0_ras_hw_ops, + }, +}; + static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) { switch (adev->ip_versions[SDMA0_HWIP][0]) { case IP_VERSION(4, 2, 0): case IP_VERSION(4, 2, 2): - adev->sdma.funcs = &sdma_v4_0_ras_funcs; + adev->sdma.ras = &sdma_v4_0_ras; break; case IP_VERSION(4, 4, 0): - adev->sdma.funcs = &sdma_v4_4_ras_funcs; + adev->sdma.ras = &sdma_v4_4_ras; break; default: break; } + + if (adev->sdma.ras) { + amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block); + + strcpy(adev->sdma.ras->ras_block.name,"sdma"); + adev->sdma.ras->ras_block.block = 
AMDGPU_RAS_BLOCK__SDMA; + + /* If don't define special ras_late_init function, use default ras_late_init */ + if (!adev->sdma.ras->ras_block.ras_late_init) + adev->sdma.ras->ras_block.ras_late_init = amdgpu_sdma_ras_late_init; + + /* If don't define special ras_fini function, use default ras_fini */ + if (!adev->sdma.ras->ras_block.ras_fini) + adev->sdma.ras->ras_block.ras_fini = amdgpu_sdma_ras_fini; + } } const struct amdgpu_ip_block_version sdma_v4_0_ip_block = { diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c index bf95007f0843..5c1ba1116e5c 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c @@ -188,7 +188,7 @@ static void sdma_v4_4_get_ras_error_count(struct amdgpu_device *adev, } } -static int sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev, +static int sdma_v4_4_query_ras_error_count_by_instance(struct amdgpu_device *adev, uint32_t instance, void *ras_error_status) { @@ -245,9 +245,26 @@ static void sdma_v4_4_reset_ras_error_count(struct amdgpu_device *adev) } } -const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs = { - .ras_late_init = amdgpu_sdma_ras_late_init, - .ras_fini = amdgpu_sdma_ras_fini, +static void sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) +{ + int i = 0; + for (i = 0; i < adev->sdma.num_instances; i++) { + if (sdma_v4_4_query_ras_error_count_by_instance(adev, i, ras_error_status)) + { + dev_err(adev->dev, "Query ras error count failed in SDMA%d \n", i); + return; + } + } + +} + +const struct amdgpu_ras_block_hw_ops sdma_v4_4_ras_hw_ops = { .query_ras_error_count = sdma_v4_4_query_ras_error_count, .reset_ras_error_count = sdma_v4_4_reset_ras_error_count, }; + +struct amdgpu_sdma_ras sdma_v4_4_ras = { + .ras_block = { + .hw_ops = &sdma_v4_4_ras_hw_ops, + }, +}; diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h index 74a6e5b5e949..a9f0c68359e0 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.h @@ -23,6 +23,6 @@ #ifndef __SDMA_V4_4_H__ #define __SDMA_V4_4_H__ -extern const struct amdgpu_sdma_ras_funcs sdma_v4_4_ras_funcs; +extern struct amdgpu_sdma_ras sdma_v4_4_ras; #endif -- cgit From b0e2062dc8978869c1dd96d92027f74b361d5eb7 Mon Sep 17 00:00:00 2001 From: yipechai Date: Wed, 5 Jan 2022 15:39:13 +0800 Subject: drm/amdgpu: Modify mca block to fit for the unified ras block data and ops 1.Modify mca block to fit for the unified ras block data and ops. 2.Define special .ras_block_match function for mca block to identify itself. 3.Change amdgpu_mca_ras_funcs to amdgpu_mca_ras_block(amdgpu_mca_ras had been used), and the corresponding variable name remove _funcs suffix. 4.Remove the const flag of cma ras variable so that cma ras block can be able to be inserted into amdgpu device ras block link list. 5.Invoke amdgpu_ras_register_ras_block function to register cma ras block into amdgpu device ras block link list. 6.Remove the redundant code about cma in amdgpu_ras.c after using the unified ras block. 
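As a rough illustration of the lookup these conversion patches converge on: the core walks adev->ras_list and asks each registered block whether it matches; blocks with several instances (the three MCA sub-blocks here) supply their own .ras_block_match callback, everything else is matched on the block id alone. The sketch below is a simplified paraphrase of amdgpu_ras_get_ras_block() in amdgpu_ras.c — the helper name and exact control flow are illustrative only, not the driver's code.

static struct amdgpu_ras_block_object *
ras_find_block_sketch(struct amdgpu_device *adev,
                      enum amdgpu_ras_block block, uint32_t sub_block_index)
{
        struct amdgpu_ras_block_object *obj;

        /* every IP block added itself via amdgpu_ras_register_ras_block() */
        list_for_each_entry(obj, &adev->ras_list, node) {
                /* per-instance blocks (e.g. MCA mp0/mp1/mpio) disambiguate
                 * on sub_block_index through their own match callback */
                if (obj->ras_block_match) {
                        if (!obj->ras_block_match(obj, block, sub_block_index))
                                return obj;
                } else if (obj->block == block) {
                        /* default match: the block id alone is unique */
                        return obj;
                }
        }

        return NULL;
}

The MCA conversion is exactly why the match callback exists: all three instances carry AMDGPU_RAS_BLOCK__MCA, so only the sub_block_index (MP0/MP1/MPIO) tells them apart.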
Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 15 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c | 11 +++-- drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h | 14 ++---- drivers/gpu/drm/amd/amdgpu/mca_v3_0.c | 85 +++++++++++++++++++++++---------- 4 files changed, 77 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index de0ef21e1501..a0a9abd23a7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -469,23 +469,20 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) return r; } - if (adev->mca.mp0.ras_funcs && - adev->mca.mp0.ras_funcs->ras_late_init) { - r = adev->mca.mp0.ras_funcs->ras_late_init(adev); + if (adev->mca.mp0.ras && adev->mca.mp0.ras->ras_block.ras_late_init) { + r = adev->mca.mp0.ras->ras_block.ras_late_init(adev, NULL); if (r) return r; } - if (adev->mca.mp1.ras_funcs && - adev->mca.mp1.ras_funcs->ras_late_init) { - r = adev->mca.mp1.ras_funcs->ras_late_init(adev); + if (adev->mca.mp1.ras && adev->mca.mp1.ras->ras_block.ras_late_init) { + r = adev->mca.mp1.ras->ras_block.ras_late_init(adev, NULL); if (r) return r; } - if (adev->mca.mpio.ras_funcs && - adev->mca.mpio.ras_funcs->ras_late_init) { - r = adev->mca.mpio.ras_funcs->ras_late_init(adev); + if (adev->mca.mpio.ras && adev->mca.mpio.ras->ras_block.ras_late_init) { + r = adev->mca.mpio.ras->ras_block.ras_late_init(adev, NULL); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c index ce538f4819f9..52a60c2316a2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.c @@ -74,20 +74,23 @@ void amdgpu_mca_query_ras_error_count(struct amdgpu_device *adev, int amdgpu_mca_ras_late_init(struct amdgpu_device *adev, struct amdgpu_mca_ras *mca_dev) { + char sysfs_name[32] = {0}; int r; struct ras_ih_if ih_info = { .cb = NULL, }; - struct ras_fs_if fs_info = { - .sysfs_name = mca_dev->ras_funcs->sysfs_name, + struct ras_fs_if fs_info= { + .sysfs_name = sysfs_name, }; + snprintf(sysfs_name, sizeof(sysfs_name), "%s_err_count", mca_dev->ras->ras_block.name); + if (!mca_dev->ras_if) { mca_dev->ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL); if (!mca_dev->ras_if) return -ENOMEM; - mca_dev->ras_if->block = mca_dev->ras_funcs->ras_block; - mca_dev->ras_if->sub_block_index = mca_dev->ras_funcs->ras_sub_block; + mca_dev->ras_if->block = mca_dev->ras->ras_block.block; + mca_dev->ras_if->sub_block_index = mca_dev->ras->ras_block.sub_block_index; mca_dev->ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE; } ih_info.head = fs_info.head = *mca_dev->ras_if; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h index c74bc7177066..be030c4031d2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mca.h @@ -21,21 +21,13 @@ #ifndef __AMDGPU_MCA_H__ #define __AMDGPU_MCA_H__ -struct amdgpu_mca_ras_funcs { - int (*ras_late_init)(struct amdgpu_device *adev); - void (*ras_fini)(struct amdgpu_device *adev); - void (*query_ras_error_count)(struct amdgpu_device *adev, - void *ras_error_status); - void (*query_ras_error_address)(struct amdgpu_device *adev, - void *ras_error_status); - uint32_t ras_block; - uint32_t ras_sub_block; - const char* sysfs_name; +struct amdgpu_mca_ras_block { + struct 
amdgpu_ras_block_object ras_block; }; struct amdgpu_mca_ras { struct ras_common_if *ras_if; - const struct amdgpu_mca_ras_funcs *ras_funcs; + struct amdgpu_mca_ras_block *ras; }; struct amdgpu_mca_funcs { diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c index 8f7107d392af..f51092041edc 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c @@ -37,7 +37,7 @@ static void mca_v3_0_mp0_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev) +static int mca_v3_0_mp0_ras_late_init(struct amdgpu_device *adev, void *ras_info) { return amdgpu_mca_ras_late_init(adev, &adev->mca.mp0); } @@ -47,14 +47,34 @@ static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev) amdgpu_mca_ras_fini(adev, &adev->mca.mp0); } -const struct amdgpu_mca_ras_funcs mca_v3_0_mp0_ras_funcs = { - .ras_late_init = mca_v3_0_mp0_ras_late_init, - .ras_fini = mca_v3_0_mp0_ras_fini, +static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object* block_obj, enum amdgpu_ras_block block, uint32_t sub_block_index) +{ + if(!block_obj) + return -EINVAL; + + if( (block_obj->block == block) && + (block_obj->sub_block_index == sub_block_index)) { + return 0; + } + + return -EINVAL; +} + +const struct amdgpu_ras_block_hw_ops mca_v3_0_mp0_hw_ops = { .query_ras_error_count = mca_v3_0_mp0_query_ras_error_count, .query_ras_error_address = NULL, - .ras_block = AMDGPU_RAS_BLOCK__MCA, - .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP0, - .sysfs_name = "mp0_err_count", +}; + +struct amdgpu_mca_ras_block mca_v3_0_mp0_ras = { + .ras_block = { + .block = AMDGPU_RAS_BLOCK__MCA, + .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP0, + .name = "mp0", + .hw_ops = &mca_v3_0_mp0_hw_ops, + .ras_block_match = mca_v3_0_ras_block_match, + .ras_late_init = mca_v3_0_mp0_ras_late_init, + .ras_fini = mca_v3_0_mp0_ras_fini, + }, }; static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev, @@ -65,7 +85,7 @@ static void mca_v3_0_mp1_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev) +static int mca_v3_0_mp1_ras_late_init(struct amdgpu_device *adev, void *ras_info) { return amdgpu_mca_ras_late_init(adev, &adev->mca.mp1); } @@ -75,14 +95,21 @@ static void mca_v3_0_mp1_ras_fini(struct amdgpu_device *adev) amdgpu_mca_ras_fini(adev, &adev->mca.mp1); } -const struct amdgpu_mca_ras_funcs mca_v3_0_mp1_ras_funcs = { - .ras_late_init = mca_v3_0_mp1_ras_late_init, - .ras_fini = mca_v3_0_mp1_ras_fini, +const struct amdgpu_ras_block_hw_ops mca_v3_0_mp1_hw_ops = { .query_ras_error_count = mca_v3_0_mp1_query_ras_error_count, .query_ras_error_address = NULL, - .ras_block = AMDGPU_RAS_BLOCK__MCA, - .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MP1, - .sysfs_name = "mp1_err_count", +}; + +struct amdgpu_mca_ras_block mca_v3_0_mp1_ras = { + .ras_block = { + .block = AMDGPU_RAS_BLOCK__MCA, + .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MP1, + .name = "mp1", + .hw_ops = &mca_v3_0_mp1_hw_ops, + .ras_block_match = mca_v3_0_ras_block_match, + .ras_late_init = mca_v3_0_mp1_ras_late_init, + .ras_fini = mca_v3_0_mp1_ras_fini, + }, }; static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev, @@ -93,7 +120,7 @@ static void mca_v3_0_mpio_query_ras_error_count(struct amdgpu_device *adev, ras_error_status); } -static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device *adev) +static int mca_v3_0_mpio_ras_late_init(struct amdgpu_device 
*adev, void *ras_info) { return amdgpu_mca_ras_late_init(adev, &adev->mca.mpio); } @@ -103,14 +130,21 @@ static void mca_v3_0_mpio_ras_fini(struct amdgpu_device *adev) amdgpu_mca_ras_fini(adev, &adev->mca.mpio); } -const struct amdgpu_mca_ras_funcs mca_v3_0_mpio_ras_funcs = { - .ras_late_init = mca_v3_0_mpio_ras_late_init, - .ras_fini = mca_v3_0_mpio_ras_fini, +const struct amdgpu_ras_block_hw_ops mca_v3_0_mpio_hw_ops = { .query_ras_error_count = mca_v3_0_mpio_query_ras_error_count, .query_ras_error_address = NULL, - .ras_block = AMDGPU_RAS_BLOCK__MCA, - .ras_sub_block = AMDGPU_RAS_MCA_BLOCK__MPIO, - .sysfs_name = "mpio_err_count", +}; + +struct amdgpu_mca_ras_block mca_v3_0_mpio_ras = { + .ras_block = { + .block = AMDGPU_RAS_BLOCK__MCA, + .sub_block_index = AMDGPU_RAS_MCA_BLOCK__MPIO, + .name = "mpio", + .hw_ops = &mca_v3_0_mpio_hw_ops, + .ras_block_match = mca_v3_0_ras_block_match, + .ras_late_init = mca_v3_0_mpio_ras_late_init, + .ras_fini = mca_v3_0_mpio_ras_fini, + }, }; @@ -118,9 +152,12 @@ static void mca_v3_0_init(struct amdgpu_device *adev) { struct amdgpu_mca *mca = &adev->mca; - mca->mp0.ras_funcs = &mca_v3_0_mp0_ras_funcs; - mca->mp1.ras_funcs = &mca_v3_0_mp1_ras_funcs; - mca->mpio.ras_funcs = &mca_v3_0_mpio_ras_funcs; + mca->mp0.ras = &mca_v3_0_mp0_ras; + mca->mp1.ras = &mca_v3_0_mp1_ras; + mca->mpio.ras = &mca_v3_0_mpio_ras; + amdgpu_ras_register_ras_block(adev, &mca->mp0.ras->ras_block); + amdgpu_ras_register_ras_block(adev, &mca->mp1.ras->ras_block); + amdgpu_ras_register_ras_block(adev, &mca->mpio.ras->ras_block); } const struct amdgpu_mca_funcs mca_v3_0_funcs = { -- cgit From 22d4ba53b1c10de6832e588f01d916e24306f6a1 Mon Sep 17 00:00:00 2001 From: yipechai Date: Wed, 5 Jan 2022 15:40:26 +0800 Subject: drm/amdgpu: Adjust error inject function code style in amdgpu_ras.c 1. Move xgmi special error inject function from amdgpu_ras.c to xgmi block. 2. Support to use psp_ras_trigger_error as default error inject function in amdgpu_ras.c. If .ras_error_inject isn't defined in ras block, default error inject function will take effect. 
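In practice that means a block only has to populate .ras_error_inject in its amdgpu_ras_block_hw_ops when injection needs extra handling; leaving the hook NULL routes the request through psp_ras_trigger_error(). A hedged sketch of the two shapes — the example_* names and callbacks are hypothetical, only the structure and field names come from this series:

/* block with no special requirements: omit .ras_error_inject and the
 * core falls back to psp_ras_trigger_error() for injection requests */
static const struct amdgpu_ras_block_hw_ops example_plain_hw_ops = {
        .query_ras_error_count = example_query_ras_error_count, /* hypothetical */
};

/* XGMI-style block: provides its own hook so it can toggle df-cstate and
 * XGMI power-down around the underlying PSP trigger */
static const struct amdgpu_ras_block_hw_ops example_wrapped_hw_ops = {
        .query_ras_error_count = example_query_ras_error_count, /* hypothetical */
        .ras_error_inject      = example_inject_with_prepost,   /* hypothetical */
};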
v2: squash in warning fix (Alex) Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 89 +++++--------------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 27 ++++++++++ 2 files changed, 40 insertions(+), 76 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 7c21eab95fc8..bcd33e5350f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -903,31 +903,6 @@ static struct amdgpu_ras_block_object* amdgpu_ras_get_ras_block(struct amdgpu_de return NULL; } -static void amdgpu_ras_mca_query_error_status(struct amdgpu_device *adev, - struct ras_common_if *ras_block, - struct ras_err_data *err_data) -{ - switch (ras_block->sub_block_index) { - case AMDGPU_RAS_MCA_BLOCK__MP0: - if (adev->mca.mp0.ras_funcs && - adev->mca.mp0.ras_funcs->query_ras_error_count) - adev->mca.mp0.ras_funcs->query_ras_error_count(adev, &err_data); - break; - case AMDGPU_RAS_MCA_BLOCK__MP1: - if (adev->mca.mp1.ras_funcs && - adev->mca.mp1.ras_funcs->query_ras_error_count) - adev->mca.mp1.ras_funcs->query_ras_error_count(adev, &err_data); - break; - case AMDGPU_RAS_MCA_BLOCK__MPIO: - if (adev->mca.mpio.ras_funcs && - adev->mca.mpio.ras_funcs->query_ras_error_count) - adev->mca.mpio.ras_funcs->query_ras_error_count(adev, &err_data); - break; - default: - break; - } -} - static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data) { struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -994,6 +969,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, case AMDGPU_RAS_BLOCK__PCIE_BIF: case AMDGPU_RAS_BLOCK__XGMI_WAFL: case AMDGPU_RAS_BLOCK__HDP: + case AMDGPU_RAS_BLOCK__MCA: if (!block_obj || !block_obj->hw_ops) { dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); @@ -1002,9 +978,6 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, if (block_obj->hw_ops->query_ras_error_count) block_obj->hw_ops->query_ras_error_count(adev, &err_data); break; - case AMDGPU_RAS_BLOCK__MCA: - amdgpu_ras_mca_query_error_status(adev, &info->head, &err_data); - break; default: break; } @@ -1099,32 +1072,6 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, return 0; } -/* Trigger XGMI/WAFL error */ -static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, - struct ta_ras_trigger_error_input *block_info) -{ - int ret; - - if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) - dev_warn(adev->dev, "Failed to disallow df cstate"); - - if (amdgpu_dpm_allow_xgmi_power_down(adev, false)) - dev_warn(adev->dev, "Failed to disallow XGMI power down"); - - ret = psp_ras_trigger_error(&adev->psp, block_info); - - if (amdgpu_ras_intr_triggered()) - return ret; - - if (amdgpu_dpm_allow_xgmi_power_down(adev, true)) - dev_warn(adev->dev, "Failed to allow XGMI power down"); - - if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) - dev_warn(adev->dev, "Failed to allow df cstate"); - - return ret; -} - /* wrapper of psp_ras_trigger_error */ int amdgpu_ras_error_inject(struct amdgpu_device *adev, struct ras_inject_if *info) @@ -1143,6 +1090,11 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, if (!obj) return -EINVAL; + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); + return -EINVAL; + } + /* Calculate XGMI relative 
offset */ if (adev->gmc.xgmi.num_physical_nodes > 1) { block_info.address = @@ -1150,30 +1102,15 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, block_info.address); } - switch (info->head.block) { - case AMDGPU_RAS_BLOCK__GFX: - if (!block_obj || !block_obj->hw_ops) { - dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); - return -EINVAL; - } - + if (info->head.block == AMDGPU_RAS_BLOCK__GFX) { if (block_obj->hw_ops->ras_error_inject) ret = block_obj->hw_ops->ras_error_inject(adev, info); - break; - case AMDGPU_RAS_BLOCK__UMC: - case AMDGPU_RAS_BLOCK__SDMA: - case AMDGPU_RAS_BLOCK__MMHUB: - case AMDGPU_RAS_BLOCK__PCIE_BIF: - case AMDGPU_RAS_BLOCK__MCA: - ret = psp_ras_trigger_error(&adev->psp, &block_info); - break; - case AMDGPU_RAS_BLOCK__XGMI_WAFL: - ret = amdgpu_ras_error_inject_xgmi(adev, &block_info); - break; - default: - dev_info(adev->dev, "%s error injection is not supported yet\n", - get_ras_block_str(&info->head)); - ret = -EINVAL; + } else { + /* If defined special ras_error_inject(e.g: xgmi), implement special ras_error_inject */ + if (block_obj->hw_ops->ras_error_inject) + ret = block_obj->hw_ops->ras_error_inject(adev, &block_info); + else /*If not defined .ras_error_inject, use default ras_error_inject*/ + ret = psp_ras_trigger_error(&adev->psp, &block_info); } if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index d29acd33eb11..478457637d29 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -946,9 +946,36 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, err_data->ce_count += ce_cnt; } +/* Trigger XGMI/WAFL error */ +static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *inject_if) +{ + int ret = 0; + struct ta_ras_trigger_error_input *block_info = (struct ta_ras_trigger_error_input *)inject_if; + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) + dev_warn(adev->dev, "Failed to disallow df cstate"); + + if (amdgpu_dpm_allow_xgmi_power_down(adev, false)) + dev_warn(adev->dev, "Failed to disallow XGMI power down"); + + ret = psp_ras_trigger_error(&adev->psp, block_info); + + if (amdgpu_ras_intr_triggered()) + return ret; + + if (amdgpu_dpm_allow_xgmi_power_down(adev, true)) + dev_warn(adev->dev, "Failed to allow XGMI power down"); + + if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_ALLOW)) + dev_warn(adev->dev, "Failed to allow df cstate"); + + return ret; +} + struct amdgpu_ras_block_hw_ops xgmi_ras_hw_ops = { .query_ras_error_count = amdgpu_xgmi_query_ras_error_count, .reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count, + .ras_error_inject = amdgpu_ras_error_inject_xgmi, }; struct amdgpu_xgmi_ras xgmi_ras = { -- cgit From 7389a5b837cde5e5cd771619e9f006ae102f5d7d Mon Sep 17 00:00:00 2001 From: yipechai Date: Wed, 5 Jan 2022 15:48:21 +0800 Subject: drm/amdgpu: Removed redundant ras code Removed redundant ras code. 
Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: John Clements Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 67 ++++++++++----------------------- 1 file changed, 19 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index bcd33e5350f8..517650d286a7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -946,40 +946,25 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, if (!obj) return -EINVAL; - block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); - - switch (info->head.block) { - case AMDGPU_RAS_BLOCK__UMC: + if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { amdgpu_ras_get_ecc_info(adev, &err_data); - break; - case AMDGPU_RAS_BLOCK__SDMA: - case AMDGPU_RAS_BLOCK__GFX: - case AMDGPU_RAS_BLOCK__MMHUB: + } else { + block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); if (!block_obj || !block_obj->hw_ops) { dev_info(adev->dev, "%s doesn't config ras function \n", - get_ras_block_str(&info->head)); + get_ras_block_str(&info->head)); return -EINVAL; } - if (block_obj->hw_ops->query_ras_error_count) - block_obj->hw_ops->query_ras_error_count(adev, &err_data); - if (block_obj->hw_ops->query_ras_error_status) - block_obj->hw_ops->query_ras_error_status(adev); - break; - case AMDGPU_RAS_BLOCK__PCIE_BIF: - case AMDGPU_RAS_BLOCK__XGMI_WAFL: - case AMDGPU_RAS_BLOCK__HDP: - case AMDGPU_RAS_BLOCK__MCA: - if (!block_obj || !block_obj->hw_ops) { - dev_info(adev->dev, "%s doesn't config ras function \n", - get_ras_block_str(&info->head)); - return -EINVAL; - } if (block_obj->hw_ops->query_ras_error_count) block_obj->hw_ops->query_ras_error_count(adev, &err_data); - break; - default: - break; + + if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || + (info->head.block == AMDGPU_RAS_BLOCK__GFX) || + (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { + if (block_obj->hw_ops->query_ras_error_status) + block_obj->hw_ops->query_ras_error_status(adev); + } } obj->err_data.ue_count += err_data.ue_count; @@ -1041,32 +1026,18 @@ int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, if (!amdgpu_ras_is_supported(adev, block)) return -EINVAL; - switch (block) { - case AMDGPU_RAS_BLOCK__GFX: - case AMDGPU_RAS_BLOCK__MMHUB: - if (!block_obj || !block_obj->hw_ops) { - dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); - return -EINVAL; - } + if (!block_obj || !block_obj->hw_ops) { + dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); + return -EINVAL; + } - if (block_obj->hw_ops->reset_ras_error_count) - block_obj->hw_ops->reset_ras_error_count(adev); + if (block_obj->hw_ops->reset_ras_error_count) + block_obj->hw_ops->reset_ras_error_count(adev); + if ((block == AMDGPU_RAS_BLOCK__GFX) || + (block == AMDGPU_RAS_BLOCK__MMHUB)) { if (block_obj->hw_ops->reset_ras_error_status) block_obj->hw_ops->reset_ras_error_status(adev); - break; - case AMDGPU_RAS_BLOCK__SDMA: - case AMDGPU_RAS_BLOCK__HDP: - if (!block_obj || !block_obj->hw_ops) { - dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); - return -EINVAL; - } - - if (block_obj->hw_ops->reset_ras_error_count) - block_obj->hw_ops->reset_ras_error_count(adev); - break; - default: - break; } return 0; -- cgit From cb5cc4f573e18deb7d9143de0ccb62c08181bc85 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 9 Dec 2021 16:48:56 -0500 Subject: drm/amdgpu: improve debug 
VRAM access performance using sdma For better performance during VRAM access for debugged processes, do read/write copies over SDMA. In order to fulfill post mortem debugging on a broken device, fallback to stable MMIO access when gpu recovery is disabled or when job submission time outs are set to max. Failed SDMA access should automatically fall back to MMIO access. Use a pre-allocated GTT bounce buffer pre-mapped into GART to avoid page-table updates and TLB flushes on access. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 78 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 ++ 2 files changed, 82 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5c3f24069f2a..953d68b26f0b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -50,6 +50,7 @@ #include #include +#include #include "amdgpu.h" #include "amdgpu_object.h" @@ -1433,6 +1434,70 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos, } } +static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, + unsigned long offset, void *buf, int len, int write) +{ + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); + struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); + struct amdgpu_job *job; + struct dma_fence *fence; + uint64_t src_addr, dst_addr; + unsigned int num_dw; + int r, idx; + + if (len != PAGE_SIZE) + return -EINVAL; + + if (!adev->mman.sdma_access_ptr) + return -EACCES; + + r = drm_dev_enter(adev_to_drm(adev), &idx); + if (r) + return r; + + if (write) + memcpy(adev->mman.sdma_access_ptr, buf, len); + + num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job); + if (r) + goto out; + + src_addr = write ? amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo) : + amdgpu_bo_gpu_offset(abo); + dst_addr = write ? amdgpu_bo_gpu_offset(abo) : + amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo); + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false); + + amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + + r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence); + if (r) { + amdgpu_job_free(job); + goto out; + } + + if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout)) + r = -ETIMEDOUT; + dma_fence_put(fence); + + if (!(r || write)) + memcpy(buf, adev->mman.sdma_access_ptr, len); +out: + drm_dev_exit(idx); + return r; +} + +static inline bool amdgpu_ttm_allow_post_mortem_debug(struct amdgpu_device *adev) +{ + return amdgpu_gpu_recovery == 0 || + adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT || + adev->compute_timeout == MAX_SCHEDULE_TIMEOUT || + adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT || + adev->video_timeout == MAX_SCHEDULE_TIMEOUT; +} + /** * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. 
* @@ -1457,6 +1522,10 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, if (bo->resource->mem_type != TTM_PL_VRAM) return -EIO; + if (!amdgpu_ttm_allow_post_mortem_debug(adev) && + !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write)) + return len; + amdgpu_res_first(bo->resource, offset, len, &cursor); while (cursor.remaining) { size_t count, size = cursor.size; @@ -1797,6 +1866,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->mman.sdma_access_bo, NULL, + adev->mman.sdma_access_ptr)) + DRM_WARN("Debug VRAM access will use slowpath MM access\n"); + return 0; } @@ -1837,6 +1912,9 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); ttm_device_fini(&adev->mman.bdev); adev->mman.initialized = false; + if (adev->mman.sdma_access_ptr) + amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, + &adev->mman.sdma_access_ptr); DRM_INFO("amdgpu: ttm finalized\n"); } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index f8f48be16d80..f06fd19b4895 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -98,6 +98,10 @@ struct amdgpu_mman { u64 fw_vram_usage_size; struct amdgpu_bo *fw_vram_usage_reserved_bo; void *fw_vram_usage_va; + + /* PAGE_SIZE'd BO for process memory r/w over SDMA. */ + struct amdgpu_bo *sdma_access_bo; + void *sdma_access_ptr; }; struct amdgpu_copy_mem { -- cgit From 400ef298f400854544e062023671e927965bc9b0 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Wed, 12 Jan 2022 10:27:56 -0500 Subject: drm/amdgpu: cleanup ttm debug sdma vram access function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Some suggested cleanups to declutter ttm when doing debug VRAM access over SDMA. Signed-off-by: Jonathan Kim Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 9 +++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 23 +++++++---------------- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 445a0d077c1f..8ddddf12e1ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1446,6 +1446,15 @@ int amdgpu_device_set_cg_state(struct amdgpu_device *adev, int amdgpu_device_set_pg_state(struct amdgpu_device *adev, enum amd_powergating_state state); +static inline bool amdgpu_device_has_timeouts_enabled(struct amdgpu_device *adev) +{ + return amdgpu_gpu_recovery != 0 && + adev->gfx_timeout != MAX_SCHEDULE_TIMEOUT && + adev->compute_timeout != MAX_SCHEDULE_TIMEOUT && + adev->sdma_timeout != MAX_SCHEDULE_TIMEOUT && + adev->video_timeout != MAX_SCHEDULE_TIMEOUT; +} + #include "amdgpu_object.h" static inline bool amdgpu_is_tmz(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 953d68b26f0b..1e012b45f663 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1463,10 +1463,11 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, if (r) goto out; - src_addr = write ? amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo) : - amdgpu_bo_gpu_offset(abo); - dst_addr = write ? 
amdgpu_bo_gpu_offset(abo) : - amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo); + src_addr = amdgpu_bo_gpu_offset(abo); + dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo); + if (write) + swap(src_addr, dst_addr); + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false); amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]); @@ -1489,15 +1490,6 @@ out: return r; } -static inline bool amdgpu_ttm_allow_post_mortem_debug(struct amdgpu_device *adev) -{ - return amdgpu_gpu_recovery == 0 || - adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT || - adev->compute_timeout == MAX_SCHEDULE_TIMEOUT || - adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT || - adev->video_timeout == MAX_SCHEDULE_TIMEOUT; -} - /** * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object. * @@ -1522,7 +1514,7 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, if (bo->resource->mem_type != TTM_PL_VRAM) return -EIO; - if (!amdgpu_ttm_allow_post_mortem_debug(adev) && + if (amdgpu_device_has_timeouts_enabled(adev) && !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write)) return len; @@ -1912,8 +1904,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); ttm_device_fini(&adev->mman.bdev); adev->mman.initialized = false; - if (adev->mman.sdma_access_ptr) - amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, + amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, &adev->mman.sdma_access_ptr); DRM_INFO("amdgpu: ttm finalized\n"); } -- cgit From 20c5e425d36a59529f2e6a77ba21d009cdfa8ffa Mon Sep 17 00:00:00 2001 From: Graham Sider Date: Tue, 11 Jan 2022 15:07:10 -0500 Subject: drm/amdkfd: Fix indentation on switch statement Cases should be same indentation as switch. Also fix string spanning across multiple lines. 
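The style being applied is the usual kernel one: case labels sit at the same indentation depth as the switch keyword, and user-visible strings stay on a single line even when that runs past the usual column limit, so they remain greppable. A generic before/after sketch, not taken from the patch itself:

/* discouraged: case labels indented one level deeper than the switch */
switch (version) {
        case 1:
                num_queues = 2;
                break;
        default:
                num_queues = 8;
}

/* preferred: case labels aligned with the switch keyword */
switch (version) {
case 1:
        num_queues = 2;
        break;
default:
        num_queues = 8;
}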
Signed-off-by: Graham Sider Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 55 ++++++++++++++++----------------- 1 file changed, 27 insertions(+), 28 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 2b65d0acae2c..5a47f437b455 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -64,34 +64,33 @@ static void kfd_device_info_set_sdma_queue_num(struct kfd_dev *kfd) uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0]; switch (sdma_version) { - case IP_VERSION(4, 0, 0):/* VEGA10 */ - case IP_VERSION(4, 0, 1):/* VEGA12 */ - case IP_VERSION(4, 1, 0):/* RAVEN */ - case IP_VERSION(4, 1, 1):/* RAVEN */ - case IP_VERSION(4, 1, 2):/* RENOIR */ - case IP_VERSION(5, 2, 1):/* VANGOGH */ - case IP_VERSION(5, 2, 3):/* YELLOW_CARP */ - kfd->device_info.num_sdma_queues_per_engine = 2; - break; - case IP_VERSION(4, 2, 0):/* VEGA20 */ - case IP_VERSION(4, 2, 2):/* ARCTURUS */ - case IP_VERSION(4, 4, 0):/* ALDEBARAN */ - case IP_VERSION(5, 0, 0):/* NAVI10 */ - case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */ - case IP_VERSION(5, 0, 2):/* NAVI14 */ - case IP_VERSION(5, 0, 5):/* NAVI12 */ - case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */ - case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */ - case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */ - case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */ - kfd->device_info.num_sdma_queues_per_engine = 8; - break; - default: - dev_warn(kfd_device, - "Default sdma queue per engine(8) is set due to " - "mismatch of sdma ip block(SDMA_HWIP:0x%x).\n", - sdma_version); - kfd->device_info.num_sdma_queues_per_engine = 8; + case IP_VERSION(4, 0, 0):/* VEGA10 */ + case IP_VERSION(4, 0, 1):/* VEGA12 */ + case IP_VERSION(4, 1, 0):/* RAVEN */ + case IP_VERSION(4, 1, 1):/* RAVEN */ + case IP_VERSION(4, 1, 2):/* RENOIR */ + case IP_VERSION(5, 2, 1):/* VANGOGH */ + case IP_VERSION(5, 2, 3):/* YELLOW_CARP */ + kfd->device_info.num_sdma_queues_per_engine = 2; + break; + case IP_VERSION(4, 2, 0):/* VEGA20 */ + case IP_VERSION(4, 2, 2):/* ARCTURUS */ + case IP_VERSION(4, 4, 0):/* ALDEBARAN */ + case IP_VERSION(5, 0, 0):/* NAVI10 */ + case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */ + case IP_VERSION(5, 0, 2):/* NAVI14 */ + case IP_VERSION(5, 0, 5):/* NAVI12 */ + case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */ + case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */ + case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */ + case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */ + kfd->device_info.num_sdma_queues_per_engine = 8; + break; + default: + dev_warn(kfd_device, + "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n", + sdma_version); + kfd->device_info.num_sdma_queues_per_engine = 8; } } -- cgit From c4381d0ee81930097e94e55d1c23f85798ffd093 Mon Sep 17 00:00:00 2001 From: Bokun Zhang Date: Wed, 12 Jan 2022 10:34:11 -0500 Subject: drm/amdgpu: Add interface to load SRIOV cap FW - Add interface to load SRIOV cap FW. If the FW does not exist, simply skip this FW loading routine. This FW will only be loaded under SRIOV. Other driver configuration will not be affected. By adding this interface, it will make us easier to prepare SRIOV Linux guest driver for different users. - Update sysfs interface to read cap FW version. 
- Refactor PSP FW loading routine under SRIOV to use a unified SWITCH statement instead of using IF statement - Remove redundant amdgpu_sriov_vf() check in FW loading routine Acked-by: Monk Liu Acked-by: Guchun Chen Signed-off-by: Bokun Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 14 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 108 ++++++++++++++++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h | 9 +++ drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 1 + drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h | 1 + drivers/gpu/drm/amd/amdgpu/psp_v11_0.c | 4 +- drivers/gpu/drm/amd/amdgpu/psp_v3_1.c | 1 + include/uapi/drm/amdgpu_drm.h | 2 + 8 files changed, 125 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 289521aafb79..85f06396d184 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -400,6 +400,10 @@ static int amdgpu_firmware_info(struct drm_amdgpu_info_firmware *fw_info, fw_info->ver = adev->psp.toc.fw_version; fw_info->feature = adev->psp.toc.feature_version; break; + case AMDGPU_INFO_FW_CAP: + fw_info->ver = adev->psp.cap_fw_version; + fw_info->feature = adev->psp.cap_feature_version; + break; default: return -EINVAL; } @@ -1617,6 +1621,16 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) seq_printf(m, "TOC feature version: %u, firmware version: 0x%08x\n", fw_info.feature, fw_info.ver); + /* CAP */ + if (adev->psp.cap_fw) { + query_fw.fw_type = AMDGPU_INFO_FW_CAP; + ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); + if (ret) + return ret; + seq_printf(m, "CAP feature version: %u, firmware version: 0x%08x\n", + fw_info.feature, fw_info.ver); + } + seq_printf(m, "VBIOS version: %s\n", ctx->vbios_version); return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index dee17a0e1187..c984b5a34679 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -259,6 +259,32 @@ static bool psp_get_runtime_db_entry(struct amdgpu_device *adev, return ret; } +static int psp_init_sriov_microcode(struct psp_context *psp) +{ + struct amdgpu_device *adev = psp->adev; + int ret = 0; + + switch (adev->ip_versions[MP0_HWIP][0]) { + case IP_VERSION(9, 0, 0): + ret = psp_init_cap_microcode(psp, "vega10"); + break; + case IP_VERSION(11, 0, 9): + ret = psp_init_cap_microcode(psp, "navi12"); + break; + case IP_VERSION(11, 0, 7): + ret = psp_init_cap_microcode(psp, "sienna_cichlid"); + break; + case IP_VERSION(13, 0, 2): + ret = psp_init_ta_microcode(psp, "aldebaran"); + break; + default: + BUG(); + break; + } + + return ret; +} + static int psp_sw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; @@ -273,19 +299,13 @@ static int psp_sw_init(void *handle) ret = -ENOMEM; } - if (!amdgpu_sriov_vf(adev)) { + if (amdgpu_sriov_vf(adev)) + ret = psp_init_sriov_microcode(psp); + else ret = psp_init_microcode(psp); - if (ret) { - DRM_ERROR("Failed to load psp firmware!\n"); - return ret; - } - } else if (amdgpu_sriov_vf(adev) && - adev->ip_versions[MP0_HWIP][0] == IP_VERSION(13, 0, 2)) { - ret = psp_init_ta_microcode(psp, "aldebaran"); - if (ret) { - DRM_ERROR("Failed to initialize ta microcode!\n"); - return ret; - } + if (ret) { + DRM_ERROR("Failed to load psp firmware!\n"); + return ret; } memset(&boot_cfg_entry, 0, sizeof(boot_cfg_entry)); @@ -353,6 +373,10 @@ static int psp_sw_fini(void *handle) 
release_firmware(psp->ta_fw); psp->ta_fw = NULL; } + if (adev->psp.cap_fw) { + release_firmware(psp->cap_fw); + psp->cap_fw = NULL; + } if (adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 0) || adev->ip_versions[MP0_HWIP][0] == IP_VERSION(11, 0, 7)) @@ -491,7 +515,10 @@ psp_cmd_submit_buf(struct psp_context *psp, DRM_WARN("psp gfx command %s(0x%X) failed and response status is (0x%X)\n", psp_gfx_cmd_name(psp->cmd_buf_mem->cmd_id), psp->cmd_buf_mem->cmd_id, psp->cmd_buf_mem->resp.status); - if (!timeout) { + /* If we load CAP FW, PSP must return 0 under SRIOV + * also return failure in case of timeout + */ + if ((ucode && (ucode->ucode_id == AMDGPU_UCODE_ID_CAP)) || !timeout) { ret = -EINVAL; goto exit; } @@ -2051,6 +2078,9 @@ static int psp_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *type) { switch (ucode->ucode_id) { + case AMDGPU_UCODE_ID_CAP: + *type = GFX_FW_TYPE_CAP; + break; case AMDGPU_UCODE_ID_SDMA0: *type = GFX_FW_TYPE_SDMA0; break; @@ -3217,6 +3247,58 @@ out: return err; } +int psp_init_cap_microcode(struct psp_context *psp, + const char *chip_name) +{ + struct amdgpu_device *adev = psp->adev; + char fw_name[PSP_FW_NAME_LEN]; + const struct psp_firmware_header_v1_0 *cap_hdr_v1_0; + struct amdgpu_firmware_info *info = NULL; + int err = 0; + + if (!chip_name) { + dev_err(adev->dev, "invalid chip name for cap microcode\n"); + return -EINVAL; + } + + if (!amdgpu_sriov_vf(adev)) { + dev_err(adev->dev, "cap microcode should only be loaded under SRIOV\n"); + return -EINVAL; + } + + snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_cap.bin", chip_name); + err = request_firmware(&adev->psp.cap_fw, fw_name, adev->dev); + if (err) { + dev_warn(adev->dev, "cap microcode does not exist, skip\n"); + err = 0; + goto out; + } + + err = amdgpu_ucode_validate(adev->psp.cap_fw); + if (err) { + dev_err(adev->dev, "fail to initialize cap microcode\n"); + goto out; + } + + info = &adev->firmware.ucode[AMDGPU_UCODE_ID_CAP]; + info->ucode_id = AMDGPU_UCODE_ID_CAP; + info->fw = adev->psp.cap_fw; + cap_hdr_v1_0 = (const struct psp_firmware_header_v1_0 *) + adev->psp.cap_fw->data; + adev->firmware.fw_size += ALIGN( + le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes), PAGE_SIZE); + adev->psp.cap_fw_version = le32_to_cpu(cap_hdr_v1_0->header.ucode_version); + adev->psp.cap_feature_version = le32_to_cpu(cap_hdr_v1_0->sos.fw_version); + adev->psp.cap_ucode_size = le32_to_cpu(cap_hdr_v1_0->header.ucode_size_bytes); + + return 0; + +out: + release_firmware(adev->psp.cap_fw); + adev->psp.cap_fw = NULL; + return err; +} + static int psp_set_clockgating_state(void *handle, enum amd_clockgating_state state) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h index f29afabbff1f..ff7d533eb746 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.h @@ -306,6 +306,9 @@ struct psp_context /* toc firmware */ const struct firmware *toc_fw; + /* cap firmware */ + const struct firmware *cap_fw; + /* fence buffer */ struct amdgpu_bo *fence_buf_bo; uint64_t fence_buf_mc_addr; @@ -327,6 +330,10 @@ struct psp_context const struct firmware *ta_fw; uint32_t ta_fw_version; + uint32_t cap_fw_version; + uint32_t cap_feature_version; + uint32_t cap_ucode_size; + struct ta_context asd_context; struct psp_xgmi_context xgmi_context; struct psp_ras_context ras_context; @@ -440,6 +447,8 @@ int psp_init_sos_microcode(struct psp_context *psp, const char *chip_name); int psp_init_ta_microcode(struct psp_context *psp, const char 
*chip_name); +int psp_init_cap_microcode(struct psp_context *psp, + const char *chip_name); int psp_get_fw_attestation_records_addr(struct psp_context *psp, uint64_t *output_ptr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h index 7c2538db3cd5..428f4df184d0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h @@ -378,6 +378,7 @@ enum AMDGPU_UCODE_ID { AMDGPU_UCODE_ID_VCN0_RAM, AMDGPU_UCODE_ID_VCN1_RAM, AMDGPU_UCODE_ID_DMCUB, + AMDGPU_UCODE_ID_CAP, AMDGPU_UCODE_ID_MAXIMUM, }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h index dd0dce254901..1f276ddd26e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h +++ b/drivers/gpu/drm/amd/amdgpu/psp_gfx_if.h @@ -258,6 +258,7 @@ enum psp_gfx_fw_type { GFX_FW_TYPE_SDMA6 = 56, /* SDMA6 MI */ GFX_FW_TYPE_SDMA7 = 57, /* SDMA7 MI */ GFX_FW_TYPE_VCN1 = 58, /* VCN1 MI */ + GFX_FW_TYPE_CAP = 62, /* CAP_FW */ GFX_FW_TYPE_REG_LIST = 67, /* REG_LIST MI */ GFX_FW_TYPE_MAX }; diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c index d0e76b36d4ab..9518b4394a6e 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v11_0.c @@ -53,11 +53,13 @@ MODULE_FIRMWARE("amdgpu/navi14_ta.bin"); MODULE_FIRMWARE("amdgpu/navi12_sos.bin"); MODULE_FIRMWARE("amdgpu/navi12_asd.bin"); MODULE_FIRMWARE("amdgpu/navi12_ta.bin"); +MODULE_FIRMWARE("amdgpu/navi12_cap.bin"); MODULE_FIRMWARE("amdgpu/arcturus_sos.bin"); MODULE_FIRMWARE("amdgpu/arcturus_asd.bin"); MODULE_FIRMWARE("amdgpu/arcturus_ta.bin"); MODULE_FIRMWARE("amdgpu/sienna_cichlid_sos.bin"); MODULE_FIRMWARE("amdgpu/sienna_cichlid_ta.bin"); +MODULE_FIRMWARE("amdgpu/sienna_cichlid_cap.bin"); MODULE_FIRMWARE("amdgpu/navy_flounder_sos.bin"); MODULE_FIRMWARE("amdgpu/navy_flounder_ta.bin"); MODULE_FIRMWARE("amdgpu/vangogh_asd.bin"); @@ -177,8 +179,6 @@ static int psp_v11_0_init_microcode(struct psp_context *psp) err = psp_init_asd_microcode(psp, chip_name); if (err) return err; - if (amdgpu_sriov_vf(adev)) - break; snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ta.bin", chip_name); err = request_firmware(&adev->psp.ta_fw, fw_name, adev->dev); if (err) { diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c index 1ed357cb0f49..01f3bcc62a6c 100644 --- a/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c +++ b/drivers/gpu/drm/amd/amdgpu/psp_v3_1.c @@ -44,6 +44,7 @@ MODULE_FIRMWARE("amdgpu/vega10_sos.bin"); MODULE_FIRMWARE("amdgpu/vega10_asd.bin"); +MODULE_FIRMWARE("amdgpu/vega10_cap.bin"); MODULE_FIRMWARE("amdgpu/vega12_sos.bin"); MODULE_FIRMWARE("amdgpu/vega12_asd.bin"); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index 0b94ec7b73e7..be4f9111f478 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -728,6 +728,8 @@ struct drm_amdgpu_cs_chunk_data { #define AMDGPU_INFO_FW_DMCUB 0x14 /* Subquery id: Query TOC firmware version */ #define AMDGPU_INFO_FW_TOC 0x15 + /* Subquery id: Query CAP firmware version */ + #define AMDGPU_INFO_FW_CAP 0x16 /* number of bytes moved for TTM migration */ #define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f -- cgit From df01fe73ee98daf00c94189967550bd2d2098912 Mon Sep 17 00:00:00 2001 From: yipechai Date: Tue, 11 Jan 2022 16:45:19 +0800 Subject: drm/amdgpu: Add ras supported check for register_ras_block Add ras supported check for register_ras_block. 
Signed-off-by: yipechai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 517650d286a7..a1b4623ab1dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2756,6 +2756,9 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, if (!adev || !ras_block_obj) return -EINVAL; + if (!amdgpu_ras_asic_supported(adev)) + return 0; + INIT_LIST_HEAD(&ras_block_obj->node); list_add_tail(&ras_block_obj->node, &adev->ras_list); -- cgit From df4f0041c6ef497e598a67e367db835489162754 Mon Sep 17 00:00:00 2001 From: yipechai Date: Wed, 12 Jan 2022 10:19:07 +0800 Subject: drm/amdgpu: No longer insert ras blocks into ras_list if it already exists in ras_list No longer insert ras blocks into ras_list if it already exists in ras_list. Signed-off-by: yipechai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index a1b4623ab1dd..23f4290b2fde 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2753,12 +2753,20 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, struct amdgpu_ras_block_object* ras_block_obj) { + struct amdgpu_ras_block_object *obj, *tmp; if (!adev || !ras_block_obj) return -EINVAL; if (!amdgpu_ras_asic_supported(adev)) return 0; + /* If the ras object is in ras_list, don't add it again */ + list_for_each_entry_safe(obj, tmp, &adev->ras_list, node) { + if (obj == ras_block_obj) { + return 0; + } + } + INIT_LIST_HEAD(&ras_block_obj->node); list_add_tail(&ras_block_obj->node, &adev->ras_list); -- cgit From 69f91d32c6632e09f0954e690d61ac4921dacbd3 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Thu, 13 Jan 2022 15:11:31 +0800 Subject: drm/amdgpu: remove unneeded semicolon Eliminate the following coccicheck warning: ./drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c:2725:16-17: Unneeded semicolon Reported-by: Abaci Robot Reviewed-by: Guchun Chen Signed-off-by: Yang Li Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 23f4290b2fde..64eed01d740c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2722,7 +2722,7 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev) int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras* ras_con) { if (!adev) - return -EINVAL;; + return -EINVAL; adev->psp.ras_context.ras = ras_con; return 0; -- cgit From ab3b9de65bfeded1d4646c9f66897c163e89abd8 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Thu, 13 Jan 2022 15:11:32 +0800 Subject: drm/amdgpu: clean up some inconsistent indenting Eliminate the follow smatch warnings: drivers/gpu/drm/amd/amdgpu/amdgpu_device.c:3504 amdgpu_device_init() warn: inconsistent indenting drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c:1716 amdgpu_ras_error_status_query() warn: if statement not indented drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c:1058 amdgpu_ras_error_inject() warn: inconsistent indenting Reported-by: Abaci Robot Reviewed-by: Guchun Chen Signed-off-by: Yang Li 
Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 8 +++++--- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 71b814fe15f9..7dd7ef76c720 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3498,7 +3498,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); - amdgpu_device_init_apu_flags(adev); + amdgpu_device_init_apu_flags(adev); r = amdgpu_device_check_arguments(adev); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 64eed01d740c..d37b09b2d33a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1055,8 +1055,10 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, .address = info->address, .value = info->value, }; - int ret = -EINVAL; - struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); + int ret = -EINVAL; + struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, + info->head.block, + info->head.sub_block_index); if (!obj) return -EINVAL; @@ -1714,7 +1716,7 @@ static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, } if (block_obj->hw_ops->query_ras_error_status) - block_obj->hw_ops->query_ras_error_status(adev); + block_obj->hw_ops->query_ras_error_status(adev); } -- cgit From 954ea6aa1545a13036851327b4ed251fa7ab2f22 Mon Sep 17 00:00:00 2001 From: yipechai Date: Thu, 13 Jan 2022 11:01:17 +0800 Subject: drm/amdgpu: Use ARRAY_SIZE to get array length Use ARRAY_SIZE to get array length. Signed-off-by: yipechai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index d37b09b2d33a..9864f730b958 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -89,7 +89,8 @@ const char *get_ras_block_str(struct ras_common_if *ras_block) return ras_block_string[ras_block->block]; } -#define ras_block_str(_BLOCK_) (((_BLOCK_) < (sizeof(*ras_block_string)/sizeof(const char*))) ? ras_block_string[_BLOCK_] : "Out Of Range") +#define ras_block_str(_BLOCK_) \ + (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? 
ras_block_string[_BLOCK_] : "Out Of Range") #define ras_err_str(i) (ras_error_string[ffs(i)]) -- cgit From e3d833f41c46b8c59c4af53897a6619bf667ebe5 Mon Sep 17 00:00:00 2001 From: yipechai Date: Thu, 13 Jan 2022 11:29:55 +0800 Subject: drm/amdgpu: fix compile warning for ras_block_match_default fix compile warning for ras_block_match_default Signed-off-by: yipechai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 9864f730b958..0bb6b5354802 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -869,7 +869,8 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, } /* feature ctl end */ -int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object* block_obj, enum amdgpu_ras_block block) +static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj, + enum amdgpu_ras_block block) { if(!block_obj) return -EINVAL; -- cgit From 1613f346f86b25121aceb941d03ca927e57af20c Mon Sep 17 00:00:00 2001 From: Flora Cui Date: Thu, 13 Jan 2022 14:55:24 +0800 Subject: drm/amd/pm: fix null ptr access check null ptr first before access its element v2: check adev->pm.dpm_enabled early in amdgpu_debugfs_pm_init() Signed-off-by: Flora Cui Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 2 +- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 3 +++ 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index f0daa66f5b3d..5fc33893a68c 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -463,7 +463,7 @@ int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_versio const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; int r = 0; - if (!pp_funcs->load_firmware) + if (!pp_funcs || !pp_funcs->load_firmware) return 0; mutex_lock(&adev->pm.mutex); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 1b03ad7a21ad..49a9c6375343 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -3583,6 +3583,9 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev) struct drm_minor *minor = adev_to_drm(adev)->primary; struct dentry *root = minor->debugfs_root; + if (!adev->pm.dpm_enabled) + return; + debugfs_create_file("amdgpu_pm_info", 0444, root, adev, &amdgpu_debugfs_pm_info_fops); -- cgit From a85d70a8b4813a955d45b062440fedb9be701421 Mon Sep 17 00:00:00 2001 From: Changcheng Deng Date: Fri, 14 Jan 2022 09:20:36 +0000 Subject: drm/amd/pm: Replace one-element array with flexible-array member There is a regular need in the kernel to provide a way to declare having a dynamically sized set of trailing elements in a structure. Kernel code should always use "flexible array members" for these cases. The older style of one-element or zero-length arrays should no longer be used. 
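A minimal, self-contained illustration of the difference in plain C (generic code, not from the driver):

#include <stdlib.h>

struct old_style {
        unsigned char count;
        int data[1];    /* one-element array: sizeof(struct old_style) already
                         * includes one int, so allocation math needs a "-1"
                         * or it silently over-allocates */
};

struct new_style {
        unsigned char count;
        int data[];     /* flexible array member: contributes nothing to sizeof() */
};

static struct new_style *alloc_new_style(unsigned char n)
{
        /* header plus exactly n trailing elements */
        struct new_style *p = malloc(sizeof(*p) + n * sizeof(p->data[0]));

        if (p)
                p->count = n;
        return p;
}

In kernel code the allocation above would normally be written with the struct_size() helper from <linux/overflow.h>, which does the same arithmetic with overflow checking; flexible array members also let the compiler and fortification checks reason about the bounds of the trailing array, which the old one-element idiom defeats.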
Reference: https://www.kernel.org/doc/html/latest/process/deprecated.html#zero-length-and-one-element-arrays Reported-by: Zeal Robot Signed-off-by: Changcheng Deng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h index eb0f79f9c876..701aae598b58 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h @@ -121,7 +121,7 @@ typedef struct SMU_Task SMU_Task; struct TOC { uint8_t JobList[NUM_JOBLIST_ENTRIES]; - SMU_Task tasks[1]; + SMU_Task tasks[]; }; // META DATA COMMAND Definitions -- cgit From fb825b651de98cafef13e07673eb72d5e2ceb4a9 Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 14 Jan 2022 18:43:52 +0800 Subject: drm/amd/display: Remove redundant initialization of dpg_width dpg_width is being initialized to width but this is never read as dpg_width is overwritten later on. Remove the redundant initialization. Cleans up the following clang-analyzer warning: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c:6020:8: warning: Value stored to 'dpg_width' during its initialization is never read [clang-analyzer-deadcode.DeadStores]. Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 05e216524370..cb2f96690b4a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -6011,7 +6011,7 @@ static void set_crtc_test_pattern(struct dc_link *link, else if (link->dc->hwss.set_disp_pattern_generator) { struct pipe_ctx *odm_pipe; int opp_cnt = 1; - int dpg_width = width; + int dpg_width; for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) opp_cnt++; -- cgit From 31425abeda7130e66e61dbd8468502061413631f Mon Sep 17 00:00:00 2001 From: CHANDAN VURDIGERE NATARAJ Date: Tue, 11 Jan 2022 19:02:26 +0530 Subject: drm/amdgpu: Enable recovery on yellow carp Add yellow carp to devices which support recovery Signed-off-by: CHANDAN VURDIGERE NATARAJ Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7dd7ef76c720..26debfd3bfb2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4476,6 +4476,7 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) case CHIP_BEIGE_GOBY: case CHIP_VANGOGH: case CHIP_ALDEBARAN: + case CHIP_YELLOW_CARP: break; default: goto disabled; -- cgit From b3523c457380c23cf28d4ee1ef60da337a0a45c0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 11 Jan 2022 17:41:44 -0500 Subject: drm/amdgpu: invert the logic in amdgpu_device_should_recover_gpu() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rather than opting into GPU recovery support, default to on, and opt out if it's not working on a particular GPU. This avoids the need to add new asics to this list since this is a core feature. 
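Sketched with hypothetical chip names (the real list is in the hunk below), the inversion turns an allow-list that every new ASIC had to be added to into a short deny-list of known-bad parts:

  #include <stdbool.h>

  enum chip_id { CHIP_NO_FULL_RESET_A, CHIP_NO_FULL_RESET_B, CHIP_FUTURE_ASIC };

  /* Hypothetical illustration of the opt-out pattern, not the driver code. */
  bool should_recover(enum chip_id chip)
  {
          switch (chip) {
          case CHIP_NO_FULL_RESET_A:
          case CHIP_NO_FULL_RESET_B:
                  return false;          /* explicitly opted out: recovery known broken */
          default:
                  return true;           /* everything else, including future ASICs */
          }
  }

A CHIP_FUTURE_ASIC lands in the default branch with no driver change, which is the maintenance win described above.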
Reviewed-by: Evan Quan Reviewed-by: Guchun Chen Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 44 ++++++++++++------------------ 1 file changed, 17 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 26debfd3bfb2..d363ae2da5c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -4452,34 +4452,24 @@ bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev) if (amdgpu_gpu_recovery == -1) { switch (adev->asic_type) { - case CHIP_BONAIRE: - case CHIP_HAWAII: - case CHIP_TOPAZ: - case CHIP_TONGA: - case CHIP_FIJI: - case CHIP_POLARIS10: - case CHIP_POLARIS11: - case CHIP_POLARIS12: - case CHIP_VEGAM: - case CHIP_VEGA20: - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_RAVEN: - case CHIP_ARCTURUS: - case CHIP_RENOIR: - case CHIP_NAVI10: - case CHIP_NAVI14: - case CHIP_NAVI12: - case CHIP_SIENNA_CICHLID: - case CHIP_NAVY_FLOUNDER: - case CHIP_DIMGREY_CAVEFISH: - case CHIP_BEIGE_GOBY: - case CHIP_VANGOGH: - case CHIP_ALDEBARAN: - case CHIP_YELLOW_CARP: - break; - default: +#ifdef CONFIG_DRM_AMDGPU_SI + case CHIP_VERDE: + case CHIP_TAHITI: + case CHIP_PITCAIRN: + case CHIP_OLAND: + case CHIP_HAINAN: +#endif +#ifdef CONFIG_DRM_AMDGPU_CIK + case CHIP_KAVERI: + case CHIP_KABINI: + case CHIP_MULLINS: +#endif + case CHIP_CARRIZO: + case CHIP_STONEY: + case CHIP_CYAN_SKILLFISH: goto disabled; + default: + break; } } -- cgit From 06cf9bd61a7452df375f212881d9bb6b3c52c3ec Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Wed, 12 Jan 2022 22:38:51 -0500 Subject: drm/amdgpu: don't do resets on APUs which don't support it It can cause a hang. This is normally not enabled for GPU hangs on these asics, but was recently enabled for handling aborted suspends. This causes hangs on some platforms on suspend. 
Fixes: daf8de0874ab5b ("drm/amdgpu: always reset the asic in suspend (v2)") Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1858 Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/cik.c | 4 ++++ drivers/gpu/drm/amd/amdgpu/vi.c | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c index 54f28c075f21..f10ce740a29c 100644 --- a/drivers/gpu/drm/amd/amdgpu/cik.c +++ b/drivers/gpu/drm/amd/amdgpu/cik.c @@ -1428,6 +1428,10 @@ static int cik_asic_reset(struct amdgpu_device *adev) { int r; + /* APUs don't have full asic reset */ + if (adev->flags & AMD_IS_APU) + return 0; + if (cik_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { dev_info(adev->dev, "BACO reset\n"); r = amdgpu_dpm_baco_reset(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c index fe9a7cc8d9eb..6645ebbd2696 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.c +++ b/drivers/gpu/drm/amd/amdgpu/vi.c @@ -956,6 +956,10 @@ static int vi_asic_reset(struct amdgpu_device *adev) { int r; + /* APUs don't have full asic reset */ + if (adev->flags & AMD_IS_APU) + return 0; + if (vi_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) { dev_info(adev->dev, "BACO reset\n"); r = amdgpu_dpm_baco_reset(adev); -- cgit From 5f0754ab2751d1935818459e8e71a8fe26f6403c Mon Sep 17 00:00:00 2001 From: Lukas Fink Date: Fri, 14 Jan 2022 07:51:41 +0100 Subject: drm/amdgpu: Fix rejecting Tahiti GPUs eb4fd29afd4a ("drm/amdgpu: bind to any 0x1002 PCI diplay class device") added generic bindings to amdgpu so that that it binds to all display class devices with VID 0x1002 and then rejects those in amdgpu_pci_probe. Unfortunately it reuses a driver_data value of 0 to detect those new bindings, which is already used to denote CHIP_TAHITI ASICs. The driver_data value given to those new bindings was changed in dd0761fd24ea1 ("drm/amdgpu: set CHIP_IP_DISCOVERY as the asic type by default") to CHIP_IP_DISCOVERY (=36), but it seems that the check in amdgpu_pci_probe was forgotten to be changed. Therefore, it still rejects Tahiti GPUs. Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1860 Fixes: eb4fd29afd4a ("drm/amdgpu: bind to any 0x1002 PCI diplay class device") Signed-off-by: Lukas Fink Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 10e01928ffad..6d4eb1b72951 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1880,7 +1880,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } - if (flags == 0) { + if (flags == CHIP_IP_DISCOVERY) { DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n"); return -ENODEV; } -- cgit From a8e6398ffe171c84b1c03a17eb6d432dc5f703a4 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 14 Jan 2022 09:59:29 -0500 Subject: drm/amdgpu: drop flags check for CHIP_IP_DISCOVERY Support for IP based discovery is in place now so this check is no longer required. 
Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 6d4eb1b72951..cc6585193236 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1880,11 +1880,6 @@ static int amdgpu_pci_probe(struct pci_dev *pdev, return -ENODEV; } - if (flags == CHIP_IP_DISCOVERY) { - DRM_INFO("Unsupported asic. Remove me when IP discovery init is in place.\n"); - return -ENODEV; - } - if (amdgpu_virtual_display || amdgpu_device_asic_has_dc_support(flags & AMD_ASIC_MASK)) supports_atomic = true; -- cgit From e8521d0cd984897b4fa58e7837afebd04e44f0b3 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Fri, 7 Jan 2022 10:44:17 -0600 Subject: drm/amd/display: Revert W/A for hard hangs on DCN20/DCN21 The WA from commit 2a50edbf10c8 ("drm/amd/display: Apply w/a for hard hang on HPD") and commit 1bd3bc745e7f ("drm/amd/display: Extend w/a for hard hang on HPD to dcn20") causes a regression in s0ix where the system will fail to resume properly on many laptops. Pull the workarounds out to avoid that s0ix regression in the common case. This HPD hang happens with an external device in special circumstances and a new W/A will need to be developed for this in the future. Cc: Qingqing Zhuo Reported-by: Scott Bruce Reported-by: Chris Hixon Reported-by: spasswolf@web.de Link: https://bugzilla.kernel.org/show_bug.cgi?id=215436 Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1821 Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1852 Fixes: 2a50edbf10c8 ("drm/amd/display: Apply w/a for hard hang on HPD") Fixes: 1bd3bc745e7f ("drm/amd/display: Extend w/a for hard hang on HPD to dcn20") Reviewed-by: Nicholas Kazlauskas Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- .../amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c | 11 +--------- .../drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c | 11 +--------- .../amd/display/dc/irq/dcn20/irq_service_dcn20.c | 25 ---------------------- .../amd/display/dc/irq/dcn20/irq_service_dcn20.h | 2 -- .../amd/display/dc/irq/dcn21/irq_service_dcn21.c | 25 ---------------------- .../amd/display/dc/irq/dcn21/irq_service_dcn21.h | 2 -- drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2 +- drivers/gpu/drm/amd/display/dc/irq/irq_service.h | 4 ---- 8 files changed, 3 insertions(+), 79 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c index 9f35f2e8f971..cac80ba69072 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn20/dcn20_clk_mgr.c @@ -38,7 +38,6 @@ #include "clk/clk_11_0_0_offset.h" #include "clk/clk_11_0_0_sh_mask.h" -#include "irq/dcn20/irq_service_dcn20.h" #undef FN #define FN(reg_name, field_name) \ @@ -223,8 +222,6 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, bool force_reset = false; bool p_state_change_support; int total_plane_count; - int irq_src; - uint32_t hpd_state; if (dc->work_arounds.skip_clock_update) return; @@ -242,13 +239,7 @@ void dcn2_update_clocks(struct clk_mgr *clk_mgr_base, if (dc->res_pool->pp_smu) pp_smu = &dc->res_pool->pp_smu->nv_funcs; - for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD6; irq_src++) { - hpd_state = dc_get_hpd_state_dcn20(dc->res_pool->irqs, irq_src); - if (hpd_state) - break; - } - - if (display_count == 0 && 
!hpd_state) + if (display_count == 0) enter_display_off = true; if (enter_display_off == safe_to_lower) { diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index fbda42313bfe..f4dee0e48a67 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -42,7 +42,6 @@ #include "clk/clk_10_0_2_sh_mask.h" #include "renoir_ip_offset.h" -#include "irq/dcn21/irq_service_dcn21.h" /* Constants */ @@ -129,11 +128,9 @@ static void rn_update_clocks(struct clk_mgr *clk_mgr_base, struct dc_clocks *new_clocks = &context->bw_ctx.bw.dcn.clk; struct dc *dc = clk_mgr_base->ctx->dc; int display_count; - int irq_src; bool update_dppclk = false; bool update_dispclk = false; bool dpp_clock_lowered = false; - uint32_t hpd_state; struct dmcu *dmcu = clk_mgr_base->ctx->dc->res_pool->dmcu; @@ -150,14 +147,8 @@ static void rn_update_clocks(struct clk_mgr *clk_mgr_base, display_count = rn_get_active_display_cnt_wa(dc, context); - for (irq_src = DC_IRQ_SOURCE_HPD1; irq_src <= DC_IRQ_SOURCE_HPD5; irq_src++) { - hpd_state = dc_get_hpd_state_dcn21(dc->res_pool->irqs, irq_src); - if (hpd_state) - break; - } - /* if we can go lower, go lower */ - if (display_count == 0 && !hpd_state) { + if (display_count == 0) { rn_vbios_smu_set_dcn_low_power_state(clk_mgr, DCN_PWR_STATE_LOW_POWER); /* update power state */ clk_mgr_base->clks.pwr_state = DCN_PWR_STATE_LOW_POWER; diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c index 9ccafe007b23..c4b067d01895 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.c @@ -132,31 +132,6 @@ enum dc_irq_source to_dal_irq_source_dcn20( } } -uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source) -{ - const struct irq_source_info *info; - uint32_t addr; - uint32_t value; - uint32_t current_status; - - info = find_irq_source_info(irq_service, source); - if (!info) - return 0; - - addr = info->status_reg; - if (!addr) - return 0; - - value = dm_read_reg(irq_service->ctx, addr); - current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE); - - return current_status; -} - static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h index 4d69ab24ca25..aee4b37999f1 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn20/irq_service_dcn20.h @@ -31,6 +31,4 @@ struct irq_service *dal_irq_service_dcn20_create( struct irq_service_init_data *init_data); -uint32_t dc_get_hpd_state_dcn20(struct irq_service *irq_service, enum dc_irq_source source); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c index 235294534c43..0f15bcada4e9 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.c @@ -134,31 +134,6 @@ static enum dc_irq_source to_dal_irq_source_dcn21(struct irq_service *irq_servic return DC_IRQ_SOURCE_INVALID; } -uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source) -{ - const 
struct irq_source_info *info; - uint32_t addr; - uint32_t value; - uint32_t current_status; - - info = find_irq_source_info(irq_service, source); - if (!info) - return 0; - - addr = info->status_reg; - if (!addr) - return 0; - - value = dm_read_reg(irq_service->ctx, addr); - current_status = - get_reg_field_value( - value, - HPD0_DC_HPD_INT_STATUS, - DC_HPD_SENSE); - - return current_status; -} - static bool hpd_ack( struct irq_service *irq_service, const struct irq_source_info *info) diff --git a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h index 616470e32380..da2bd0e93d7a 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h +++ b/drivers/gpu/drm/amd/display/dc/irq/dcn21/irq_service_dcn21.h @@ -31,6 +31,4 @@ struct irq_service *dal_irq_service_dcn21_create( struct irq_service_init_data *init_data); -uint32_t dc_get_hpd_state_dcn21(struct irq_service *irq_service, enum dc_irq_source source); - #endif diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c index 4db1133e4466..a2a4fbeb83f8 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c @@ -79,7 +79,7 @@ void dal_irq_service_destroy(struct irq_service **irq_service) *irq_service = NULL; } -const struct irq_source_info *find_irq_source_info( +static const struct irq_source_info *find_irq_source_info( struct irq_service *irq_service, enum dc_irq_source source) { diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h index e60b82480093..dbfcb096eedd 100644 --- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.h +++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.h @@ -69,10 +69,6 @@ struct irq_service { const struct irq_service_funcs *funcs; }; -const struct irq_source_info *find_irq_source_info( - struct irq_service *irq_service, - enum dc_irq_source source); - void dal_irq_service_construct( struct irq_service *irq_service, struct irq_service_init_data *init_data); -- cgit From b0641cb8a1deae38990cea783d2a1117255f59f5 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 12 Jan 2022 13:27:15 +0200 Subject: drm/i915/psr: remove unused lines_to_wait vbt info MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The lines_to_wait info from VBT is never used. Remove. Cc: José Roberto de Souza Cc: Jouni Högander Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20220112112715.1234366-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_bios.c | 20 -------------------- drivers/gpu/drm/i915/i915_drv.h | 8 -------- 2 files changed, 28 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index c7a8d517ce81..262406c00e53 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -905,26 +905,6 @@ parse_psr(struct drm_i915_private *i915, const struct bdb_header *bdb) i915->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 : psr_table->idle_frames > 15 ? 
15 : psr_table->idle_frames; - switch (psr_table->lines_to_wait) { - case 0: - i915->vbt.psr.lines_to_wait = PSR_0_LINES_TO_WAIT; - break; - case 1: - i915->vbt.psr.lines_to_wait = PSR_1_LINE_TO_WAIT; - break; - case 2: - i915->vbt.psr.lines_to_wait = PSR_4_LINES_TO_WAIT; - break; - case 3: - i915->vbt.psr.lines_to_wait = PSR_8_LINES_TO_WAIT; - break; - default: - drm_dbg_kms(&i915->drm, - "VBT has unknown PSR lines to wait %u\n", - psr_table->lines_to_wait); - break; - } - /* * New psr options 0=500us, 1=100us, 2=2500us, 3=0us * Old decimal value is wake up time in multiples of 100 us. diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index b102457bfa51..290dfd40c7b3 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -516,13 +516,6 @@ i915_fence_timeout(const struct drm_i915_private *i915) /* Amount of PSF GV points, BSpec precisely defines this */ #define I915_NUM_PSF_GV_POINTS 3 -enum psr_lines_to_wait { - PSR_0_LINES_TO_WAIT = 0, - PSR_1_LINE_TO_WAIT, - PSR_4_LINES_TO_WAIT, - PSR_8_LINES_TO_WAIT -}; - struct intel_vbt_data { /* bdb version */ u16 version; @@ -562,7 +555,6 @@ struct intel_vbt_data { bool full_link; bool require_aux_wakeup; int idle_frames; - enum psr_lines_to_wait lines_to_wait; int tp1_wakeup_time_us; int tp2_tp3_wakeup_time_us; int psr2_tp2_tp3_wakeup_time_us; -- cgit From 71b59439aa03e8de022c31ccbf9aa9bea4578971 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 1 Dec 2021 17:25:42 +0200 Subject: drm/i915: Sipmplify PLANE_STRIDE masking MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's no need to have separate masks for the stride bitfield in PLANE_STRIDE for different platforms. All the extra bits are hardcoded to zero anyway. Also the masks we're using now don't even match the actual hardware since the bitfield was only 10 bits on skl/derivatives, only getting bumped to 11 bits on glk. So let's just use a 12 bit mask for everything. 
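As a self-contained sketch of what the single mask means in practice (simplified 32-bit stand-ins for the kernel's GENMASK()/FIELD_GET(); the i915 REG_GENMASK()/REG_FIELD_GET() used in these patches add u32 typing and compile-time checks on top), assume a register read where only bits 11:0 carry the stride:

  #include <stdint.h>
  #include <stdio.h>

  /* Simplified helpers; contiguous masks in a 32-bit register assumed. */
  #define GENMASK32(h, l)    (((~0u) >> (31 - (h))) & ((~0u) << (l)))
  #define FIELD_GET32(m, v)  (((v) & (m)) / ((m) & ~((m) << 1)))  /* divide by the mask's lowest set bit == shift down to bit 0 */

  #define PLANE_STRIDE_MASK  GENMASK32(11, 0)     /* 12 bits, as in the patch */

  int main(void)
  {
          uint32_t reg = 0xabcd1234;      /* pretend PLANE_STRIDE readback */

          /* prints 0x234; on skl/glk the unused top bits of the field read as zero */
          printf("stride field = 0x%03x\n", (unsigned)FIELD_GET32(PLANE_STRIDE_MASK, reg));
          return 0;
  }

Packing goes the other way, FIELD_PREP()-style shifting of a value into the mask, and the kernel's FIELD_PREP() additionally fails the build when a constant value does not fit its field, something open-coded shifts cannot check.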
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211201152552.7821-5-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/skl_universal_plane.c | 5 +---- drivers/gpu/drm/i915/i915_reg.h | 3 +-- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 158d89b8d490..ec115505aac2 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -2374,10 +2374,7 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id)); stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); - if (DISPLAY_VER(dev_priv) >= 13) - fb->pitches[0] = (val & PLANE_STRIDE_MASK_XELPD) * stride_mult; - else - fb->pitches[0] = (val & PLANE_STRIDE_MASK) * stride_mult; + fb->pitches[0] = (val & PLANE_STRIDE_MASK) * stride_mult; aligned_height = intel_fb_align_height(fb, 0, fb->height); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4424807c8dec..f13d5886b6bd 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6435,8 +6435,7 @@ enum { _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B) #define PLANE_STRIDE(pipe, plane) \ _MMIO_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe)) -#define PLANE_STRIDE_MASK REG_GENMASK(10, 0) -#define PLANE_STRIDE_MASK_XELPD REG_GENMASK(11, 0) +#define PLANE_STRIDE_MASK REG_GENMASK(11, 0) #define _PLANE_POS_1_B 0x7118c #define _PLANE_POS_2_B 0x7128c -- cgit From 12d7d858e63d0769a91aab218828e0526c0ab49d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 1 Dec 2021 17:25:44 +0200 Subject: drm/i915: Use REG_BIT() & co. for universal plane bits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Polish the skl+ universal plane register defines by using REG_BIT() & co. The defines are also currently spread around in some semi-random fashion. Collect them up into one place. 
v2: deal with gvt Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211201152552.7821-7-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/skl_universal_plane.c | 36 ++-- drivers/gpu/drm/i915/gvt/fb_decoder.c | 2 +- drivers/gpu/drm/i915/gvt/reg.h | 1 - drivers/gpu/drm/i915/i915_reg.h | 197 ++++++++++++--------- drivers/gpu/drm/i915/intel_pm.c | 12 +- 5 files changed, 136 insertions(+), 112 deletions(-) diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index ec115505aac2..cc9d1c6b6c2e 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -1048,11 +1048,12 @@ skl_program_plane_noarm(struct intel_plane *plane, if (plane_state->force_black) icl_plane_csc_load_black(plane); - intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), stride); + intel_de_write_fw(dev_priv, PLANE_STRIDE(pipe, plane_id), + PLANE_STRIDE_(stride)); intel_de_write_fw(dev_priv, PLANE_POS(pipe, plane_id), - (crtc_y << 16) | crtc_x); + PLANE_POS_Y(crtc_y) | PLANE_POS_X(crtc_x)); intel_de_write_fw(dev_priv, PLANE_SIZE(pipe, plane_id), - ((src_h - 1) << 16) | (src_w - 1)); + PLANE_HEIGHT(src_h - 1) | PLANE_WIDTH(src_w - 1)); if (intel_fb_is_rc_ccs_cc_modifier(fb->modifier)) { intel_de_write_fw(dev_priv, PLANE_CC_VAL(pipe, plane_id, 0), @@ -1111,7 +1112,7 @@ skl_program_plane_arm(struct intel_plane *plane, skl_surf_address(plane_state, color_plane); if (DISPLAY_VER(dev_priv) < 12) - aux_dist |= skl_plane_stride(plane_state, aux_plane); + aux_dist |= PLANE_AUX_STRIDE(skl_plane_stride(plane_state, aux_plane)); } spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); @@ -1122,14 +1123,14 @@ skl_program_plane_arm(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax); intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), - (y << 16) | x); + PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); if (DISPLAY_VER(dev_priv) < 11) intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), - (plane_state->view.color_plane[1].y << 16) | - plane_state->view.color_plane[1].x); + PLANE_OFFSET_Y(plane_state->view.color_plane[1].y) | + PLANE_OFFSET_X(plane_state->view.color_plane[1].x)); if (DISPLAY_VER(dev_priv) >= 10) intel_de_write_fw(dev_priv, PLANE_COLOR_CTL(pipe, plane_id), plane_color_ctl); @@ -2289,16 +2290,17 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, val = intel_de_read(dev_priv, PLANE_CTL(pipe, plane_id)); if (DISPLAY_VER(dev_priv) >= 11) - pixel_format = val & ICL_PLANE_CTL_FORMAT_MASK; + pixel_format = val & PLANE_CTL_FORMAT_MASK_ICL; else - pixel_format = val & PLANE_CTL_FORMAT_MASK; + pixel_format = val & PLANE_CTL_FORMAT_MASK_SKL; if (DISPLAY_VER(dev_priv) >= 10) { - alpha = intel_de_read(dev_priv, - PLANE_COLOR_CTL(pipe, plane_id)); - alpha &= PLANE_COLOR_ALPHA_MASK; + u32 color_ctl; + + color_ctl = intel_de_read(dev_priv, PLANE_COLOR_CTL(pipe, plane_id)); + alpha = REG_FIELD_GET(PLANE_COLOR_ALPHA_MASK, color_ctl); } else { - alpha = val & PLANE_CTL_ALPHA_MASK; + alpha = REG_FIELD_GET(PLANE_CTL_ALPHA_MASK, val); } fourcc = skl_format_to_fourcc(pixel_format, @@ -2362,19 +2364,19 @@ skl_get_initial_plane_config(struct intel_crtc *crtc, if (drm_rotation_90_or_270(plane_config->rotation)) goto error; - base = intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & 0xfffff000; + base = 
intel_de_read(dev_priv, PLANE_SURF(pipe, plane_id)) & PLANE_SURF_ADDR_MASK; plane_config->base = base; offset = intel_de_read(dev_priv, PLANE_OFFSET(pipe, plane_id)); val = intel_de_read(dev_priv, PLANE_SIZE(pipe, plane_id)); - fb->height = ((val >> 16) & 0xffff) + 1; - fb->width = ((val >> 0) & 0xffff) + 1; + fb->height = REG_FIELD_GET(PLANE_HEIGHT_MASK, val) + 1; + fb->width = REG_FIELD_GET(PLANE_WIDTH_MASK, val) + 1; val = intel_de_read(dev_priv, PLANE_STRIDE(pipe, plane_id)); stride_mult = skl_plane_stride_mult(fb, 0, DRM_MODE_ROTATE_0); - fb->pitches[0] = (val & PLANE_STRIDE_MASK) * stride_mult; + fb->pitches[0] = REG_FIELD_GET(PLANE_STRIDE__MASK, val) * stride_mult; aligned_height = intel_fb_align_height(fb, 0, fb->height); diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 1aabfa9cda02..cdadc7368d55 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -218,7 +218,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, if (GRAPHICS_VER(dev_priv) >= 9) { plane->tiled = val & PLANE_CTL_TILED_MASK; fmt = skl_format_to_drm( - val & PLANE_CTL_FORMAT_MASK, + val & PLANE_CTL_FORMAT_MASK_SKL, val & PLANE_CTL_ORDER_RGBX, val & PLANE_CTL_ALPHA_MASK, val & PLANE_CTL_YUV422_ORDER_MASK); diff --git a/drivers/gpu/drm/i915/gvt/reg.h b/drivers/gpu/drm/i915/gvt/reg.h index 244cc7320b54..7d666d34f9ff 100644 --- a/drivers/gpu/drm/i915/gvt/reg.h +++ b/drivers/gpu/drm/i915/gvt/reg.h @@ -62,7 +62,6 @@ #define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe)) -#define PLANE_CTL_ASYNC_FLIP (1 << 9) #define REG50080_FLIP_TYPE_MASK 0x3 #define REG50080_FLIP_TYPE_ASYNC 0x1 diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index f13d5886b6bd..a4fe2d112268 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6228,84 +6228,99 @@ enum { #define _PLANE_CTL_1_A 0x70180 #define _PLANE_CTL_2_A 0x70280 #define _PLANE_CTL_3_A 0x70380 -#define PLANE_CTL_ENABLE (1 << 31) +#define PLANE_CTL_ENABLE REG_BIT(31) #define PLANE_CTL_ARB_SLOTS_MASK REG_GENMASK(30, 28) /* icl+ */ #define PLANE_CTL_ARB_SLOTS(x) REG_FIELD_PREP(PLANE_CTL_ARB_SLOTS_MASK, (x)) /* icl+ */ -#define PLANE_CTL_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-GLK */ -#define PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE (1 << 28) +#define PLANE_CTL_PIPE_GAMMA_ENABLE REG_BIT(30) /* Pre-GLK */ +#define PLANE_CTL_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28) /* * ICL+ uses the same PLANE_CTL_FORMAT bits, but the field definition * expanded to include bit 23 as well. 
However, the shift-24 based values * correctly map to the same formats in ICL, as long as bit 23 is set to 0 */ -#define PLANE_CTL_FORMAT_MASK (0xf << 24) -#define PLANE_CTL_FORMAT_YUV422 (0 << 24) -#define PLANE_CTL_FORMAT_NV12 (1 << 24) -#define PLANE_CTL_FORMAT_XRGB_2101010 (2 << 24) -#define PLANE_CTL_FORMAT_P010 (3 << 24) -#define PLANE_CTL_FORMAT_XRGB_8888 (4 << 24) -#define PLANE_CTL_FORMAT_P012 (5 << 24) -#define PLANE_CTL_FORMAT_XRGB_16161616F (6 << 24) -#define PLANE_CTL_FORMAT_P016 (7 << 24) -#define PLANE_CTL_FORMAT_XYUV (8 << 24) -#define PLANE_CTL_FORMAT_INDEXED (12 << 24) -#define PLANE_CTL_FORMAT_RGB_565 (14 << 24) -#define ICL_PLANE_CTL_FORMAT_MASK (0x1f << 23) -#define PLANE_CTL_PIPE_CSC_ENABLE (1 << 23) /* Pre-GLK */ -#define PLANE_CTL_FORMAT_Y210 (1 << 23) -#define PLANE_CTL_FORMAT_Y212 (3 << 23) -#define PLANE_CTL_FORMAT_Y216 (5 << 23) -#define PLANE_CTL_FORMAT_Y410 (7 << 23) -#define PLANE_CTL_FORMAT_Y412 (9 << 23) -#define PLANE_CTL_FORMAT_Y416 (0xb << 23) -#define PLANE_CTL_KEY_ENABLE_MASK (0x3 << 21) -#define PLANE_CTL_KEY_ENABLE_SOURCE (1 << 21) -#define PLANE_CTL_KEY_ENABLE_DESTINATION (2 << 21) -#define PLANE_CTL_ORDER_BGRX (0 << 20) -#define PLANE_CTL_ORDER_RGBX (1 << 20) -#define PLANE_CTL_YUV420_Y_PLANE (1 << 19) -#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) -#define PLANE_CTL_YUV422_ORDER_MASK (0x3 << 16) -#define PLANE_CTL_YUV422_ORDER_YUYV (0 << 16) -#define PLANE_CTL_YUV422_ORDER_UYVY (1 << 16) -#define PLANE_CTL_YUV422_ORDER_YVYU (2 << 16) -#define PLANE_CTL_YUV422_ORDER_VYUY (3 << 16) -#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE (1 << 15) -#define PLANE_CTL_TRICKLE_FEED_DISABLE (1 << 14) -#define PLANE_CTL_CLEAR_COLOR_DISABLE (1 << 13) /* TGL+ */ -#define PLANE_CTL_PLANE_GAMMA_DISABLE (1 << 13) /* Pre-GLK */ -#define PLANE_CTL_TILED_MASK (0x7 << 10) -#define PLANE_CTL_TILED_LINEAR (0 << 10) -#define PLANE_CTL_TILED_X (1 << 10) -#define PLANE_CTL_TILED_Y (4 << 10) -#define PLANE_CTL_TILED_YF (5 << 10) -#define PLANE_CTL_ASYNC_FLIP (1 << 9) -#define PLANE_CTL_FLIP_HORIZONTAL (1 << 8) -#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE (1 << 4) /* TGL+ */ -#define PLANE_CTL_ALPHA_MASK (0x3 << 4) /* Pre-GLK */ -#define PLANE_CTL_ALPHA_DISABLE (0 << 4) -#define PLANE_CTL_ALPHA_SW_PREMULTIPLY (2 << 4) -#define PLANE_CTL_ALPHA_HW_PREMULTIPLY (3 << 4) -#define PLANE_CTL_ROTATE_MASK 0x3 -#define PLANE_CTL_ROTATE_0 0x0 -#define PLANE_CTL_ROTATE_90 0x1 -#define PLANE_CTL_ROTATE_180 0x2 -#define PLANE_CTL_ROTATE_270 0x3 +#define PLANE_CTL_FORMAT_MASK_SKL REG_GENMASK(27, 24) /* pre-icl */ +#define PLANE_CTL_FORMAT_MASK_ICL REG_GENMASK(27, 23) /* icl+ */ +#define PLANE_CTL_FORMAT_YUV422 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 0) +#define PLANE_CTL_FORMAT_NV12 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 1) +#define PLANE_CTL_FORMAT_XRGB_2101010 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 2) +#define PLANE_CTL_FORMAT_P010 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 3) +#define PLANE_CTL_FORMAT_XRGB_8888 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 4) +#define PLANE_CTL_FORMAT_P012 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 5) +#define PLANE_CTL_FORMAT_XRGB_16161616F REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 6) +#define PLANE_CTL_FORMAT_P016 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 7) +#define PLANE_CTL_FORMAT_XYUV REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 8) +#define PLANE_CTL_FORMAT_INDEXED REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 12) +#define PLANE_CTL_FORMAT_RGB_565 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_SKL, 14) +#define PLANE_CTL_FORMAT_Y210 
REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 1) +#define PLANE_CTL_FORMAT_Y212 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 3) +#define PLANE_CTL_FORMAT_Y216 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 5) +#define PLANE_CTL_FORMAT_Y410 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 7) +#define PLANE_CTL_FORMAT_Y412 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 9) +#define PLANE_CTL_FORMAT_Y416 REG_FIELD_PREP(PLANE_CTL_FORMAT_MASK_ICL, 11) +#define PLANE_CTL_PIPE_CSC_ENABLE REG_BIT(23) /* Pre-GLK */ +#define PLANE_CTL_KEY_ENABLE_MASK REG_GENMASK(22, 21) +#define PLANE_CTL_KEY_ENABLE_SOURCE REG_FIELD_PREP(PLANE_CTL_KEY_ENABLE_MASK, 1) +#define PLANE_CTL_KEY_ENABLE_DESTINATION REG_FIELD_PREP(PLANE_CTL_KEY_ENABLE_MASK, 2) +#define PLANE_CTL_ORDER_RGBX REG_BIT(20) +#define PLANE_CTL_YUV420_Y_PLANE REG_BIT(19) +#define PLANE_CTL_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) +#define PLANE_CTL_YUV422_ORDER_MASK REG_GENMASK(17, 16) +#define PLANE_CTL_YUV422_ORDER_YUYV REG_FIELD_PREP(PLANE_CTL_YUV422_ORDER_MASK, 0) +#define PLANE_CTL_YUV422_ORDER_UYVY REG_FIELD_PREP(PLANE_CTL_YUV422_ORDER_MASK, 1) +#define PLANE_CTL_YUV422_ORDER_YVYU REG_FIELD_PREP(PLANE_CTL_YUV422_ORDER_MASK, 2) +#define PLANE_CTL_YUV422_ORDER_VYUY REG_FIELD_PREP(PLANE_CTL_YUV422_ORDER_MASK, 3) +#define PLANE_CTL_RENDER_DECOMPRESSION_ENABLE REG_BIT(15) +#define PLANE_CTL_TRICKLE_FEED_DISABLE REG_BIT(14) +#define PLANE_CTL_CLEAR_COLOR_DISABLE REG_BIT(13) /* TGL+ */ +#define PLANE_CTL_PLANE_GAMMA_DISABLE REG_BIT(13) /* Pre-GLK */ +#define PLANE_CTL_TILED_MASK REG_GENMASK(12, 10) +#define PLANE_CTL_TILED_LINEAR REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 0) +#define PLANE_CTL_TILED_X REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 1) +#define PLANE_CTL_TILED_Y REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 4) +#define PLANE_CTL_TILED_YF REG_FIELD_PREP(PLANE_CTL_TILED_MASK, 5) +#define PLANE_CTL_ASYNC_FLIP REG_BIT(9) +#define PLANE_CTL_FLIP_HORIZONTAL REG_BIT(8) +#define PLANE_CTL_MEDIA_DECOMPRESSION_ENABLE REG_BIT(4) /* TGL+ */ +#define PLANE_CTL_ALPHA_MASK REG_GENMASK(5, 4) /* Pre-GLK */ +#define PLANE_CTL_ALPHA_DISABLE REG_FIELD_PREP(PLANE_CTL_ALPHA_MASK, 0) +#define PLANE_CTL_ALPHA_SW_PREMULTIPLY REG_FIELD_PREP(PLANE_CTL_ALPHA_MASK, 2) +#define PLANE_CTL_ALPHA_HW_PREMULTIPLY REG_FIELD_PREP(PLANE_CTL_ALPHA_MASK, 3) +#define PLANE_CTL_ROTATE_MASK REG_GENMASK(1, 0) +#define PLANE_CTL_ROTATE_0 REG_FIELD_PREP(PLANE_CTL_ROTATE_MASK, 0) +#define PLANE_CTL_ROTATE_90 REG_FIELD_PREP(PLANE_CTL_ROTATE_MASK, 1) +#define PLANE_CTL_ROTATE_180 REG_FIELD_PREP(PLANE_CTL_ROTATE_MASK, 2) +#define PLANE_CTL_ROTATE_270 REG_FIELD_PREP(PLANE_CTL_ROTATE_MASK, 3) #define _PLANE_STRIDE_1_A 0x70188 #define _PLANE_STRIDE_2_A 0x70288 #define _PLANE_STRIDE_3_A 0x70388 +#define PLANE_STRIDE__MASK REG_GENMASK(11, 0) +#define PLANE_STRIDE_(stride) REG_FIELD_PREP(PLANE_STRIDE__MASK, (stride)) #define _PLANE_POS_1_A 0x7018c #define _PLANE_POS_2_A 0x7028c #define _PLANE_POS_3_A 0x7038c +#define PLANE_POS_Y_MASK REG_GENMASK(31, 16) +#define PLANE_POS_Y(y) REG_FIELD_PREP(PLANE_POS_Y_MASK, (y)) +#define PLANE_POS_X_MASK REG_GENMASK(15, 0) +#define PLANE_POS_X(x) REG_FIELD_PREP(PLANE_POS_X_MASK, (x)) #define _PLANE_SIZE_1_A 0x70190 #define _PLANE_SIZE_2_A 0x70290 #define _PLANE_SIZE_3_A 0x70390 +#define PLANE_HEIGHT_MASK REG_GENMASK(31, 16) +#define PLANE_HEIGHT(h) REG_FIELD_PREP(PLANE_HEIGHT_MASK, (h)) +#define PLANE_WIDTH_MASK REG_GENMASK(15, 0) +#define PLANE_WIDTH(w) REG_FIELD_PREP(PLANE_WIDTH_MASK, (w)) #define _PLANE_SURF_1_A 0x7019c #define _PLANE_SURF_2_A 0x7029c #define _PLANE_SURF_3_A 0x7039c +#define 
PLANE_SURF_ADDR_MASK REG_GENMASK(31, 12) +#define PLANE_SURF_DECRYPT REG_BIT(2) #define _PLANE_OFFSET_1_A 0x701a4 #define _PLANE_OFFSET_2_A 0x702a4 #define _PLANE_OFFSET_3_A 0x703a4 +#define PLANE_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define PLANE_OFFSET_Y(y) REG_FIELD_PREP(PLANE_OFFSET_Y_MASK, (y)) +#define PLANE_OFFSET_X_MASK REG_GENMASK(15, 0) +#define PLANE_OFFSET_X(x) REG_FIELD_PREP(PLANE_OFFSET_X_MASK, (x)) #define _PLANE_KEYVAL_1_A 0x70194 #define _PLANE_KEYVAL_2_A 0x70294 #define _PLANE_KEYMSK_1_A 0x70198 @@ -6317,42 +6332,49 @@ enum { #define _PLANE_CC_VAL_1_A 0x701b4 #define _PLANE_CC_VAL_2_A 0x702b4 #define _PLANE_AUX_DIST_1_A 0x701c0 +#define PLANE_AUX_DISTANCE_MASK REG_GENMASK(31, 12) +#define PLANE_AUX_STRIDE_MASK REG_GENMASK(11, 0) +#define PLANE_AUX_STRIDE(stride) REG_FIELD_PREP(PLANE_AUX_STRIDE_MASK, (stride)) #define _PLANE_AUX_DIST_2_A 0x702c0 #define _PLANE_AUX_OFFSET_1_A 0x701c4 #define _PLANE_AUX_OFFSET_2_A 0x702c4 #define _PLANE_CUS_CTL_1_A 0x701c8 #define _PLANE_CUS_CTL_2_A 0x702c8 -#define PLANE_CUS_ENABLE (1 << 31) -#define PLANE_CUS_Y_PLANE_4_RKL (0 << 30) -#define PLANE_CUS_Y_PLANE_5_RKL (1 << 30) -#define PLANE_CUS_Y_PLANE_6_ICL (0 << 30) -#define PLANE_CUS_Y_PLANE_7_ICL (1 << 30) -#define PLANE_CUS_HPHASE_SIGN_NEGATIVE (1 << 19) -#define PLANE_CUS_HPHASE_0 (0 << 16) -#define PLANE_CUS_HPHASE_0_25 (1 << 16) -#define PLANE_CUS_HPHASE_0_5 (2 << 16) -#define PLANE_CUS_VPHASE_SIGN_NEGATIVE (1 << 15) -#define PLANE_CUS_VPHASE_0 (0 << 12) -#define PLANE_CUS_VPHASE_0_25 (1 << 12) -#define PLANE_CUS_VPHASE_0_5 (2 << 12) +#define PLANE_CUS_ENABLE REG_BIT(31) +#define PLANE_CUS_Y_PLANE_MASK REG_BIT(30) +#define PLANE_CUS_Y_PLANE_4_RKL REG_FIELD_PREP(PLANE_CUS_Y_PLANE_MASK, 0) +#define PLANE_CUS_Y_PLANE_5_RKL REG_FIELD_PREP(PLANE_CUS_Y_PLANE_MASK, 1) +#define PLANE_CUS_Y_PLANE_6_ICL REG_FIELD_PREP(PLANE_CUS_Y_PLANE_MASK, 0) +#define PLANE_CUS_Y_PLANE_7_ICL REG_FIELD_PREP(PLANE_CUS_Y_PLANE_MASK, 1) +#define PLANE_CUS_HPHASE_SIGN_NEGATIVE REG_BIT(19) +#define PLANE_CUS_HPHASE_MASK REG_GENMASK(17, 16) +#define PLANE_CUS_HPHASE_0 REG_FIELD_PREP(PLANE_CUS_HPHASE_MASK, 0) +#define PLANE_CUS_HPHASE_0_25 REG_FIELD_PREP(PLANE_CUS_HPHASE_MASK, 1) +#define PLANE_CUS_HPHASE_0_5 REG_FIELD_PREP(PLANE_CUS_HPHASE_MASK, 2) +#define PLANE_CUS_VPHASE_SIGN_NEGATIVE REG_BIT(15) +#define PLANE_CUS_VPHASE_MASK REG_GENMASK(13, 12) +#define PLANE_CUS_VPHASE_0 REG_FIELD_PREP(PLANE_CUS_VPHASE_MASK, 0) +#define PLANE_CUS_VPHASE_0_25 REG_FIELD_PREP(PLANE_CUS_VPHASE_MASK, 1) +#define PLANE_CUS_VPHASE_0_5 REG_FIELD_PREP(PLANE_CUS_VPHASE_MASK, 2) #define _PLANE_COLOR_CTL_1_A 0x701CC /* GLK+ */ #define _PLANE_COLOR_CTL_2_A 0x702CC /* GLK+ */ #define _PLANE_COLOR_CTL_3_A 0x703CC /* GLK+ */ -#define PLANE_COLOR_PIPE_GAMMA_ENABLE (1 << 30) /* Pre-ICL */ -#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE (1 << 28) +#define PLANE_COLOR_PIPE_GAMMA_ENABLE REG_BIT(30) /* Pre-ICL */ +#define PLANE_COLOR_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28) +#define PLANE_COLOR_PIPE_CSC_ENABLE REG_BIT(23) /* Pre-ICL */ #define PLANE_COLOR_PLANE_CSC_ENABLE REG_BIT(21) /* ICL+ */ -#define PLANE_COLOR_INPUT_CSC_ENABLE (1 << 20) /* ICL+ */ -#define PLANE_COLOR_PIPE_CSC_ENABLE (1 << 23) /* Pre-ICL */ -#define PLANE_COLOR_CSC_MODE_BYPASS (0 << 17) -#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601 (1 << 17) -#define PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709 (2 << 17) -#define PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020 (3 << 17) -#define PLANE_COLOR_CSC_MODE_RGB709_TO_RGB2020 (4 << 17) -#define PLANE_COLOR_PLANE_GAMMA_DISABLE (1 << 13) -#define 
PLANE_COLOR_ALPHA_MASK (0x3 << 4) -#define PLANE_COLOR_ALPHA_DISABLE (0 << 4) -#define PLANE_COLOR_ALPHA_SW_PREMULTIPLY (2 << 4) -#define PLANE_COLOR_ALPHA_HW_PREMULTIPLY (3 << 4) +#define PLANE_COLOR_INPUT_CSC_ENABLE REG_BIT(20) /* ICL+ */ +#define PLANE_COLOR_CSC_MODE_MASK REG_GENMASK(19, 17) +#define PLANE_COLOR_CSC_MODE_BYPASS REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 0) +#define PLANE_COLOR_CSC_MODE_YUV601_TO_RGB601 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 1) +#define PLANE_COLOR_CSC_MODE_YUV709_TO_RGB709 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 2) +#define PLANE_COLOR_CSC_MODE_YUV2020_TO_RGB2020 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 3) +#define PLANE_COLOR_CSC_MODE_RGB709_TO_RGB2020 REG_FIELD_PREP(PLANE_COLOR_CSC_MODE_MASK, 4) +#define PLANE_COLOR_PLANE_GAMMA_DISABLE REG_BIT(13) +#define PLANE_COLOR_ALPHA_MASK REG_GENMASK(5, 4) +#define PLANE_COLOR_ALPHA_DISABLE REG_FIELD_PREP(PLANE_COLOR_ALPHA_MASK, 0) +#define PLANE_COLOR_ALPHA_SW_PREMULTIPLY REG_FIELD_PREP(PLANE_COLOR_ALPHA_MASK, 2) +#define PLANE_COLOR_ALPHA_HW_PREMULTIPLY REG_FIELD_PREP(PLANE_COLOR_ALPHA_MASK, 3) #define _PLANE_BUF_CFG_1_A 0x7027c #define _PLANE_BUF_CFG_2_A 0x7037c #define _PLANE_NV12_BUF_CFG_1_A 0x70278 @@ -6435,7 +6457,6 @@ enum { _PIPE(pipe, _PLANE_STRIDE_3_A, _PLANE_STRIDE_3_B) #define PLANE_STRIDE(pipe, plane) \ _MMIO_PLANE(plane, _PLANE_STRIDE_1(pipe), _PLANE_STRIDE_2(pipe)) -#define PLANE_STRIDE_MASK REG_GENMASK(11, 0) #define _PLANE_POS_1_B 0x7118c #define _PLANE_POS_2_B 0x7128c @@ -6463,7 +6484,6 @@ enum { #define _PLANE_SURF_3(pipe) _PIPE(pipe, _PLANE_SURF_3_A, _PLANE_SURF_3_B) #define PLANE_SURF(pipe, plane) \ _MMIO_PLANE(plane, _PLANE_SURF_1(pipe), _PLANE_SURF_2(pipe)) -#define PLANE_SURF_DECRYPT REG_BIT(2) #define _PLANE_OFFSET_1_B 0x711a4 #define _PLANE_OFFSET_2_B 0x712a4 @@ -6495,8 +6515,11 @@ enum { #define _PLANE_BUF_CFG_1_B 0x7127c #define _PLANE_BUF_CFG_2_B 0x7137c -#define DDB_ENTRY_MASK 0xFFF /* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */ -#define DDB_ENTRY_END_SHIFT 16 +/* skl+: 10 bits, icl+ 11 bits, adlp+ 12 bits */ +#define PLANE_BUF_END_MASK REG_GENMASK(27, 16) +#define PLANE_BUF_END(end) REG_FIELD_PREP(PLANE_BUF_END_MASK, (end)) +#define PLANE_BUF_START_MASK REG_GENMASK(11, 0) +#define PLANE_BUF_START(start) REG_FIELD_PREP(PLANE_BUF_START_MASK, (start)) #define _PLANE_BUF_CFG_1(pipe) \ _PIPE(pipe, _PLANE_BUF_CFG_1_A, _PLANE_BUF_CFG_1_B) #define _PLANE_BUF_CFG_2(pipe) \ diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index a83b71af551b..897d66fec5d6 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4292,11 +4292,10 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, static void skl_ddb_entry_init_from_hw(struct drm_i915_private *dev_priv, struct skl_ddb_entry *entry, u32 reg) { - entry->start = reg & DDB_ENTRY_MASK; - entry->end = (reg >> DDB_ENTRY_END_SHIFT) & DDB_ENTRY_MASK; - + entry->start = REG_FIELD_GET(PLANE_BUF_START_MASK, reg); + entry->end = REG_FIELD_GET(PLANE_BUF_END_MASK, reg); if (entry->end) - entry->end += 1; + entry->end++; } static void @@ -4320,7 +4319,7 @@ skl_ddb_get_hw_plane_state(struct drm_i915_private *dev_priv, /* No DDB allocated for disabled planes */ if (val & PLANE_CTL_ENABLE) - fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK, + fourcc = skl_format_to_fourcc(val & PLANE_CTL_FORMAT_MASK_SKL, val & PLANE_CTL_ORDER_RGBX, val & PLANE_CTL_ALPHA_MASK); @@ -5891,7 +5890,8 @@ static void skl_ddb_entry_write(struct drm_i915_private *dev_priv, { if (entry->end) 
intel_de_write_fw(dev_priv, reg, - (entry->end - 1) << 16 | entry->start); + PLANE_BUF_END(entry->end - 1) | + PLANE_BUF_START(entry->start)); else intel_de_write_fw(dev_priv, reg, 0); } -- cgit From 2f609faf5bda9d828ce0229689227ba2edb1918b Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 1 Dec 2021 17:25:46 +0200 Subject: drm/i915: Clean up ivb+ sprite plane registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_BIT() & co. to polish the ivb+ sprite plane registers. v2: deal with gvt Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211201152552.7821-9-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_sprite.c | 20 ++++--- drivers/gpu/drm/i915/gvt/fb_decoder.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 81 +++++++++++++++++------------ 3 files changed, 63 insertions(+), 40 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 2357a1301f48..090d1d372211 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -700,7 +700,7 @@ static u32 ivb_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 sprctl = 0; if (crtc_state->gamma_enable) - sprctl |= SPRITE_GAMMA_ENABLE; + sprctl |= SPRITE_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) sprctl |= SPRITE_PIPE_CSC_ENABLE; @@ -770,7 +770,7 @@ static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, } if (!ivb_need_sprite_gamma(plane_state)) - sprctl |= SPRITE_INT_GAMMA_DISABLE; + sprctl |= SPRITE_PLANE_GAMMA_DISABLE; if (plane_state->hw.color_encoding == DRM_COLOR_YCBCR_BT709) sprctl |= SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709; @@ -863,14 +863,18 @@ ivb_sprite_update_noarm(struct intel_plane *plane, unsigned long irqflags; if (crtc_w != src_w || crtc_h != src_h) - sprscale = SPRITE_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1); + sprscale = SPRITE_SCALE_ENABLE | + SPRITE_SRC_WIDTH(src_w - 1) | + SPRITE_SRC_HEIGHT(src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, SPRSTRIDE(pipe), plane_state->view.color_plane[0].mapping_stride); - intel_de_write_fw(dev_priv, SPRPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, SPRSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); + intel_de_write_fw(dev_priv, SPRPOS(pipe), + SPRITE_POS_Y(crtc_y) | SPRITE_POS_X(crtc_x)); + intel_de_write_fw(dev_priv, SPRSIZE(pipe), + SPRITE_HEIGHT(crtc_h - 1) | SPRITE_WIDTH(crtc_w - 1)); if (IS_IVYBRIDGE(dev_priv)) intel_de_write_fw(dev_priv, SPRSCALE(pipe), sprscale); @@ -907,10 +911,12 @@ ivb_sprite_update_arm(struct intel_plane *plane, /* HSW consolidates SPRTILEOFF and SPRLINOFF into a single SPROFFSET * register */ if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { - intel_de_write_fw(dev_priv, SPROFFSET(pipe), (y << 16) | x); + intel_de_write_fw(dev_priv, SPROFFSET(pipe), + SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } else { intel_de_write_fw(dev_priv, SPRLINOFF(pipe), linear_offset); - intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), (y << 16) | x); + intel_de_write_fw(dev_priv, SPRTILEOFF(pipe), + SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } /* diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index cdadc7368d55..83fd18d0b1a1 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -430,7 +430,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu, 
yuv_order = (val & SPRITE_YUV_ORDER_MASK) >> _SPRITE_YUV_ORDER_SHIFT; - fmt = (val & SPRITE_PIXFORMAT_MASK) >> _SPRITE_FMT_SHIFT; + fmt = (val & SPRITE_FORMAT_MASK) >> _SPRITE_FMT_SHIFT; if (!sprite_pixel_formats[fmt].bpp) { gvt_vgpu_err("Non-supported pixel format (0x%x)\n", fmt); return -EINVAL; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index a4fe2d112268..4bd4cdfb0131 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6025,50 +6025,67 @@ enum { #define DVSGAMCMAX_ILK(pipe, i) _MMIO(_PIPE(pipe, _DVSAGAMCMAX_ILK, _DVSBGAMCMAX_ILK) + (i) * 4) /* 3 x u1.10 */ #define _SPRA_CTL 0x70280 -#define SPRITE_ENABLE (1 << 31) -#define SPRITE_GAMMA_ENABLE (1 << 30) -#define SPRITE_YUV_RANGE_CORRECTION_DISABLE (1 << 28) -#define SPRITE_PIXFORMAT_MASK (7 << 25) -#define SPRITE_FORMAT_YUV422 (0 << 25) -#define SPRITE_FORMAT_RGBX101010 (1 << 25) -#define SPRITE_FORMAT_RGBX888 (2 << 25) -#define SPRITE_FORMAT_RGBX161616 (3 << 25) -#define SPRITE_FORMAT_YUV444 (4 << 25) -#define SPRITE_FORMAT_XR_BGR101010 (5 << 25) /* Extended range */ -#define SPRITE_PIPE_CSC_ENABLE (1 << 24) -#define SPRITE_SOURCE_KEY (1 << 22) -#define SPRITE_RGB_ORDER_RGBX (1 << 20) /* only for 888 and 161616 */ -#define SPRITE_YUV_TO_RGB_CSC_DISABLE (1 << 19) -#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 (1 << 18) /* 0 is BT601 */ -#define SPRITE_YUV_ORDER_MASK (3 << 16) -#define SPRITE_YUV_ORDER_YUYV (0 << 16) -#define SPRITE_YUV_ORDER_UYVY (1 << 16) -#define SPRITE_YUV_ORDER_YVYU (2 << 16) -#define SPRITE_YUV_ORDER_VYUY (3 << 16) -#define SPRITE_ROTATE_180 (1 << 15) -#define SPRITE_TRICKLE_FEED_DISABLE (1 << 14) -#define SPRITE_INT_GAMMA_DISABLE (1 << 13) -#define SPRITE_TILED (1 << 10) -#define SPRITE_DEST_KEY (1 << 2) +#define SPRITE_ENABLE REG_BIT(31) +#define SPRITE_PIPE_GAMMA_ENABLE REG_BIT(30) +#define SPRITE_YUV_RANGE_CORRECTION_DISABLE REG_BIT(28) +#define SPRITE_FORMAT_MASK REG_GENMASK(27, 25) +#define SPRITE_FORMAT_YUV422 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 0) +#define SPRITE_FORMAT_RGBX101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 1) +#define SPRITE_FORMAT_RGBX888 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 2) +#define SPRITE_FORMAT_RGBX161616 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 3) +#define SPRITE_FORMAT_YUV444 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 4) +#define SPRITE_FORMAT_XR_BGR101010 REG_FIELD_PREP(SPRITE_FORMAT_MASK, 5) /* Extended range */ +#define SPRITE_PIPE_CSC_ENABLE REG_BIT(24) +#define SPRITE_SOURCE_KEY REG_BIT(22) +#define SPRITE_RGB_ORDER_RGBX REG_BIT(20) /* only for 888 and 161616 */ +#define SPRITE_YUV_TO_RGB_CSC_DISABLE REG_BIT(19) +#define SPRITE_YUV_TO_RGB_CSC_FORMAT_BT709 REG_BIT(18) /* 0 is BT601 */ +#define SPRITE_YUV_ORDER_MASK REG_GENMASK(17, 16) +#define SPRITE_YUV_ORDER_YUYV REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 0) +#define SPRITE_YUV_ORDER_UYVY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 1) +#define SPRITE_YUV_ORDER_YVYU REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 2) +#define SPRITE_YUV_ORDER_VYUY REG_FIELD_PREP(SPRITE_YUV_ORDER_MASK, 3) +#define SPRITE_ROTATE_180 REG_BIT(15) +#define SPRITE_TRICKLE_FEED_DISABLE REG_BIT(14) +#define SPRITE_PLANE_GAMMA_DISABLE REG_BIT(13) +#define SPRITE_TILED REG_BIT(10) +#define SPRITE_DEST_KEY REG_BIT(2) #define _SPRA_LINOFF 0x70284 #define _SPRA_STRIDE 0x70288 #define _SPRA_POS 0x7028c +#define SPRITE_POS_Y_MASK REG_GENMASK(31, 16) +#define SPRITE_POS_Y(y) REG_FIELD_PREP(SPRITE_POS_Y_MASK, (y)) +#define SPRITE_POS_X_MASK REG_GENMASK(15, 0) +#define SPRITE_POS_X(x) REG_FIELD_PREP(SPRITE_POS_X_MASK, (x)) #define 
_SPRA_SIZE 0x70290 +#define SPRITE_HEIGHT_MASK REG_GENMASK(31, 16) +#define SPRITE_HEIGHT(h) REG_FIELD_PREP(SPRITE_HEIGHT_MASK, (h)) +#define SPRITE_WIDTH_MASK REG_GENMASK(15, 0) +#define SPRITE_WIDTH(w) REG_FIELD_PREP(SPRITE_WIDTH_MASK, (w)) #define _SPRA_KEYVAL 0x70294 #define _SPRA_KEYMSK 0x70298 #define _SPRA_SURF 0x7029c +#define SPRITE_ADDR_MASK REG_GENMASK(31, 12) #define _SPRA_KEYMAX 0x702a0 #define _SPRA_TILEOFF 0x702a4 +#define SPRITE_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define SPRITE_OFFSET_Y(y) REG_FIELD_PREP(SPRITE_OFFSET_Y_MASK, (y)) +#define SPRITE_OFFSET_X_MASK REG_GENMASK(15, 0) +#define SPRITE_OFFSET_X(x) REG_FIELD_PREP(SPRITE_OFFSET_X_MASK, (x)) #define _SPRA_OFFSET 0x702a4 #define _SPRA_SURFLIVE 0x702ac #define _SPRA_SCALE 0x70304 -#define SPRITE_SCALE_ENABLE (1 << 31) -#define SPRITE_FILTER_MASK (3 << 29) -#define SPRITE_FILTER_MEDIUM (0 << 29) -#define SPRITE_FILTER_ENHANCING (1 << 29) -#define SPRITE_FILTER_SOFTENING (2 << 29) -#define SPRITE_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ -#define SPRITE_VERTICAL_OFFSET_ENABLE (1 << 27) +#define SPRITE_SCALE_ENABLE REG_BIT(31) +#define SPRITE_FILTER_MASK REG_GENMASK(30, 29) +#define SPRITE_FILTER_MEDIUM REG_FIELD_PREP(SPRITE_FILTER_MASK, 0) +#define SPRITE_FILTER_ENHANCING REG_FIELD_PREP(SPRITE_FILTER_MASK, 1) +#define SPRITE_FILTER_SOFTENING REG_FIELD_PREP(SPRITE_FILTER_MASK, 2) +#define SPRITE_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */ +#define SPRITE_VERTICAL_OFFSET_ENABLE REG_BIT(27) +#define SPRITE_SRC_WIDTH_MASK REG_GENMASK(26, 16) +#define SPRITE_SRC_WIDTH(w) REG_FIELD_PREP(SPRITE_SRC_WIDTH_MASK, (w)) +#define SPRITE_SRC_HEIGHT_MASK REG_GENMASK(10, 0) +#define SPRITE_SRC_HEIGHT(h) REG_FIELD_PREP(SPRITE_SRC_HEIGHT_MASK, (h)) #define _SPRA_GAMC 0x70400 #define _SPRA_GAMC16 0x70440 #define _SPRA_GAMC17 0x7044c -- cgit From f6bb74e07705579f83252f9c3cbd462d8084bb4d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 1 Dec 2021 17:25:48 +0200 Subject: drm/i915: Clean up g4x+ sprite plane registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_BIT() & co. to polish the g4x+ sprite plane registers. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211201152552.7821-11-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_sprite.c | 12 +++-- drivers/gpu/drm/i915/i915_reg.h | 73 ++++++++++++++++++----------- 2 files changed, 53 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 090d1d372211..9c231567bd91 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -1053,7 +1053,7 @@ static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 dvscntr = 0; if (crtc_state->gamma_enable) - dvscntr |= DVS_GAMMA_ENABLE; + dvscntr |= DVS_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) dvscntr |= DVS_PIPE_CSC_ENABLE; @@ -1205,14 +1205,18 @@ g4x_sprite_update_noarm(struct intel_plane *plane, unsigned long irqflags; if (crtc_w != src_w || crtc_h != src_h) - dvsscale = DVS_SCALE_ENABLE | ((src_w - 1) << 16) | (src_h - 1); + dvsscale = DVS_SCALE_ENABLE | + DVS_SRC_WIDTH(src_w - 1) | + DVS_SRC_HEIGHT(src_h - 1); spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DVSSTRIDE(pipe), plane_state->view.color_plane[0].mapping_stride); - intel_de_write_fw(dev_priv, DVSPOS(pipe), (crtc_y << 16) | crtc_x); - intel_de_write_fw(dev_priv, DVSSIZE(pipe), ((crtc_h - 1) << 16) | (crtc_w - 1)); + intel_de_write_fw(dev_priv, DVSPOS(pipe), + DVS_POS_Y(crtc_y) | DVS_POS_X(crtc_x)); + intel_de_write_fw(dev_priv, DVSSIZE(pipe), + DVS_HEIGHT(crtc_h - 1) | DVS_WIDTH(crtc_w - 1)); intel_de_write_fw(dev_priv, DVSSCALE(pipe), dvsscale); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 4bd4cdfb0131..78e4066e955e 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5949,46 +5949,63 @@ enum { /* Sprite A control */ #define _DVSACNTR 0x72180 -#define DVS_ENABLE (1 << 31) -#define DVS_GAMMA_ENABLE (1 << 30) -#define DVS_YUV_RANGE_CORRECTION_DISABLE (1 << 27) -#define DVS_PIXFORMAT_MASK (3 << 25) -#define DVS_FORMAT_YUV422 (0 << 25) -#define DVS_FORMAT_RGBX101010 (1 << 25) -#define DVS_FORMAT_RGBX888 (2 << 25) -#define DVS_FORMAT_RGBX161616 (3 << 25) -#define DVS_PIPE_CSC_ENABLE (1 << 24) -#define DVS_SOURCE_KEY (1 << 22) -#define DVS_RGB_ORDER_XBGR (1 << 20) -#define DVS_YUV_FORMAT_BT709 (1 << 18) -#define DVS_YUV_ORDER_MASK (3 << 16) -#define DVS_YUV_ORDER_YUYV (0 << 16) -#define DVS_YUV_ORDER_UYVY (1 << 16) -#define DVS_YUV_ORDER_YVYU (2 << 16) -#define DVS_YUV_ORDER_VYUY (3 << 16) -#define DVS_ROTATE_180 (1 << 15) -#define DVS_DEST_KEY (1 << 2) -#define DVS_TRICKLE_FEED_DISABLE (1 << 14) -#define DVS_TILED (1 << 10) +#define DVS_ENABLE REG_BIT(31) +#define DVS_PIPE_GAMMA_ENABLE REG_BIT(30) +#define DVS_YUV_RANGE_CORRECTION_DISABLE REG_BIT(27) +#define DVS_FORMAT_MASK REG_GENMASK(26, 25) +#define DVS_FORMAT_YUV422 REG_FIELD_PREP(DVS_FORMAT_MASK, 0) +#define DVS_FORMAT_RGBX101010 REG_FIELD_PREP(DVS_FORMAT_MASK, 1) +#define DVS_FORMAT_RGBX888 REG_FIELD_PREP(DVS_FORMAT_MASK, 2) +#define DVS_FORMAT_RGBX161616 REG_FIELD_PREP(DVS_FORMAT_MASK, 3) +#define DVS_PIPE_CSC_ENABLE REG_BIT(24) +#define DVS_SOURCE_KEY REG_BIT(22) +#define DVS_RGB_ORDER_XBGR REG_BIT(20) +#define DVS_YUV_FORMAT_BT709 REG_BIT(18) +#define DVS_YUV_ORDER_MASK REG_GENMASK(17, 16) +#define DVS_YUV_ORDER_YUYV REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 0) +#define DVS_YUV_ORDER_UYVY 
REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 1) +#define DVS_YUV_ORDER_YVYU REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 2) +#define DVS_YUV_ORDER_VYUY REG_FIELD_PREP(DVS_YUV_ORDER_MASK, 3) +#define DVS_ROTATE_180 REG_BIT(15) +#define DVS_TRICKLE_FEED_DISABLE REG_BIT(14) +#define DVS_TILED REG_BIT(10) +#define DVS_DEST_KEY REG_BIT(2) #define _DVSALINOFF 0x72184 #define _DVSASTRIDE 0x72188 #define _DVSAPOS 0x7218c +#define DVS_POS_Y_MASK REG_GENMASK(31, 16) +#define DVS_POS_Y(y) REG_FIELD_PREP(DVS_POS_Y_MASK, (y)) +#define DVS_POS_X_MASK REG_GENMASK(15, 0) +#define DVS_POS_X(x) REG_FIELD_PREP(DVS_POS_X_MASK, (x)) #define _DVSASIZE 0x72190 +#define DVS_HEIGHT_MASK REG_GENMASK(31, 16) +#define DVS_HEIGHT(h) REG_FIELD_PREP(DVS_HEIGHT_MASK, (h)) +#define DVS_WIDTH_MASK REG_GENMASK(15, 0) +#define DVS_WIDTH(w) REG_FIELD_PREP(DVS_WIDTH_MASK, (w)) #define _DVSAKEYVAL 0x72194 #define _DVSAKEYMSK 0x72198 #define _DVSASURF 0x7219c +#define DVS_ADDR_MASK REG_GENMASK(31, 12) #define _DVSAKEYMAXVAL 0x721a0 #define _DVSATILEOFF 0x721a4 +#define DVS_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define DVS_OFFSET_Y(y) REG_FIELD_PREP(DVS_OFFSET_Y_MASK, (y)) +#define DVS_OFFSET_X_MASK REG_GENMASK(15, 0) +#define DVS_OFFSET_X(x) REG_FIELD_PREP(DVS_OFFSET_X_MASK, (x)) #define _DVSASURFLIVE 0x721ac #define _DVSAGAMC_G4X 0x721e0 /* g4x */ #define _DVSASCALE 0x72204 -#define DVS_SCALE_ENABLE (1 << 31) -#define DVS_FILTER_MASK (3 << 29) -#define DVS_FILTER_MEDIUM (0 << 29) -#define DVS_FILTER_ENHANCING (1 << 29) -#define DVS_FILTER_SOFTENING (2 << 29) -#define DVS_VERTICAL_OFFSET_HALF (1 << 28) /* must be enabled below */ -#define DVS_VERTICAL_OFFSET_ENABLE (1 << 27) +#define DVS_SCALE_ENABLE REG_BIT(31) +#define DVS_FILTER_MASK REG_GENMASK(30, 29) +#define DVS_FILTER_MEDIUM REG_FIELD_PREP(DVS_FILTER_MASK, 0) +#define DVS_FILTER_ENHANCING REG_FIELD_PREP(DVS_FILTER_MASK, 1) +#define DVS_FILTER_SOFTENING REG_FIELD_PREP(DVS_FILTER_MASK, 2) +#define DVS_VERTICAL_OFFSET_HALF REG_BIT(28) /* must be enabled below */ +#define DVS_VERTICAL_OFFSET_ENABLE REG_BIT(27) +#define DVS_SRC_WIDTH_MASK REG_GENMASK(26, 16) +#define DVS_SRC_WIDTH(w) REG_FIELD_PREP(DVS_SRC_WIDTH_MASK, (w)) +#define DVS_SRC_HEIGHT_MASK REG_GENMASK(10, 0) +#define DVS_SRC_HEIGHT(h) REG_FIELD_PREP(DVS_SRC_HEIGHT_MASK, (h)) #define _DVSAGAMC_ILK 0x72300 /* ilk/snb */ #define _DVSAGAMCMAX_ILK 0x72340 /* ilk/snb */ -- cgit From 348abd4cf353abb3aca8dc6ebb80ee84acc4f64e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 1 Dec 2021 17:25:49 +0200 Subject: drm/i915: Clean up cursor registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_BIT() & co. to polish the cursor plane registers. 
v2: deal with gvt Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211201152552.7821-12-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_cursor.c | 25 +++++----- drivers/gpu/drm/i915/display/intel_display.c | 4 +- drivers/gpu/drm/i915/gvt/display.c | 4 +- drivers/gpu/drm/i915/gvt/fb_decoder.c | 2 +- drivers/gpu/drm/i915/i915_reg.h | 71 +++++++++++++++------------- 5 files changed, 56 insertions(+), 50 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 16d34685d83f..2ade8fdd9bdd 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -51,16 +51,16 @@ static u32 intel_cursor_position(const struct intel_plane_state *plane_state) u32 pos = 0; if (x < 0) { - pos |= CURSOR_POS_SIGN << CURSOR_X_SHIFT; + pos |= CURSOR_POS_X_SIGN; x = -x; } - pos |= x << CURSOR_X_SHIFT; + pos |= CURSOR_POS_X(x); if (y < 0) { - pos |= CURSOR_POS_SIGN << CURSOR_Y_SHIFT; + pos |= CURSOR_POS_Y_SIGN; y = -y; } - pos |= y << CURSOR_Y_SHIFT; + pos |= CURSOR_POS_Y(y); return pos; } @@ -180,7 +180,7 @@ static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 cntl = 0; if (crtc_state->gamma_enable) - cntl |= CURSOR_GAMMA_ENABLE; + cntl |= CURSOR_PIPE_GAMMA_ENABLE; return cntl; } @@ -264,7 +264,7 @@ static void i845_cursor_update_arm(struct intel_plane *plane, cntl = plane_state->ctl | i845_cursor_ctl_crtc(crtc_state); - size = (height << 12) | width; + size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width); base = intel_cursor_base(plane_state); pos = intel_cursor_position(plane_state); @@ -280,7 +280,7 @@ static void i845_cursor_update_arm(struct intel_plane *plane, plane->cursor.cntl != cntl) { intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), 0); intel_de_write_fw(dev_priv, CURBASE(PIPE_A), base); - intel_de_write_fw(dev_priv, CURSIZE, size); + intel_de_write_fw(dev_priv, CURSIZE(PIPE_A), size); intel_de_write_fw(dev_priv, CURPOS(PIPE_A), pos); intel_de_write_fw(dev_priv, CURCNTR(PIPE_A), cntl); @@ -340,13 +340,13 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) return cntl; if (crtc_state->gamma_enable) - cntl = MCURSOR_GAMMA_ENABLE; + cntl = MCURSOR_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) cntl |= MCURSOR_PIPE_CSC_ENABLE; if (DISPLAY_VER(dev_priv) < 5 && !IS_G4X(dev_priv)) - cntl |= MCURSOR_PIPE_SELECT(crtc->pipe); + cntl |= MCURSOR_PIPE_SEL(crtc->pipe); return cntl; } @@ -502,7 +502,7 @@ static void i9xx_cursor_update_arm(struct intel_plane *plane, i9xx_cursor_ctl_crtc(crtc_state); if (width != height) - fbc_ctl = CUR_FBC_CTL_EN | (height - 1); + fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1); base = intel_cursor_base(plane_state); pos = intel_cursor_position(plane_state); @@ -586,13 +586,12 @@ static bool i9xx_cursor_get_hw_state(struct intel_plane *plane, val = intel_de_read(dev_priv, CURCNTR(plane->pipe)); - ret = val & MCURSOR_MODE; + ret = val & MCURSOR_MODE_MASK; if (DISPLAY_VER(dev_priv) >= 5 || IS_G4X(dev_priv)) *pipe = plane->pipe; else - *pipe = (val & MCURSOR_PIPE_SELECT_MASK) >> - MCURSOR_PIPE_SELECT_SHIFT; + *pipe = REG_FIELD_GET(MCURSOR_PIPE_SEL_MASK, val); intel_display_power_put(dev_priv, power_domain, wakeref); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ebad83d9a2aa..2ecf6d9ab84b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ 
b/drivers/gpu/drm/i915/display/intel_display.c @@ -10004,9 +10004,9 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISPLAY_PLANE_ENABLE); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE); + intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE); + intel_de_read(dev_priv, CURCNTR(PIPE_B)) & MCURSOR_MODE_MASK); intel_de_write(dev_priv, PIPECONF(pipe), 0); intel_de_posting_read(dev_priv, PIPECONF(pipe)); diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 034c060f89d4..8ce5d2b2e330 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -187,7 +187,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE); vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; - vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; } @@ -498,7 +498,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) for_each_pipe(dev_priv, pipe) { vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; - vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE; + vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; } diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 83fd18d0b1a1..40ace46bad46 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -342,7 +342,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu, return -ENODEV; val = vgpu_vreg_t(vgpu, CURCNTR(pipe)); - mode = val & MCURSOR_MODE; + mode = val & MCURSOR_MODE_MASK; plane->enabled = (mode != MCURSOR_MODE_DISABLE); if (!plane->enabled) return -ENODEV; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 78e4066e955e..5e57652b7807 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5756,44 +5756,50 @@ enum { /* Cursor A & B regs */ #define _CURACNTR 0x70080 /* Old style CUR*CNTR flags (desktop 8xx) */ -#define CURSOR_ENABLE 0x80000000 -#define CURSOR_GAMMA_ENABLE 0x40000000 -#define CURSOR_STRIDE_SHIFT 28 -#define CURSOR_STRIDE(x) ((ffs(x) - 9) << CURSOR_STRIDE_SHIFT) /* 256,512,1k,2k */ -#define CURSOR_FORMAT_SHIFT 24 -#define CURSOR_FORMAT_MASK (0x07 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_2C (0x00 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_3C (0x01 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_4C (0x02 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_ARGB (0x04 << CURSOR_FORMAT_SHIFT) -#define CURSOR_FORMAT_XRGB (0x05 << CURSOR_FORMAT_SHIFT) +#define CURSOR_ENABLE REG_BIT(31) +#define CURSOR_PIPE_GAMMA_ENABLE REG_BIT(30) +#define CURSOR_STRIDE_MASK REG_GENMASK(29, 28) +#define CURSOR_STRIDE(stride) REG_FIELD_PREP(CURSOR_STRIDE_MASK, ffs(stride) - 9) /* 256,512,1k,2k */ +#define CURSOR_FORMAT_MASK REG_GENMASK(26, 24) +#define CURSOR_FORMAT_2C REG_FIELD_PREP(CURSOR_FORMAT_MASK, 0) +#define CURSOR_FORMAT_3C REG_FIELD_PREP(CURSOR_FORMAT_MASK, 1) +#define CURSOR_FORMAT_4C REG_FIELD_PREP(CURSOR_FORMAT_MASK, 2) +#define CURSOR_FORMAT_ARGB REG_FIELD_PREP(CURSOR_FORMAT_MASK, 
4) +#define CURSOR_FORMAT_XRGB REG_FIELD_PREP(CURSOR_FORMAT_MASK, 5) /* New style CUR*CNTR flags */ -#define MCURSOR_MODE 0x27 -#define MCURSOR_MODE_DISABLE 0x00 -#define MCURSOR_MODE_128_32B_AX 0x02 -#define MCURSOR_MODE_256_32B_AX 0x03 -#define MCURSOR_MODE_64_32B_AX 0x07 -#define MCURSOR_MODE_128_ARGB_AX ((1 << 5) | MCURSOR_MODE_128_32B_AX) -#define MCURSOR_MODE_256_ARGB_AX ((1 << 5) | MCURSOR_MODE_256_32B_AX) -#define MCURSOR_MODE_64_ARGB_AX ((1 << 5) | MCURSOR_MODE_64_32B_AX) #define MCURSOR_ARB_SLOTS_MASK REG_GENMASK(30, 28) /* icl+ */ #define MCURSOR_ARB_SLOTS(x) REG_FIELD_PREP(MCURSOR_ARB_SLOTS_MASK, (x)) /* icl+ */ -#define MCURSOR_PIPE_SELECT_MASK (0x3 << 28) -#define MCURSOR_PIPE_SELECT_SHIFT 28 -#define MCURSOR_PIPE_SELECT(pipe) ((pipe) << 28) -#define MCURSOR_GAMMA_ENABLE (1 << 26) -#define MCURSOR_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */ -#define MCURSOR_ROTATE_180 (1 << 15) -#define MCURSOR_TRICKLE_FEED_DISABLE (1 << 14) +#define MCURSOR_PIPE_SEL_MASK REG_GENMASK(29, 28) +#define MCURSOR_PIPE_SEL(pipe) REG_FIELD_PREP(MCURSOR_PIPE_SEL_MASK, (pipe)) +#define MCURSOR_PIPE_GAMMA_ENABLE REG_BIT(26) +#define MCURSOR_PIPE_CSC_ENABLE REG_BIT(24) /* ilk+ */ +#define MCURSOR_ROTATE_180 REG_BIT(15) +#define MCURSOR_TRICKLE_FEED_DISABLE REG_BIT(14) +#define MCURSOR_MODE_MASK 0x27 +#define MCURSOR_MODE_DISABLE 0x00 +#define MCURSOR_MODE_128_32B_AX 0x02 +#define MCURSOR_MODE_256_32B_AX 0x03 +#define MCURSOR_MODE_64_32B_AX 0x07 +#define MCURSOR_MODE_128_ARGB_AX (0x20 | MCURSOR_MODE_128_32B_AX) +#define MCURSOR_MODE_256_ARGB_AX (0x20 | MCURSOR_MODE_256_32B_AX) +#define MCURSOR_MODE_64_ARGB_AX (0x20 | MCURSOR_MODE_64_32B_AX) #define _CURABASE 0x70084 #define _CURAPOS 0x70088 -#define CURSOR_POS_MASK 0x007FF -#define CURSOR_POS_SIGN 0x8000 -#define CURSOR_X_SHIFT 0 -#define CURSOR_Y_SHIFT 16 -#define CURSIZE _MMIO(0x700a0) /* 845/865 */ +#define CURSOR_POS_Y_SIGN REG_BIT(31) +#define CURSOR_POS_Y_MASK REG_GENMASK(30, 16) +#define CURSOR_POS_Y(y) REG_FIELD_PREP(CURSOR_POS_Y_MASK, (y)) +#define CURSOR_POS_X_SIGN REG_BIT(15) +#define CURSOR_POS_X_MASK REG_GENMASK(14, 0) +#define CURSOR_POS_X(x) REG_FIELD_PREP(CURSOR_POS_X_MASK, (x)) +#define _CURASIZE 0x700a0 /* 845/865 */ +#define CURSOR_HEIGHT_MASK REG_GENMASK(21, 12) +#define CURSOR_HEIGHT(h) REG_FIELD_PREP(CURSOR_HEIGHT_MASK, (h)) +#define CURSOR_WIDTH_MASK REG_GENMASK(9, 0) +#define CURSOR_WIDTH(w) REG_FIELD_PREP(CURSOR_WIDTH_MASK, (w)) #define _CUR_FBC_CTL_A 0x700a0 /* ivb+ */ -#define CUR_FBC_CTL_EN (1 << 31) +#define CUR_FBC_EN REG_BIT(31) +#define CUR_FBC_HEIGHT_MASK REG_GENMASK(7, 0) +#define CUR_FBC_HEIGHT(h) REG_FIELD_PREP(CUR_FBC_HEIGHT_MASK, (h)) #define _CURASURFLIVE 0x700ac /* g4x+ */ #define _CURBCNTR 0x700c0 #define _CURBBASE 0x700c4 @@ -5806,6 +5812,7 @@ enum { #define CURCNTR(pipe) _CURSOR2(pipe, _CURACNTR) #define CURBASE(pipe) _CURSOR2(pipe, _CURABASE) #define CURPOS(pipe) _CURSOR2(pipe, _CURAPOS) +#define CURSIZE(pipe) _CURSOR2(pipe, _CURASIZE) #define CUR_FBC_CTL(pipe) _CURSOR2(pipe, _CUR_FBC_CTL_A) #define CURSURFLIVE(pipe) _CURSOR2(pipe, _CURASURFLIVE) -- cgit From 366714b0883f0411a4b142b1f7cefc6b184183eb Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 1 Dec 2021 17:25:50 +0200 Subject: drm/i915: Extract skl_plane_aux_dist() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extract the PLANE_AUX_DIST stuff into a small helper to dclutter skl_program_plane_arm() a bit. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211201152552.7821-13-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/skl_universal_plane.c | 35 ++++++++++++++-------- 1 file changed, 23 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index cc9d1c6b6c2e..9ec686836908 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -992,6 +992,26 @@ static u32 skl_plane_surf(const struct intel_plane_state *plane_state, return plane_surf; } +static u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state, + int color_plane) +{ + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); + const struct drm_framebuffer *fb = plane_state->hw.fb; + int aux_plane = skl_main_to_aux_plane(fb, color_plane); + u32 aux_dist; + + if (!aux_plane) + return 0; + + aux_dist = skl_surf_address(plane_state, aux_plane) - + skl_surf_address(plane_state, color_plane); + + if (DISPLAY_VER(i915) < 12) + aux_dist |= PLANE_AUX_STRIDE(skl_plane_stride(plane_state, aux_plane)); + + return aux_dist; +} + static void icl_plane_csc_load_black(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); @@ -1086,11 +1106,9 @@ skl_program_plane_arm(struct intel_plane *plane, enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - const struct drm_framebuffer *fb = plane_state->hw.fb; - int aux_plane = skl_main_to_aux_plane(fb, color_plane); u32 x = plane_state->view.color_plane[color_plane].x; u32 y = plane_state->view.color_plane[color_plane].y; - u32 keymsk, keymax, aux_dist = 0, plane_color_ctl = 0; + u32 keymsk, keymax, plane_color_ctl = 0; u8 alpha = plane_state->hw.alpha >> 8; u32 plane_ctl = plane_state->ctl; unsigned long irqflags; @@ -1107,14 +1125,6 @@ skl_program_plane_arm(struct intel_plane *plane, if (alpha < 0xff) keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; - if (aux_plane) { - aux_dist = skl_surf_address(plane_state, aux_plane) - - skl_surf_address(plane_state, color_plane); - - if (DISPLAY_VER(dev_priv) < 12) - aux_dist |= PLANE_AUX_STRIDE(skl_plane_stride(plane_state, aux_plane)); - } - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), @@ -1125,7 +1135,8 @@ skl_program_plane_arm(struct intel_plane *plane, intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); - intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), aux_dist); + intel_de_write_fw(dev_priv, PLANE_AUX_DIST(pipe, plane_id), + skl_plane_aux_dist(plane_state, color_plane)); if (DISPLAY_VER(dev_priv) < 11) intel_de_write_fw(dev_priv, PLANE_AUX_OFFSET(pipe, plane_id), -- cgit From 4682a6d99638bb8ae62f00b9466849065c91fd1f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 1 Dec 2021 17:25:51 +0200 Subject: drm/i915: Declutter color key register stuff MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add a few small helpers to calculate the color key register values. Cleans up skl_program_plane_arm() a bit. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211201152552.7821-14-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/skl_universal_plane.c | 45 +++++++++++++++------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 9ec686836908..936f8e00601d 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -1012,6 +1012,34 @@ static u32 skl_plane_aux_dist(const struct intel_plane_state *plane_state, return aux_dist; } +static u32 skl_plane_keyval(const struct intel_plane_state *plane_state) +{ + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + + return key->min_value; +} + +static u32 skl_plane_keymax(const struct intel_plane_state *plane_state) +{ + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u8 alpha = plane_state->hw.alpha >> 8; + + return (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); +} + +static u32 skl_plane_keymsk(const struct intel_plane_state *plane_state) +{ + const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; + u8 alpha = plane_state->hw.alpha >> 8; + u32 keymsk; + + keymsk = key->channel_mask & 0x7ffffff; + if (alpha < 0xff) + keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; + + return keymsk; +} + static void icl_plane_csc_load_black(struct intel_plane *plane) { struct drm_i915_private *i915 = to_i915(plane->base.dev); @@ -1105,11 +1133,9 @@ skl_program_plane_arm(struct intel_plane *plane, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; u32 x = plane_state->view.color_plane[color_plane].x; u32 y = plane_state->view.color_plane[color_plane].y; - u32 keymsk, keymax, plane_color_ctl = 0; - u8 alpha = plane_state->hw.alpha >> 8; + u32 plane_color_ctl = 0; u32 plane_ctl = plane_state->ctl; unsigned long irqflags; @@ -1119,18 +1145,11 @@ skl_program_plane_arm(struct intel_plane *plane, plane_color_ctl = plane_state->color_ctl | glk_plane_color_ctl_crtc(crtc_state); - keymax = (key->max_value & 0xffffff) | PLANE_KEYMAX_ALPHA(alpha); - - keymsk = key->channel_mask & 0x7ffffff; - if (alpha < 0xff) - keymsk |= PLANE_KEYMSK_ALPHA_ENABLE; - spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); - intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), - key->min_value); - intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), keymsk); - intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), keymax); + intel_de_write_fw(dev_priv, PLANE_KEYVAL(pipe, plane_id), skl_plane_keyval(plane_state)); + intel_de_write_fw(dev_priv, PLANE_KEYMSK(pipe, plane_id), skl_plane_keymsk(plane_state)); + intel_de_write_fw(dev_priv, PLANE_KEYMAX(pipe, plane_id), skl_plane_keymax(plane_state)); intel_de_write_fw(dev_priv, PLANE_OFFSET(pipe, plane_id), PLANE_OFFSET_Y(y) | PLANE_OFFSET_X(x)); -- cgit From fee076019d0a3634aeea8df55c1f7ae35ca31d18 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 1 Dec 2021 17:25:52 +0200 Subject: drm/i915: Nuke pointless middle men for skl+ plane programming MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There is no real point in having this two stage skl_program_plane*() vs. skl_plane_update*() wrapper stuff. 
All we need to do is determine the correct color plane and we're done. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211201152552.7821-15-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/skl_universal_plane.c | 53 +++++++--------------- 1 file changed, 17 insertions(+), 36 deletions(-) diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index 936f8e00601d..ed6a9bbcf218 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -1064,15 +1064,24 @@ static void icl_plane_csc_load_black(struct intel_plane *plane) intel_de_write_fw(i915, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); } +static int skl_plane_color_plane(const struct intel_plane_state *plane_state) +{ + /* Program the UV plane on planar master */ + if (plane_state->planar_linked_plane && !plane_state->planar_slave) + return 1; + else + return 0; +} + static void -skl_program_plane_noarm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) +skl_plane_update_noarm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; + int color_plane = skl_plane_color_plane(plane_state); u32 stride = skl_plane_stride(plane_state, color_plane); const struct drm_framebuffer *fb = plane_state->hw.fb; int crtc_x = plane_state->uapi.dst.x1; @@ -1125,14 +1134,14 @@ skl_program_plane_noarm(struct intel_plane *plane, } static void -skl_program_plane_arm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state, - int color_plane) +skl_plane_update_arm(struct intel_plane *plane, + const struct intel_crtc_state *crtc_state, + const struct intel_plane_state *plane_state) { struct drm_i915_private *dev_priv = to_i915(plane->base.dev); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; + int color_plane = skl_plane_color_plane(plane_state); u32 x = plane_state->view.color_plane[color_plane].x; u32 y = plane_state->view.color_plane[color_plane].y; u32 plane_color_ctl = 0; @@ -1213,34 +1222,6 @@ skl_plane_async_flip(struct intel_plane *plane, spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } -static void -skl_plane_update_noarm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - int color_plane = 0; - - if (plane_state->planar_linked_plane && !plane_state->planar_slave) - /* Program the UV plane on planar master */ - color_plane = 1; - - skl_program_plane_noarm(plane, crtc_state, plane_state, color_plane); -} - -static void -skl_plane_update_arm(struct intel_plane *plane, - const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) -{ - int color_plane = 0; - - if (plane_state->planar_linked_plane && !plane_state->planar_slave) - /* Program the UV plane on planar master */ - color_plane = 1; - - skl_program_plane_arm(plane, crtc_state, plane_state, color_plane); -} - static bool intel_format_is_p01x(u32 format) { switch (format) { -- cgit From 6e7f90d163afa8fc2efd6ae318e7c20156a5621f Mon Sep 17 00:00:00 2001 From: "J. 
Bruce Fields" Date: Tue, 18 Jan 2022 17:00:16 -0500 Subject: lockd: fix server crash on reboot of client holding lock I thought I was iterating over the array when actually the iteration is over the values contained in the array? Ugh, keep it simple. Symptoms were a null deference in vfs_lock_file() when an NFSv3 client that previously held a lock came back up and sent a notify. Reported-by: Jonathan Woithe Fixes: 7f024fcd5c97 ("Keep read and write fds with each nlm_file") Signed-off-by: J. Bruce Fields Signed-off-by: Chuck Lever --- fs/lockd/svcsubs.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index cb3a7512c33e..54c2e42130ca 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -179,19 +179,20 @@ nlm_delete_file(struct nlm_file *file) static int nlm_unlock_files(struct nlm_file *file) { struct file_lock lock; - struct file *f; lock.fl_type = F_UNLCK; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; - for (f = file->f_file[0]; f <= file->f_file[1]; f++) { - if (f && vfs_lock_file(f, F_SETLK, &lock, NULL) < 0) { - pr_warn("lockd: unlock failure in %s:%d\n", - __FILE__, __LINE__); - return 1; - } - } + if (file->f_file[O_RDONLY] && + vfs_lock_file(file->f_file[O_RDONLY], F_SETLK, &lock, NULL)) + goto out_err; + if (file->f_file[O_WRONLY] && + vfs_lock_file(file->f_file[O_WRONLY], F_SETLK, &lock, NULL)) + goto out_err; return 0; +out_err: + pr_warn("lockd: unlock failure in %s:%d\n", __FILE__, __LINE__); + return 1; } /* -- cgit From 83f2726cd9c3b92589f850cd4935ebbc35eee840 Mon Sep 17 00:00:00 2001 From: mziya Date: Tue, 11 Jan 2022 15:08:35 +0800 Subject: drm/amd/pm: Update smu driver interface for sienna cichlid update smu driver if version to 0x40 V2: Interface version append with sienna_cichlid V3: Aligned with latest driver interface. 
Reviewed-by: Lijo Lazar Signed-off-by: mziya Signed-off-by: Alex Deucher --- .../inc/pmfw_if/smu11_driver_if_sienna_cichlid.h | 24 +++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h index 63b8701fd466..b253be602cc2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h @@ -27,7 +27,7 @@ // *** IMPORTANT *** // SMU TEAM: Always increment the interface version if // any structure is changed in this file -#define SMU11_DRIVER_IF_VERSION 0x3B +#define SMU11_DRIVER_IF_VERSION 0x40 #define PPTABLE_Sienna_Cichlid_SMU_VERSION 7 @@ -172,6 +172,7 @@ typedef enum { #define DPM_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00001000 #define DPM_OVERRIDE_DISABLE_VCN_PG 0x00002000 #define DPM_OVERRIDE_DISABLE_FMAX_VMAX 0x00004000 +#define DPM_OVERRIDE_ENABLE_eGPU_USB_WA 0x00008000 // VR Mapping Bit Defines #define VR_MAPPING_VR_SELECT_MASK 0x01 @@ -263,7 +264,22 @@ typedef enum { #define LED_DISPLAY_ERROR_BIT 2 //RLC Pace Table total number of levels -#define RLC_PACE_TABLE_NUM_LEVELS 16 +#define RLC_PACE_TABLE_NUM_LEVELS 16 +#define SIENNA_CICHLID_UMC_CHANNEL_NUM 16 + +typedef struct { + uint64_t mca_umc_status; + uint64_t mca_umc_addr; + + uint16_t ce_count_lo_chip; + uint16_t ce_count_hi_chip; + + uint32_t eccPadding; +} EccInfo_t; + +typedef struct { + EccInfo_t EccInfo[SIENNA_CICHLID_UMC_CHANNEL_NUM]; +} EccInfoTable_t; typedef enum { DRAM_BIT_WIDTH_DISABLED = 0, @@ -283,6 +299,7 @@ typedef enum { #define MAX_SW_I2C_COMMANDS 24 + typedef enum { I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0 I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1 @@ -1672,7 +1689,8 @@ typedef struct { #define TABLE_OVERDRIVE 8 #define TABLE_I2C_COMMANDS 9 #define TABLE_PACE 10 -#define TABLE_COUNT 11 +#define TABLE_ECCINFO 11 +#define TABLE_COUNT 12 typedef struct { float FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS]; -- cgit From c34242eea16f7d973501267142dd340cad3caeec Mon Sep 17 00:00:00 2001 From: mziya Date: Sat, 15 Jan 2022 15:21:54 +0800 Subject: drm/amdgpu: add new query interface for umc_v8_7 block add smu message query error information interface, function name align with IP version number V2: Removed unused err cnt entry Signed-off-by: mziya Acked-by: Evan Quan Reviewed-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 160 ++++++++++++++++++++++++++++++++-- 1 file changed, 155 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c index ff9e1fac616d..291b37f6db4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c @@ -40,13 +40,161 @@ const uint32_t {9, 0}, {15, 6} }; -static inline uint32_t get_umc_8_reg_offset(struct amdgpu_device *adev, +static inline uint32_t get_umc_v8_7_reg_offset(struct amdgpu_device *adev, uint32_t umc_inst, uint32_t ch_inst) { return adev->umc.channel_offs*ch_inst + UMC_8_INST_DIST*umc_inst; } +static inline uint32_t get_umc_v8_7_channel_index(struct amdgpu_device *adev, + uint32_t umc_inst, + uint32_t ch_inst) +{ + return adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; +} + +static void umc_v8_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev, + uint32_t channel_index, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + struct 
amdgpu_ras *ras = amdgpu_ras_get_context(adev); + /* check for SRAM correctable error + * MCUMC_STATUS is a 64 bit register + */ + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) + *error_count += 1; +} + +static void umc_v8_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev, + uint32_t channel_index, + unsigned long *error_count) +{ + uint64_t mc_umc_status; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + /* check the MCUMC_STATUS */ + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, PCC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, TCC) == 1)) + *error_count += 1; +} + +static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + uint32_t channel_index = 0; + + /* TODO: driver needs to toggle DF Cstate to ensure + * safe access of UMC registers. Will add the protection + */ + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v8_7_reg_offset(adev, + umc_inst, + ch_inst); + channel_index = get_umc_v8_7_channel_index(adev, + umc_inst, + ch_inst); + umc_v8_7_ecc_info_query_correctable_error_count(adev, + channel_index, + &(err_data->ce_count)); + umc_v8_7_ecc_info_querry_uncorrectable_error_count(adev, + channel_index, + &(err_data->ue_count)); + } +} + +static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev, + struct ras_err_data *err_data, + uint32_t umc_reg_offset, + uint32_t ch_inst, + uint32_t umc_inst) +{ + uint64_t mc_umc_status, err_addr, retired_page; + struct eeprom_table_record *err_rec; + uint32_t channel_index; + struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + channel_index = + adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; + + mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + + if (mc_umc_status == 0) + return; + + if (!err_data->err_addr) + return; + + err_rec = &err_data->err_addr[err_data->err_addr_cnt]; + + /* calculate error address if ue/ce error is detected */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && + (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || + REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { + + err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr; + err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); + + /* translate umc channel address to soc pa, 3 parts are included */ + retired_page = ADDR_OF_4KB_BLOCK(err_addr) | + ADDR_OF_256B_BLOCK(channel_index) | + OFFSET_IN_256B_BLOCK(err_addr); + + /* we only save ue error information currently, ce is skipped */ + if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) + == 1) { + err_rec->address = err_addr; + /* page frame address is saved */ + err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + 
err_rec->ts = (uint64_t)ktime_get_real_seconds(); + err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = channel_index; + err_rec->mcumc_id = umc_inst; + + err_data->err_addr_cnt++; + } + } +} + +static void umc_v8_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev, + void *ras_error_status) +{ + struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status; + + uint32_t umc_inst = 0; + uint32_t ch_inst = 0; + uint32_t umc_reg_offset = 0; + + /* TODO: driver needs to toggle DF Cstate to ensure + * safe access of UMC resgisters. Will add the protection + * when firmware interface is ready + */ + LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { + umc_reg_offset = get_umc_v8_7_reg_offset(adev, + umc_inst, + ch_inst); + umc_v8_7_ecc_info_query_error_address(adev, + err_data, + umc_reg_offset, + ch_inst, + umc_inst); + } +} + static void umc_v8_7_clear_error_count_per_channel(struct amdgpu_device *adev, uint32_t umc_reg_offset) { @@ -92,7 +240,7 @@ static void umc_v8_7_clear_error_count(struct amdgpu_device *adev) uint32_t umc_reg_offset = 0; LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_8_reg_offset(adev, + umc_reg_offset = get_umc_v8_7_reg_offset(adev, umc_inst, ch_inst); @@ -178,7 +326,7 @@ static void umc_v8_7_query_ras_error_count(struct amdgpu_device *adev, uint32_t umc_reg_offset = 0; LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_8_reg_offset(adev, + umc_reg_offset = get_umc_v8_7_reg_offset(adev, umc_inst, ch_inst); @@ -268,7 +416,7 @@ static void umc_v8_7_query_ras_error_address(struct amdgpu_device *adev, uint32_t umc_reg_offset = 0; LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_8_reg_offset(adev, + umc_reg_offset = get_umc_v8_7_reg_offset(adev, umc_inst, ch_inst); @@ -316,7 +464,7 @@ static void umc_v8_7_err_cnt_init(struct amdgpu_device *adev) uint32_t umc_reg_offset = 0; LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_8_reg_offset(adev, + umc_reg_offset = get_umc_v8_7_reg_offset(adev, umc_inst, ch_inst); @@ -334,4 +482,6 @@ struct amdgpu_umc_ras umc_v8_7_ras = { .hw_ops = &umc_v8_7_ras_hw_ops, }, .err_cnt_init = umc_v8_7_err_cnt_init, + .ecc_info_query_ras_error_count = umc_v8_7_ecc_info_query_ras_error_count, + .ecc_info_query_ras_error_address = umc_v8_7_ecc_info_query_ras_error_address, }; -- cgit From 3ddd0c90bafd9f2ae1ac9f1e67581537273dfed6 Mon Sep 17 00:00:00 2001 From: mziya Date: Tue, 11 Jan 2022 15:15:13 +0800 Subject: drm/amd/pm: add message smu to get ecc_table support ECC TABLE message, this table include umc ras error count and error address V2: Return after smu version check fail V3: Return -EOPNOTSUPP, if fail to get smc ver. V4: ECCTABLE typo corrected and sentence rephrased. 
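A quick orientation on the version gate introduced below: the in-file comment states that the SMU supports ECCTABLE since firmware 58.70.0, and the corresponding threshold is 0x003a4600, which is consistent with the packed firmware version carrying one byte each for major, minor and patch. The short sketch below illustrates that assumed encoding; pack_smu_version() is a hypothetical helper written only for this note, whereas in the patch smu_cmn_get_smc_version() supplies the packed smu_version that is compared against SUPPORT_ECCTABLE_SMU_VERSION.

/*
 * Illustrative sketch only -- not part of the patch. Assumes the packed SMU
 * firmware version layout (major << 16 | minor << 8 | patch), inferred from
 * the "since version 58.70.0" comment next to SUPPORT_ECCTABLE_SMU_VERSION.
 */
#include <assert.h>
#include <stdint.h>

/* Hypothetical helper, shown only to illustrate the assumed encoding. */
static uint32_t pack_smu_version(uint8_t major, uint8_t minor, uint8_t patch)
{
	return ((uint32_t)major << 16) | ((uint32_t)minor << 8) | patch;
}

int main(void)
{
	/* 58.70.0 -> 0x003a4600, the value of SUPPORT_ECCTABLE_SMU_VERSION. */
	assert(pack_smu_version(58, 70, 0) == 0x003a4600);
	return 0;
}
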
Signed-off-by: mziya Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 69 ++++++++++++++++++++++ drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 2 + 2 files changed, 71 insertions(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 4e37cd8025ed..eb47d490ee9d 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -46,6 +46,7 @@ #include "mp/mp_11_0_sh_mask.h" #include "asic_reg/mp/mp_11_0_sh_mask.h" +#include "amdgpu_ras.h" #include "smu_cmn.h" /* @@ -83,6 +84,12 @@ /* STB FIFO depth is in 64bit units */ #define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8 +/* + * SMU support ECCTABLE since version 58.70.0, + * use this to check whether ECCTABLE feature is supported. + */ +#define SUPPORT_ECCTABLE_SMU_VERSION 0x003a4600 + static int get_table_size(struct smu_context *smu) { if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13)) @@ -225,6 +232,7 @@ static struct cmn2asic_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT] = { TAB_MAP(OVERDRIVE), TAB_MAP(I2C_COMMANDS), TAB_MAP(PACE), + TAB_MAP(ECCINFO), }; static struct cmn2asic_mapping sienna_cichlid_pwr_src_map[SMU_POWER_SOURCE_COUNT] = { @@ -466,6 +474,8 @@ static int sienna_cichlid_tables_init(struct smu_context *smu) SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF, sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); + SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t), + PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM); smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL); if (!smu_table->metrics_table) @@ -481,6 +491,10 @@ static int sienna_cichlid_tables_init(struct smu_context *smu) if (!smu_table->watermarks_table) goto err2_out; + smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL); + if (!smu_table->ecc_table) + return -ENOMEM; + return 0; err2_out: @@ -3672,6 +3686,60 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, return sizeof(struct gpu_metrics_v1_3); } +static int sienna_cichlid_check_ecc_table_support(struct smu_context *smu) +{ + uint32_t if_version = 0xff, smu_version = 0xff; + int ret = 0; + + ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); + if (ret) + return -EOPNOTSUPP; + + if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION) + ret = -EOPNOTSUPP; + + return ret; +} + +static ssize_t sienna_cichlid_get_ecc_info(struct smu_context *smu, + void *table) +{ + struct smu_table_context *smu_table = &smu->smu_table; + EccInfoTable_t *ecc_table = NULL; + struct ecc_info_per_ch *ecc_info_per_channel = NULL; + int i, ret = 0; + struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table; + + ret = sienna_cichlid_check_ecc_table_support(smu); + if (ret) + return ret; + + ret = smu_cmn_update_table(smu, + SMU_TABLE_ECCINFO, + 0, + smu_table->ecc_table, + false); + if (ret) { + dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n"); + return ret; + } + + ecc_table = (EccInfoTable_t *)smu_table->ecc_table; + + for (i = 0; i < SIENNA_CICHLID_UMC_CHANNEL_NUM; i++) { + ecc_info_per_channel = &(eccinfo->ecc[i]); + ecc_info_per_channel->ce_count_lo_chip = + ecc_table->EccInfo[i].ce_count_lo_chip; + ecc_info_per_channel->ce_count_hi_chip = + ecc_table->EccInfo[i].ce_count_hi_chip; + ecc_info_per_channel->mca_umc_status = + ecc_table->EccInfo[i].mca_umc_status; + ecc_info_per_channel->mca_umc_addr 
= + ecc_table->EccInfo[i].mca_umc_addr; + } + + return ret; +} static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu) { struct smu_table_context *table_context = &smu->smu_table; @@ -3923,6 +3991,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = { .gpo_control = sienna_cichlid_gpo_control, .set_mp1_state = sienna_cichlid_set_mp1_state, .stb_collect_info = sienna_cichlid_stb_get_data_direct, + .get_ecc_info = sienna_cichlid_get_ecc_info, }; void sienna_cichlid_set_ppt_funcs(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 9acf2c045a97..7029e5deb6b3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -473,8 +473,10 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu) kfree(smu_table->hardcode_pptable); smu_table->hardcode_pptable = NULL; + kfree(smu_table->ecc_table); kfree(smu_table->metrics_table); kfree(smu_table->watermarks_table); + smu_table->ecc_table = NULL; smu_table->metrics_table = NULL; smu_table->watermarks_table = NULL; smu_table->metrics_time = 0; -- cgit From 79c0462159a1fa3810ae1869a5fc9fd7782b6b70 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Tue, 11 Jan 2022 14:14:50 +0800 Subject: drm/amdgpu: handle denied inject error into critical regions v2 Changed from v1: remove unused brace Signed-off-by: Stanley.Yang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 9 ++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 2 +- drivers/gpu/drm/amd/amdgpu/ta_ras_if.h | 3 ++- 3 files changed, 11 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index c984b5a34679..5c9b67ab168f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -1335,6 +1335,11 @@ static void psp_ras_ta_check_status(struct psp_context *psp) break; case TA_RAS_STATUS__SUCCESS: break; + case TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED: + if (ras_cmd->cmd_id == TA_RAS_COMMAND__TRIGGER_ERROR) + dev_warn(psp->adev->dev, + "RAS WARNING: Inject error to critical region is not allowed\n"); + break; default: dev_warn(psp->adev->dev, "RAS WARNING: ras status = 0x%X\n", ras_cmd->ras_status); @@ -1547,7 +1552,9 @@ int psp_ras_trigger_error(struct psp_context *psp, if (amdgpu_ras_intr_triggered()) return 0; - if (ras_cmd->ras_status) + if (ras_cmd->ras_status == TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED) + return -EACCES; + else if (ras_cmd->ras_status) return -EINVAL; return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 0bb6b5354802..3538032e40d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -455,7 +455,7 @@ static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, } if (ret) - return -EINVAL; + return ret; return size; } diff --git a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h index 5093826a43d1..509d8a1945eb 100644 --- a/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h +++ b/drivers/gpu/drm/amd/amdgpu/ta_ras_if.h @@ -64,7 +64,8 @@ enum ta_ras_status { TA_RAS_STATUS__ERROR_PCS_STATE_ERROR = 0xA016, TA_RAS_STATUS__ERROR_PCS_STATE_HANG = 0xA017, TA_RAS_STATUS__ERROR_PCS_STATE_UNKNOWN = 0xA018, - TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019 + TA_RAS_STATUS__ERROR_UNSUPPORTED_ERROR_INJ = 0xA019, + 
TA_RAS_STATUS__TEE_ERROR_ACCESS_DENIED = 0xA01A }; enum ta_ras_block { -- cgit From e475986f182156496fa2991012ca51956fe90bf7 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Fri, 14 Jan 2022 13:28:16 +0800 Subject: drm/amdgpu: drop redundant check of ip discovery_bin Early check in amdgpu_discovery_reg_base_init promises this. Signed-off-by: Guchun Chen Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 5 ----- 1 file changed, 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index be45650250fa..b0bf69611fbd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1195,11 +1195,6 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) return -EINVAL; amdgpu_discovery_harvest_ip(adev); - - if (!adev->mman.discovery_bin) { - DRM_ERROR("ip discovery uninitialized\n"); - return -EINVAL; - } break; } -- cgit From 03f6fb84bd0e98a7b929aef5f308b8e3f2f24a0d Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Fri, 14 Jan 2022 13:49:13 +0800 Subject: drm/amdgpu: apply vcn harvest quirk This is a following patch to apply the workaround only on those boards with a bad harvest table in ip discovery. Signed-off-by: Guchun Chen Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 32 ++++++++++++++++++++++----- 1 file changed, 27 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index b0bf69611fbd..e6a26b554254 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -243,6 +243,30 @@ static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary) return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE); } +static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev) +{ + /* + * So far, apply this quirk only on those Navy Flounder boards which + * have a bad harvest table of VCN config. + */ + if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && + (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) { + switch (adev->pdev->revision) { + case 0xC1: + case 0xC2: + case 0xC3: + case 0xC5: + case 0xC7: + case 0xCF: + case 0xDF: + adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + break; + default: + break; + } + } +} + static int amdgpu_discovery_init(struct amdgpu_device *adev) { struct table_info *info; @@ -548,11 +572,9 @@ void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev) break; } } - /* some IP discovery tables on Navy Flounder don't have this set correctly */ - if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) && - (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2)) && - (adev->pdev->revision != 0xFF)) - adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1; + + amdgpu_discovery_harvest_config_quirk(adev); + if (vcn_harvest_count == adev->vcn.num_vcn_inst) { adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK; adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK; -- cgit From b6efdb02d23ef615464cd0077c211b40a1faca26 Mon Sep 17 00:00:00 2001 From: yipechai Date: Fri, 14 Jan 2022 10:24:30 +0800 Subject: drm/amdgpu: Fix the code style warnings in amdgpu_ras MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the code style warnings in amdgpu_ras: 1. ERROR: space required before the open parenthesis '('. 2. 
WARNING: line length of xxx exceeds 100 columns. 3. ERROR: "foo* bar" should be "foo *bar". 4. WARNING: unnecessary whitespace before a quoted newline. 5. WARNING: space prohibited before semicolon. 6. WARNING: suspect code indent for conditional statements. 7. WARNING: braces {} are not necessary for single statement blocks. Signed-off-by: yipechai Reviewed-by: Tao Zhou Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 41 +++++++++++++++++++-------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h | 10 ++++---- 2 files changed, 30 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 3538032e40d5..7a1d2bac698e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -872,7 +872,7 @@ static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj, enum amdgpu_ras_block block) { - if(!block_obj) + if (!block_obj) return -EINVAL; if (block_obj->block == block) @@ -881,7 +881,7 @@ static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_ return -EINVAL; } -static struct amdgpu_ras_block_object* amdgpu_ras_get_ras_block(struct amdgpu_device *adev, +static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev, enum amdgpu_ras_block block, uint32_t sub_block_index) { struct amdgpu_ras_block_object *obj, *tmp; @@ -941,7 +941,7 @@ static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_d int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) { - struct amdgpu_ras_block_object* block_obj = NULL; + struct amdgpu_ras_block_object *block_obj = NULL; struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head); struct ras_err_data err_data = {0, 0, 0, NULL}; @@ -953,7 +953,7 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, } else { block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0); if (!block_obj || !block_obj->hw_ops) { - dev_info(adev->dev, "%s doesn't config ras function \n", + dev_info(adev->dev, "%s doesn't config ras function.\n", get_ras_block_str(&info->head)); return -EINVAL; } @@ -1023,13 +1023,14 @@ int amdgpu_ras_query_error_status(struct amdgpu_device *adev, int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, enum amdgpu_ras_block block) { - struct amdgpu_ras_block_object* block_obj = amdgpu_ras_get_ras_block(adev, block, 0); + struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0); if (!amdgpu_ras_is_supported(adev, block)) return -EINVAL; if (!block_obj || !block_obj->hw_ops) { - dev_info(adev->dev, "%s doesn't config ras function \n", ras_block_str(block)); + dev_info(adev->dev, "%s doesn't config ras function.\n", + ras_block_str(block)); return -EINVAL; } @@ -1066,7 +1067,8 @@ int amdgpu_ras_error_inject(struct amdgpu_device *adev, return -EINVAL; if (!block_obj || !block_obj->hw_ops) { - dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); + dev_info(adev->dev, "%s doesn't config ras function.\n", + get_ras_block_str(&info->head)); return -EINVAL; } @@ -1702,19 +1704,25 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, struct ras_query_if *info) { - struct amdgpu_ras_block_object* block_obj = 
amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); + struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, + info->head.block, + info->head.sub_block_index); /* * Only two block need to query read/write * RspStatus at current state */ if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) && (info->head.block != AMDGPU_RAS_BLOCK__MMHUB)) - return ; + return; + + block_obj = amdgpu_ras_get_ras_block(adev, + info->head.block, + info->head.sub_block_index); - block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, info->head.sub_block_index); if (!block_obj || !block_obj->hw_ops) { - dev_info(adev->dev, "%s doesn't config ras function \n", get_ras_block_str(&info->head)); - return ; + dev_info(adev->dev, "%s doesn't config ras function.\n", + get_ras_block_str(&info->head)); + return; } if (block_obj->hw_ops->query_ras_error_status) @@ -2715,7 +2723,7 @@ static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) } #endif -struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev) +struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev) { if (!adev) return NULL; @@ -2723,7 +2731,7 @@ struct amdgpu_ras* amdgpu_ras_get_context(struct amdgpu_device *adev) return adev->psp.ras_context.ras; } -int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras* ras_con) +int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con) { if (!adev) return -EINVAL; @@ -2755,7 +2763,7 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) /* Register each ip ras block into amdgpu ras */ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, - struct amdgpu_ras_block_object* ras_block_obj) + struct amdgpu_ras_block_object *ras_block_obj) { struct amdgpu_ras_block_object *obj, *tmp; if (!adev || !ras_block_obj) @@ -2766,9 +2774,8 @@ int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, /* If the ras object is in ras_list, don't add it again */ list_for_each_entry_safe(obj, tmp, &adev->ras_list, node) { - if (obj == ras_block_obj) { + if (obj == ras_block_obj) return 0; - } } INIT_LIST_HEAD(&ras_block_obj->node); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h index 7a4d82378205..a51a281bd91a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.h @@ -496,7 +496,8 @@ struct amdgpu_ras_block_object { /* ras block link */ struct list_head node; - int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj, enum amdgpu_ras_block block, uint32_t sub_block_index); + int (*ras_block_match)(struct amdgpu_ras_block_object *block_obj, + enum amdgpu_ras_block block, uint32_t sub_block_index); int (*ras_late_init)(struct amdgpu_device *adev, void *ras_info); void (*ras_fini)(struct amdgpu_device *adev); const struct amdgpu_ras_block_hw_ops *hw_ops; @@ -504,7 +505,7 @@ struct amdgpu_ras_block_object { struct amdgpu_ras_block_hw_ops { int (*ras_error_inject)(struct amdgpu_device *adev, void *inject_if); - void (*query_ras_error_count)(struct amdgpu_device *adev,void *ras_error_status); + void (*query_ras_error_count)(struct amdgpu_device *adev, void *ras_error_status); void (*query_ras_error_status)(struct amdgpu_device *adev); void (*query_ras_error_address)(struct amdgpu_device *adev, void *ras_error_status); void (*reset_ras_error_count)(struct amdgpu_device *adev); @@ -678,7 +679,8 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev); struct amdgpu_ras* amdgpu_ras_get_context(struct 
amdgpu_device *adev); -int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras* ras_con); +int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con); -int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, struct amdgpu_ras_block_object* ras_block_obj); +int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, + struct amdgpu_ras_block_object *ras_block_obj); #endif -- cgit From 4f64ccf4f27c89089f3206446e2158833bda4795 Mon Sep 17 00:00:00 2001 From: yipechai Date: Fri, 14 Jan 2022 10:30:27 +0800 Subject: drm/amdgpu: Fix the code style warnings in gfx MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the code style warnings in gfx: 1. WARNING: suspect code indent for conditional statements. 2. ERROR: spaces required around that '=' (ctx:WxV). Signed-off-by: yipechai Reviewed-by: Tao Zhou Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index d36a6bc62560..e12f9f5c3beb 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -2314,11 +2314,11 @@ static int gfx_v9_0_gpu_early_init(struct amdgpu_device *adev) /* If not define special ras_late_init function, use gfx default ras_late_init */ if (!adev->gfx.ras->ras_block.ras_late_init) - adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init; + adev->gfx.ras->ras_block.ras_late_init = amdgpu_gfx_ras_late_init; /* If not define special ras_fini function, use gfx default ras_fini */ if (!adev->gfx.ras->ras_block.ras_fini) - adev->gfx.ras->ras_block.ras_fini = amdgpu_gfx_ras_fini; + adev->gfx.ras->ras_block.ras_fini = amdgpu_gfx_ras_fini; } adev->gfx.config.gb_addr_config = gb_addr_config; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c index 7ec6243e015e..7653ebd0e67b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_2.c @@ -1930,7 +1930,7 @@ static void gfx_v9_4_2_reset_sq_timeout_status(struct amdgpu_device *adev) mutex_unlock(&adev->grbm_idx_mutex); } -struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops ={ +struct amdgpu_ras_block_hw_ops gfx_v9_4_2_ras_ops = { .ras_error_inject = &gfx_v9_4_2_ras_error_inject, .query_ras_error_count = &gfx_v9_4_2_query_ras_error_count, .reset_ras_error_count = &gfx_v9_4_2_reset_ras_error_count, -- cgit From d622c094f8fe7a77fda613964ffdd9a248d2550a Mon Sep 17 00:00:00 2001 From: yipechai Date: Fri, 14 Jan 2022 10:31:49 +0800 Subject: drm/amdgpu: Fix the code style warnings in gmc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the code style warnings in gmc: ERROR: space required after that ',' (ctx:VxV). 
Signed-off-by: yipechai Reviewed-by: Tao Zhou Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 5ef4ad28ab26..4f8d356f8432 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -672,7 +672,7 @@ static void gmc_v10_0_set_umc_funcs(struct amdgpu_device *adev) if (adev->umc.ras) { amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); - strcpy(adev->umc.ras->ras_block.name,"umc"); + strcpy(adev->umc.ras->ras_block.name, "umc"); adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC; /* If don't define special ras_late_init function, use default ras_late_init */ diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 3965aae435f8..c76ffd1a70cd 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1231,7 +1231,7 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) if (adev->umc.ras) { amdgpu_ras_register_ras_block(adev, &adev->umc.ras->ras_block); - strcpy(adev->umc.ras->ras_block.name,"umc"); + strcpy(adev->umc.ras->ras_block.name, "umc"); adev->umc.ras->ras_block.block = AMDGPU_RAS_BLOCK__UMC; /* If don't define special ras_late_init function, use default ras_late_init */ -- cgit From 8697a19ee955e32fe298b00feb0c61fc75cb5261 Mon Sep 17 00:00:00 2001 From: yipechai Date: Fri, 14 Jan 2022 10:33:09 +0800 Subject: drm/amdgpu: Fix the code style warnings in sdma MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Fix the code style warnings in sdma: 1. WARNING: Missing a blank line after declarations. 2. ERROR: that open brace { should be on the previous line. 3. WARNING: unnecessary whitespace before a quoted newline. 4. ERROR: space required after that ',' (ctx:VxV). 
Signed-off-by: yipechai Reviewed-by: Tao Zhou Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c | 6 +++--- 2 files changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c index 3c1483dc113e..06a7ceda4c87 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_0.c @@ -2766,10 +2766,10 @@ static int sdma_v4_0_query_ras_error_count_by_instance(struct amdgpu_device *ade static void sdma_v4_0_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { int i = 0; + for (i = 0; i < adev->sdma.num_instances; i++) { - if (sdma_v4_0_query_ras_error_count_by_instance(adev, i, ras_error_status)) - { - dev_err(adev->dev, "Query ras error count failed in SDMA%d \n", i); + if (sdma_v4_0_query_ras_error_count_by_instance(adev, i, ras_error_status)) { + dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i); return; } } @@ -2814,7 +2814,7 @@ static void sdma_v4_0_set_ras_funcs(struct amdgpu_device *adev) if (adev->sdma.ras) { amdgpu_ras_register_ras_block(adev, &adev->sdma.ras->ras_block); - strcpy(adev->sdma.ras->ras_block.name,"sdma"); + strcpy(adev->sdma.ras->ras_block.name, "sdma"); adev->sdma.ras->ras_block.block = AMDGPU_RAS_BLOCK__SDMA; /* If don't define special ras_late_init function, use default ras_late_init */ diff --git a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c index 5c1ba1116e5c..6f9895cdddb1 100644 --- a/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c +++ b/drivers/gpu/drm/amd/amdgpu/sdma_v4_4.c @@ -248,10 +248,10 @@ static void sdma_v4_4_reset_ras_error_count(struct amdgpu_device *adev) static void sdma_v4_4_query_ras_error_count(struct amdgpu_device *adev, void *ras_error_status) { int i = 0; + for (i = 0; i < adev->sdma.num_instances; i++) { - if (sdma_v4_4_query_ras_error_count_by_instance(adev, i, ras_error_status)) - { - dev_err(adev->dev, "Query ras error count failed in SDMA%d \n", i); + if (sdma_v4_4_query_ras_error_count_by_instance(adev, i, ras_error_status)) { + dev_err(adev->dev, "Query ras error count failed in SDMA%d\n", i); return; } } -- cgit From 71b6c4a277dbb2594c260ccedcafaef5154b0da0 Mon Sep 17 00:00:00 2001 From: yipechai Date: Fri, 14 Jan 2022 10:40:15 +0800 Subject: drm/amdgpu: Fix the code style warnings in hdp xgmi mca and umc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit drm/amdgpu: Fix the code style warnings in hdp xgmi mca and umc: 1. WARNING: missing space after struct definition. 2. WARNING: please, no space before tabs. 3. WARNING: line length of xxx exceeds 100 columns. 4. ERROR: "foo* bar" should be "foo *bar". 5. ERROR: space required before the open parenthesis '('. 6. ERROR: space prohibited after that open parenthesis '('. 
Signed-off-by: yipechai Reviewed-by: Tao Zhou Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c | 3 ++- drivers/gpu/drm/amd/amdgpu/mca_v3_0.c | 7 ++++--- 3 files changed, 8 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h index c94a4b3c8d6d..4af2c2a322e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hdp.h @@ -24,7 +24,7 @@ #define __AMDGPU_HDP_H__ #include "amdgpu_ras.h" -struct amdgpu_hdp_ras{ +struct amdgpu_hdp_ras { struct amdgpu_ras_block_object ras_block; }; @@ -40,7 +40,7 @@ struct amdgpu_hdp_funcs { struct amdgpu_hdp { struct ras_common_if *ras_if; const struct amdgpu_hdp_funcs *funcs; - struct amdgpu_hdp_ras *ras; + struct amdgpu_hdp_ras *ras; }; int amdgpu_hdp_ras_late_init(struct amdgpu_device *adev, void *ras_info); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c index 478457637d29..5929d6f528c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c @@ -950,7 +950,8 @@ static void amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev, static int amdgpu_ras_error_inject_xgmi(struct amdgpu_device *adev, void *inject_if) { int ret = 0; - struct ta_ras_trigger_error_input *block_info = (struct ta_ras_trigger_error_input *)inject_if; + struct ta_ras_trigger_error_input *block_info = + (struct ta_ras_trigger_error_input *)inject_if; if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW)) dev_warn(adev->dev, "Failed to disallow df cstate"); diff --git a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c index f51092041edc..68565262af9c 100644 --- a/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mca_v3_0.c @@ -47,12 +47,13 @@ static void mca_v3_0_mp0_ras_fini(struct amdgpu_device *adev) amdgpu_mca_ras_fini(adev, &adev->mca.mp0); } -static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object* block_obj, enum amdgpu_ras_block block, uint32_t sub_block_index) +static int mca_v3_0_ras_block_match(struct amdgpu_ras_block_object *block_obj, + enum amdgpu_ras_block block, uint32_t sub_block_index) { - if(!block_obj) + if (!block_obj) return -EINVAL; - if( (block_obj->block == block) && + if ((block_obj->block == block) && (block_obj->sub_block_index == sub_block_index)) { return 0; } -- cgit From 22c16d251a79c3156d17627810557878e600dc6a Mon Sep 17 00:00:00 2001 From: Jingwen Chen Date: Thu, 13 Jan 2022 19:06:59 +0800 Subject: drm/amd/amdgpu: fixing read wrong pf2vf data in SRIOV [Why] This fixes 892deb48269c ("drm/amdgpu: Separate vf2pf work item init from virt data exchange"). we should read pf2vf data based at mman.fw_vram_usage_va after gmc sw_init. commit 892deb48269c breaks this logic. [How] calling amdgpu_virt_exchange_data in amdgpu_virt_init_data_exchange to set the right base in the right sequence. 
v2: call amdgpu_virt_init_data_exchange after gmc sw_init to make data exchange workqueue run v3: clean up the code logic v4: add some comment and make the code more readable Fixes: 892deb48269c ("drm/amdgpu: Separate vf2pf work item init from virt data exchange") Signed-off-by: Jingwen Chen Reviewed-by: Horace Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 20 +++++++------------- 2 files changed, 8 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index d363ae2da5c5..f0c07523b04d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2354,7 +2354,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) } if (amdgpu_sriov_vf(adev)) - amdgpu_virt_exchange_data(adev); + amdgpu_virt_init_data_exchange(adev); r = amdgpu_ib_pool_init(adev); if (r) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 894444ab0032..07bc0f504713 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -625,20 +625,20 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; - if (adev->bios != NULL) { - adev->virt.vf2pf_update_interval_ms = 2000; + if (adev->mman.fw_vram_usage_va != NULL) { + /* go through this logic in ip_init and reset to init workqueue*/ + amdgpu_virt_exchange_data(adev); + INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); + schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); + } else if (adev->bios != NULL) { + /* got through this logic in early init stage to get necessary flags, e.g. rlcg_acc related*/ adev->virt.fw_reserve.p_pf2vf = (struct amd_sriov_msg_pf2vf_info_header *) (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); amdgpu_virt_read_pf2vf_data(adev); } - - if (adev->virt.vf2pf_update_interval_ms != 0) { - INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item); - schedule_delayed_work(&(adev->virt.vf2pf_work), msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms)); - } } @@ -674,12 +674,6 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) if (adev->virt.ras_init_done) amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); } - } else if (adev->bios != NULL) { - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); - - amdgpu_virt_read_pf2vf_data(adev); } } -- cgit From f3527a6483fbccbf569f91d9ee9c561b14d86a9f Mon Sep 17 00:00:00 2001 From: Marina Nikolic Date: Fri, 14 Jan 2022 00:46:33 +0800 Subject: drm/amd/pm: Enable sysfs required by rocm-smi tool for One VF mode Enable power level, power limit and fan speed information retrieval in one VF mode. This is required so that tool ROCM-SMI can provide this information to users. 
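For reference, a minimal userspace sketch of how a monitoring tool could read one of the attributes exposed here; the card index, hwmon instance and exact sysfs path below are assumptions that vary per system, so treat this as illustrative only, not part of the change:

#include <stdio.h>

int main(void)
{
	char buf[128];
	/* Hypothetical path: the hwmon instance number differs between systems. */
	const char *path = "/sys/class/drm/card0/device/hwmon/hwmon0/power1_cap";
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("power limit (microwatts): %s", buf); /* sysfs value ends with '\n' */
	fclose(f);
	return 0;
}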
Signed-off-by: Marina Nikolic Acked-by: Evan Quan Reviewed-by: Kevin Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_pm.c | 17 ++--------------- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 +- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 49a9c6375343..837a31a46596 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -1903,8 +1903,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = { AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC), - AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), + AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC), AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF), @@ -3152,19 +3152,6 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj, if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) return 0; - /* there is no fan under pp one vf mode */ - if (amdgpu_sriov_is_pp_one_vf(adev) && - (attr == &sensor_dev_attr_pwm1.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || - attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || - attr == &sensor_dev_attr_fan1_input.dev_attr.attr || - attr == &sensor_dev_attr_fan1_min.dev_attr.attr || - attr == &sensor_dev_attr_fan1_max.dev_attr.attr || - attr == &sensor_dev_attr_fan1_target.dev_attr.attr || - attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) - return 0; - /* Skip fan attributes if fan is not present */ if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index eb47d490ee9d..c29353e05fd2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -141,7 +141,7 @@ static struct cmn2asic_msg_mapping sienna_cichlid_message_map[SMU_MSG_MAX_COUNT] MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1), MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0), MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0), - MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0), + MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 1), MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1), MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0), MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 0), -- cgit From 47f29ac1cbae6e18329f0066f7c8d5d2346a5bce Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 17 Jan 2022 10:31:26 +0100 Subject: drm/radeon: fix error handling in radeon_driver_open_kms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The return value was never initialized so the cleanup code executed when it isn't even necessary. Just add proper error handling. 
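For illustration, a minimal sketch of the error-handling shape adopted here (simplified, not the radeon code itself): the return value is assigned on every failure path, each label only undoes what has already succeeded, and the success path returns 0 explicitly instead of relying on a shared exit check.

#include <errno.h>
#include <stdlib.h>

struct ctx {
	void *a;
	void *b;
};

static int ctx_open(struct ctx *c)
{
	int r;

	c->a = malloc(32);
	if (!c->a) {
		r = -ENOMEM;
		goto err_out;
	}

	c->b = malloc(64);
	if (!c->b) {
		r = -ENOMEM;
		goto err_free_a;
	}

	return 0;	/* success: no cleanup, no shared exit label */

err_free_a:
	free(c->a);
err_out:
	return r;
}

int main(void)
{
	struct ctx c = { NULL, NULL };

	if (ctx_open(&c))
		return 1;
	free(c.b);
	free(c.a);
	return 0;
}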
Fixes: ab50cb9df889 ("drm/radeon/radeon_kms: Fix a NULL pointer dereference in radeon_driver_open_kms()") Signed-off-by: Christian König Tested-by: Jan Stancek Tested-by: Borislav Petkov Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_kms.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c index e2488559cc9f..11ad210919c8 100644 --- a/drivers/gpu/drm/radeon/radeon_kms.c +++ b/drivers/gpu/drm/radeon/radeon_kms.c @@ -666,18 +666,18 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL); if (unlikely(!fpriv)) { r = -ENOMEM; - goto out_suspend; + goto err_suspend; } if (rdev->accel_working) { vm = &fpriv->vm; r = radeon_vm_init(rdev, vm); if (r) - goto out_fpriv; + goto err_fpriv; r = radeon_bo_reserve(rdev->ring_tmp_bo.bo, false); if (r) - goto out_vm_fini; + goto err_vm_fini; /* map the ib pool buffer read only into * virtual address space */ @@ -685,7 +685,7 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) rdev->ring_tmp_bo.bo); if (!vm->ib_bo_va) { r = -ENOMEM; - goto out_vm_fini; + goto err_vm_fini; } r = radeon_vm_bo_set_addr(rdev, vm->ib_bo_va, @@ -693,19 +693,21 @@ int radeon_driver_open_kms(struct drm_device *dev, struct drm_file *file_priv) RADEON_VM_PAGE_READABLE | RADEON_VM_PAGE_SNOOPED); if (r) - goto out_vm_fini; + goto err_vm_fini; } file_priv->driver_priv = fpriv; } - if (!r) - goto out_suspend; + pm_runtime_mark_last_busy(dev->dev); + pm_runtime_put_autosuspend(dev->dev); + return 0; -out_vm_fini: +err_vm_fini: radeon_vm_fini(rdev, vm); -out_fpriv: +err_fpriv: kfree(fpriv); -out_suspend: + +err_suspend: pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return r; -- cgit From 7aba117ac9e01dc58afe29490f50025add9d388b Mon Sep 17 00:00:00 2001 From: Jarif Aftab Date: Tue, 16 Nov 2021 18:24:06 -0500 Subject: drm/amd/display: Disabled seamless boots on DP and renamed power_down_display_on_boot [WHY] - We only ever want seamless boots on eDPs - The naming and logic did not match the context [HOW] - Removed unnecessary if statements - Renamed power_down_display_on_boot to seamless_boot_edp_requested and swapped the logic Reviewed-by: Martin Leung Acked-by: Rodrigo Siqueira Signed-off-by: Jarif Aftab Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 4 ++-- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 12 ------------ drivers/gpu/drm/amd/display/dc/dc.h | 2 +- drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 5 ++--- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 7 +++---- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c | 5 ++--- 6 files changed, 10 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 6692c8f75f23..8a46b8430f1e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1496,10 +1496,10 @@ static int amdgpu_dm_init(struct amdgpu_device *adev) init_data.flags.allow_lttpr_non_transparent_mode.bits.DP2_0 = true; #endif - init_data.flags.power_down_display_on_boot = true; + init_data.flags.seamless_boot_edp_requested = false; if (check_seamless_boot_capability(adev)) { - init_data.flags.power_down_display_on_boot = false; + 
init_data.flags.seamless_boot_edp_requested = true; init_data.flags.allow_seamless_boot_optimization = true; DRM_INFO("Seamless boot condition check passed\n"); } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index b5e570d33ca9..0c64dea4fdd8 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1019,18 +1019,6 @@ static bool dc_link_detect_helper(struct dc_link *link, link->type != dc_connection_mst_branch) dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); - - // For seamless boot, to skip verify link cap, we read UEFI settings and set them as verified. - if (reason == DETECT_REASON_BOOT && - !dc_ctx->dc->config.power_down_display_on_boot && - link->link_status.link_active) - perform_dp_seamless_boot = true; - - if (perform_dp_seamless_boot) { - read_current_link_settings_on_detect(link); - link->verified_link_cap = link->reported_link_cap; - } - break; } diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 288e7b01f561..263f7edd42a4 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -322,7 +322,7 @@ struct dc_config { bool fbc_support; bool disable_fractional_pwm; bool allow_seamless_boot_optimization; - bool power_down_display_on_boot; + bool seamless_boot_edp_requested; bool edp_not_connected; bool edp_no_power_sequencing; bool force_enum_edp; diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c index 530a72e3eefe..915eecb40788 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c @@ -1505,8 +1505,7 @@ void dcn10_init_hw(struct dc *dc) dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) - dc_link_blank_all_dp_displays(dc); + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -1514,7 +1513,7 @@ void dcn10_init_hw(struct dc *dc) * Otherwise, if taking control is not possible, we need to power * everything down. */ - if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { + if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { if (!is_optimized_init_done) { hws->funcs.init_pipes(dc, dc->current_state); if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index 1db1ca19411d..ed0a0e5fd805 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -545,8 +545,7 @@ void dcn30_init_hw(struct dc *dc) hws->funcs.dsc_pg_control(hws, res_pool->dscs[i]->inst, false); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) - dc_link_blank_all_dp_displays(dc); + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -554,7 +553,7 @@ void dcn30_init_hw(struct dc *dc) * Otherwise, if taking control is not possible, we need to power * everything down. 
*/ - if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { + if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { hws->funcs.init_pipes(dc, dc->current_state); if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, @@ -566,7 +565,7 @@ void dcn30_init_hw(struct dc *dc) * To avoid this, power down hardware on boot * if DIG is turned on and seamless boot not enabled */ - if (dc->config.power_down_display_on_boot) { + if (!dc->config.seamless_boot_edp_requested) { struct dc_link *edp_links[MAX_NUM_EDP]; struct dc_link *edp_link = NULL; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c index 4206ce5bf9a9..b2cfd277b913 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hwseq.c @@ -197,8 +197,7 @@ void dcn31_init_hw(struct dc *dc) dmub_enable_outbox_notification(dc); /* we want to turn off all dp displays before doing detection */ - if (dc->config.power_down_display_on_boot) - dc_link_blank_all_dp_displays(dc); + dc_link_blank_all_dp_displays(dc); /* If taking control over from VBIOS, we may want to optimize our first * mode set, so we need to skip powering down pipes until we know which @@ -206,7 +205,7 @@ void dcn31_init_hw(struct dc *dc) * Otherwise, if taking control is not possible, we need to power * everything down. */ - if (dcb->funcs->is_accelerated_mode(dcb) || dc->config.power_down_display_on_boot) { + if (dcb->funcs->is_accelerated_mode(dcb) || !dc->config.seamless_boot_edp_requested) { hws->funcs.init_pipes(dc, dc->current_state); if (dc->res_pool->hubbub->funcs->allow_self_refresh_control) dc->res_pool->hubbub->funcs->allow_self_refresh_control(dc->res_pool->hubbub, -- cgit From fd9048dd4425a9dc252153c8b72369f1969a35b3 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Fri, 10 Dec 2021 12:28:57 -0500 Subject: drm/amd/display: adjust bit comparison to be more type safe Might potentially have truncation problem with the implicit casting Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Eric Yang Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c index fa0569174aec..82c651535628 100644 --- a/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c +++ b/drivers/gpu/drm/amd/display/dmub/src/dmub_dcn31.c @@ -441,7 +441,7 @@ void dmub_dcn31_get_diagnostic_data(struct dmub_srv *dmub, struct dmub_diagnosti bool dmub_dcn31_should_detect(struct dmub_srv *dmub) { uint32_t fw_boot_status = REG_READ(DMCUB_SCRATCH0); - bool should_detect = fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED; + bool should_detect = (fw_boot_status & DMUB_FW_BOOT_STATUS_BIT_DETECTION_REQUIRED) != 0; return should_detect; } -- cgit From b4c55e525461a9a091421e952d1e052127d558a8 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Wed, 15 Dec 2021 17:09:05 -0500 Subject: drm/amd/display: support new PMFW interface to disable Z9 only [Why] Need to disable Z9 on configurations that only support Z10 [How] Support new PMFW interface to disable Z9 Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Eric Yang Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- 
.../gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c | 6 +++--- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 15 ++++++++++----- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h | 2 +- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 8 +++++++- 5 files changed, 22 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c index 4162ce40089b..66bd0261ead6 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_clk_mgr.c @@ -139,9 +139,9 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, * also if safe to lower is false, we just go in the higher state */ if (safe_to_lower) { - if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_ALLOW && + if (new_clocks->zstate_support != DCN_ZSTATE_SUPPORT_DISALLOW && new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { - dcn31_smu_set_Z9_support(clk_mgr, true); + dcn31_smu_set_zstate_support(clk_mgr, new_clocks->zstate_support); dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, true); clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; } @@ -167,7 +167,7 @@ void dcn31_update_clocks(struct clk_mgr *clk_mgr_base, } else { if (new_clocks->zstate_support == DCN_ZSTATE_SUPPORT_DISALLOW && new_clocks->zstate_support != clk_mgr_base->clks.zstate_support) { - dcn31_smu_set_Z9_support(clk_mgr, false); + dcn31_smu_set_zstate_support(clk_mgr, DCN_ZSTATE_SUPPORT_DISALLOW); dm_helpers_enable_periodic_detection(clk_mgr_base->ctx, false); clk_mgr_base->clks.zstate_support = new_clocks->zstate_support; } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index a1011f3273f3..1c0415366216 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -306,23 +306,28 @@ void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) VBIOSSMC_MSG_TransferTableDram2Smu, TABLE_WATERMARKS); } -void dcn31_smu_set_Z9_support(struct clk_mgr_internal *clk_mgr, bool support) +void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support) { //TODO: Work with smu team to define optimization options. 
- unsigned int msg_id; + unsigned int msg_id, param; if (!clk_mgr->smu_present) return; - if (support) - msg_id = VBIOSSMC_MSG_AllowZstatesEntry; + if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY) + param = 1; else + param = 0; + + if (support == DCN_ZSTATE_SUPPORT_DISALLOW) msg_id = VBIOSSMC_MSG_DisallowZstatesEntry; + else + msg_id = VBIOSSMC_MSG_AllowZstatesEntry; dcn31_smu_send_msg_with_param( clk_mgr, msg_id, - 0); + param); } diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h index cd0b7e1e685f..dfa25a76a6d1 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.h @@ -265,7 +265,7 @@ void dcn31_smu_set_dram_addr_low(struct clk_mgr_internal *clk_mgr, uint32_t addr void dcn31_smu_transfer_dpm_table_smu_2_dram(struct clk_mgr_internal *clk_mgr); void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr); -void dcn31_smu_set_Z9_support(struct clk_mgr_internal *clk_mgr, bool support); +void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support); void dcn31_smu_set_dtbclk(struct clk_mgr_internal *clk_mgr, bool enable); #endif /* DAL_DC_31_SMU_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 263f7edd42a4..b5e15da29017 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -396,6 +396,7 @@ enum dcn_pwr_state { enum dcn_zstate_support_state { DCN_ZSTATE_SUPPORT_UNKNOWN, DCN_ZSTATE_SUPPORT_ALLOW, + DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY, DCN_ZSTATE_SUPPORT_DISALLOW, }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index 2bc93df023ad..d0a5c7afa265 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -3093,8 +3093,14 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc else if (context->stream_count == 1 && context->streams[0]->signal == SIGNAL_TYPE_EDP) { struct dc_link *link = context->streams[0]->sink->link; - if (link->link_index == 0 && context->bw_ctx.dml.vba.StutterPeriod > 5000.0) + /* zstate only supported on PWRSEQ0 */ + if (link->link_index != 0) + return DCN_ZSTATE_SUPPORT_DISALLOW; + + if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) return DCN_ZSTATE_SUPPORT_ALLOW; + else if (link->psr_settings.psr_feature_enabled) + return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; else return DCN_ZSTATE_SUPPORT_DISALLOW; } else -- cgit From 82dcd8eb357caf4b16e6a201f6578b6e8795e541 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 3 Jan 2022 23:51:57 -0500 Subject: drm/amd/display: 3.2.168 This version brings improvements in the following: - Drop unnecessary DCN guards - Improve Z9 interface Acked-by: Rodrigo Siqueira Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index b5e15da29017..1c6728cf2d49 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.167" +#define DC_VER "3.2.168" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit From 
f369e4eba89a6fc1cac619db86c00a964d1fc0a2 Mon Sep 17 00:00:00 2001 From: Jasdeep Dhillon Date: Wed, 5 Jan 2022 15:59:04 -0500 Subject: drm/amd/display: move FPU associated DCN302 code to DML folder (#2266) [Why & How] As part of the FPU isolation work documented in https://patchwork.freedesktop.org/series/93042/, isolate code that uses FPU in DCN302 to DML, where all FPU code should locate. Co-authored-by: Jasdeep Dhillon Reviewed-by: Rodrigo Siqueira Acked-by: Wayne Lin Signed-off-by: Jasdeep Dhillon Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn302/Makefile | 12 - .../drm/amd/display/dc/dcn302/dcn302_resource.c | 316 +----------------- .../drm/amd/display/dc/dcn302/dcn302_resource.h | 3 + drivers/gpu/drm/amd/display/dc/dml/Makefile | 2 + .../gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c | 357 +++++++++++++++++++++ .../gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h | 32 ++ 6 files changed, 404 insertions(+), 318 deletions(-) create mode 100644 drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c create mode 100644 drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile index 101620a8867a..f9561d7f97a1 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn302/Makefile @@ -1,11 +1,6 @@ # # (c) Copyright 2020 Advanced Micro Devices, Inc. All the rights reserved # -# All rights reserved. This notice is intended as a precaution against -# inadvertent publication and does not imply publication or any waiver -# of confidentiality. The year included in the foregoing notice is the -# year of creation of the work. -# # Authors: AMD # # Makefile for dcn302. @@ -20,13 +15,6 @@ ifdef CONFIG_PPC64 CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o := -mhard-float -maltivec endif -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -CFLAGS_$(AMDDALPATH)/dc/dcn302/dcn302_resource.o += -mhard-float -endif - ifdef CONFIG_X86 ifdef IS_OLD_GCC # Stack alignment mismatch, proceed with caution. diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c index 2e9cbfa7663b..e512ae6d00d4 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.c @@ -61,6 +61,8 @@ #include "resource.h" #include "vm_helper.h" +#include "dml/dcn302/dcn302_fpu.h" + #include "dimgrey_cavefish_ip_offset.h" #include "dcn/dcn_3_0_2_offset.h" #include "dcn/dcn_3_0_2_sh_mask.h" @@ -71,140 +73,6 @@ #define DC_LOGGER_INIT(logger) -struct _vcs_dpi_ip_params_st dcn3_02_ip = { - .use_min_dcfclk = 0, - .clamp_min_dcfclk = 0, - .odm_capable = 1, - .gpuvm_enable = 1, - .hostvm_enable = 0, - .gpuvm_max_page_table_levels = 4, - .hostvm_max_page_table_levels = 4, - .hostvm_cached_page_table_levels = 0, - .pte_group_size_bytes = 2048, - .num_dsc = 5, - .rob_buffer_size_kbytes = 184, - .det_buffer_size_kbytes = 184, - .dpte_buffer_size_in_pte_reqs_luma = 64, - .dpte_buffer_size_in_pte_reqs_chroma = 34, - .pde_proc_buffer_size_64k_reqs = 48, - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_kbytes = 8, - .pte_enable = 1, - .max_page_table_levels = 2, - .pte_chunk_size_kbytes = 2, // ? - .meta_chunk_size_kbytes = 2, - .writeback_chunk_size_kbytes = 8, - .line_buffer_size_bits = 789504, - .is_line_buffer_bpp_fixed = 0, // ? 
- .line_buffer_fixed_bpp = 0, // ? - .dcc_supported = true, - .writeback_interface_buffer_size_kbytes = 90, - .writeback_line_buffer_buffer_size = 0, - .max_line_buffer_lines = 12, - .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 - .writeback_chroma_buffer_size_kbytes = 8, - .writeback_chroma_line_buffer_width_pixels = 4, - .writeback_max_hscl_ratio = 1, - .writeback_max_vscl_ratio = 1, - .writeback_min_hscl_ratio = 1, - .writeback_min_vscl_ratio = 1, - .writeback_max_hscl_taps = 1, - .writeback_max_vscl_taps = 1, - .writeback_line_buffer_luma_buffer_size = 0, - .writeback_line_buffer_chroma_buffer_size = 14643, - .cursor_buffer_size = 8, - .cursor_chunk_size = 2, - .max_num_otg = 5, - .max_num_dpp = 5, - .max_num_wb = 1, - .max_dchub_pscl_bw_pix_per_clk = 4, - .max_pscl_lb_bw_pix_per_clk = 2, - .max_lb_vscl_bw_pix_per_clk = 4, - .max_vscl_hscl_bw_pix_per_clk = 4, - .max_hscl_ratio = 6, - .max_vscl_ratio = 6, - .hscl_mults = 4, - .vscl_mults = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .dispclk_ramp_margin_percent = 1, - .underscan_factor = 1.11, - .min_vblank_lines = 32, - .dppclk_delay_subtotal = 46, - .dynamic_metadata_vm_enabled = true, - .dppclk_delay_scl_lb_only = 16, - .dppclk_delay_scl = 50, - .dppclk_delay_cnvc_formatter = 27, - .dppclk_delay_cnvc_cursor = 6, - .dispclk_delay_subtotal = 119, - .dcfclk_cstate_latency = 5.2, // SRExitTime - .max_inter_dcn_tile_repeaters = 8, - .max_num_hdmi_frl_outputs = 1, - .odm_combine_4to1_supported = true, - - .xfc_supported = false, - .xfc_fill_bw_overhead_percent = 10.0, - .xfc_fill_constant_bytes = 0, - .gfx7_compat_tiling_supported = 0, - .number_of_cursors = 1, -}; - -struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = { - .clock_limits = { - { - .state = 0, - .dispclk_mhz = 562.0, - .dppclk_mhz = 300.0, - .phyclk_mhz = 300.0, - .phyclk_d18_mhz = 667.0, - .dscclk_mhz = 405.6, - }, - }, - - .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ - .num_states = 1, - .sr_exit_time_us = 26.5, - .sr_enter_plus_exit_time_us = 31, - .urgent_latency_us = 4.0, - .urgent_latency_pixel_data_only_us = 4.0, - .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, - .urgent_latency_vm_data_only_us = 4.0, - .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, - .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, - .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, - .pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, - .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, - .max_avg_sdp_bw_use_normal_percent = 60.0, - .max_avg_dram_bw_use_normal_percent = 40.0, - .writeback_latency_us = 12.0, - .max_request_size_bytes = 256, - .fabric_datapath_to_dcn_data_return_bytes = 64, - .dcn_downspread_percent = 0.5, - .downspread_percent = 0.38, - .dram_page_open_time_ns = 50.0, - .dram_rw_turnaround_time_ns = 17.5, - .dram_return_buffer_per_channel_bytes = 8192, - .round_trip_ping_latency_dcfclk_cycles = 156, - .urgent_out_of_order_return_per_channel_bytes = 4096, - .channel_interleave_bytes = 256, - .num_banks = 8, - .gpuvm_min_page_size_bytes = 4096, - .hostvm_min_page_size_bytes = 4096, - .dram_clock_change_latency_us = 404, - .dummy_pstate_latency_us = 5, - .writeback_dram_clock_change_latency_us = 23.0, - .return_bus_width_bytes = 64, - .dispclk_dppclk_vco_speed_mhz = 3650, - .xfc_bus_transport_time_us = 20, // ? - .xfc_xbuf_latency_tolerance_us = 4, // ? - .use_urgent_burst_bw = 1, // ? 
- .do_urgent_latency_adjustment = true, - .urgent_latency_adjustment_fabric_clock_component_us = 1.0, - .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, -}; - static const struct dc_debug_options debug_defaults_drv = { .disable_dmcu = true, .force_abm_enable = false, @@ -1105,24 +973,19 @@ static bool init_soc_bounding_box(struct dc *dc, struct resource_pool *pool) loaded_ip->max_num_otg = pool->pipe_count; loaded_ip->max_num_dpp = pool->pipe_count; loaded_ip->clamp_min_dcfclk = dc->config.clamp_min_dcfclk; + DC_FP_START(); dcn20_patch_bounding_box(dc, loaded_bb); + DC_FP_END(); if (dc->ctx->dc_bios->funcs->get_soc_bb_info) { struct bp_soc_bb_info bb_info = { 0 }; if (dc->ctx->dc_bios->funcs->get_soc_bb_info( dc->ctx->dc_bios, &bb_info) == BP_RESULT_OK) { - if (bb_info.dram_clock_change_latency_100ns > 0) - dcn3_02_soc.dram_clock_change_latency_us = - bb_info.dram_clock_change_latency_100ns * 10; - if (bb_info.dram_sr_enter_exit_latency_100ns > 0) - dcn3_02_soc.sr_enter_plus_exit_time_us = - bb_info.dram_sr_enter_exit_latency_100ns * 10; - - if (bb_info.dram_sr_exit_latency_100ns > 0) - dcn3_02_soc.sr_exit_time_us = - bb_info.dram_sr_exit_latency_100ns * 10; + DC_FP_START(); + dcn302_fpu_init_soc_bounding_box(bb_info); + DC_FP_END(); } } @@ -1257,170 +1120,11 @@ static void dcn302_destroy_resource_pool(struct resource_pool **pool) *pool = NULL; } -static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, - unsigned int *optimal_dcfclk, - unsigned int *optimal_fclk) -{ - double bw_from_dram, bw_from_dram1, bw_from_dram2; - - bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans * - dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100); - bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans * - dcn3_02_soc.dram_channel_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100); - - bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? 
bw_from_dram1 : bw_from_dram2; - - if (optimal_fclk) - *optimal_fclk = bw_from_dram / - (dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100)); - - if (optimal_dcfclk) - *optimal_dcfclk = bw_from_dram / - (dcn3_02_soc.return_bus_width_bytes * (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100)); -} - void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) { - unsigned int i, j; - unsigned int num_states = 0; - - unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; - unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; - unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; - - unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200}; - unsigned int num_dcfclk_sta_targets = 4; - unsigned int num_uclk_states; - - - if (dc->ctx->dc_bios->vram_info.num_chans) - dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; - - if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) - dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; - - dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; - - if (bw_params->clk_table.entries[0].memclk_mhz) { - int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; - - for (i = 0; i < MAX_NUM_DPM_LVL; i++) { - if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) - max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; - if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) - max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; - if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) - max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; - if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) - max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; - } - if (!max_dcfclk_mhz) - max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz; - if (!max_dispclk_mhz) - max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz; - if (!max_dppclk_mhz) - max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz; - if (!max_phyclk_mhz) - max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz; - - if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - /* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */ - dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; - num_dcfclk_sta_targets++; - } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { - /* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */ - for (i = 0; i < num_dcfclk_sta_targets; i++) { - if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { - dcfclk_sta_targets[i] = max_dcfclk_mhz; - break; - } - } - /* Update size of array since we "removed" duplicates */ - num_dcfclk_sta_targets = i + 1; - } - - num_uclk_states = bw_params->clk_table.num_entries; - - /* Calculate optimal dcfclk for each uclk */ - for (i = 0; i < num_uclk_states; i++) { - dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, - &optimal_dcfclk_for_uclk[i], NULL); - if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) { - optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; - } - } - - /* 
Calculate optimal uclk for each dcfclk sta target */ - for (i = 0; i < num_dcfclk_sta_targets; i++) { - for (j = 0; j < num_uclk_states; j++) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { - optimal_uclk_for_dcfclk_sta_targets[i] = - bw_params->clk_table.entries[j].memclk_mhz * 16; - break; - } - } - } - - i = 0; - j = 0; - /* create the final dcfclk and uclk table */ - while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { - if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; - } else { - if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; - } else { - j = num_uclk_states; - } - } - } - - while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { - dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; - dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; - } - - while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && - optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { - dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; - dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; - } - - dcn3_02_soc.num_states = num_states; - for (i = 0; i < dcn3_02_soc.num_states; i++) { - dcn3_02_soc.clock_limits[i].state = i; - dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; - dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; - dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; - - /* Fill all states with max values of all other clocks */ - dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; - dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; - dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; - /* Populate from bw_params for DTBCLK, SOCCLK */ - if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0) - dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz; - else - dcn3_02_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; - if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) - dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz; - else - dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; - /* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */ - /* FCLK, PHYCLK_D18, DSCCLK */ - dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz; - dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz; - } - /* re-init DML with updated bb */ - dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); - if (dc->current_state) - dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); - } + DC_FP_START(); + dcn302_fpu_update_bw_bounding_box(dc, bw_params); + DC_FP_END(); } static struct resource_funcs dcn302_res_pool_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h index 42d2c73e30bc..9f24e73b92b3 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h +++ b/drivers/gpu/drm/amd/display/dc/dcn302/dcn302_resource.h @@ -28,6 +28,9 @@ #include "core_types.h" +extern struct 
_vcs_dpi_ip_params_st dcn3_02_ip; +extern struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc; + struct resource_pool *dcn302_create_resource_pool(const struct dc_init_data *init_data, struct dc *dc); void dcn302_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index eee6672bd32d..06910b1f5965 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -71,6 +71,7 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn30/display_rq_dlg_calc_30.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_mode_vba_31.o := $(dml_ccflags) $(frame_warn_flag) CFLAGS_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) @@ -104,6 +105,7 @@ DML += dcn21/display_rq_dlg_calc_21.o dcn21/display_mode_vba_21.o DML += dcn30/display_mode_vba_30.o dcn30/display_rq_dlg_calc_30.o DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o DML += dcn301/dcn301_fpu.o +DML += dcn302/dcn302_fpu.o DML += dsc/rc_calc_fpu.o endif diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c new file mode 100644 index 000000000000..e2bcd205aa93 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.c @@ -0,0 +1,357 @@ +/* + * Copyright 2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#include "resource.h" +#include "clk_mgr.h" +#include "dcn20/dcn20_resource.h" +#include "dcn302/dcn302_resource.h" + +#include "dml/dcn20/dcn20_fpu.h" +#include "dcn302_fpu.h" + +struct _vcs_dpi_ip_params_st dcn3_02_ip = { + .use_min_dcfclk = 0, + .clamp_min_dcfclk = 0, + .odm_capable = 1, + .gpuvm_enable = 1, + .hostvm_enable = 0, + .gpuvm_max_page_table_levels = 4, + .hostvm_max_page_table_levels = 4, + .hostvm_cached_page_table_levels = 0, + .pte_group_size_bytes = 2048, + .num_dsc = 5, + .rob_buffer_size_kbytes = 184, + .det_buffer_size_kbytes = 184, + .dpte_buffer_size_in_pte_reqs_luma = 64, + .dpte_buffer_size_in_pte_reqs_chroma = 34, + .pde_proc_buffer_size_64k_reqs = 48, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_kbytes = 8, + .pte_enable = 1, + .max_page_table_levels = 2, + .pte_chunk_size_kbytes = 2, // ? + .meta_chunk_size_kbytes = 2, + .writeback_chunk_size_kbytes = 8, + .line_buffer_size_bits = 789504, + .is_line_buffer_bpp_fixed = 0, // ? + .line_buffer_fixed_bpp = 0, // ? + .dcc_supported = true, + .writeback_interface_buffer_size_kbytes = 90, + .writeback_line_buffer_buffer_size = 0, + .max_line_buffer_lines = 12, + .writeback_luma_buffer_size_kbytes = 12, // writeback_line_buffer_buffer_size = 656640 + .writeback_chroma_buffer_size_kbytes = 8, + .writeback_chroma_line_buffer_width_pixels = 4, + .writeback_max_hscl_ratio = 1, + .writeback_max_vscl_ratio = 1, + .writeback_min_hscl_ratio = 1, + .writeback_min_vscl_ratio = 1, + .writeback_max_hscl_taps = 1, + .writeback_max_vscl_taps = 1, + .writeback_line_buffer_luma_buffer_size = 0, + .writeback_line_buffer_chroma_buffer_size = 14643, + .cursor_buffer_size = 8, + .cursor_chunk_size = 2, + .max_num_otg = 5, + .max_num_dpp = 5, + .max_num_wb = 1, + .max_dchub_pscl_bw_pix_per_clk = 4, + .max_pscl_lb_bw_pix_per_clk = 2, + .max_lb_vscl_bw_pix_per_clk = 4, + .max_vscl_hscl_bw_pix_per_clk = 4, + .max_hscl_ratio = 6, + .max_vscl_ratio = 6, + .hscl_mults = 4, + .vscl_mults = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .dispclk_ramp_margin_percent = 1, + .underscan_factor = 1.11, + .min_vblank_lines = 32, + .dppclk_delay_subtotal = 46, + .dynamic_metadata_vm_enabled = true, + .dppclk_delay_scl_lb_only = 16, + .dppclk_delay_scl = 50, + .dppclk_delay_cnvc_formatter = 27, + .dppclk_delay_cnvc_cursor = 6, + .dispclk_delay_subtotal = 119, + .dcfclk_cstate_latency = 5.2, // SRExitTime + .max_inter_dcn_tile_repeaters = 8, + .max_num_hdmi_frl_outputs = 1, + .odm_combine_4to1_supported = true, + + .xfc_supported = false, + .xfc_fill_bw_overhead_percent = 10.0, + .xfc_fill_constant_bytes = 0, + .gfx7_compat_tiling_supported = 0, + .number_of_cursors = 1, +}; + +struct _vcs_dpi_soc_bounding_box_st dcn3_02_soc = { + .clock_limits = { + { + .state = 0, + .dispclk_mhz = 562.0, + .dppclk_mhz = 300.0, + .phyclk_mhz = 300.0, + .phyclk_d18_mhz = 667.0, + .dscclk_mhz = 405.6, + }, + }, + + .min_dcfclk = 500.0, /* TODO: set this to actual min DCFCLK */ + .num_states = 1, + .sr_exit_time_us = 26.5, + .sr_enter_plus_exit_time_us = 31, + .urgent_latency_us = 4.0, + .urgent_latency_pixel_data_only_us = 4.0, + .urgent_latency_pixel_mixed_with_vm_data_us = 4.0, + .urgent_latency_vm_data_only_us = 4.0, + .urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096, + .urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096, + .urgent_out_of_order_return_per_channel_vm_only_bytes = 4096, + .pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 80.0, + 
.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 60.0, + .pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0, + .max_avg_sdp_bw_use_normal_percent = 60.0, + .max_avg_dram_bw_use_normal_percent = 40.0, + .writeback_latency_us = 12.0, + .max_request_size_bytes = 256, + .fabric_datapath_to_dcn_data_return_bytes = 64, + .dcn_downspread_percent = 0.5, + .downspread_percent = 0.38, + .dram_page_open_time_ns = 50.0, + .dram_rw_turnaround_time_ns = 17.5, + .dram_return_buffer_per_channel_bytes = 8192, + .round_trip_ping_latency_dcfclk_cycles = 156, + .urgent_out_of_order_return_per_channel_bytes = 4096, + .channel_interleave_bytes = 256, + .num_banks = 8, + .gpuvm_min_page_size_bytes = 4096, + .hostvm_min_page_size_bytes = 4096, + .dram_clock_change_latency_us = 404, + .dummy_pstate_latency_us = 5, + .writeback_dram_clock_change_latency_us = 23.0, + .return_bus_width_bytes = 64, + .dispclk_dppclk_vco_speed_mhz = 3650, + .xfc_bus_transport_time_us = 20, // ? + .xfc_xbuf_latency_tolerance_us = 4, // ? + .use_urgent_burst_bw = 1, // ? + .do_urgent_latency_adjustment = true, + .urgent_latency_adjustment_fabric_clock_component_us = 1.0, + .urgent_latency_adjustment_fabric_clock_reference_mhz = 1000, +}; + +static void dcn302_get_optimal_dcfclk_fclk_for_uclk(unsigned int uclk_mts, + unsigned int *optimal_dcfclk, + unsigned int *optimal_fclk) +{ + + double bw_from_dram, bw_from_dram1, bw_from_dram2; + + bw_from_dram1 = uclk_mts * dcn3_02_soc.num_chans * + dcn3_02_soc.dram_channel_width_bytes * + (dcn3_02_soc.max_avg_dram_bw_use_normal_percent / 100); + bw_from_dram2 = uclk_mts * dcn3_02_soc.num_chans * + dcn3_02_soc.dram_channel_width_bytes * + (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100); + + bw_from_dram = (bw_from_dram1 < bw_from_dram2) ? bw_from_dram1 : bw_from_dram2; + + if (optimal_fclk) + *optimal_fclk = bw_from_dram / + (dcn3_02_soc.fabric_datapath_to_dcn_data_return_bytes * + (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100)); + + if (optimal_dcfclk) + *optimal_dcfclk = bw_from_dram / + (dcn3_02_soc.return_bus_width_bytes * + (dcn3_02_soc.max_avg_sdp_bw_use_normal_percent / 100)); +} + +void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params) +{ + unsigned int i, j; + unsigned int num_states = 0; + + unsigned int dcfclk_mhz[DC__VOLTAGE_STATES] = {0}; + unsigned int dram_speed_mts[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_uclk_for_dcfclk_sta_targets[DC__VOLTAGE_STATES] = {0}; + unsigned int optimal_dcfclk_for_uclk[DC__VOLTAGE_STATES] = {0}; + + unsigned int dcfclk_sta_targets[DC__VOLTAGE_STATES] = {694, 875, 1000, 1200}; + unsigned int num_dcfclk_sta_targets = 4; + unsigned int num_uclk_states; + + dc_assert_fp_enabled(); + + if (dc->ctx->dc_bios->vram_info.num_chans) + dcn3_02_soc.num_chans = dc->ctx->dc_bios->vram_info.num_chans; + + if (dc->ctx->dc_bios->vram_info.dram_channel_width_bytes) + dcn3_02_soc.dram_channel_width_bytes = dc->ctx->dc_bios->vram_info.dram_channel_width_bytes; + + dcn3_02_soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + dc->dml.soc.dispclk_dppclk_vco_speed_mhz = dc->clk_mgr->dentist_vco_freq_khz / 1000.0; + + if (bw_params->clk_table.entries[0].memclk_mhz) { + int max_dcfclk_mhz = 0, max_dispclk_mhz = 0, max_dppclk_mhz = 0, max_phyclk_mhz = 0; + + for (i = 0; i < MAX_NUM_DPM_LVL; i++) { + if (bw_params->clk_table.entries[i].dcfclk_mhz > max_dcfclk_mhz) + max_dcfclk_mhz = bw_params->clk_table.entries[i].dcfclk_mhz; + if (bw_params->clk_table.entries[i].dispclk_mhz > max_dispclk_mhz) + 
max_dispclk_mhz = bw_params->clk_table.entries[i].dispclk_mhz; + if (bw_params->clk_table.entries[i].dppclk_mhz > max_dppclk_mhz) + max_dppclk_mhz = bw_params->clk_table.entries[i].dppclk_mhz; + if (bw_params->clk_table.entries[i].phyclk_mhz > max_phyclk_mhz) + max_phyclk_mhz = bw_params->clk_table.entries[i].phyclk_mhz; + } + if (!max_dcfclk_mhz) + max_dcfclk_mhz = dcn3_02_soc.clock_limits[0].dcfclk_mhz; + if (!max_dispclk_mhz) + max_dispclk_mhz = dcn3_02_soc.clock_limits[0].dispclk_mhz; + if (!max_dppclk_mhz) + max_dppclk_mhz = dcn3_02_soc.clock_limits[0].dppclk_mhz; + if (!max_phyclk_mhz) + max_phyclk_mhz = dcn3_02_soc.clock_limits[0].phyclk_mhz; + + if (max_dcfclk_mhz > dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + /* If max DCFCLK is greater than the max DCFCLK STA target, insert into the DCFCLK STA target array */ + dcfclk_sta_targets[num_dcfclk_sta_targets] = max_dcfclk_mhz; + num_dcfclk_sta_targets++; + } else if (max_dcfclk_mhz < dcfclk_sta_targets[num_dcfclk_sta_targets-1]) { + /* If max DCFCLK is less than the max DCFCLK STA target, cap values and remove duplicates */ + for (i = 0; i < num_dcfclk_sta_targets; i++) { + if (dcfclk_sta_targets[i] > max_dcfclk_mhz) { + dcfclk_sta_targets[i] = max_dcfclk_mhz; + break; + } + } + /* Update size of array since we "removed" duplicates */ + num_dcfclk_sta_targets = i + 1; + } + + num_uclk_states = bw_params->clk_table.num_entries; + + /* Calculate optimal dcfclk for each uclk */ + for (i = 0; i < num_uclk_states; i++) { + dcn302_get_optimal_dcfclk_fclk_for_uclk(bw_params->clk_table.entries[i].memclk_mhz * 16, + &optimal_dcfclk_for_uclk[i], NULL); + if (optimal_dcfclk_for_uclk[i] < bw_params->clk_table.entries[0].dcfclk_mhz) + optimal_dcfclk_for_uclk[i] = bw_params->clk_table.entries[0].dcfclk_mhz; + } + + /* Calculate optimal uclk for each dcfclk sta target */ + for (i = 0; i < num_dcfclk_sta_targets; i++) { + for (j = 0; j < num_uclk_states; j++) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j]) { + optimal_uclk_for_dcfclk_sta_targets[i] = + bw_params->clk_table.entries[j].memclk_mhz * 16; + break; + } + } + } + + i = 0; + j = 0; + /* create the final dcfclk and uclk table */ + while (i < num_dcfclk_sta_targets && j < num_uclk_states && num_states < DC__VOLTAGE_STATES) { + if (dcfclk_sta_targets[i] < optimal_dcfclk_for_uclk[j] && i < num_dcfclk_sta_targets) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } else { + if (j < num_uclk_states && optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } else { + j = num_uclk_states; + } + } + } + + while (i < num_dcfclk_sta_targets && num_states < DC__VOLTAGE_STATES) { + dcfclk_mhz[num_states] = dcfclk_sta_targets[i]; + dram_speed_mts[num_states++] = optimal_uclk_for_dcfclk_sta_targets[i++]; + } + + while (j < num_uclk_states && num_states < DC__VOLTAGE_STATES && + optimal_dcfclk_for_uclk[j] <= max_dcfclk_mhz) { + dcfclk_mhz[num_states] = optimal_dcfclk_for_uclk[j]; + dram_speed_mts[num_states++] = bw_params->clk_table.entries[j++].memclk_mhz * 16; + } + + dcn3_02_soc.num_states = num_states; + for (i = 0; i < dcn3_02_soc.num_states; i++) { + dcn3_02_soc.clock_limits[i].state = i; + dcn3_02_soc.clock_limits[i].dcfclk_mhz = dcfclk_mhz[i]; + dcn3_02_soc.clock_limits[i].fabricclk_mhz = dcfclk_mhz[i]; + dcn3_02_soc.clock_limits[i].dram_speed_mts = dram_speed_mts[i]; + + /* 
Fill all states with max values of all other clocks */ + dcn3_02_soc.clock_limits[i].dispclk_mhz = max_dispclk_mhz; + dcn3_02_soc.clock_limits[i].dppclk_mhz = max_dppclk_mhz; + dcn3_02_soc.clock_limits[i].phyclk_mhz = max_phyclk_mhz; + /* Populate from bw_params for DTBCLK, SOCCLK */ + if (!bw_params->clk_table.entries[i].dtbclk_mhz && i > 0) + dcn3_02_soc.clock_limits[i].dtbclk_mhz = dcn3_02_soc.clock_limits[i-1].dtbclk_mhz; + else + dcn3_02_soc.clock_limits[i].dtbclk_mhz = bw_params->clk_table.entries[i].dtbclk_mhz; + if (!bw_params->clk_table.entries[i].socclk_mhz && i > 0) + dcn3_02_soc.clock_limits[i].socclk_mhz = dcn3_02_soc.clock_limits[i-1].socclk_mhz; + else + dcn3_02_soc.clock_limits[i].socclk_mhz = bw_params->clk_table.entries[i].socclk_mhz; + /* These clocks cannot come from bw_params, always fill from dcn3_02_soc[1] */ + /* FCLK, PHYCLK_D18, DSCCLK */ + dcn3_02_soc.clock_limits[i].phyclk_d18_mhz = dcn3_02_soc.clock_limits[0].phyclk_d18_mhz; + dcn3_02_soc.clock_limits[i].dscclk_mhz = dcn3_02_soc.clock_limits[0].dscclk_mhz; + } + /* re-init DML with updated bb */ + dml_init_instance(&dc->dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); + if (dc->current_state) + dml_init_instance(&dc->current_state->bw_ctx.dml, &dcn3_02_soc, &dcn3_02_ip, DML_PROJECT_DCN30); + } +} + +void dcn302_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info) +{ + + dc_assert_fp_enabled(); + + if (bb_info.dram_clock_change_latency_100ns > 0) + dcn3_02_soc.dram_clock_change_latency_us = + bb_info.dram_clock_change_latency_100ns * 10; + + if (bb_info.dram_sr_enter_exit_latency_100ns > 0) + dcn3_02_soc.sr_enter_plus_exit_time_us = + bb_info.dram_sr_enter_exit_latency_100ns * 10; + + if (bb_info.dram_sr_exit_latency_100ns > 0) + dcn3_02_soc.sr_exit_time_us = + bb_info.dram_sr_exit_latency_100ns * 10; +} + + diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h new file mode 100644 index 000000000000..548305d96cee --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn302/dcn302_fpu.h @@ -0,0 +1,32 @@ +/* + * Copyright 2019-2021 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef __DCN302_FPU_H__ +#define __DCN302_FPU_H__ + +void dcn302_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info); +void dcn302_fpu_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params); + +#endif /* __DCN302_FPU_H__*/ -- cgit From c1a20f707ac33b1fec1d78bab74e07656c237801 Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Tue, 21 Dec 2021 11:17:30 +0800 Subject: drm/amd/display: modify SMU_TIMEOUT macro. [WHY] If some SMU features are not enabled, SMU will return fail to that message. [HOW] SMU_TIMEOUT macro will treat "return fail" as timeout also. Correct the macro to only report timeout case. Reviewed-by: Jun Lei Acked-by: Wayne Lin Signed-off-by: Ian Chen Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dm_helpers.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index 7f94e3f70d7f..a5497ba89f15 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -170,9 +170,9 @@ bool dm_helpers_dmub_outbox_interrupt_control(struct dc_context *ctx, bool enabl void dm_helpers_smu_timeout(struct dc_context *ctx, unsigned int msg_id, unsigned int param, unsigned int timeout_us); -// 0x1 = Result_OK, 0xFE = Result_UnkmownCmd +// 0x1 = Result_OK, 0xFE = Result_UnkmownCmd, 0x0 = Status_Busy #define IS_SMU_TIMEOUT(result) \ - (!(result == 0x1 || result == 0xFE)) + (result == 0x0) int dm_helper_dmub_aux_transfer_sync( struct dc_context *ctx, -- cgit From 6421c49567d55b1ba8d9389e5832297398a51a1c Mon Sep 17 00:00:00 2001 From: Chris Park Date: Wed, 5 Jan 2022 23:39:56 -0500 Subject: drm/amd/display: DCEFCLK DS on CLK init [Why] On HG APU + dGPU scenario with no display to dGPU, DS stays disabled due to no display present. This problem can be worked around by DAL calling DCEFCLK DS message to SMU on clk init. [How] Call DCEFCLK DS message to SMU on clk init. Reviewed-by: Jun Lei Acked-by: Wayne Lin Signed-off-by: Chris Park Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c index f977f29907df..0602bde78e6c 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn30/dcn30_clk_mgr.c @@ -184,6 +184,7 @@ void dcn3_init_clocks(struct clk_mgr *clk_mgr_base) dcn3_init_single_clock(clk_mgr, PPCLK_DCEFCLK, &clk_mgr_base->bw_params->clk_table.entries[0].dcfclk_mhz, &num_levels); + dcn30_smu_set_min_deep_sleep_dcef_clk(clk_mgr, 0); /* DTBCLK */ dcn3_init_single_clock(clk_mgr, PPCLK_DTBCLK, -- cgit From 58c69b53aee24f47741b150e309567d9b38ecd6c Mon Sep 17 00:00:00 2001 From: Felipe Clark Date: Thu, 6 Jan 2022 15:30:33 -0500 Subject: drm/amd/display: Fix black screen issue on memory clock switch en [WHY] With some monitors when multi plane overlay is enabled the memory clock switching mechanism has to change and, due to an error in the initialization sequence, it may cause a black screen. [HOW] Change the firmware assisted memory clock switch initialization and tear-down sequence utilizing the prepare_bandwidth and optimize_bandwidth contexts. 
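The hunk below implements this by giving DCN3.0 its own optimize_bandwidth hook instead of wiring the DCN2.0 one in directly. A minimal sketch of that pattern, mirroring the diff that follows (the firmware-assisted sequencing itself is not spelled out here):

void dcn30_optimize_bandwidth(struct dc *dc, struct dc_state *context)
{
	/*
	 * DCN3.0-specific ordering of the firmware assisted memory clock
	 * switch teardown hangs off this hook; for now it only forwards
	 * to the generic DCN2.0 helper.
	 */
	dcn20_optimize_bandwidth(dc, context);
}

The matching hw_sequencer_funcs entry then points .optimize_bandwidth at the new wrapper, as the dcn30_init.c hunk shows.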
Reviewed-by: Aric Cyr Acked-by: Wayne Lin Signed-off-by: Felipe Clark Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 5 +++++ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h | 3 +++ drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 2 +- 3 files changed, 9 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index ed0a0e5fd805..dec8604a0612 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -355,6 +355,11 @@ void dcn30_prepare_bandwidth(struct dc *dc, dcn20_prepare_bandwidth(dc, context); } +void dcn30_optimize_bandwidth(struct dc *dc, struct dc_state *context) +{ + dcn20_optimize_bandwidth(dc, context); +} + void dcn30_disable_writeback( struct dc *dc, unsigned int dwb_pipe_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h index 73e7b690e82c..357ee14711ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h @@ -50,6 +50,9 @@ void dcn30_disable_writeback( void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context); +void dcn30_optimize_bandwidth(struct dc *dc, + struct dc_state *context); + bool dcn30_mmhubbub_warmup( struct dc *dc, unsigned int num_dwb, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index bb347319de83..73a416cba563 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -60,7 +60,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn20_prepare_bandwidth, - .optimize_bandwidth = dcn20_optimize_bandwidth, + .optimize_bandwidth = dcn30_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, -- cgit From 9338cb719f0f0b43ee1ab39d17214f9b388c7fbf Mon Sep 17 00:00:00 2001 From: Charlene Liu Date: Fri, 7 Jan 2022 04:46:36 -0500 Subject: drm/amd/display: update dml to rev.99 and smu clk_table w/a [why] 1. update dml to rev.99 2. add smu clk table w/a: smu gives 1 dtm level with mismatch votage table which causes multiple issues. 
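Among the rev.99 updates in the diff below is a clamp on the minimum number of prefetch swath lines. A worked example with illustrative values only (2 luma prefetch lines, no chroma, max_vratio_pre of 4; none of these numbers is taken from a real mode):

	/* before rev.99: with the example values, 2 / 4 = 0.5 lines */
	min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre;
	/* rev.99: never budget less than one full swath line, dml_max(1, 0.5) == 1.0 */
	min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre);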
Reviewed-by: Nicholas Kazlauskas Acked-by: Wayne Lin Signed-off-by: Charlene Liu Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- .../amd/display/dc/dml/dcn31/display_mode_vba_31.c | 29 +++++++++++++++++++--- .../gpu/drm/amd/display/dc/dml/display_mode_vba.h | 2 ++ 2 files changed, 28 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c index 6feb23432f8d..e4b9fd31223c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn31/display_mode_vba_31.c @@ -64,6 +64,8 @@ typedef struct { double DCFCLKDeepSleep; unsigned int DPPPerPlane; bool ScalerEnabled; + double VRatio; + double VRatioChroma; enum scan_direction_class SourceScan; unsigned int BlockWidth256BytesY; unsigned int BlockHeight256BytesY; @@ -942,6 +944,7 @@ static bool CalculatePrefetchSchedule( double dst_y_prefetch_equ; double Tsw_oto; double prefetch_bw_oto; + double prefetch_bw_pr; double Tvm_oto; double Tr0_oto; double Tvm_oto_lines; @@ -971,6 +974,7 @@ static bool CalculatePrefetchSchedule( double min_Lsw; double Tsw_est1 = 0; double Tsw_est3 = 0; + double max_Tsw = 0; if (GPUVMEnable == true && HostVMEnable == true) { HostVMDynamicLevelsTrips = HostVMMaxNonCachedPageTableLevels; @@ -1111,11 +1115,14 @@ static bool CalculatePrefetchSchedule( bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC / 4; else bytes_pp = myPipe->BytePerPixelY + myPipe->BytePerPixelC; - + /*rev 99*/ + prefetch_bw_pr = dml_min(1, bytes_pp * myPipe->PixelClock / (double) myPipe->DPPPerPlane); + max_Tsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime; prefetch_sw_bytes = PrefetchSourceLinesY * swath_width_luma_ub * myPipe->BytePerPixelY + PrefetchSourceLinesC * swath_width_chroma_ub * myPipe->BytePerPixelC; prefetch_bw_oto = dml_max(bytes_pp * myPipe->PixelClock / myPipe->DPPPerPlane, prefetch_sw_bytes / (dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) * LineTime)); + prefetch_bw_oto = dml_max(prefetch_bw_pr, prefetch_sw_bytes / max_Tsw); - min_Lsw = dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre; + min_Lsw = dml_max(1, dml_max(PrefetchSourceLinesY, PrefetchSourceLinesC) / max_vratio_pre); Lsw_oto = dml_ceil(4 * dml_max(prefetch_sw_bytes / prefetch_bw_oto / LineTime, min_Lsw), 1) / 4; Tsw_oto = Lsw_oto * LineTime; @@ -1389,7 +1396,7 @@ static bool CalculatePrefetchSchedule( dml_print("DML::%s: SwathHeightC = %d\n", __func__, SwathHeightC); dml_print("DML::%s: VInitPreFillC = %f\n", __func__, VInitPreFillC); #endif - if ((SwathHeightC > 4)) { + if ((SwathHeightC > 4) || VInitPreFillC > 3) { if (LinesToRequestPrefetchPixelData > (VInitPreFillC - 3.0) / 2.0) { *VRatioPrefetchC = dml_max( *VRatioPrefetchC, @@ -2663,6 +2670,8 @@ static void DISPCLKDPPCLKDCFCLKDeepSleepPrefetchParametersWatermarksAndPerforman myPipe.DCFCLKDeepSleep = v->DCFCLKDeepSleep; myPipe.DPPPerPlane = v->DPPPerPlane[k]; myPipe.ScalerEnabled = v->ScalerEnabled[k]; + myPipe.VRatio = v->VRatio[k]; + myPipe.VRatioChroma = v->VRatioChroma[k]; myPipe.SourceScan = v->SourceScan[k]; myPipe.BlockWidth256BytesY = v->BlockWidth256BytesY[k]; myPipe.BlockHeight256BytesY = v->BlockHeight256BytesY[k]; @@ -3911,6 +3920,9 @@ static noinline void CalculatePrefetchSchedulePerPlane( myPipe.DCFCLKDeepSleep = v->ProjectedDCFCLKDeepSleep[i][j]; myPipe.DPPPerPlane = v->NoOfDPP[i][j][k]; myPipe.ScalerEnabled = v->ScalerEnabled[k]; + myPipe.VRatio = mode_lib->vba.VRatio[k]; + 
myPipe.VRatioChroma = mode_lib->vba.VRatioChroma[k]; + myPipe.SourceScan = v->SourceScan[k]; myPipe.BlockWidth256BytesY = v->Read256BlockWidthY[k]; myPipe.BlockHeight256BytesY = v->Read256BlockHeightY[k]; @@ -4987,6 +4999,17 @@ void dml31_ModeSupportAndSystemConfigurationFull(struct display_mode_lib *mode_l &v->meta_row_bandwidth[i][j][k], &v->dpte_row_bandwidth[i][j][k]); } + /*DCCMetaBufferSizeSupport(i, j) = True + For k = 0 To NumberOfActivePlanes - 1 + If MetaRowBytes(i, j, k) > 24064 Then + DCCMetaBufferSizeSupport(i, j) = False + End If + Next k*/ + v->DCCMetaBufferSizeSupport[i][j] = true; + for (k = 0; k < v->NumberOfActivePlanes; ++k) { + if (v->MetaRowBytes[i][j][k] > 24064) + v->DCCMetaBufferSizeSupport[i][j] = false; + } v->UrgLatency[i] = CalculateUrgentLatency( v->UrgentLatencyPixelDataOnly, v->UrgentLatencyPixelMixedWithVMData, diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 90e87961fe3e..8fe74a3b39a8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -544,6 +544,8 @@ struct vba_vars_st { bool DTBCLKRequiredMoreThanSupported[DC__VOLTAGE_STATES]; double UrgentRoundTripAndOutOfOrderLatencyPerState[DC__VOLTAGE_STATES]; bool ROBSupport[DC__VOLTAGE_STATES][2]; + //based on rev 99: Dim DCCMetaBufferSizeSupport(NumberOfStates, 1) As Boolean + bool DCCMetaBufferSizeSupport[DC__VOLTAGE_STATES][2]; bool PTEBufferSizeNotExceeded[DC__VOLTAGE_STATES][2]; bool TotalVerticalActiveBandwidthSupport[DC__VOLTAGE_STATES][2]; double MaxTotalVerticalActiveAvailableBandwidth[DC__VOLTAGE_STATES][2]; -- cgit From 047db281c026de5971cedb5bb486aa29bd16a39d Mon Sep 17 00:00:00 2001 From: Dale Zhao Date: Tue, 28 Dec 2021 16:50:28 +0800 Subject: drm/amd/display: Add signal type check when verify stream backends same [Why] For allow eDP hot-plug feature, the stream signal may change to VIRTUAL when plug-out and back to eDP when plug-in. OS will still setPathMode with same timing for each plugging, but eDP gets no stream update as we don't check signal type changing back as keeping it VIRTUAL. It's also unsafe for future cases that stream signal is switched with same timing. [How] Check stream signal type change include previous HDMI signal case. 
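The comparison added below makes the signal type part of the backend check. A hypothetical caller, for illustration only (the helper name is made up; dc_stream_state->signal and the SIGNAL_TYPE_* values are the existing ones):

static bool edp_replug_needs_reprogramming(const struct dc_stream_state *old_stream,
					   const struct dc_stream_state *new_stream)
{
	/*
	 * The timing may be identical across the plug-out/plug-in cycle
	 * (SIGNAL_TYPE_VIRTUAL while unplugged, SIGNAL_TYPE_EDP again
	 * afterwards), so the signal type alone has to force the update.
	 */
	return old_stream->signal != new_stream->signal;
}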
Reviewed-by: Aric Cyr Acked-by: Wayne Lin Signed-off-by: Dale Zhao Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index b3912ff9dc91..b34bf59cf54b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -1640,6 +1640,9 @@ static bool are_stream_backends_same( if (is_timing_changed(stream_a, stream_b)) return false; + if (stream_a->signal != stream_b->signal) + return false; + if (stream_a->dpms_off != stream_b->dpms_off) return false; -- cgit From 56ca49cf6004ff328458954dd3d0fcf0189a96a4 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sat, 8 Jan 2022 19:25:16 -0500 Subject: drm/amd/display: [FW Promotion] Release 0.0.100.0 Reviewed-by: Aric Cyr Acked-by: Wayne Lin Signed-off-by: Anthony Koo Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index 873ecd04e01d..e13d50abc281 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. */ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0xbaf06b95 +#define DMUB_FW_VERSION_GIT_HASH 0x56a29f36 #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 98 +#define DMUB_FW_VERSION_REVISION 100 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 -- cgit From ce0bdc62e80d40caa346ac4840a20769d5bd1ae6 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Mon, 10 Jan 2022 11:06:09 -0500 Subject: drm/amd/display: 3.2.169 This version brings along following fixes: - Organize FPU associated code to DML - Modify SMU_TIMEOUT macro - Organize dcn201 code - Address DS stays disabled problem under specific scenario - Fix black screen issue - Update DML to rev.99 - Address problem of eDP hot-plug feature Acked-by: Wayne Lin Signed-off-by: Aric Cyr Tested-by: Daniel Wheeler Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 1c6728cf2d49..235e33f73913 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.168" +#define DC_VER "3.2.169" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit From 552b7cb0eed12c184b3bdfcc262e96a954a2bc86 Mon Sep 17 00:00:00 2001 From: Isabella Basso Date: Fri, 7 Jan 2022 18:33:36 -0300 Subject: drm/amd/display: move calcs folder into DML The calcs folder has FPU code on it, which should be isolated inside the DML folder as per https://patchwork.freedesktop.org/series/93042/. This commit aims single-handedly to correct the location of such FPU code and does not refactor any functions. Changes since v2: - Corrected problems to compile when DCN was disabled. 
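Keeping the FPU code under DML matters because only that directory is built with the hard-float compiler flags, so callers outside it have to bracket these helpers with the kernel FPU guards. A minimal sketch of that calling convention, assuming the existing DC_FP_START()/DC_FP_END() macros and the dcn302 helper declared earlier in this series (the wrapper function itself is illustrative, not part of this patch):

static void example_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw_params)
{
	DC_FP_START();	/* enter the kernel FPU context */
	dcn302_fpu_update_bw_bounding_box(dc, bw_params);	/* asserts FP is enabled */
	DC_FP_END();	/* leave the kernel FPU context */
}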
Reviewed-by: Rodrigo Siqueira Signed-off-by: Isabella Basso Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/Makefile | 4 +- drivers/gpu/drm/amd/display/dc/calcs/Makefile | 68 - drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c | 191 -- .../gpu/drm/amd/display/dc/calcs/calcs_logger.h | 578 ---- .../gpu/drm/amd/display/dc/calcs/custom_float.c | 197 -- drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c | 3625 -------------------- .../gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c | 1933 ----------- .../gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h | 38 - .../gpu/drm/amd/display/dc/calcs/dcn_calc_math.c | 147 - drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 1808 ---------- drivers/gpu/drm/amd/display/dc/dml/Makefile | 10 +- .../gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c | 191 ++ .../drm/amd/display/dc/dml/calcs/calcs_logger.h | 578 ++++ .../drm/amd/display/dc/dml/calcs/custom_float.c | 197 ++ .../gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c | 3625 ++++++++++++++++++++ .../drm/amd/display/dc/dml/calcs/dcn_calc_auto.c | 1933 +++++++++++ .../drm/amd/display/dc/dml/calcs/dcn_calc_auto.h | 38 + .../drm/amd/display/dc/dml/calcs/dcn_calc_math.c | 147 + .../gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 1808 ++++++++++ 19 files changed, 8528 insertions(+), 8588 deletions(-) delete mode 100644 drivers/gpu/drm/amd/display/dc/calcs/Makefile delete mode 100644 drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c delete mode 100644 drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h delete mode 100644 drivers/gpu/drm/amd/display/dc/calcs/custom_float.c delete mode 100644 drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c delete mode 100644 drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c delete mode 100644 drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h delete mode 100644 drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c delete mode 100644 drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c create mode 100644 drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c create mode 100644 drivers/gpu/drm/amd/display/dc/dml/calcs/calcs_logger.h create mode 100644 drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c create mode 100644 drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c create mode 100644 drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c create mode 100644 drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h create mode 100644 drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c create mode 100644 drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile index b1f0d6260226..a4ef8f314307 100644 --- a/drivers/gpu/drm/amd/display/dc/Makefile +++ b/drivers/gpu/drm/amd/display/dc/Makefile @@ -23,12 +23,12 @@ # Makefile for Display Core (dc) component. # -DC_LIBS = basics bios calcs clk_mgr dce gpio irq virtual +DC_LIBS = basics bios clk_mgr dce dml gpio irq virtual ifdef CONFIG_DRM_AMD_DC_DCN DC_LIBS += dcn20 DC_LIBS += dsc -DC_LIBS += dcn10 dml +DC_LIBS += dcn10 DC_LIBS += dcn21 DC_LIBS += dcn201 DC_LIBS += dcn30 diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile deleted file mode 100644 index f3c00f479e1c..000000000000 --- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile +++ /dev/null @@ -1,68 +0,0 @@ -# -# Copyright 2017 Advanced Micro Devices, Inc. 
-# Copyright 2019 Raptor Engineering, LLC -# -# Permission is hereby granted, free of charge, to any person obtaining a -# copy of this software and associated documentation files (the "Software"), -# to deal in the Software without restriction, including without limitation -# the rights to use, copy, modify, merge, publish, distribute, sublicense, -# and/or sell copies of the Software, and to permit persons to whom the -# Software is furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL -# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR -# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, -# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -# OTHER DEALINGS IN THE SOFTWARE. -# -# -# Makefile for the 'calcs' sub-component of DAL. -# It calculates Bandwidth and Watermarks values for HW programming -# - -ifdef CONFIG_X86 -calcs_ccflags := -mhard-float -msse -endif - -ifdef CONFIG_PPC64 -calcs_ccflags := -mhard-float -maltivec -endif - -ifdef CONFIG_CC_IS_GCC -ifeq ($(call cc-ifversion, -lt, 0701, y), y) -IS_OLD_GCC = 1 -endif -endif - -ifdef CONFIG_X86 -ifdef IS_OLD_GCC -# Stack alignment mismatch, proceed with caution. -# GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 -# (8B stack alignment). -calcs_ccflags += -mpreferred-stack-boundary=4 -else -calcs_ccflags += -msse2 -endif -endif - -CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_ccflags) -CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_ccflags) -CFLAGS_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare -CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calcs.o := $(calcs_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_auto.o := $(calcs_rcflags) -CFLAGS_REMOVE_$(AMDDALPATH)/dc/calcs/dcn_calc_math.o := $(calcs_rcflags) - -BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o - -ifdef CONFIG_DRM_AMD_DC_DCN -BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o -endif - -AMD_DAL_BW_CALCS = $(addprefix $(AMDDALPATH)/dc/calcs/,$(BW_CALCS)) - -AMD_DISPLAY_FILES += $(AMD_DAL_BW_CALCS) diff --git a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c deleted file mode 100644 index 6ca288fb5fb9..000000000000 --- a/drivers/gpu/drm/amd/display/dc/calcs/bw_fixed.c +++ /dev/null @@ -1,191 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ -#include "dm_services.h" -#include "bw_fixed.h" - - -#define MIN_I64 \ - (int64_t)(-(1LL << 63)) - -#define MAX_I64 \ - (int64_t)((1ULL << 63) - 1) - -#define FRACTIONAL_PART_MASK \ - ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1) - -#define GET_FRACTIONAL_PART(x) \ - (FRACTIONAL_PART_MASK & (x)) - -static uint64_t abs_i64(int64_t arg) -{ - if (arg >= 0) - return (uint64_t)(arg); - else - return (uint64_t)(-arg); -} - -struct bw_fixed bw_int_to_fixed_nonconst(int64_t value) -{ - struct bw_fixed res; - ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32); - res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART; - return res; -} - -struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator) -{ - struct bw_fixed res; - bool arg1_negative = numerator < 0; - bool arg2_negative = denominator < 0; - uint64_t arg1_value; - uint64_t arg2_value; - uint64_t remainder; - - /* determine integer part */ - uint64_t res_value; - - ASSERT(denominator != 0); - - arg1_value = abs_i64(numerator); - arg2_value = abs_i64(denominator); - res_value = div64_u64_rem(arg1_value, arg2_value, &remainder); - - ASSERT(res_value <= BW_FIXED_MAX_I32); - - /* determine fractional part */ - { - uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART; - - do - { - remainder <<= 1; - - res_value <<= 1; - - if (remainder >= arg2_value) - { - res_value |= 1; - remainder -= arg2_value; - } - } while (--i != 0); - } - - /* round up LSB */ - { - uint64_t summand = (remainder << 1) >= arg2_value; - - ASSERT(res_value <= MAX_I64 - summand); - - res_value += summand; - } - - res.value = (int64_t)(res_value); - - if (arg1_negative ^ arg2_negative) - res.value = -res.value; - return res; -} - -struct bw_fixed bw_floor2( - const struct bw_fixed arg, - const struct bw_fixed significance) -{ - struct bw_fixed result; - int64_t multiplicand; - - multiplicand = div64_s64(arg.value, abs_i64(significance.value)); - result.value = abs_i64(significance.value) * multiplicand; - ASSERT(abs_i64(result.value) <= abs_i64(arg.value)); - return result; -} - -struct bw_fixed bw_ceil2( - const struct bw_fixed arg, - const struct bw_fixed significance) -{ - struct bw_fixed result; - int64_t multiplicand; - - multiplicand = div64_s64(arg.value, abs_i64(significance.value)); - result.value = abs_i64(significance.value) * multiplicand; - if (abs_i64(result.value) < abs_i64(arg.value)) { - if (arg.value < 0) - result.value -= abs_i64(significance.value); - else - result.value += abs_i64(significance.value); - } - return result; -} - -struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2) -{ - struct bw_fixed res; - - bool arg1_negative = arg1.value < 0; - bool arg2_negative = arg2.value < 0; - - uint64_t arg1_value = abs_i64(arg1.value); - uint64_t arg2_value = abs_i64(arg2.value); - - uint64_t arg1_int = BW_FIXED_GET_INTEGER_PART(arg1_value); - uint64_t arg2_int = BW_FIXED_GET_INTEGER_PART(arg2_value); - - uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value); - uint64_t arg2_fra = 
GET_FRACTIONAL_PART(arg2_value); - - uint64_t tmp; - - res.value = arg1_int * arg2_int; - - ASSERT(res.value <= BW_FIXED_MAX_I32); - - res.value <<= BW_FIXED_BITS_PER_FRACTIONAL_PART; - - tmp = arg1_int * arg2_fra; - - ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value)); - - res.value += tmp; - - tmp = arg2_int * arg1_fra; - - ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value)); - - res.value += tmp; - - tmp = arg1_fra * arg2_fra; - - tmp = (tmp >> BW_FIXED_BITS_PER_FRACTIONAL_PART) + - (tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value)); - - ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value)); - - res.value += tmp; - - if (arg1_negative ^ arg2_negative) - res.value = -res.value; - return res; -} - diff --git a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h deleted file mode 100644 index 62435bfc274d..000000000000 --- a/drivers/gpu/drm/amd/display/dc/calcs/calcs_logger.h +++ /dev/null @@ -1,578 +0,0 @@ -/* - * Copyright 2018 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ - -#ifndef _CALCS_CALCS_LOGGER_H_ -#define _CALCS_CALCS_LOGGER_H_ -#define DC_LOGGER ctx->logger - -static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip) -{ - - DC_LOG_BANDWIDTH_CALCS("#####################################################################"); - DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_dceip"); - DC_LOG_BANDWIDTH_CALCS("#####################################################################"); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_calcs_version version %d", dceip->version); - DC_LOG_BANDWIDTH_CALCS(" [bool] large_cursor: %d", dceip->large_cursor); - DC_LOG_BANDWIDTH_CALCS(" [bool] dmif_pipe_en_fbc_chunk_tracker: %d", dceip->dmif_pipe_en_fbc_chunk_tracker); - DC_LOG_BANDWIDTH_CALCS(" [bool] display_write_back_supported: %d", dceip->display_write_back_supported); - DC_LOG_BANDWIDTH_CALCS(" [bool] argb_compression_support: %d", dceip->argb_compression_support); - DC_LOG_BANDWIDTH_CALCS(" [bool] pre_downscaler_enabled: %d", dceip->pre_downscaler_enabled); - DC_LOG_BANDWIDTH_CALCS(" [bool] underlay_downscale_prefetch_enabled: %d", - dceip->underlay_downscale_prefetch_enabled); - DC_LOG_BANDWIDTH_CALCS(" [bool] graphics_lb_nodownscaling_multi_line_prefetching: %d", - dceip->graphics_lb_nodownscaling_multi_line_prefetching); - DC_LOG_BANDWIDTH_CALCS(" [bool] limit_excessive_outstanding_dmif_requests: %d", - dceip->limit_excessive_outstanding_dmif_requests); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_max_outstanding_group_num: %d", - dceip->cursor_max_outstanding_group_num); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lines_interleaved_into_lb: %d", dceip->lines_interleaved_into_lb); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] low_power_tiling_mode: %d", dceip->low_power_tiling_mode); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_width: %d", dceip->chunk_width); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_graphics_pipes: %d", dceip->number_of_graphics_pipes); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_pipes: %d", dceip->number_of_underlay_pipes); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_dmif_buffer_allocated: %d", dceip->max_dmif_buffer_allocated); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_dmif_size: %d", dceip->graphics_dmif_size); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_luma_dmif_size: %d", dceip->underlay_luma_dmif_size); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_chroma_dmif_size: %d", dceip->underlay_chroma_dmif_size); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_lines_of_pte_prefetching_in_linear_mode: %d", - dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_luma_mcifwr_buffer_size: %d", - dceip->display_write_back420_luma_mcifwr_buffer_size); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_chroma_mcifwr_buffer_size: %d", - dceip->display_write_back420_chroma_mcifwr_buffer_size); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_pte_request_rows_in_tiling_mode: %d", - dceip->scatter_gather_pte_request_rows_in_tiling_mode); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency10_bit_per_component: %d", - bw_fixed_to_int(dceip->underlay_vscaler_efficiency10_bit_per_component)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency12_bit_per_component: %d", - bw_fixed_to_int(dceip->underlay_vscaler_efficiency12_bit_per_component)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency6_bit_per_component: %d", - 
bw_fixed_to_int(dceip->graphics_vscaler_efficiency6_bit_per_component)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency8_bit_per_component: %d", - bw_fixed_to_int(dceip->graphics_vscaler_efficiency8_bit_per_component)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency10_bit_per_component: %d", - bw_fixed_to_int(dceip->graphics_vscaler_efficiency10_bit_per_component)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency12_bit_per_component: %d", - bw_fixed_to_int(dceip->graphics_vscaler_efficiency12_bit_per_component)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] alpha_vscaler_efficiency: %d", - bw_fixed_to_int(dceip->alpha_vscaler_efficiency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_write_pixels_per_dispclk: %d", - bw_fixed_to_int(dceip->lb_write_pixels_per_dispclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component444: %d", - bw_fixed_to_int(dceip->lb_size_per_component444)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_and_dram_clock_state_change_gated_before_cursor: %d", - bw_fixed_to_int(dceip->stutter_and_dram_clock_state_change_gated_before_cursor)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_luma_lb_size_per_component: %d", - bw_fixed_to_int(dceip->underlay420_luma_lb_size_per_component)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_chroma_lb_size_per_component: %d", - bw_fixed_to_int(dceip->underlay420_chroma_lb_size_per_component)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay422_lb_size_per_component: %d", - bw_fixed_to_int(dceip->underlay422_lb_size_per_component)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_chunk_width: %d", bw_fixed_to_int(dceip->cursor_chunk_width)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_dcp_buffer_lines: %d", - bw_fixed_to_int(dceip->cursor_dcp_buffer_lines)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_width_efficient_for_tiling: %d", - bw_fixed_to_int(dceip->underlay_maximum_width_efficient_for_tiling)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_height_efficient_for_tiling: %d", - bw_fixed_to_int(dceip->underlay_maximum_height_efficient_for_tiling)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display: %d", - bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation: %d", - bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_outstanding_pte_request_limit: %d", - bw_fixed_to_int(dceip->minimum_outstanding_pte_request_limit)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_total_outstanding_pte_requests_allowed_by_saw: %d", - bw_fixed_to_int(dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] linear_mode_line_request_alternation_slice: %d", - bw_fixed_to_int(dceip->linear_mode_line_request_alternation_slice)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_efficiency: %d", bw_fixed_to_int(dceip->request_efficiency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_per_request: %d", bw_fixed_to_int(dceip->dispclk_per_request)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_ramping_factor: %d", - bw_fixed_to_int(dceip->dispclk_ramping_factor)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_throughput_factor: %d", - bw_fixed_to_int(dceip->display_pipe_throughput_factor)); - 
DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_all_surfaces_burst_time: %d", - bw_fixed_to_int(dceip->mcifwr_all_surfaces_burst_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_request_buffer_size: %d", - bw_fixed_to_int(dceip->dmif_request_buffer_size)); - - -} - -static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios) -{ - - DC_LOG_BANDWIDTH_CALCS("#####################################################################"); - DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_vbios vbios"); - DC_LOG_BANDWIDTH_CALCS("#####################################################################"); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] dram_channel_width_in_bits: %d", vbios->dram_channel_width_in_bits); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", vbios->number_of_dram_channels); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_banks: %d", vbios->number_of_dram_banks); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_yclk: %d", bw_fixed_to_int(vbios->low_yclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_yclk: %d", bw_fixed_to_int(vbios->mid_yclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_yclk: %d", bw_fixed_to_int(vbios->high_yclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_sclk: %d", bw_fixed_to_int(vbios->low_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid1_sclk: %d", bw_fixed_to_int(vbios->mid1_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid2_sclk: %d", bw_fixed_to_int(vbios->mid2_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid3_sclk: %d", bw_fixed_to_int(vbios->mid3_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid4_sclk: %d", bw_fixed_to_int(vbios->mid4_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid5_sclk: %d", bw_fixed_to_int(vbios->mid5_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid6_sclk: %d", bw_fixed_to_int(vbios->mid6_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_sclk: %d", bw_fixed_to_int(vbios->high_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_dispclk: %d", - bw_fixed_to_int(vbios->low_voltage_max_dispclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_dispclk;: %d", - bw_fixed_to_int(vbios->mid_voltage_max_dispclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_dispclk;: %d", - bw_fixed_to_int(vbios->high_voltage_max_dispclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_phyclk: %d", - bw_fixed_to_int(vbios->low_voltage_max_phyclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_phyclk: %d", - bw_fixed_to_int(vbios->mid_voltage_max_phyclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_phyclk: %d", - bw_fixed_to_int(vbios->high_voltage_max_phyclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_return_bus_width: %d", bw_fixed_to_int(vbios->data_return_bus_width)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] trc: %d", bw_fixed_to_int(vbios->trc)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency: %d", bw_fixed_to_int(vbios->dmifmc_urgent_latency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_exit_latency: %d", - bw_fixed_to_int(vbios->stutter_self_refresh_exit_latency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_entry_latency: %d", - bw_fixed_to_int(vbios->stutter_self_refresh_entry_latency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_latency: %d", - bw_fixed_to_int(vbios->nbp_state_change_latency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
mcifwrmc_urgent_latency: %d", - bw_fixed_to_int(vbios->mcifwrmc_urgent_latency)); - DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable: %d", vbios->scatter_gather_enable); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] down_spread_percentage: %d", - bw_fixed_to_int(vbios->down_spread_percentage)); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_width: %d", vbios->cursor_width); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] average_compression_rate: %d", vbios->average_compression_rate); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_request_slots_gmc_reserves_for_dmif_per_channel: %d", - vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration: %d", bw_fixed_to_int(vbios->blackout_duration)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_blackout_recovery_time: %d", - bw_fixed_to_int(vbios->maximum_blackout_recovery_time)); - - -} - -static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data) -{ - - int i, j, k; - - DC_LOG_BANDWIDTH_CALCS("#####################################################################"); - DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_data data"); - DC_LOG_BANDWIDTH_CALCS("#####################################################################"); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_displays: %d", data->number_of_displays); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_surface_type: %d", data->underlay_surface_type); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines panning_and_bezel_adjustment: %d", - data->panning_and_bezel_adjustment); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_tiling_mode: %d", data->graphics_tiling_mode); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_lb_bpc: %d", data->graphics_lb_bpc); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_lb_bpc: %d", data->underlay_lb_bpc); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_tiling_mode: %d", data->underlay_tiling_mode); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d0_underlay_mode: %d", data->d0_underlay_mode); - DC_LOG_BANDWIDTH_CALCS(" [bool] d1_display_write_back_dwb_enable: %d", data->d1_display_write_back_dwb_enable); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d1_underlay_mode: %d", data->d1_underlay_mode); - DC_LOG_BANDWIDTH_CALCS(" [bool] cpup_state_change_enable: %d", data->cpup_state_change_enable); - DC_LOG_BANDWIDTH_CALCS(" [bool] cpuc_state_change_enable: %d", data->cpuc_state_change_enable); - DC_LOG_BANDWIDTH_CALCS(" [bool] nbp_state_change_enable: %d", data->nbp_state_change_enable); - DC_LOG_BANDWIDTH_CALCS(" [bool] stutter_mode_enable: %d", data->stutter_mode_enable); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] y_clk_level: %d", data->y_clk_level); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] sclk_level: %d", data->sclk_level); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_surfaces: %d", data->number_of_underlay_surfaces); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_wrchannels: %d", data->number_of_dram_wrchannels); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_request_delay: %d", data->chunk_request_delay); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", data->number_of_dram_channels); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_micro_tile_mode: %d", data->underlay_micro_tile_mode); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_micro_tile_mode: %d", data->graphics_micro_tile_mode); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] max_phyclk: %d", bw_fixed_to_int(data->max_phyclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_efficiency: %d", 
bw_fixed_to_int(data->dram_efficiency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_surface_type: %d", - bw_fixed_to_int(data->src_width_after_surface_type)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_surface_type: %d", - bw_fixed_to_int(data->src_height_after_surface_type)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_surface_type: %d", - bw_fixed_to_int(data->hsr_after_surface_type)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_surface_type: %d", bw_fixed_to_int(data->vsr_after_surface_type)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_rotation: %d", - bw_fixed_to_int(data->src_width_after_rotation)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_rotation: %d", - bw_fixed_to_int(data->src_height_after_rotation)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_rotation: %d", bw_fixed_to_int(data->hsr_after_rotation)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_rotation: %d", bw_fixed_to_int(data->vsr_after_rotation)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_pixels: %d", bw_fixed_to_int(data->source_height_pixels)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_stereo: %d", bw_fixed_to_int(data->hsr_after_stereo)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_stereo: %d", bw_fixed_to_int(data->vsr_after_stereo)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_in_lb: %d", bw_fixed_to_int(data->source_width_in_lb)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_line_pitch: %d", bw_fixed_to_int(data->lb_line_pitch)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_source_efficient_for_tiling: %d", - bw_fixed_to_int(data->underlay_maximum_source_efficient_for_tiling)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] num_lines_at_frame_start: %d", - bw_fixed_to_int(data->num_lines_at_frame_start)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dmif_size_in_time: %d", bw_fixed_to_int(data->min_dmif_size_in_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_mcifwr_size_in_time: %d", - bw_fixed_to_int(data->min_mcifwr_size_in_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_dmif_size: %d", - bw_fixed_to_int(data->total_requests_for_dmif_size)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting: %d", - bw_fixed_to_int(data->peak_pte_request_to_eviction_ratio_limiting)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_pte_per_pte_request: %d", - bw_fixed_to_int(data->useful_pte_per_pte_request)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_rows: %d", - bw_fixed_to_int(data->scatter_gather_pte_request_rows)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_row_height: %d", - bw_fixed_to_int(data->scatter_gather_row_height)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_vblank: %d", - bw_fixed_to_int(data->scatter_gather_pte_requests_in_vblank)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] inefficient_linear_pitch_in_bytes: %d", - bw_fixed_to_int(data->inefficient_linear_pitch_in_bytes)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_data: %d", bw_fixed_to_int(data->cursor_total_data)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_request_groups: %d", - bw_fixed_to_int(data->cursor_total_request_groups)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_requests: %d", - bw_fixed_to_int(data->scatter_gather_total_pte_requests)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_request_groups: %d", - bw_fixed_to_int(data->scatter_gather_total_pte_request_groups)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
tile_width_in_pixels: %d", bw_fixed_to_int(data->tile_width_in_pixels)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_number_of_data_request_page_close_open: %d", - bw_fixed_to_int(data->dmif_total_number_of_data_request_page_close_open)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_number_of_data_request_page_close_open: %d", - bw_fixed_to_int(data->mcifwr_total_number_of_data_request_page_close_open)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_page_close_open: %d", - bw_fixed_to_int(data->bytes_per_page_close_open)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_page_close_open_time: %d", - bw_fixed_to_int(data->mcifwr_total_page_close_open_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_adjusted_dmif_size: %d", - bw_fixed_to_int(data->total_requests_for_adjusted_dmif_size)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_trips: %d", - bw_fixed_to_int(data->total_dmifmc_urgent_trips)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_latency: %d", - bw_fixed_to_int(data->total_dmifmc_urgent_latency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_data: %d", - bw_fixed_to_int(data->total_display_reads_required_data)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_dram_access_data: %d", - bw_fixed_to_int(data->total_display_reads_required_dram_access_data)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_data: %d", - bw_fixed_to_int(data->total_display_writes_required_data)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_dram_access_data: %d", - bw_fixed_to_int(data->total_display_writes_required_dram_access_data)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_data: %d", - bw_fixed_to_int(data->display_reads_required_data)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_dram_access_data: %d", - bw_fixed_to_int(data->display_reads_required_dram_access_data)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_page_close_open_time: %d", - bw_fixed_to_int(data->dmif_total_page_close_open_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_cursor_memory_interface_buffer_size_in_time: %d", - bw_fixed_to_int(data->min_cursor_memory_interface_buffer_size_in_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_read_buffer_size_in_time: %d", - bw_fixed_to_int(data->min_read_buffer_size_in_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer: %d", - bw_fixed_to_int(data->display_reads_time_for_data_transfer)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_writes_time_for_data_transfer: %d", - bw_fixed_to_int(data->display_writes_time_for_data_transfer)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_dram_bandwidth: %d", - bw_fixed_to_int(data->dmif_required_dram_bandwidth)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_dram_bandwidth: %d", - bw_fixed_to_int(data->mcifwr_required_dram_bandwidth)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dmifmc_urgent_latency_for_page_close_open: %d", - bw_fixed_to_int(data->required_dmifmc_urgent_latency_for_page_close_open)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_mcifmcwr_urgent_latency: %d", - bw_fixed_to_int(data->required_mcifmcwr_urgent_latency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dram_bandwidth_gbyte_per_second: %d", - bw_fixed_to_int(data->required_dram_bandwidth_gbyte_per_second)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_bandwidth: %d", bw_fixed_to_int(data->dram_bandwidth)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
dmif_required_sclk: %d", bw_fixed_to_int(data->dmif_required_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_sclk: %d", bw_fixed_to_int(data->mcifwr_required_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_sclk: %d", bw_fixed_to_int(data->required_sclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] downspread_factor: %d", bw_fixed_to_int(data->downspread_factor)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scaler_efficiency: %d", bw_fixed_to_int(data->v_scaler_efficiency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scaler_limits_factor: %d", bw_fixed_to_int(data->scaler_limits_factor)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_pixel_throughput: %d", - bw_fixed_to_int(data->display_pipe_pixel_throughput)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping: %d", - bw_fixed_to_int(data->total_dispclk_required_with_ramping)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping: %d", - bw_fixed_to_int(data->total_dispclk_required_without_ramping)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_read_request_bandwidth: %d", - bw_fixed_to_int(data->total_read_request_bandwidth)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_write_request_bandwidth: %d", - bw_fixed_to_int(data->total_write_request_bandwidth)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_total_read_request_bandwidth: %d", - bw_fixed_to_int(data->dispclk_required_for_total_read_request_bandwidth)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping_with_request_bandwidth: %d", - bw_fixed_to_int(data->total_dispclk_required_with_ramping_with_request_bandwidth)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping_with_request_bandwidth: %d", - bw_fixed_to_int(data->total_dispclk_required_without_ramping_with_request_bandwidth)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk: %d", bw_fixed_to_int(data->dispclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_recovery_time: %d", bw_fixed_to_int(data->blackout_recovery_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_pixels_per_data_fifo_entry: %d", - bw_fixed_to_int(data->min_pixels_per_data_fifo_entry)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] sclk_deep_sleep: %d", bw_fixed_to_int(data->sclk_deep_sleep)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] chunk_request_time: %d", bw_fixed_to_int(data->chunk_request_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_request_time: %d", bw_fixed_to_int(data->cursor_request_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] line_source_pixels_transfer_time: %d", - bw_fixed_to_int(data->line_source_pixels_transfer_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifdram_access_efficiency: %d", - bw_fixed_to_int(data->dmifdram_access_efficiency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrdram_access_efficiency: %d", - bw_fixed_to_int(data->mcifwrdram_access_efficiency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth_no_compression: %d", - bw_fixed_to_int(data->total_average_bandwidth_no_compression)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth: %d", - bw_fixed_to_int(data->total_average_bandwidth)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_stutter_cycle_duration: %d", - bw_fixed_to_int(data->total_stutter_cycle_duration)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_burst_time: %d", bw_fixed_to_int(data->stutter_burst_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] time_in_self_refresh: %d", bw_fixed_to_int(data->time_in_self_refresh)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_efficiency: 
%d", bw_fixed_to_int(data->stutter_efficiency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] worst_number_of_trips_to_memory: %d", - bw_fixed_to_int(data->worst_number_of_trips_to_memory)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] immediate_flip_time: %d", bw_fixed_to_int(data->immediate_flip_time)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_dmif_clients: %d", - bw_fixed_to_int(data->latency_for_non_dmif_clients)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_mcifwr_clients: %d", - bw_fixed_to_int(data->latency_for_non_mcifwr_clients)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency_supported_in_high_sclk_and_yclk: %d", - bw_fixed_to_int(data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_margin: %d", - bw_fixed_to_int(data->nbp_state_dram_speed_change_margin)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer_and_urgent_latency: %d", - bw_fixed_to_int(data->display_reads_time_for_data_transfer_and_urgent_latency)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_margin: %d", - bw_fixed_to_int(data->dram_speed_change_margin)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_vblank_dram_speed_change_margin: %d", - bw_fixed_to_int(data->min_vblank_dram_speed_change_margin)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_stutter_refresh_duration: %d", - bw_fixed_to_int(data->min_stutter_refresh_duration)); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_stutter_dmif_buffer_size: %d", data->total_stutter_dmif_buffer_size); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_bytes_requested: %d", data->total_bytes_requested); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] min_stutter_dmif_buffer_size: %d", data->min_stutter_dmif_buffer_size); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] num_stutter_bursts: %d", data->num_stutter_bursts); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_nbp_state_dram_speed_change_latency_supported: %d", - bw_fixed_to_int(data->v_blank_nbp_state_dram_speed_change_latency_supported)); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_latency_supported: %d", - bw_fixed_to_int(data->nbp_state_dram_speed_change_latency_supported)); - - for (i = 0; i < maximum_number_of_surfaces; i++) { - DC_LOG_BANDWIDTH_CALCS(" [bool] fbc_en[%d]:%d\n", i, data->fbc_en[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] lpt_en[%d]:%d", i, data->lpt_en[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] displays_match_flag[%d]:%d", i, data->displays_match_flag[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] use_alpha[%d]:%d", i, data->use_alpha[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] orthogonal_rotation[%d]:%d", i, data->orthogonal_rotation[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] enable[%d]:%d", i, data->enable[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] access_one_channel_only[%d]:%d", i, data->access_one_channel_only[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable_for_pipe[%d]:%d", - i, data->scatter_gather_enable_for_pipe[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] interlace_mode[%d]:%d", - i, data->interlace_mode[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] display_pstate_change_enable[%d]:%d", - i, data->display_pstate_change_enable[i]); - DC_LOG_BANDWIDTH_CALCS(" [bool] line_buffer_prefetch[%d]:%d", i, data->line_buffer_prefetch[i]); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] bytes_per_pixel[%d]:%d", i, data->bytes_per_pixel[i]); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_chunks_non_fbc_mode[%d]:%d", - i, data->max_chunks_non_fbc_mode[i]); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lb_bpc[%d]:%d", i, data->lb_bpc[i]); 
- DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bpphdmi[%d]:%d", i, data->output_bpphdmi[i]); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr[%d]:%d", i, data->output_bppdp4_lane_hbr[i]); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr2[%d]:%d", - i, data->output_bppdp4_lane_hbr2[i]); - DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr3[%d]:%d", - i, data->output_bppdp4_lane_hbr3[i]); - DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines stereo_mode[%d]:%d", i, data->stereo_mode[i]); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_buffer_transfer_time[%d]:%d", - i, bw_fixed_to_int(data->dmif_buffer_transfer_time[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] displays_with_same_mode[%d]:%d", - i, bw_fixed_to_int(data->displays_with_same_mode[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_dmif_buffer_size[%d]:%d", - i, bw_fixed_to_int(data->stutter_dmif_buffer_size[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_refresh_duration[%d]:%d", - i, bw_fixed_to_int(data->stutter_refresh_duration[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_exit_watermark[%d]:%d", - i, bw_fixed_to_int(data->stutter_exit_watermark[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_entry_watermark[%d]:%d", - i, bw_fixed_to_int(data->stutter_entry_watermark[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_total[%d]:%d", i, bw_fixed_to_int(data->h_total[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_total[%d]:%d", i, bw_fixed_to_int(data->v_total[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixel_rate[%d]:%d", i, bw_fixed_to_int(data->pixel_rate[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width[%d]:%d", i, bw_fixed_to_int(data->src_width[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels[%d]:%d", - i, bw_fixed_to_int(data->pitch_in_pixels[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels_after_surface_type[%d]:%d", - i, bw_fixed_to_int(data->pitch_in_pixels_after_surface_type[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height[%d]:%d", i, bw_fixed_to_int(data->src_height[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scale_ratio[%d]:%d", i, bw_fixed_to_int(data->scale_ratio[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_taps[%d]:%d", i, bw_fixed_to_int(data->h_taps[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_taps[%d]:%d", i, bw_fixed_to_int(data->v_taps[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->h_scale_ratio[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->v_scale_ratio[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] rotation_angle[%d]:%d", - i, bw_fixed_to_int(data->rotation_angle[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] compression_rate[%d]:%d", - i, bw_fixed_to_int(data->compression_rate[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr[%d]:%d", i, bw_fixed_to_int(data->hsr[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr[%d]:%d", i, bw_fixed_to_int(data->vsr[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_rounded_up_to_chunks[%d]:%d", - i, bw_fixed_to_int(data->source_width_rounded_up_to_chunks[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_pixels[%d]:%d", - i, bw_fixed_to_int(data->source_width_pixels[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_rounded_up_to_chunks[%d]:%d", - i, bw_fixed_to_int(data->source_height_rounded_up_to_chunks[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_bandwidth[%d]:%d", - i, bw_fixed_to_int(data->display_bandwidth[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_bandwidth[%d]:%d", - i, 
bw_fixed_to_int(data->request_bandwidth[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_request[%d]:%d", - i, bw_fixed_to_int(data->bytes_per_request[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_bytes_per_request[%d]:%d", - i, bw_fixed_to_int(data->useful_bytes_per_request[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lines_interleaved_in_mem_access[%d]:%d", - i, bw_fixed_to_int(data->lines_interleaved_in_mem_access[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_hiding_lines[%d]:%d", - i, bw_fixed_to_int(data->latency_hiding_lines[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions[%d]:%d", - i, bw_fixed_to_int(data->lb_partitions[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions_max[%d]:%d", - i, bw_fixed_to_int(data->lb_partitions_max[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_with_ramping[%d]:%d", - i, bw_fixed_to_int(data->dispclk_required_with_ramping[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_without_ramping[%d]:%d", - i, bw_fixed_to_int(data->dispclk_required_without_ramping[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_buffer_size[%d]:%d", - i, bw_fixed_to_int(data->data_buffer_size[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] outstanding_chunk_request_limit[%d]:%d", - i, bw_fixed_to_int(data->outstanding_chunk_request_limit[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] urgent_watermark[%d]:%d", - i, bw_fixed_to_int(data->urgent_watermark[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_watermark[%d]:%d", - i, bw_fixed_to_int(data->nbp_state_change_watermark[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_filter_init[%d]:%d", i, bw_fixed_to_int(data->v_filter_init[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_cycle_duration[%d]:%d", - i, bw_fixed_to_int(data->stutter_cycle_duration[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth[%d]:%d", - i, bw_fixed_to_int(data->average_bandwidth[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth_no_compression[%d]:%d", - i, bw_fixed_to_int(data->average_bandwidth_no_compression[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_limit[%d]:%d", - i, bw_fixed_to_int(data->scatter_gather_pte_request_limit[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component[%d]:%d", - i, bw_fixed_to_int(data->lb_size_per_component[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] memory_chunk_size_in_bytes[%d]:%d", - i, bw_fixed_to_int(data->memory_chunk_size_in_bytes[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pipe_chunk_size_in_bytes[%d]:%d", - i, bw_fixed_to_int(data->pipe_chunk_size_in_bytes[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] number_of_trips_to_memory_for_getting_apte_row[%d]:%d", - i, bw_fixed_to_int(data->number_of_trips_to_memory_for_getting_apte_row[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size[%d]:%d", - i, bw_fixed_to_int(data->adjusted_data_buffer_size[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size_in_memory[%d]:%d", - i, bw_fixed_to_int(data->adjusted_data_buffer_size_in_memory[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixels_per_data_fifo_entry[%d]:%d", - i, bw_fixed_to_int(data->pixels_per_data_fifo_entry[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_row[%d]:%d", - i, bw_fixed_to_int(data->scatter_gather_pte_requests_in_row[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pte_request_per_chunk[%d]:%d", - i, bw_fixed_to_int(data->pte_request_per_chunk[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_width[%d]:%d", - i, 
bw_fixed_to_int(data->scatter_gather_page_width[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_height[%d]:%d", - i, bw_fixed_to_int(data->scatter_gather_page_height[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_beginning_of_frame[%d]:%d", - i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_beginning_of_frame[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_middle_of_frame[%d]:%d", - i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_middle_of_frame[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_width_pixels[%d]:%d", - i, bw_fixed_to_int(data->cursor_width_pixels[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding[%d]:%d", - i, bw_fixed_to_int(data->minimum_latency_hiding[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding[%d]:%d", - i, bw_fixed_to_int(data->maximum_latency_hiding[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding_with_cursor[%d]:%d", - i, bw_fixed_to_int(data->minimum_latency_hiding_with_cursor[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding_with_cursor[%d]:%d", - i, bw_fixed_to_int(data->maximum_latency_hiding_with_cursor[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_first_output_pixel[%d]:%d", - i, bw_fixed_to_int(data->src_pixels_for_first_output_pixel[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_last_output_pixel[%d]:%d", - i, bw_fixed_to_int(data->src_pixels_for_last_output_pixel[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_first_output_pixel[%d]:%d", - i, bw_fixed_to_int(data->src_data_for_first_output_pixel[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_last_output_pixel[%d]:%d", - i, bw_fixed_to_int(data->src_data_for_last_output_pixel[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] active_time[%d]:%d", i, bw_fixed_to_int(data->active_time[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] horizontal_blank_and_chunk_granularity_factor[%d]:%d", - i, bw_fixed_to_int(data->horizontal_blank_and_chunk_granularity_factor[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_latency_hiding[%d]:%d", - i, bw_fixed_to_int(data->cursor_latency_hiding[i])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_dram_speed_change_margin[%d]:%d", - i, bw_fixed_to_int(data->v_blank_dram_speed_change_margin[i])); - } - - for (i = 0; i < maximum_number_of_surfaces; i++) { - for (j = 0; j < 3; j++) { - for (k = 0; k < 8; k++) { - - DC_LOG_BANDWIDTH_CALCS("\n [bw_fixed] line_source_transfer_time[%d][%d][%d]:%d", - i, j, k, bw_fixed_to_int(data->line_source_transfer_time[i][j][k])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_line_source_transfer_time[%d][%d][%d]:%d", - i, j, k, - bw_fixed_to_int(data->dram_speed_change_line_source_transfer_time[i][j][k])); - } - } - } - - for (i = 0; i < 3; i++) { - for (j = 0; j < 8; j++) { - - DC_LOG_BANDWIDTH_CALCS("\n [uint32_t] num_displays_with_margin[%d][%d]:%d", - i, j, data->num_displays_with_margin[i][j]); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_burst_time[%d][%d]:%d", - i, j, bw_fixed_to_int(data->dmif_burst_time[i][j])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_burst_time[%d][%d]:%d", - i, j, bw_fixed_to_int(data->mcifwr_burst_time[i][j])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dram_speed_change_margin[%d][%d]:%d", - i, j, bw_fixed_to_int(data->min_dram_speed_change_margin[i][j])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_dram_speed_change[%d][%d]:%d", - i, j, bw_fixed_to_int(data->dispclk_required_for_dram_speed_change[i][j])); - 
DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration_margin[%d][%d]:%d", - i, j, bw_fixed_to_int(data->blackout_duration_margin[i][j])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_duration[%d][%d]:%d", - i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_duration[i][j])); - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_recovery[%d][%d]:%d", - i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_recovery[i][j])); - } - } - - for (i = 0; i < 6; i++) { - DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk_for_urgent_latency[%d]:%d", - i, bw_fixed_to_int(data->dmif_required_sclk_for_urgent_latency[i])); - } -} -; - -#endif /* _CALCS_CALCS_LOGGER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c b/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c deleted file mode 100644 index 31d167bc548f..000000000000 --- a/drivers/gpu/drm/amd/display/dc/calcs/custom_float.c +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
- * - * Authors: AMD - * - */ -#include "dm_services.h" -#include "custom_float.h" - - -static bool build_custom_float( - struct fixed31_32 value, - const struct custom_float_format *format, - bool *negative, - uint32_t *mantissa, - uint32_t *exponenta) -{ - uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1; - - const struct fixed31_32 mantissa_constant_plus_max_fraction = - dc_fixpt_from_fraction( - (1LL << (format->mantissa_bits + 1)) - 1, - 1LL << format->mantissa_bits); - - struct fixed31_32 mantiss; - - if (dc_fixpt_eq( - value, - dc_fixpt_zero)) { - *negative = false; - *mantissa = 0; - *exponenta = 0; - return true; - } - - if (dc_fixpt_lt( - value, - dc_fixpt_zero)) { - *negative = format->sign; - value = dc_fixpt_neg(value); - } else { - *negative = false; - } - - if (dc_fixpt_lt( - value, - dc_fixpt_one)) { - uint32_t i = 1; - - do { - value = dc_fixpt_shl(value, 1); - ++i; - } while (dc_fixpt_lt( - value, - dc_fixpt_one)); - - --i; - - if (exp_offset <= i) { - *mantissa = 0; - *exponenta = 0; - return true; - } - - *exponenta = exp_offset - i; - } else if (dc_fixpt_le( - mantissa_constant_plus_max_fraction, - value)) { - uint32_t i = 1; - - do { - value = dc_fixpt_shr(value, 1); - ++i; - } while (dc_fixpt_lt( - mantissa_constant_plus_max_fraction, - value)); - - *exponenta = exp_offset + i - 1; - } else { - *exponenta = exp_offset; - } - - mantiss = dc_fixpt_sub( - value, - dc_fixpt_one); - - if (dc_fixpt_lt( - mantiss, - dc_fixpt_zero) || - dc_fixpt_lt( - dc_fixpt_one, - mantiss)) - mantiss = dc_fixpt_zero; - else - mantiss = dc_fixpt_shl( - mantiss, - format->mantissa_bits); - - *mantissa = dc_fixpt_floor(mantiss); - - return true; -} - -static bool setup_custom_float( - const struct custom_float_format *format, - bool negative, - uint32_t mantissa, - uint32_t exponenta, - uint32_t *result) -{ - uint32_t i = 0; - uint32_t j = 0; - - uint32_t value = 0; - - /* verification code: - * once calculation is ok we can remove it - */ - - const uint32_t mantissa_mask = - (1 << (format->mantissa_bits + 1)) - 1; - - const uint32_t exponenta_mask = - (1 << (format->exponenta_bits + 1)) - 1; - - if (mantissa & ~mantissa_mask) { - BREAK_TO_DEBUGGER(); - mantissa = mantissa_mask; - } - - if (exponenta & ~exponenta_mask) { - BREAK_TO_DEBUGGER(); - exponenta = exponenta_mask; - } - - /* end of verification code */ - - while (i < format->mantissa_bits) { - uint32_t mask = 1 << i; - - if (mantissa & mask) - value |= mask; - - ++i; - } - - while (j < format->exponenta_bits) { - uint32_t mask = 1 << j; - - if (exponenta & mask) - value |= mask << i; - - ++j; - } - - if (negative && format->sign) - value |= 1 << (i + j); - - *result = value; - - return true; -} - -bool convert_to_custom_float_format( - struct fixed31_32 value, - const struct custom_float_format *format, - uint32_t *result) -{ - uint32_t mantissa; - uint32_t exponenta; - bool negative; - - return build_custom_float( - value, format, &negative, &mantissa, &exponenta) && - setup_custom_float( - format, negative, mantissa, exponenta, result); -} - - diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c deleted file mode 100644 index ff5bb152ef49..000000000000 --- a/drivers/gpu/drm/amd/display/dc/calcs/dce_calcs.c +++ /dev/null @@ -1,3625 +0,0 @@ -/* - * Copyright 2015 Advanced Micro Devices, Inc. 
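
The custom_float.c removed above converts a fixed31_32 value into a sign/biased-exponent/mantissa layout, with the bias computed as 2^(exponenta_bits - 1) - 1 and the fields packed mantissa-first. As a rough illustration of that scheme only, here is a minimal standalone C sketch that performs the same split on an ordinary double: the struct and function names are invented for this example, plain floating-point math stands in for the driver's fixed31_32 helpers, and the original's overflow clamping and debugger checks are omitted.

#include <math.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct example_float_format {
	uint32_t mantissa_bits;
	uint32_t exponenta_bits;
	bool sign;
};

/* Encode an ordinary double into the custom sign/exponent/mantissa layout.
 * Simplified: underflow collapses to zero and overflow is not clamped.
 */
static uint32_t example_encode(double value, const struct example_float_format *fmt)
{
	const uint32_t exp_offset = (1u << (fmt->exponenta_bits - 1)) - 1; /* bias */
	bool negative = false;
	uint32_t exponenta = 0, mantissa = 0;
	int e;

	if (value == 0.0)
		goto pack;

	if (value < 0.0) {
		negative = fmt->sign;
		value = -value;
	}

	e = (int)floor(log2(value));		/* unbiased exponent */
	if (e + (int)exp_offset < 1)
		goto pack;			/* too small: encode as zero */

	exponenta = (uint32_t)(e + (int)exp_offset);
	/* fractional part of the normalized value, scaled to mantissa_bits */
	mantissa = (uint32_t)floor((value / exp2(e) - 1.0) *
				   (double)(1u << fmt->mantissa_bits));

pack:
	return mantissa |
	       (exponenta << fmt->mantissa_bits) |
	       ((negative ? 1u : 0u) << (fmt->mantissa_bits + fmt->exponenta_bits));
}

int main(void)
{
	const struct example_float_format fmt = {
		.mantissa_bits = 12, .exponenta_bits = 6, .sign = true,
	};

	printf("0x%x\n", example_encode(0.15625, &fmt));	/* 0x1c400 */
	return 0;
}

With a 12-bit mantissa and 6-bit exponent, 0.15625 normalizes to 1.25 * 2^-3, giving a biased exponent of 28 and a mantissa of 0x400, i.e. 0x1c400 once packed.
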
- * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include - -#include "resource.h" -#include "dm_services.h" -#include "dce_calcs.h" -#include "dc.h" -#include "core_types.h" -#include "dal_asic_id.h" -#include "calcs_logger.h" - -/* - * NOTE: - * This file is gcc-parseable HW gospel, coming straight from HW engineers. - * - * It doesn't adhere to Linux kernel style and sometimes will do things in odd - * ways. Unless there is something clearly wrong with it the code should - * remain as-is as it provides us with a guarantee from HW that it is correct. - */ - -/******************************************************************************* - * Private Functions - ******************************************************************************/ - -static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asic_id) -{ - switch (asic_id.chip_family) { - - case FAMILY_CZ: - if (ASIC_REV_IS_STONEY(asic_id.hw_internal_rev)) - return BW_CALCS_VERSION_STONEY; - return BW_CALCS_VERSION_CARRIZO; - - case FAMILY_VI: - if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev)) - return BW_CALCS_VERSION_POLARIS12; - if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev)) - return BW_CALCS_VERSION_POLARIS10; - if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev)) - return BW_CALCS_VERSION_POLARIS11; - if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev)) - return BW_CALCS_VERSION_VEGAM; - return BW_CALCS_VERSION_INVALID; - - case FAMILY_AI: - return BW_CALCS_VERSION_VEGA10; - - default: - return BW_CALCS_VERSION_INVALID; - } -} - -static void calculate_bandwidth( - const struct bw_calcs_dceip *dceip, - const struct bw_calcs_vbios *vbios, - struct bw_calcs_data *data) - -{ - const int32_t pixels_per_chunk = 512; - const int32_t high = 2; - const int32_t mid = 1; - const int32_t low = 0; - const uint32_t s_low = 0; - const uint32_t s_mid1 = 1; - const uint32_t s_mid2 = 2; - const uint32_t s_mid3 = 3; - const uint32_t s_mid4 = 4; - const uint32_t s_mid5 = 5; - const uint32_t s_mid6 = 6; - const uint32_t s_high = 7; - const uint32_t dmif_chunk_buff_margin = 1; - - uint32_t max_chunks_fbc_mode; - int32_t num_cursor_lines; - - int32_t i, j, k; - struct bw_fixed *yclk; - struct bw_fixed *sclk; - bool d0_underlay_enable; - bool d1_underlay_enable; - bool fbc_enabled; - bool lpt_enabled; - enum bw_defines sclk_message; - enum bw_defines yclk_message; - enum bw_defines *tiling_mode; - enum bw_defines *surface_type; - enum bw_defines voltage; - enum 
bw_defines pipe_check; - enum bw_defines hsr_check; - enum bw_defines vsr_check; - enum bw_defines lb_size_check; - enum bw_defines fbc_check; - enum bw_defines rotation_check; - enum bw_defines mode_check; - enum bw_defines nbp_state_change_enable_blank; - /*initialize variables*/ - int32_t number_of_displays_enabled = 0; - int32_t number_of_displays_enabled_with_margin = 0; - int32_t number_of_aligned_displays_with_no_margin = 0; - - yclk = kcalloc(3, sizeof(*yclk), GFP_KERNEL); - if (!yclk) - return; - - sclk = kcalloc(8, sizeof(*sclk), GFP_KERNEL); - if (!sclk) - goto free_yclk; - - tiling_mode = kcalloc(maximum_number_of_surfaces, sizeof(*tiling_mode), GFP_KERNEL); - if (!tiling_mode) - goto free_sclk; - - surface_type = kcalloc(maximum_number_of_surfaces, sizeof(*surface_type), GFP_KERNEL); - if (!surface_type) - goto free_tiling_mode; - - yclk[low] = vbios->low_yclk; - yclk[mid] = vbios->mid_yclk; - yclk[high] = vbios->high_yclk; - sclk[s_low] = vbios->low_sclk; - sclk[s_mid1] = vbios->mid1_sclk; - sclk[s_mid2] = vbios->mid2_sclk; - sclk[s_mid3] = vbios->mid3_sclk; - sclk[s_mid4] = vbios->mid4_sclk; - sclk[s_mid5] = vbios->mid5_sclk; - sclk[s_mid6] = vbios->mid6_sclk; - sclk[s_high] = vbios->high_sclk; - /*''''''''''''''''''*/ - /* surface assignment:*/ - /* 0: d0 underlay or underlay luma*/ - /* 1: d0 underlay chroma*/ - /* 2: d1 underlay or underlay luma*/ - /* 3: d1 underlay chroma*/ - /* 4: d0 graphics*/ - /* 5: d1 graphics*/ - /* 6: d2 graphics*/ - /* 7: d3 graphics, same mode as d2*/ - /* 8: d4 graphics, same mode as d2*/ - /* 9: d5 graphics, same mode as d2*/ - /* ...*/ - /* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/ - /* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/ - /* underlay luma and chroma surface parameters from spreadsheet*/ - - - - - if (data->d0_underlay_mode == bw_def_none) - d0_underlay_enable = false; - else - d0_underlay_enable = true; - if (data->d1_underlay_mode == bw_def_none) - d1_underlay_enable = false; - else - d1_underlay_enable = true; - data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable; - switch (data->underlay_surface_type) { - case bw_def_420: - surface_type[0] = bw_def_underlay420_luma; - surface_type[2] = bw_def_underlay420_luma; - data->bytes_per_pixel[0] = 1; - data->bytes_per_pixel[2] = 1; - surface_type[1] = bw_def_underlay420_chroma; - surface_type[3] = bw_def_underlay420_chroma; - data->bytes_per_pixel[1] = 2; - data->bytes_per_pixel[3] = 2; - data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component; - data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component; - data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component; - data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component; - break; - case bw_def_422: - surface_type[0] = bw_def_underlay422; - surface_type[2] = bw_def_underlay422; - data->bytes_per_pixel[0] = 2; - data->bytes_per_pixel[2] = 2; - data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component; - data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component; - break; - default: - surface_type[0] = bw_def_underlay444; - surface_type[2] = bw_def_underlay444; - data->bytes_per_pixel[0] = 4; - data->bytes_per_pixel[2] = 4; - data->lb_size_per_component[0] = dceip->lb_size_per_component444; - data->lb_size_per_component[2] = dceip->lb_size_per_component444; - break; - } - if (d0_underlay_enable) { - switch (data->underlay_surface_type) { - case bw_def_420: 
- data->enable[0] = 1; - data->enable[1] = 1; - break; - default: - data->enable[0] = 1; - data->enable[1] = 0; - break; - } - } - else { - data->enable[0] = 0; - data->enable[1] = 0; - } - if (d1_underlay_enable) { - switch (data->underlay_surface_type) { - case bw_def_420: - data->enable[2] = 1; - data->enable[3] = 1; - break; - default: - data->enable[2] = 1; - data->enable[3] = 0; - break; - } - } - else { - data->enable[2] = 0; - data->enable[3] = 0; - } - data->use_alpha[0] = 0; - data->use_alpha[1] = 0; - data->use_alpha[2] = 0; - data->use_alpha[3] = 0; - data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable; - data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable; - data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable; - data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable; - /*underlay0 same and graphics display pipe0*/ - data->interlace_mode[0] = data->interlace_mode[4]; - data->interlace_mode[1] = data->interlace_mode[4]; - /*underlay1 same and graphics display pipe1*/ - data->interlace_mode[2] = data->interlace_mode[5]; - data->interlace_mode[3] = data->interlace_mode[5]; - /*underlay0 same and graphics display pipe0*/ - data->h_total[0] = data->h_total[4]; - data->v_total[0] = data->v_total[4]; - data->h_total[1] = data->h_total[4]; - data->v_total[1] = data->v_total[4]; - /*underlay1 same and graphics display pipe1*/ - data->h_total[2] = data->h_total[5]; - data->v_total[2] = data->v_total[5]; - data->h_total[3] = data->h_total[5]; - data->v_total[3] = data->v_total[5]; - /*underlay0 same and graphics display pipe0*/ - data->pixel_rate[0] = data->pixel_rate[4]; - data->pixel_rate[1] = data->pixel_rate[4]; - /*underlay1 same and graphics display pipe1*/ - data->pixel_rate[2] = data->pixel_rate[5]; - data->pixel_rate[3] = data->pixel_rate[5]; - if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) { - tiling_mode[0] = bw_def_linear; - tiling_mode[1] = bw_def_linear; - tiling_mode[2] = bw_def_linear; - tiling_mode[3] = bw_def_linear; - } - else { - tiling_mode[0] = bw_def_landscape; - tiling_mode[1] = bw_def_landscape; - tiling_mode[2] = bw_def_landscape; - tiling_mode[3] = bw_def_landscape; - } - data->lb_bpc[0] = data->underlay_lb_bpc; - data->lb_bpc[1] = data->underlay_lb_bpc; - data->lb_bpc[2] = data->underlay_lb_bpc; - data->lb_bpc[3] = data->underlay_lb_bpc; - data->compression_rate[0] = bw_int_to_fixed(1); - data->compression_rate[1] = bw_int_to_fixed(1); - data->compression_rate[2] = bw_int_to_fixed(1); - data->compression_rate[3] = bw_int_to_fixed(1); - data->access_one_channel_only[0] = 0; - data->access_one_channel_only[1] = 0; - data->access_one_channel_only[2] = 0; - data->access_one_channel_only[3] = 0; - data->cursor_width_pixels[0] = bw_int_to_fixed(0); - data->cursor_width_pixels[1] = bw_int_to_fixed(0); - data->cursor_width_pixels[2] = bw_int_to_fixed(0); - data->cursor_width_pixels[3] = bw_int_to_fixed(0); - /* graphics surface parameters from spreadsheet*/ - fbc_enabled = false; - lpt_enabled = false; - for (i = 4; i <= maximum_number_of_surfaces - 3; i++) { - if (i < data->number_of_displays + 4) { - if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) { - data->enable[i] = 0; - data->use_alpha[i] = 0; - } - else if (i == 4 && data->d0_underlay_mode == bw_def_blend) { - data->enable[i] = 1; - data->use_alpha[i] = 1; - } - else if (i == 4) { - data->enable[i] = 1; - data->use_alpha[i] = 0; - } - else if (i == 
5 && data->d1_underlay_mode == bw_def_underlay_only) { - data->enable[i] = 0; - data->use_alpha[i] = 0; - } - else if (i == 5 && data->d1_underlay_mode == bw_def_blend) { - data->enable[i] = 1; - data->use_alpha[i] = 1; - } - else { - data->enable[i] = 1; - data->use_alpha[i] = 0; - } - } - else { - data->enable[i] = 0; - data->use_alpha[i] = 0; - } - data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable; - surface_type[i] = bw_def_graphics; - data->lb_size_per_component[i] = dceip->lb_size_per_component444; - if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) { - tiling_mode[i] = bw_def_linear; - } - else { - tiling_mode[i] = bw_def_tiled; - } - data->lb_bpc[i] = data->graphics_lb_bpc; - if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) { - data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate); - data->access_one_channel_only[i] = data->lpt_en[i]; - } - else { - data->compression_rate[i] = bw_int_to_fixed(1); - data->access_one_channel_only[i] = 0; - } - if (data->fbc_en[i] == 1) { - fbc_enabled = true; - if (data->lpt_en[i] == 1) { - lpt_enabled = true; - } - } - data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width); - } - /* display_write_back420*/ - data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0; - data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0; - if (data->d1_display_write_back_dwb_enable == 1) { - data->enable[maximum_number_of_surfaces - 2] = 1; - data->enable[maximum_number_of_surfaces - 1] = 1; - } - else { - data->enable[maximum_number_of_surfaces - 2] = 0; - data->enable[maximum_number_of_surfaces - 1] = 0; - } - surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma; - surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma; - data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component; - data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component; - data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1; - data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2; - data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5]; - data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5]; - data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); - data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); - data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); - data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); - data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0); - data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0); - tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear; - tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear; - data->lb_bpc[maximum_number_of_surfaces - 2] = 8; - data->lb_bpc[maximum_number_of_surfaces - 1] = 8; - data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); - data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); - data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0; - data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0; - /*assume display pipe1 has dwb enabled*/ - data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5]; - 
data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5]; - data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5]; - data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5]; - data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5]; - data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5]; - data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5]; - data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5]; - data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5]; - data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5]; - data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5]; - data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5]; - data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); - data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); - data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); - data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); - data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono; - data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono; - data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0); - data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0); - data->use_alpha[maximum_number_of_surfaces - 2] = 0; - data->use_alpha[maximum_number_of_surfaces - 1] = 0; - /*mode check calculations:*/ - /* mode within dce ip capabilities*/ - /* fbc*/ - /* hsr*/ - /* vsr*/ - /* lb size*/ - /*effective scaling source and ratios:*/ - /*for graphics, non-stereo, non-interlace surfaces when the size of the source and destination are the same, only one tap is used*/ - /*420 chroma has half the width, height, horizontal and vertical scaling ratios than luma*/ - /*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/ - /*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/ - /*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/ - /*in interlace mode there is 2:1 vertical downscaling for each field*/ - /*in panning or bezel adjustment mode the source width has an extra 128 pixels*/ - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) { - data->h_taps[i] = bw_int_to_fixed(1); - data->v_taps[i] = bw_int_to_fixed(1); - } - if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) { - data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2)); - data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2)); - data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2)); - data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2)); - data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2)); - } - else { - data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i]; - data->src_width_after_surface_type = data->src_width[i]; - data->src_height_after_surface_type = data->src_height[i]; - data->hsr_after_surface_type = data->h_scale_ratio[i]; - 
data->vsr_after_surface_type = data->v_scale_ratio[i]; - } - if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { - data->src_width_after_rotation = data->src_height_after_surface_type; - data->src_height_after_rotation = data->src_width_after_surface_type; - data->hsr_after_rotation = data->vsr_after_surface_type; - data->vsr_after_rotation = data->hsr_after_surface_type; - } - else { - data->src_width_after_rotation = data->src_width_after_surface_type; - data->src_height_after_rotation = data->src_height_after_surface_type; - data->hsr_after_rotation = data->hsr_after_surface_type; - data->vsr_after_rotation = data->vsr_after_surface_type; - } - switch (data->stereo_mode[i]) { - case bw_def_top_bottom: - data->source_width_pixels[i] = data->src_width_after_rotation; - data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation); - data->hsr_after_stereo = data->hsr_after_rotation; - data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation); - break; - case bw_def_side_by_side: - data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation); - data->source_height_pixels = data->src_height_after_rotation; - data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation); - data->vsr_after_stereo = data->vsr_after_rotation; - break; - default: - data->source_width_pixels[i] = data->src_width_after_rotation; - data->source_height_pixels = data->src_height_after_rotation; - data->hsr_after_stereo = data->hsr_after_rotation; - data->vsr_after_stereo = data->vsr_after_rotation; - break; - } - data->hsr[i] = data->hsr_after_stereo; - if (data->interlace_mode[i]) { - data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2)); - } - else { - data->vsr[i] = data->vsr_after_stereo; - } - if (data->panning_and_bezel_adjustment != bw_def_none) { - data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256)); - } - else { - data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128)); - } - data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels; - } - } - /*mode support checks:*/ - /*the number of graphics and underlay pipes is limited by the ip support*/ - /*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/ - /*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/ - /*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/ - /*the number of lines in the line buffer has to exceed the number of vertical taps*/ - /*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/ - /*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/ - /*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/ - /*frame buffer compression is not supported with stereo mode, rotation, or non- 888 
formats*/ - /*rotation is not supported with linear of stereo modes*/ - if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) { - pipe_check = bw_def_ok; - } - else { - pipe_check = bw_def_notok; - } - hsr_check = bw_def_ok; - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) { - if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) { - hsr_check = bw_def_hsr_mtn_4; - } - else { - if (bw_mtn(data->hsr[i], data->h_taps[i])) { - hsr_check = bw_def_hsr_mtn_h_taps; - } - else { - if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) { - hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr; - } - } - } - } - } - } - vsr_check = bw_def_ok; - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) { - if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) { - vsr_check = bw_def_vsr_mtn_4; - } - else { - if (bw_mtn(data->vsr[i], data->v_taps[i])) { - vsr_check = bw_def_vsr_mtn_v_taps; - } - } - } - } - } - lb_size_check = bw_def_ok; - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) { - data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]); - } - else { - data->source_width_in_lb = data->source_width_pixels[i]; - } - switch (data->lb_bpc[i]) { - case 8: - data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875ul, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48)); - break; - case 10: - data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48)); - break; - default: - data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48)); - break; - } - data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1)); - /*clamp the partitions to the maxium number supported by the lb*/ - if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) { - data->lb_partitions_max[i] = bw_int_to_fixed(10); - } - else { - data->lb_partitions_max[i] = bw_int_to_fixed(7); - } - data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]); - if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) { - lb_size_check = bw_def_notok; - } - } - } - fbc_check = bw_def_ok; - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) { - fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo; - } - } - rotation_check = bw_def_ok; - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && 
(tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) { - rotation_check = bw_def_invalid_linear_or_stereo_mode; - } - } - } - if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) { - mode_check = bw_def_ok; - } - else { - mode_check = bw_def_notok; - } - /*number of memory channels for write-back client*/ - data->number_of_dram_wrchannels = vbios->number_of_dram_channels; - data->number_of_dram_channels = vbios->number_of_dram_channels; - /*modify number of memory channels if lpt mode is enabled*/ - /* low power tiling mode register*/ - /* 0 = use channel 0*/ - /* 1 = use channel 0 and 1*/ - /* 2 = use channel 0,1,2,3*/ - if ((fbc_enabled == 1 && lpt_enabled == 1)) { - if (vbios->memory_type == bw_def_hbm) - data->dram_efficiency = bw_frc_to_fixed(5, 10); - else - data->dram_efficiency = bw_int_to_fixed(1); - - - if (dceip->low_power_tiling_mode == 0) { - data->number_of_dram_channels = 1; - } - else if (dceip->low_power_tiling_mode == 1) { - data->number_of_dram_channels = 2; - } - else if (dceip->low_power_tiling_mode == 2) { - data->number_of_dram_channels = 4; - } - else { - data->number_of_dram_channels = 1; - } - } - else { - if (vbios->memory_type == bw_def_hbm) - data->dram_efficiency = bw_frc_to_fixed(5, 10); - else - data->dram_efficiency = bw_frc_to_fixed(8, 10); - } - /*memory request size and latency hiding:*/ - /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/ - /*the display write-back requests are single line*/ - /*for tiled graphics surfaces, or undelay surfaces with width higher than the maximum size for full efficiency, request size is 32 byte in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. 
only half is useful of the bytes in the request size in 8 bpp or in 32 bpp if the rotation is orthogonal to the tiling grain.*/ - /*for undelay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/ - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) { - if ((i < 4)) { - /*underlay portrait tiling mode is not supported*/ - data->orthogonal_rotation[i] = 1; - } - else { - /*graphics portrait tiling mode*/ - if (data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling) { - data->orthogonal_rotation[i] = 0; - } - else { - data->orthogonal_rotation[i] = 1; - } - } - } - else { - if ((i < 4)) { - /*underlay landscape tiling mode is only supported*/ - if (data->underlay_micro_tile_mode == bw_def_display_micro_tiling) { - data->orthogonal_rotation[i] = 0; - } - else { - data->orthogonal_rotation[i] = 1; - } - } - else { - /*graphics landscape tiling mode*/ - if (data->graphics_micro_tile_mode == bw_def_display_micro_tiling) { - data->orthogonal_rotation[i] = 0; - } - else { - data->orthogonal_rotation[i] = 1; - } - } - } - if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) { - data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling; - } - else { - data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling; - } - if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) { - data->bytes_per_request[i] = bw_int_to_fixed(64); - data->useful_bytes_per_request[i] = bw_int_to_fixed(64); - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1); - data->latency_hiding_lines[i] = bw_int_to_fixed(1); - } - else if (tiling_mode[i] == bw_def_linear) { - data->bytes_per_request[i] = bw_int_to_fixed(64); - data->useful_bytes_per_request[i] = bw_int_to_fixed(64); - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); - data->latency_hiding_lines[i] = bw_int_to_fixed(2); - } - else { - if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) { - switch (data->bytes_per_pixel[i]) { - case 8: - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); - data->latency_hiding_lines[i] = bw_int_to_fixed(2); - if (data->orthogonal_rotation[i]) { - data->bytes_per_request[i] = bw_int_to_fixed(32); - data->useful_bytes_per_request[i] = bw_int_to_fixed(32); - } - else { - data->bytes_per_request[i] = bw_int_to_fixed(64); - data->useful_bytes_per_request[i] = bw_int_to_fixed(64); - } - break; - case 4: - if (data->orthogonal_rotation[i]) { - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); - data->latency_hiding_lines[i] = bw_int_to_fixed(2); - data->bytes_per_request[i] = bw_int_to_fixed(32); - data->useful_bytes_per_request[i] = bw_int_to_fixed(16); - } - else { - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); - data->latency_hiding_lines[i] = bw_int_to_fixed(2); - data->bytes_per_request[i] = bw_int_to_fixed(64); - data->useful_bytes_per_request[i] = bw_int_to_fixed(64); 
- } - break; - case 2: - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); - data->latency_hiding_lines[i] = bw_int_to_fixed(2); - data->bytes_per_request[i] = bw_int_to_fixed(32); - data->useful_bytes_per_request[i] = bw_int_to_fixed(32); - break; - default: - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); - data->latency_hiding_lines[i] = bw_int_to_fixed(2); - data->bytes_per_request[i] = bw_int_to_fixed(32); - data->useful_bytes_per_request[i] = bw_int_to_fixed(16); - break; - } - } - else { - data->bytes_per_request[i] = bw_int_to_fixed(64); - data->useful_bytes_per_request[i] = bw_int_to_fixed(64); - if (data->orthogonal_rotation[i]) { - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8); - data->latency_hiding_lines[i] = bw_int_to_fixed(4); - } - else { - switch (data->bytes_per_pixel[i]) { - case 4: - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); - data->latency_hiding_lines[i] = bw_int_to_fixed(2); - break; - case 2: - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4); - data->latency_hiding_lines[i] = bw_int_to_fixed(4); - break; - default: - data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8); - data->latency_hiding_lines[i] = bw_int_to_fixed(4); - break; - } - } - } - } - } - } - /*requested peak bandwidth:*/ - /*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/ - /*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/ - /*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/ - /**/ - /*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/ - /*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/ - /**/ - /*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/ - /*rounded up to even and divided by the line times for initialization, which is normally three.*/ - /*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/ - /*rounded up to line pairs if not doing line buffer prefetching.*/ - /**/ - /*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/ - /*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/ - /**/ - /*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/ - /*vertical scale ratio and the number of vertical taps increased by one. 
add one more for possible odd line*/ - /*panning/bezel adjustment mode.*/ - /**/ - /*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/ - /*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/ - /*furthermore, there is only one line time for initialization.*/ - /**/ - /*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/ - /*the ceiling of the vertical scale ratio.*/ - /**/ - /*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.'*/ - /**/ - /*the horizontal blank and chunk granularity factor is indirectly used indicate the interval of time required to transfer the source pixels.*/ - /*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/ - /*it applies when the lines in per line out is not 2 or 4. it does not apply when there is a line buffer between the scl and blnd.*/ - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1)); - if (data->panning_and_bezel_adjustment == bw_def_any_lines) { - data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1)); - } - if (data->stereo_mode[i] == bw_def_top_bottom) { - data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4)); - } - if (data->stereo_mode[i] == bw_def_top_bottom) { - data->num_lines_at_frame_start = bw_int_to_fixed(1); - } - else { - data->num_lines_at_frame_start = bw_int_to_fixed(3); - } - if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) { - data->line_buffer_prefetch[i] = 0; - } - else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) { - data->line_buffer_prefetch[i] = 1; - } - else { - data->line_buffer_prefetch[i] = 0; - } - data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start); - if (data->line_buffer_prefetch[i] == 1) { - data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]); - } - else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) { - data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1); - } else if (bw_leq(data->vsr[i], - bw_frc_to_fixed(4, 3))) { - data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3)); - } else if (bw_leq(data->vsr[i], - bw_frc_to_fixed(6, 4))) { - data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4)); - } - else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) { - data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2); - } - else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) { - data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3); - } - else { - 
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4); - } - if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) { - data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1); - } - else { - data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2)))); - } - data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]); - data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]); - } - } - /*outstanding chunk request limit*/ - /*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/ - /*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/ - /**/ - /*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/ - /**/ - /*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/ - /*and underlay.*/ - /**/ - /*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/ - /*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/ - /*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/ - /*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/ - /*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/ - /*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/ - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) { - data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin; - } - else { - data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin; - } - } - if (data->fbc_en[i] == 1) { - max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin; - } - } - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - switch (surface_type[i]) { - case bw_def_display_write_back420_luma: - data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size); - break; - case bw_def_display_write_back420_chroma: - data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size); - break; - case bw_def_underlay420_luma: - data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size); - break; - case bw_def_underlay420_chroma: - data->data_buffer_size[i] = 
bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2)); - break; - case bw_def_underlay422:case bw_def_underlay444: - if (data->orthogonal_rotation[i] == 0) { - data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size); - } - else { - data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size)); - } - break; - default: - if (data->fbc_en[i] == 1) { - /*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/ - if (data->number_of_displays == 1) { - data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size))); - } - else { - data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size)); - } - } - else { - /*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/ - if (data->number_of_displays == 1) { - data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size))); - } - else { - data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size)); - } - } - break; - } - if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) { - data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024); - data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024); - } - else { - data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i])); - data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i])); - } - } - } - data->min_dmif_size_in_time = bw_int_to_fixed(9999); - data->min_mcifwr_size_in_time = bw_int_to_fixed(9999); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { - if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) { - data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]); - } - } - else { - if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) { - data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]); - } - } - } - } - data->total_requests_for_dmif_size = bw_int_to_fixed(0); - for (i 
= 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { - data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i])); - } - } - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) { - data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i])); - } - else { - data->adjusted_data_buffer_size[i] = data->data_buffer_size[i]; - } - } - } - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0) { - /*set maximum chunk limit if only one graphic pipe is enabled*/ - data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127); - } - else { - data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1)); - /*clamp maximum chunk limit in the graphic display pipe*/ - if (i >= 4) { - data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]); - } - } - } - } - /*outstanding pte request limit*/ - /*in tiling mode with no rotation the sg pte requests are 8 useful pt_es, the sg row height is the page height and the sg page width x height is 64x64 for 8bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/ - /*in tiling mode with rotation the sg pte requests are only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/ - /*in linear mode the pte requests are 8 useful pt_es, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/ - /*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/ - /*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/ - /*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/ - /*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/ - /*the pte requests per chunk is 256 divided by the scatter-gather page width and the useful pt_es per pte request*/ - if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) { - data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display; - } - else { - data->peak_pte_request_to_eviction_ratio_limiting = 
dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation; - } - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) { - if (tiling_mode[i] == bw_def_linear) { - data->useful_pte_per_pte_request = bw_int_to_fixed(8); - data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i])); - data->scatter_gather_page_height[i] = bw_int_to_fixed(1); - data->scatter_gather_pte_request_rows = bw_int_to_fixed(1); - data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode); - } - else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) { - data->useful_pte_per_pte_request = bw_int_to_fixed(8); - switch (data->bytes_per_pixel[i]) { - case 4: - data->scatter_gather_page_width[i] = bw_int_to_fixed(32); - data->scatter_gather_page_height[i] = bw_int_to_fixed(32); - break; - case 2: - data->scatter_gather_page_width[i] = bw_int_to_fixed(64); - data->scatter_gather_page_height[i] = bw_int_to_fixed(32); - break; - default: - data->scatter_gather_page_width[i] = bw_int_to_fixed(64); - data->scatter_gather_page_height[i] = bw_int_to_fixed(64); - break; - } - data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode); - data->scatter_gather_row_height = data->scatter_gather_page_height[i]; - } - else { - data->useful_pte_per_pte_request = bw_int_to_fixed(1); - switch (data->bytes_per_pixel[i]) { - case 4: - data->scatter_gather_page_width[i] = bw_int_to_fixed(32); - data->scatter_gather_page_height[i] = bw_int_to_fixed(32); - break; - case 2: - data->scatter_gather_page_width[i] = bw_int_to_fixed(32); - data->scatter_gather_page_height[i] = bw_int_to_fixed(64); - break; - default: - data->scatter_gather_page_width[i] = bw_int_to_fixed(64); - data->scatter_gather_page_height[i] = bw_int_to_fixed(64); - break; - } - data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode); - data->scatter_gather_row_height = data->scatter_gather_page_height[i]; - } - data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request); - data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]); - data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]); - if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) { - data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank; - } - else { - data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1)))); - } - } - } - /*pitch padding recommended for efficiency in linear mode*/ - /*in linear mode graphics or underlay with scatter gather, a 
pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/ - /*if that is the case it is recommended to pad the pitch by at least 256 pixels*/ - data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)); - - /*pixel transfer time*/ - /*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipe's data buffer size in memory in the time for data transfer*/ - /*for dmif, pte and cursor requests have to be included.*/ - /*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/ - /*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/ - /*the page close-open time is determined by trc and the number of page close-opens*/ - /*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/ - /*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/ - /*otherwise, the bytes page close-open is the chunk size because that is the arbitration slice.*/ - /*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/ - /*cursor requests outstanding are limited to a group of two source lines. 
each group costs a page close-open time for dmif reads*/ - /*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/ - /*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/ - /*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/ - /*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/ - /*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the fist pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/ - /*the source pixels for the first output pixel is 512 if the scaler vertical filter initialization value is greater than 2, and it is 4 times the source width if it is greater than 4.*/ - /*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/ - /*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/ - data->cursor_total_data = bw_int_to_fixed(0); - data->cursor_total_request_groups = bw_int_to_fixed(0); - data->scatter_gather_total_pte_requests = bw_int_to_fixed(0); - data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4))); - if (dceip->large_cursor == 1) { - data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1))); - } - else { - data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1))); - } - if (data->scatter_gather_enable_for_pipe[i]) { - data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]); - data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1))); - } - } - } - data->tile_width_in_pixels = bw_int_to_fixed(8); - data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0); - data->mcifwr_total_number_of_data_request_page_close_open = 
bw_int_to_fixed(0); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) { - data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i]))); - } - else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) { - data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice; - } - else { - data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i]; - } - if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { - data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open)); - } - else { - data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open)); - } - } - } - data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000)); - data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000)); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]); - } - } - data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { - data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i])); - } - } - } - data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1)); - data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips); - data->total_display_reads_required_data = bw_int_to_fixed(0); - data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0); - data->total_display_writes_required_data = bw_int_to_fixed(0); - data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if 
(surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { - data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i]; - /*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width. each*/ - /*pseudo-channel may be read independently of one another.*/ - /*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/ - /*the 64 or 32 byte sized data is stored in one pseudo-channel.*/ - /*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/ - /*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/ - /*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/ - /*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/ - /*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/ - /*the memory efficiency will be 50% for the 32 byte sized data.*/ - if (vbios->memory_type == bw_def_hbm) { - data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i]; - } - else { - data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1))); - } - data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data); - data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data); - } - else { - data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]); - data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1)))); - } - } - } - data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64))); - data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64))); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) { - data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]); - } - else { - if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) { - data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512); - } - else { - data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0); - } - } - data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]); - data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], 
bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i]))); - data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]); - data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]); - } - } - for (i = 0; i <= 2; i++) { - for (j = 0; j <= 7; j++) { - data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))))); - if (data->d1_display_write_back_dwb_enable == 1) { - data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width)))); - } - } - } - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - for (j = 0; j <= 2; j++) { - for (k = 0; k <= 7; k++) { - if (data->enable[i]) { - if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { - /*time to transfer data from the dmif buffer to the lb. since the mc to dmif transfer time overlaps*/ - /*with the dmif to lb transfer time, only time to transfer the last chunk is considered.*/ - data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor))))); - data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i])); - /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/ - /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/ - /*per memory channel. 
if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/ - /*immediately serviced without a gap in the urgent requests.*/ - /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/ - if (surface_type[i] == bw_def_graphics) { - switch (data->lb_bpc[i]) { - case 6: - data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component; - break; - case 8: - data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component; - break; - case 10: - data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component; - break; - default: - data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component; - break; - } - if (data->use_alpha[i] == 1) { - data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency); - } - } - else { - switch (data->lb_bpc[i]) { - case 6: - data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component; - break; - case 8: - data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component; - break; - case 10: - data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component; - break; - default: - data->v_scaler_efficiency = bw_int_to_fixed(3); - break; - } - } - if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) { - data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i])); - } - else { - data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1)))); - } - data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))))); - } - else { - data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])); - /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/ - /*after the mclk switch sequence and not incur an urgent latency penalty. 
it is assumed that the gmc/arb can hold up to 256 requests*/ - /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/ - /*immediately serviced without a gap in the urgent requests.*/ - /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/ - data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))); - } - } - } - } - } - /*cpu c-state and p-state change enable*/ - /*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/ - /*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/ - /*condition for the blackout duration:*/ - /* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/ - /*condition for the blackout recovery:*/ - /* recovery time > dmif burst time + 2 * urgent latency*/ - /* recovery time > (display bw * blackout duration + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/ - /* / (dispclk - display bw)*/ - /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/ - /*the minimum latency hiding is further limited by the cursor. 
the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/ - - /*initialize variables*/ - number_of_displays_enabled = 0; - number_of_displays_enabled_with_margin = 0; - for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { - if (data->enable[k]) { - number_of_displays_enabled = number_of_displays_enabled + 1; - } - data->display_pstate_change_enable[k] = 0; - } - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) { - if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) { - data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]); - } - else { - data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]); - } - } - else { - data->cursor_latency_hiding[i] = bw_int_to_fixed(9999); - } - } - } - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) { - if (number_of_displays_enabled > 2) - data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); - else - data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); - } - else { - data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); - } - data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]); - } - } - for (i = 0; i <= 2; i++) { - for (j = 0; j <= 7; j++) { - data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999); - data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0); - data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0); - for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { - if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) { - if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { - data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j])); - 
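As a rough illustration of the blackout-duration condition spelled out in the comments above (minimum latency hiding greater than blackout duration plus dmif burst time plus line source transfer time), the test reduces to the following plain-double sketch; the function name, parameters, and units are assumptions for illustration, not dce_calcs identifiers.

/* Sketch of the blackout-duration margin test described above, using plain
 * doubles instead of the driver's bw_fixed arithmetic.  All times are assumed
 * to be in the same unit (e.g. microseconds); names are illustrative only. */
static int blackout_duration_margin_positive(double min_latency_hiding,
					     double blackout_duration,
					     double dmif_burst_time,
					     double line_source_transfer_time)
{
	double margin = min_latency_hiding -
			(blackout_duration + dmif_burst_time +
			 line_source_transfer_time);

	/* cpu p-state change is only considered when this margin is positive */
	return margin > 0.0;
}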
data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k])))); - if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) { - data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999); - } - else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) { - data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k])); - } - } - else { - data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j])); - data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); - if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) { - data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999); - } - else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) { - data->dispclk_required_for_blackout_recovery[i][j] = 
bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k])); - } - } - } - } - } - } - if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) { - data->cpup_state_change_enable = bw_def_yes; - if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) { - data->cpuc_state_change_enable = bw_def_yes; - } - else { - data->cpuc_state_change_enable = bw_def_no; - } - } - else { - data->cpup_state_change_enable = bw_def_no; - data->cpuc_state_change_enable = bw_def_no; - } - /*nb p-state change enable*/ - /*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/ - /*below the maximum.*/ - /*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/ - /*minus the dmif burst time, minus the source line transfer time*/ - /*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency*/ - /*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/ - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) **/ - /* h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/ - data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], - bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency)); - data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]); - } - } - for (i = 0; i <= 2; i++) { - for (j = 0; j <= 7; j++) { - data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999); - data->dram_speed_change_margin = bw_int_to_fixed(9999); - data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0); - data->num_displays_with_margin[i][j] = 0; - for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { - if (data->enable[k]) { - if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { - data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]); - if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { - /*determine the minimum dram clock change margin for each set of clock frequencies*/ - data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); - /*compute the maximum clock frequuency required for the 
dram clock change at each set of clock frequencies*/ - data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k])))); - if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { - data->display_pstate_change_enable[k] = 1; - data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; - data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]); - } - } - } - else { - data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]); - if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { - /*determine the minimum dram clock change margin for each display pipe*/ - data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); - /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/ - data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); - if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { - data->display_pstate_change_enable[k] = 1; - data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; - data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]); - } - } - } - } - } - } - } - /*determine the number of displays with margin to switch in the v_active region*/ - for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { - if (data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1) { - number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 
1; - } - } - /*determine the number of displays that don't have any dram clock change margin, but*/ - /*have the same resolution. these displays can switch in a common vblank region if*/ - /*their frames are aligned.*/ - data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999); - for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { - if (data->enable[k]) { - if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { - data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]); - data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]); - } - else { - data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]); - data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]); - } - } - } - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - data->displays_with_same_mode[i] = bw_int_to_fixed(0); - if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) { - for (j = 0; j <= maximum_number_of_surfaces - 1; j++) { - if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) { - data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1)); - } - } - } - } - /*compute the maximum number of aligned displays with no margin*/ - number_of_aligned_displays_with_no_margin = 0; - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i])); - } - /*dram clock change is possible, if all displays have positive margin except for one display or a group of*/ - /*aligned displays with the same timing.*/ - /*the display(s) with the negative margin can be switched in the v_blank region while the other*/ - /*displays are in v_blank or v_active.*/ - if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) { - data->nbp_state_change_enable = bw_def_yes; - 
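Restating the rule above as a sketch: the dram clock (nbp state) change is allowed when every enabled display either has positive margin or belongs to the aligned, same-timing group that can switch in a shared vblank, and the dispclk needed at the high yclk/sclk level fits under the cap. Plain C types stand in for bw_fixed, and the parameter names are placeholders.

#include <stdbool.h>

/* Illustrative restatement of the dram-clock-change decision above; not the
 * dce_calcs implementation.  The 9999 sentinel plays the same "no margin
 * computed" role as in the surrounding code. */
static bool nbp_state_change_possible(int displays_with_margin,
				      int aligned_displays_no_margin,
				      int displays_enabled,
				      double min_dram_speed_change_margin,
				      double dispclk_required,
				      double high_voltage_max_dispclk)
{
	if (displays_with_margin <= 0)
		return false;
	/* the only displays allowed to lack margin are the aligned ones that
	 * can switch together in a common vblank region */
	if (displays_with_margin + aligned_displays_no_margin != displays_enabled)
		return false;
	return min_dram_speed_change_margin > 0.0 &&
	       min_dram_speed_change_margin < 9999.0 &&
	       dispclk_required < high_voltage_max_dispclk;
}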
} - else { - data->nbp_state_change_enable = bw_def_no; - } - /*dram clock change is possible only in vblank if all displays are aligned and have no margin*/ - if (number_of_aligned_displays_with_no_margin == number_of_displays_enabled) { - nbp_state_change_enable_blank = bw_def_yes; - } - else { - nbp_state_change_enable_blank = bw_def_no; - } - - /*average bandwidth*/ - /*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/ - /*the average bandwidth with compression is the same, divided by the compression ratio*/ - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]); - data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]); - } - } - data->total_average_bandwidth_no_compression = bw_int_to_fixed(0); - data->total_average_bandwidth = bw_int_to_fixed(0); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]); - data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]); - } - } - - /*required yclk(pclk)*/ - /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queque size*/ - /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/ - /*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/ - data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999); - /* number of cursor lines stored in the cursor data return buffer*/ - num_cursor_lines = 0; - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) { - /*compute number of cursor lines stored in data return buffer*/ - if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) { - num_cursor_lines = 4; - } - else { - num_cursor_lines = 2; - } - data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i])); - } - } - } - /*compute minimum time to read one chunk from the dmif buffer*/ - if (number_of_displays_enabled > 2) { - data->chunk_request_delay = 0; - } - else { - data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk)); - } - data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time); - data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay)); - 
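The per-surface average bandwidth described in the comment above, and the peak dram bandwidth that the yclk selection below compares it against, can be written out as a back-of-the-envelope sketch with plain doubles; the units and helper names are assumptions (the driver keeps everything in bw_fixed).

/* Average read bandwidth of one surface, per the comment above: source width
 * times bytes per pixel over the line time, scaled by the vertical scale
 * ratio and the request-efficiency ratio, then divided by the compression
 * rate (1.0 when compression is off). */
static double surface_average_bandwidth(double src_width_px, double bytes_per_pixel,
					double h_total_px, double pixel_rate,
					double vsr, double bytes_per_request,
					double useful_bytes_per_request,
					double compression_rate)
{
	double line_time = h_total_px / pixel_rate;

	return (src_width_px * bytes_per_pixel / line_time) * vsr *
	       (bytes_per_request / useful_bytes_per_request) / compression_rate;
}

/* Peak dram bandwidth at a given yclk, matching the expression used in the
 * yclk selection below: efficiency * yclk * channel width in bytes * channels. */
static double peak_dram_bandwidth(double dram_efficiency, double yclk,
				  double dram_channel_width_in_bits,
				  double number_of_dram_channels)
{
	return dram_efficiency * yclk * (dram_channel_width_in_bits / 8.0) *
	       number_of_dram_channels;
}

The selection logic that follows then walks from the low to the high yclk level and picks the first one whose peak bandwidth covers both the required dram bandwidth and the average-bandwidth ceiling, falling back to the high level when no level qualifies.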
data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency); - data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer); - data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer); - data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips); - data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time); - if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) { - data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999); - yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size; - data->y_clk_level = high; - data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); - } - else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) { - data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999); - yclk_message = bw_def_exceeded_allowed_page_close_open; - data->y_clk_level = high; - data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); - } - else { - data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000)); - if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels))) - && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == 
number_of_displays_enabled_with_margin))) { - yclk_message = bw_fixed_to_int(vbios->low_yclk); - data->y_clk_level = low; - data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); - } - else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels))) - && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) { - yclk_message = bw_fixed_to_int(vbios->mid_yclk); - data->y_clk_level = mid; - data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); - } - else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels))) - && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) { - yclk_message = bw_fixed_to_int(vbios->high_yclk); - data->y_clk_level = high; - data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); - } - else { - yclk_message = bw_def_exceeded_allowed_maximum_bw; - data->y_clk_level = high; - data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); - } - } - /*required sclk*/ - /*sclk requirement only makes sense if the total pte 
requests fit in the scatter-gather saw queque size*/ - /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in foresaking dram speed/nb p-state change.*/ - /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/ - /*for dmif, pte and cursor requests have to be included.*/ - data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))); - data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width); - if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) { - data->required_sclk = bw_int_to_fixed(9999); - sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size; - data->sclk_level = s_high; - } - else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) { - data->required_sclk = bw_int_to_fixed(9999); - sclk_message = bw_def_exceeded_allowed_page_close_open; - data->sclk_level = s_high; - } - else { - data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk); - if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[low]),vbios->data_return_bus_width)) - && bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) { - sclk_message = bw_def_low; - data->sclk_level = s_low; - data->required_sclk = vbios->low_sclk; - } - else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[mid]),vbios->data_return_bus_width)) - && bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && 
bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) { - sclk_message = bw_def_mid; - data->sclk_level = s_mid1; - data->required_sclk = vbios->mid1_sclk; - } - else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width)) - && bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) { - sclk_message = bw_def_mid; - data->sclk_level = s_mid2; - data->required_sclk = vbios->mid2_sclk; - } - else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width)) - && bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && 
(!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) { - sclk_message = bw_def_mid; - data->sclk_level = s_mid3; - data->required_sclk = vbios->mid3_sclk; - } - else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width)) - && bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) { - sclk_message = bw_def_mid; - data->sclk_level = s_mid4; - data->required_sclk = vbios->mid4_sclk; - } - else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width)) - && bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) { - 
sclk_message = bw_def_mid; - data->sclk_level = s_mid5; - data->required_sclk = vbios->mid5_sclk; - } - else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width)) - && bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) { - sclk_message = bw_def_mid; - data->sclk_level = s_mid6; - data->required_sclk = vbios->mid6_sclk; - } - else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width)) - && bw_ltn(data->required_sclk, sclk[s_high])) { - sclk_message = bw_def_high; - data->sclk_level = s_high; - data->required_sclk = vbios->high_sclk; - } - else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width)) - && bw_ltn(data->required_sclk, sclk[s_high])) { - sclk_message = bw_def_high; - data->sclk_level = s_high; - data->required_sclk = vbios->high_sclk; - } - else { - sclk_message = bw_def_exceeded_allowed_maximum_sclk; - data->sclk_level = s_high; - /*required_sclk = high_sclk*/ - } - } - /*dispclk*/ - /*if dispclk is set to the maximum, ramping is not required. 
dispclk required without ramping is less than the dispclk required with ramping.*/ - /*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/ - /*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/ - /*if that does not happen either, dispclk required is the dispclk required with ramping.*/ - /*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request thrrougput and for dram/np p-state change if enabled.*/ - /*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/ - /*the horizontal blank and chunk granularity factor is the ratio of the line time divided by the line time minus half the horizontal blank and chunk time. it applies when the lines in per line out is not 2 or 4.*/ - /*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/ - /*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/ - /*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/ - /*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/ - /*the scaling limits factor itself it also clamped to at least 1*/ - /*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/ - data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100))); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (surface_type[i] == bw_def_graphics) { - switch (data->lb_bpc[i]) { - case 6: - data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component; - break; - case 8: - data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component; - break; - case 10: - data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component; - break; - default: - data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component; - break; - } - if (data->use_alpha[i] == 1) { - data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency); - } - } - else { - switch (data->lb_bpc[i]) { - case 6: - data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component; - break; - case 8: - data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component; - break; - case 10: - data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component; - break; - default: - data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component; - break; - } - } - if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) { - 
data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i])); - } - else { - data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1)))); - } - data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk); - data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput))); - data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput)); - } - } - data->total_dispclk_required_with_ramping = bw_int_to_fixed(0); - data->total_dispclk_required_without_ramping = bw_int_to_fixed(0); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) { - data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i]; - } - if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) { - data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i]; - } - } - } - data->total_read_request_bandwidth = bw_int_to_fixed(0); - data->total_write_request_bandwidth = bw_int_to_fixed(0); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { - data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]); - } - else { - data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]); - } - } - } - data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency); - data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth); - data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth); - if (data->cpuc_state_change_enable == bw_def_yes) { - data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]); - data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], 
data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]); - } - if (data->cpup_state_change_enable == bw_def_yes) { - data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); - data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); - } - if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) { - data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); - data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); - } - if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) { - data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth; - } - else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) { - data->dispclk = vbios->high_voltage_max_dispclk; - } - else { - data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth; - } - /* required core voltage*/ - /* the core voltage required is low if sclk, yclk(pclk)and dispclk are within the low limits*/ - /* otherwise, the core voltage required is medium if yclk (pclk) is within the low limit and sclk and dispclk are within the medium limit*/ - /* otherwise, the core voltage required is high if the three clocks are within the high limits*/ - /* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/ - if (pipe_check == bw_def_notok) { - voltage = bw_def_na; - } - else if (mode_check == bw_def_notok) { - voltage = bw_def_notok; - } - else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) { - voltage = bw_def_0_72; - } - else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) { - voltage = bw_def_0_8; - } - else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) { - if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) { - voltage = bw_def_high_no_nbp_state_change; - } - else { - voltage = bw_def_0_9; - } - } - else { - voltage = bw_def_notok; - } - if (voltage == bw_def_0_72) { - data->max_phyclk = vbios->low_voltage_max_phyclk; - } - else if (voltage == bw_def_0_8) { - data->max_phyclk = vbios->mid_voltage_max_phyclk; - } - else { - data->max_phyclk = vbios->high_voltage_max_phyclk; - } - /*required blackout recovery 
time*/ - data->blackout_recovery_time = bw_int_to_fixed(0); - for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { - if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) { - if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { - data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])); - if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) { - data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]))))); - } - } - else { - data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])); - if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) { - data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]))))); - } - } - } - } - /*sclk deep sleep*/ - /*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but shoudl not be set to less than the request bandwidth.*/ - /*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the 
parallel rotation underlay,*/ - /*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/ - /*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8bbp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/ - /*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/ - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) { - data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16); - } - else if (surface_type[i] == bw_def_graphics) { - data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i])); - } - else if (data->orthogonal_rotation[i] == 0) { - data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16); - } - else { - data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i])); - } - } - } - data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) { - data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i]; - } - } - } - data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth); - /*urgent, stutter and nb-p_state watermark*/ - /*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/ - /*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel. 
it does not apply to the writeback.*/ - /*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/ - /*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/ - /*blackout_duration is added to the urgent watermark*/ - data->chunk_request_time = bw_int_to_fixed(0); - data->cursor_request_time = bw_int_to_fixed(0); - /*compute total time to request one chunk from each active display pipe*/ - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2)))))); - } - } - /*compute total time to request cursor data*/ - data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level])))); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i])); - if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { - data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time); - data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]); - data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]); - /*unconditionally remove black out time from the nb p_state watermark*/ - if (data->display_pstate_change_enable[i] == 1) { - data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level])); - } - else { - /*maximize the watermark to force the switch in the vb_lank region of the frame*/ - data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000); - } - } - else { - data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time); - 
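/*
 * Editor's sketch, not part of the patch: the urgent watermark assignments
 * above (both the DMIF read case and the MCIFWR write-back case just before
 * this point) are a straight sum of the urgent latency, the burst time, the
 * worse of the two line-source transfer terms, the blackout duration and the
 * per-frame chunk/cursor request overheads.  The helper below restates that
 * composition with plain doubles in microseconds; all names are illustrative
 * and this is not the driver's bw_fixed arithmetic.
 */
static double sketch_urgent_watermark_us(double urgent_latency_us,
					 double burst_time_us,
					 double line_src_pixel_xfer_us,
					 double line_src_xfer_us,
					 double blackout_us,
					 double chunk_request_us,
					 double cursor_request_us)
{
	/* take the worse of the two pixel-transfer terms, as the code does */
	double xfer = line_src_pixel_xfer_us > line_src_xfer_us ?
		      line_src_pixel_xfer_us : line_src_xfer_us;

	return urgent_latency_us + burst_time_us + xfer +
	       blackout_us + chunk_request_us + cursor_request_us;
}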
data->stutter_exit_watermark[i] = bw_int_to_fixed(0); - data->stutter_entry_watermark[i] = bw_int_to_fixed(0); - if (data->display_pstate_change_enable[i] == 1) { - data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level])); - } - else { - /*maximize the watermark to force the switch in the vb_lank region of the frame*/ - data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000); - } - } - } - } - /*stutter mode enable*/ - /*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the*/ - /*display pipe.*/ - data->stutter_mode_enable = data->cpuc_state_change_enable; - if (data->number_of_displays > 1) { - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) { - data->stutter_mode_enable = bw_def_no; - } - } - } - } - /*performance metrics*/ - /* display read access efficiency (%)*/ - /* display write back access efficiency (%)*/ - /* stutter efficiency (%)*/ - /* extra underlay pitch recommended for efficiency (pixels)*/ - /* immediate flip time (us)*/ - /* latency for other clients due to urgent display read (us)*/ - /* latency for other clients due to urgent display write (us)*/ - /* average bandwidth consumed by display (no compression) (gb/s)*/ - /* required dram bandwidth (gb/s)*/ - /* required sclk (m_hz)*/ - /* required rd urgent latency (us)*/ - /* nb p-state change margin (us)*/ - /*dmif and mcifwr dram access efficiency*/ - /*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time. but it cannot exceed the dram efficiency provided by the memory subsystem*/ - data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1)); - if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) { - data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1)); - } - else { - data->mcifwrdram_access_efficiency = bw_int_to_fixed(0); - } - /*stutter efficiency*/ - /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. 
only applies if the display write-back is not enabled.*/ - /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/ - /*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/ - /*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/ - /*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/ - /*compute the time read all the data from the dmif buffer to the lb (dram refresh period)*/ - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i])))); - data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]); - } - } - data->min_stutter_refresh_duration = bw_int_to_fixed(9999); - data->total_stutter_dmif_buffer_size = 0; - data->total_bytes_requested = 0; - data->min_stutter_dmif_buffer_size = 9999; - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) { - data->min_stutter_refresh_duration = data->stutter_refresh_duration[i]; - data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i]))))); - data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]); - } - data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size))); - } - } - data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width)); - data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size; - data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time); - data->time_in_self_refresh = data->min_stutter_refresh_duration; - if (data->d1_display_write_back_dwb_enable == 1) { - data->stutter_efficiency = bw_int_to_fixed(0); - } - else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) { - data->stutter_efficiency = bw_int_to_fixed(0); - } - else { - /*compute stutter efficiency assuming 60 hz refresh rate*/ - data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100))); - } - /*immediate flip time*/ - /*if scatter 
gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/ - /*otherwise, it may take just one urgenr memory trip*/ - data->worst_number_of_trips_to_memory = bw_int_to_fixed(1); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) { - data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1)); - if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) { - data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i]; - } - } - } - data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency); - /*worst latency for other clients*/ - /*it is the urgent latency plus the urgent burst time*/ - data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]); - if (data->d1_display_write_back_dwb_enable == 1) { - data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time); - } - else { - data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0); - } - /*dmif mc urgent latency supported in high sclk and yclk*/ - data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips); - /*dram speed/p-state change margin*/ - /*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/ - data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999); - data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999); - for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { - if (data->enable[i]) { - data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency)); - data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency)); - } - } - /*sclk required vs urgent latency*/ - for (i = 1; i <= 5; i++) { - data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i))); - if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) { - data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))); - } - else { - data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na); - } - } - /*output link bit per pixel 
supported*/ - for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { - data->output_bpphdmi[k] = bw_def_na; - data->output_bppdp4_lane_hbr[k] = bw_def_na; - data->output_bppdp4_lane_hbr2[k] = bw_def_na; - data->output_bppdp4_lane_hbr3[k] = bw_def_na; - if (data->enable[k]) { - data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24))); - if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) { - data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8))); - } - if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) { - data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8))); - } - if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) { - data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8))); - } - } - } - - kfree(surface_type); -free_tiling_mode: - kfree(tiling_mode); -free_yclk: - kfree(yclk); -free_sclk: - kfree(sclk); -} - -/******************************************************************************* - * Public functions - ******************************************************************************/ -void bw_calcs_init(struct bw_calcs_dceip *bw_dceip, - struct bw_calcs_vbios *bw_vbios, - struct hw_asic_id asic_id) -{ - struct bw_calcs_dceip *dceip; - struct bw_calcs_vbios *vbios; - - enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id); - - dceip = kzalloc(sizeof(*dceip), GFP_KERNEL); - if (!dceip) - return; - - vbios = kzalloc(sizeof(*vbios), GFP_KERNEL); - if (!vbios) { - kfree(dceip); - return; - } - - dceip->version = version; - - switch (version) { - case BW_CALCS_VERSION_CARRIZO: - vbios->memory_type = bw_def_gddr5; - vbios->dram_channel_width_in_bits = 64; - vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; - vbios->number_of_dram_banks = 8; - vbios->high_yclk = bw_int_to_fixed(1600); - vbios->mid_yclk = bw_int_to_fixed(1600); - vbios->low_yclk = bw_frc_to_fixed(66666, 100); - vbios->low_sclk = bw_int_to_fixed(200); - vbios->mid1_sclk = bw_int_to_fixed(300); - vbios->mid2_sclk = bw_int_to_fixed(300); - vbios->mid3_sclk = bw_int_to_fixed(300); - vbios->mid4_sclk = bw_int_to_fixed(300); - vbios->mid5_sclk = bw_int_to_fixed(300); - vbios->mid6_sclk = bw_int_to_fixed(300); - vbios->high_sclk = bw_frc_to_fixed(62609, 100); - vbios->low_voltage_max_dispclk = bw_int_to_fixed(352); - vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467); - vbios->high_voltage_max_dispclk = bw_int_to_fixed(643); - vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); - vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->data_return_bus_width = bw_int_to_fixed(32); - vbios->trc = bw_int_to_fixed(50); - vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); - vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10); - vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); - vbios->nbp_state_change_latency = bw_frc_to_fixed(19649, 1000); - vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); - vbios->scatter_gather_enable = true; - vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); - vbios->cursor_width = 32; - vbios->average_compression_rate = 4; - 
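/*
 * Editor's sketch, not part of the patch: the per-ASIC vbios table being
 * filled in here feeds the DRAM bandwidth term used earlier in this file,
 * i.e. bandwidth = efficiency * yclk * (channel_width_in_bits / 8) * channels,
 * with the channel count derived from asic_id.vram_width divided by the
 * channel width.  Plain-double restatement with illustrative names.
 */
static double sketch_dram_bandwidth_mbps(double dram_efficiency,
					 double yclk_mhz,
					 int channel_width_bits,
					 int vram_width_bits)
{
	int channels = vram_width_bits / channel_width_bits;

	/*
	 * e.g. a 1600 MHz yclk on a single 64-bit channel gives
	 * 1600 * 8 = 12800 MB/s before the efficiency factor is applied.
	 */
	return dram_efficiency * yclk_mhz * (channel_width_bits / 8) * channels;
}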
vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; - vbios->blackout_duration = bw_int_to_fixed(0); /* us */ - vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); - - dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; - dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; - dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; - dceip->large_cursor = false; - dceip->dmif_request_buffer_size = bw_int_to_fixed(768); - dceip->dmif_pipe_en_fbc_chunk_tracker = false; - dceip->cursor_max_outstanding_group_num = 1; - dceip->lines_interleaved_into_lb = 2; - dceip->chunk_width = 256; - dceip->number_of_graphics_pipes = 3; - dceip->number_of_underlay_pipes = 1; - dceip->low_power_tiling_mode = 0; - dceip->display_write_back_supported = false; - dceip->argb_compression_support = false; - dceip->underlay_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35556, 10000); - dceip->underlay_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->underlay_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->underlay_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->graphics_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35, 10); - dceip->graphics_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->graphics_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->graphics_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); - dceip->max_dmif_buffer_allocated = 2; - dceip->graphics_dmif_size = 12288; - dceip->underlay_luma_dmif_size = 19456; - dceip->underlay_chroma_dmif_size = 23552; - dceip->pre_downscaler_enabled = true; - dceip->underlay_downscale_prefetch_enabled = true; - dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); - dceip->lb_size_per_component444 = bw_int_to_fixed(82176); - dceip->graphics_lb_nodownscaling_multi_line_prefetching = false; - dceip->stutter_and_dram_clock_state_change_gated_before_cursor = - bw_int_to_fixed(0); - dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->underlay420_chroma_lb_size_per_component = - bw_int_to_fixed(164352); - dceip->underlay422_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->cursor_chunk_width = bw_int_to_fixed(64); - dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); - dceip->underlay_maximum_width_efficient_for_tiling = - bw_int_to_fixed(1920); - dceip->underlay_maximum_height_efficient_for_tiling = - bw_int_to_fixed(1080); - dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = - bw_frc_to_fixed(3, 10); - dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = - bw_int_to_fixed(25); - dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( - 2); - dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = - bw_int_to_fixed(128); - dceip->limit_excessive_outstanding_dmif_requests = true; - dceip->linear_mode_line_request_alternation_slice = - bw_int_to_fixed(64); - dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = - 32; - dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; - dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; - dceip->request_efficiency = bw_frc_to_fixed(8, 10); - dceip->dispclk_per_request = bw_int_to_fixed(2); - dceip->dispclk_ramping_factor = 
bw_frc_to_fixed(105, 100); - dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); - dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; - dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/ - break; - case BW_CALCS_VERSION_POLARIS10: - /* TODO: Treat VEGAM the same as P10 for now - * Need to tune the para for VEGAM if needed */ - case BW_CALCS_VERSION_VEGAM: - vbios->memory_type = bw_def_gddr5; - vbios->dram_channel_width_in_bits = 32; - vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; - vbios->number_of_dram_banks = 8; - vbios->high_yclk = bw_int_to_fixed(6000); - vbios->mid_yclk = bw_int_to_fixed(3200); - vbios->low_yclk = bw_int_to_fixed(1000); - vbios->low_sclk = bw_int_to_fixed(300); - vbios->mid1_sclk = bw_int_to_fixed(400); - vbios->mid2_sclk = bw_int_to_fixed(500); - vbios->mid3_sclk = bw_int_to_fixed(600); - vbios->mid4_sclk = bw_int_to_fixed(700); - vbios->mid5_sclk = bw_int_to_fixed(800); - vbios->mid6_sclk = bw_int_to_fixed(974); - vbios->high_sclk = bw_int_to_fixed(1154); - vbios->low_voltage_max_dispclk = bw_int_to_fixed(459); - vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654); - vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108); - vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); - vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->data_return_bus_width = bw_int_to_fixed(32); - vbios->trc = bw_int_to_fixed(48); - vbios->dmifmc_urgent_latency = bw_int_to_fixed(3); - vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5); - vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); - vbios->nbp_state_change_latency = bw_int_to_fixed(45); - vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); - vbios->scatter_gather_enable = true; - vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); - vbios->cursor_width = 32; - vbios->average_compression_rate = 4; - vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; - vbios->blackout_duration = bw_int_to_fixed(0); /* us */ - vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); - - dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; - dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; - dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; - dceip->large_cursor = false; - dceip->dmif_request_buffer_size = bw_int_to_fixed(768); - dceip->dmif_pipe_en_fbc_chunk_tracker = false; - dceip->cursor_max_outstanding_group_num = 1; - dceip->lines_interleaved_into_lb = 2; - dceip->chunk_width = 256; - dceip->number_of_graphics_pipes = 6; - dceip->number_of_underlay_pipes = 0; - dceip->low_power_tiling_mode = 0; - dceip->display_write_back_supported = false; - dceip->argb_compression_support = true; - dceip->underlay_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35556, 10000); - dceip->underlay_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->underlay_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->underlay_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->graphics_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35, 10); - dceip->graphics_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->graphics_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - 
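/*
 * Editor's note, not part of the patch: the constants in these tables are
 * fixed-point fractions built as numerator/denominator, e.g.
 * bw_frc_to_fixed(34286, 10000) encodes roughly 3.4286 and
 * bw_frc_to_fixed(105, 100) encodes the 1.05 ramping/throughput factor,
 * matching the use of bw_frc_to_fixed(166666667, 10000) earlier in this
 * function as the 1/60 s frame period in microseconds.  The toy helper below
 * mirrors that convention with a plain double; it is an editor's
 * illustration, not the driver's bw_fixed implementation.
 */
static double sketch_frc_to_double(long long numerator, long long denominator)
{
	return (double)numerator / (double)denominator;
}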
dceip->graphics_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); - dceip->max_dmif_buffer_allocated = 4; - dceip->graphics_dmif_size = 12288; - dceip->underlay_luma_dmif_size = 19456; - dceip->underlay_chroma_dmif_size = 23552; - dceip->pre_downscaler_enabled = true; - dceip->underlay_downscale_prefetch_enabled = true; - dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); - dceip->lb_size_per_component444 = bw_int_to_fixed(245952); - dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; - dceip->stutter_and_dram_clock_state_change_gated_before_cursor = - bw_int_to_fixed(1); - dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->underlay420_chroma_lb_size_per_component = - bw_int_to_fixed(164352); - dceip->underlay422_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->cursor_chunk_width = bw_int_to_fixed(64); - dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); - dceip->underlay_maximum_width_efficient_for_tiling = - bw_int_to_fixed(1920); - dceip->underlay_maximum_height_efficient_for_tiling = - bw_int_to_fixed(1080); - dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = - bw_frc_to_fixed(3, 10); - dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = - bw_int_to_fixed(25); - dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( - 2); - dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = - bw_int_to_fixed(128); - dceip->limit_excessive_outstanding_dmif_requests = true; - dceip->linear_mode_line_request_alternation_slice = - bw_int_to_fixed(64); - dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = - 32; - dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; - dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; - dceip->request_efficiency = bw_frc_to_fixed(8, 10); - dceip->dispclk_per_request = bw_int_to_fixed(2); - dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); - dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); - dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; - dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); - break; - case BW_CALCS_VERSION_POLARIS11: - vbios->memory_type = bw_def_gddr5; - vbios->dram_channel_width_in_bits = 32; - vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; - vbios->number_of_dram_banks = 8; - vbios->high_yclk = bw_int_to_fixed(6000); - vbios->mid_yclk = bw_int_to_fixed(3200); - vbios->low_yclk = bw_int_to_fixed(1000); - vbios->low_sclk = bw_int_to_fixed(300); - vbios->mid1_sclk = bw_int_to_fixed(400); - vbios->mid2_sclk = bw_int_to_fixed(500); - vbios->mid3_sclk = bw_int_to_fixed(600); - vbios->mid4_sclk = bw_int_to_fixed(700); - vbios->mid5_sclk = bw_int_to_fixed(800); - vbios->mid6_sclk = bw_int_to_fixed(974); - vbios->high_sclk = bw_int_to_fixed(1154); - vbios->low_voltage_max_dispclk = bw_int_to_fixed(459); - vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654); - vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108); - vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); - vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->data_return_bus_width = bw_int_to_fixed(32); - vbios->trc = bw_int_to_fixed(48); - if (vbios->number_of_dram_channels == 2) // 64-bit - vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); - else - vbios->dmifmc_urgent_latency = 
bw_int_to_fixed(3); - vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5); - vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); - vbios->nbp_state_change_latency = bw_int_to_fixed(45); - vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); - vbios->scatter_gather_enable = true; - vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); - vbios->cursor_width = 32; - vbios->average_compression_rate = 4; - vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; - vbios->blackout_duration = bw_int_to_fixed(0); /* us */ - vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); - - dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; - dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; - dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; - dceip->large_cursor = false; - dceip->dmif_request_buffer_size = bw_int_to_fixed(768); - dceip->dmif_pipe_en_fbc_chunk_tracker = false; - dceip->cursor_max_outstanding_group_num = 1; - dceip->lines_interleaved_into_lb = 2; - dceip->chunk_width = 256; - dceip->number_of_graphics_pipes = 5; - dceip->number_of_underlay_pipes = 0; - dceip->low_power_tiling_mode = 0; - dceip->display_write_back_supported = false; - dceip->argb_compression_support = true; - dceip->underlay_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35556, 10000); - dceip->underlay_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->underlay_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->underlay_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->graphics_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35, 10); - dceip->graphics_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->graphics_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->graphics_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); - dceip->max_dmif_buffer_allocated = 4; - dceip->graphics_dmif_size = 12288; - dceip->underlay_luma_dmif_size = 19456; - dceip->underlay_chroma_dmif_size = 23552; - dceip->pre_downscaler_enabled = true; - dceip->underlay_downscale_prefetch_enabled = true; - dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); - dceip->lb_size_per_component444 = bw_int_to_fixed(245952); - dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; - dceip->stutter_and_dram_clock_state_change_gated_before_cursor = - bw_int_to_fixed(1); - dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->underlay420_chroma_lb_size_per_component = - bw_int_to_fixed(164352); - dceip->underlay422_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->cursor_chunk_width = bw_int_to_fixed(64); - dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); - dceip->underlay_maximum_width_efficient_for_tiling = - bw_int_to_fixed(1920); - dceip->underlay_maximum_height_efficient_for_tiling = - bw_int_to_fixed(1080); - dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = - bw_frc_to_fixed(3, 10); - dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = - bw_int_to_fixed(25); - dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( - 2); - dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = - bw_int_to_fixed(128); - 
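/*
 * Editor's sketch, not part of the patch: the 128-entry SAW limit set just
 * above is what the sclk selection earlier in this function checks first.
 * If the total number of outstanding scatter-gather PTE requests exceeds it,
 * the configuration is flagged as exceeding the allowed outstanding PTE
 * request queue size and required_sclk is forced to the 9999 sentinel before
 * any sclk level is considered.  Simplified restatement; names are
 * illustrative only.
 */
static int sketch_pte_queue_fits(double total_pte_requests,
				 double max_outstanding_allowed_by_saw)
{
	/* 1: sclk selection may proceed, 0: reject as queue size exceeded */
	return total_pte_requests <= max_outstanding_allowed_by_saw ? 1 : 0;
}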
dceip->limit_excessive_outstanding_dmif_requests = true; - dceip->linear_mode_line_request_alternation_slice = - bw_int_to_fixed(64); - dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = - 32; - dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; - dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; - dceip->request_efficiency = bw_frc_to_fixed(8, 10); - dceip->dispclk_per_request = bw_int_to_fixed(2); - dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); - dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); - dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; - dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); - break; - case BW_CALCS_VERSION_POLARIS12: - vbios->memory_type = bw_def_gddr5; - vbios->dram_channel_width_in_bits = 32; - vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; - vbios->number_of_dram_banks = 8; - vbios->high_yclk = bw_int_to_fixed(6000); - vbios->mid_yclk = bw_int_to_fixed(3200); - vbios->low_yclk = bw_int_to_fixed(1000); - vbios->low_sclk = bw_int_to_fixed(678); - vbios->mid1_sclk = bw_int_to_fixed(864); - vbios->mid2_sclk = bw_int_to_fixed(900); - vbios->mid3_sclk = bw_int_to_fixed(920); - vbios->mid4_sclk = bw_int_to_fixed(940); - vbios->mid5_sclk = bw_int_to_fixed(960); - vbios->mid6_sclk = bw_int_to_fixed(980); - vbios->high_sclk = bw_int_to_fixed(1049); - vbios->low_voltage_max_dispclk = bw_int_to_fixed(459); - vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654); - vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108); - vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); - vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->data_return_bus_width = bw_int_to_fixed(32); - vbios->trc = bw_int_to_fixed(48); - if (vbios->number_of_dram_channels == 2) // 64-bit - vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); - else - vbios->dmifmc_urgent_latency = bw_int_to_fixed(3); - vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5); - vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); - vbios->nbp_state_change_latency = bw_int_to_fixed(250); - vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); - vbios->scatter_gather_enable = false; - vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); - vbios->cursor_width = 32; - vbios->average_compression_rate = 4; - vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; - vbios->blackout_duration = bw_int_to_fixed(0); /* us */ - vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); - - dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; - dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; - dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; - dceip->large_cursor = false; - dceip->dmif_request_buffer_size = bw_int_to_fixed(768); - dceip->dmif_pipe_en_fbc_chunk_tracker = false; - dceip->cursor_max_outstanding_group_num = 1; - dceip->lines_interleaved_into_lb = 2; - dceip->chunk_width = 256; - dceip->number_of_graphics_pipes = 5; - dceip->number_of_underlay_pipes = 0; - dceip->low_power_tiling_mode = 0; - dceip->display_write_back_supported = true; - dceip->argb_compression_support = true; - dceip->underlay_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35556, 10000); - dceip->underlay_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - 
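The Polaris11 and Polaris12 blocks above derive the DRAM channel count from the board's VRAM interface width (asic_id.vram_width / dram_channel_width_in_bits) and then raise dmifmc_urgent_latency from 3 to 4 when only two 32-bit channels (a 64-bit board) are present. A minimal standalone sketch of that arithmetic follows; the struct and helper names are illustrative only, not the driver's own types, and plain integers stand in for bw_fixed.

#include <stdio.h>

/* Illustrative only: mirrors the vbios fields used above, not the
 * kernel's struct bw_calcs_vbios. */
struct example_vbios {
	unsigned int dram_channel_width_in_bits;
	unsigned int number_of_dram_channels;
	unsigned int dmifmc_urgent_latency;	/* plain int here instead of bw_fixed */
};

static void example_init(struct example_vbios *vbios, unsigned int vram_width)
{
	/* Polaris uses 32-bit GDDR5 channels: channel count = interface width / 32. */
	vbios->dram_channel_width_in_bits = 32;
	vbios->number_of_dram_channels = vram_width / vbios->dram_channel_width_in_bits;

	/* A 64-bit board has only two channels, so urgent latency is padded to 4. */
	if (vbios->number_of_dram_channels == 2)	/* 64-bit */
		vbios->dmifmc_urgent_latency = 4;
	else
		vbios->dmifmc_urgent_latency = 3;
}

int main(void)
{
	struct example_vbios narrow, wide;

	example_init(&narrow, 64);	/* 64-bit board:  2 channels, latency 4 */
	example_init(&wide, 256);	/* 256-bit board: 8 channels, latency 3 */

	printf("64-bit:  %u channels, urgent latency %u\n",
	       narrow.number_of_dram_channels, narrow.dmifmc_urgent_latency);
	printf("256-bit: %u channels, urgent latency %u\n",
	       wide.number_of_dram_channels, wide.dmifmc_urgent_latency);
	return 0;
}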
dceip->underlay_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->underlay_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->graphics_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35, 10); - dceip->graphics_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->graphics_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->graphics_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); - dceip->max_dmif_buffer_allocated = 4; - dceip->graphics_dmif_size = 12288; - dceip->underlay_luma_dmif_size = 19456; - dceip->underlay_chroma_dmif_size = 23552; - dceip->pre_downscaler_enabled = true; - dceip->underlay_downscale_prefetch_enabled = true; - dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); - dceip->lb_size_per_component444 = bw_int_to_fixed(245952); - dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; - dceip->stutter_and_dram_clock_state_change_gated_before_cursor = - bw_int_to_fixed(1); - dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->underlay420_chroma_lb_size_per_component = - bw_int_to_fixed(164352); - dceip->underlay422_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->cursor_chunk_width = bw_int_to_fixed(64); - dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); - dceip->underlay_maximum_width_efficient_for_tiling = - bw_int_to_fixed(1920); - dceip->underlay_maximum_height_efficient_for_tiling = - bw_int_to_fixed(1080); - dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = - bw_frc_to_fixed(3, 10); - dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = - bw_int_to_fixed(25); - dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( - 2); - dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = - bw_int_to_fixed(128); - dceip->limit_excessive_outstanding_dmif_requests = true; - dceip->linear_mode_line_request_alternation_slice = - bw_int_to_fixed(64); - dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = - 32; - dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; - dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; - dceip->request_efficiency = bw_frc_to_fixed(8, 10); - dceip->dispclk_per_request = bw_int_to_fixed(2); - dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); - dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); - dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; - dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); - break; - case BW_CALCS_VERSION_STONEY: - vbios->memory_type = bw_def_gddr5; - vbios->dram_channel_width_in_bits = 64; - vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; - vbios->number_of_dram_banks = 8; - vbios->high_yclk = bw_int_to_fixed(1866); - vbios->mid_yclk = bw_int_to_fixed(1866); - vbios->low_yclk = bw_int_to_fixed(1333); - vbios->low_sclk = bw_int_to_fixed(200); - vbios->mid1_sclk = bw_int_to_fixed(600); - vbios->mid2_sclk = bw_int_to_fixed(600); - vbios->mid3_sclk = bw_int_to_fixed(600); - vbios->mid4_sclk = bw_int_to_fixed(600); - vbios->mid5_sclk = bw_int_to_fixed(600); - vbios->mid6_sclk = bw_int_to_fixed(600); - vbios->high_sclk = bw_int_to_fixed(800); - vbios->low_voltage_max_dispclk = bw_int_to_fixed(352); - vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467); - vbios->high_voltage_max_dispclk = 
bw_int_to_fixed(643); - vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); - vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->data_return_bus_width = bw_int_to_fixed(32); - vbios->trc = bw_int_to_fixed(50); - vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); - vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10); - vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); - vbios->nbp_state_change_latency = bw_frc_to_fixed(2008, 100); - vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); - vbios->scatter_gather_enable = true; - vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); - vbios->cursor_width = 32; - vbios->average_compression_rate = 4; - vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; - vbios->blackout_duration = bw_int_to_fixed(0); /* us */ - vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); - - dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; - dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; - dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; - dceip->large_cursor = false; - dceip->dmif_request_buffer_size = bw_int_to_fixed(768); - dceip->dmif_pipe_en_fbc_chunk_tracker = false; - dceip->cursor_max_outstanding_group_num = 1; - dceip->lines_interleaved_into_lb = 2; - dceip->chunk_width = 256; - dceip->number_of_graphics_pipes = 2; - dceip->number_of_underlay_pipes = 1; - dceip->low_power_tiling_mode = 0; - dceip->display_write_back_supported = false; - dceip->argb_compression_support = true; - dceip->underlay_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35556, 10000); - dceip->underlay_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->underlay_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->underlay_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->graphics_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35, 10); - dceip->graphics_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->graphics_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->graphics_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); - dceip->max_dmif_buffer_allocated = 2; - dceip->graphics_dmif_size = 12288; - dceip->underlay_luma_dmif_size = 19456; - dceip->underlay_chroma_dmif_size = 23552; - dceip->pre_downscaler_enabled = true; - dceip->underlay_downscale_prefetch_enabled = true; - dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); - dceip->lb_size_per_component444 = bw_int_to_fixed(82176); - dceip->graphics_lb_nodownscaling_multi_line_prefetching = false; - dceip->stutter_and_dram_clock_state_change_gated_before_cursor = - bw_int_to_fixed(0); - dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->underlay420_chroma_lb_size_per_component = - bw_int_to_fixed(164352); - dceip->underlay422_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->cursor_chunk_width = bw_int_to_fixed(64); - dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); - dceip->underlay_maximum_width_efficient_for_tiling = - bw_int_to_fixed(1920); - dceip->underlay_maximum_height_efficient_for_tiling = - bw_int_to_fixed(1080); - dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = - 
bw_frc_to_fixed(3, 10); - dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = - bw_int_to_fixed(25); - dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( - 2); - dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = - bw_int_to_fixed(128); - dceip->limit_excessive_outstanding_dmif_requests = true; - dceip->linear_mode_line_request_alternation_slice = - bw_int_to_fixed(64); - dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = - 32; - dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; - dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; - dceip->request_efficiency = bw_frc_to_fixed(8, 10); - dceip->dispclk_per_request = bw_int_to_fixed(2); - dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); - dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); - dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; - dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); - break; - case BW_CALCS_VERSION_VEGA10: - vbios->memory_type = bw_def_hbm; - vbios->dram_channel_width_in_bits = 128; - vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; - vbios->number_of_dram_banks = 16; - vbios->high_yclk = bw_int_to_fixed(2400); - vbios->mid_yclk = bw_int_to_fixed(1700); - vbios->low_yclk = bw_int_to_fixed(1000); - vbios->low_sclk = bw_int_to_fixed(300); - vbios->mid1_sclk = bw_int_to_fixed(350); - vbios->mid2_sclk = bw_int_to_fixed(400); - vbios->mid3_sclk = bw_int_to_fixed(500); - vbios->mid4_sclk = bw_int_to_fixed(600); - vbios->mid5_sclk = bw_int_to_fixed(700); - vbios->mid6_sclk = bw_int_to_fixed(760); - vbios->high_sclk = bw_int_to_fixed(776); - vbios->low_voltage_max_dispclk = bw_int_to_fixed(460); - vbios->mid_voltage_max_dispclk = bw_int_to_fixed(670); - vbios->high_voltage_max_dispclk = bw_int_to_fixed(1133); - vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); - vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); - vbios->data_return_bus_width = bw_int_to_fixed(32); - vbios->trc = bw_int_to_fixed(48); - vbios->dmifmc_urgent_latency = bw_int_to_fixed(3); - vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10); - vbios->stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10); - vbios->nbp_state_change_latency = bw_int_to_fixed(39); - vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); - vbios->scatter_gather_enable = false; - vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); - vbios->cursor_width = 32; - vbios->average_compression_rate = 4; - vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8; - vbios->blackout_duration = bw_int_to_fixed(0); /* us */ - vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); - - dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; - dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; - dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; - dceip->large_cursor = false; - dceip->dmif_request_buffer_size = bw_int_to_fixed(2304); - dceip->dmif_pipe_en_fbc_chunk_tracker = true; - dceip->cursor_max_outstanding_group_num = 1; - dceip->lines_interleaved_into_lb = 2; - dceip->chunk_width = 256; - dceip->number_of_graphics_pipes = 6; - dceip->number_of_underlay_pipes = 0; - dceip->low_power_tiling_mode = 0; - dceip->display_write_back_supported = true; - dceip->argb_compression_support = true; - 
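Nearly every table entry above goes through bw_int_to_fixed() or bw_frc_to_fixed(numerator, denominator); for example bw_frc_to_fixed(34286, 10000) stands for roughly 3.4286 and bw_frc_to_fixed(105, 100) for a 1.05 factor. The toy encoder below illustrates that idea with an arbitrary 32.32 fixed-point layout; it is a sketch of the concept only, not the kernel's bw_fixed implementation.

#include <stdio.h>
#include <stdint.h>

/* Toy 32.32 fixed-point value, chosen for the example; the real
 * bw_fixed type may use a different layout. */
typedef struct { int64_t value; } toy_fixed;

#define TOY_FRAC_BITS 32

static toy_fixed toy_int_to_fixed(int64_t i)
{
	toy_fixed f = { .value = i << TOY_FRAC_BITS };
	return f;
}

static toy_fixed toy_frc_to_fixed(int64_t num, int64_t den)
{
	toy_fixed f = { .value = (num << TOY_FRAC_BITS) / den };
	return f;
}

static double toy_to_double(toy_fixed f)
{
	return (double)f.value / (double)(1ULL << TOY_FRAC_BITS);
}

int main(void)
{
	/* The vscaler efficiency entries above encode 3.4286 and 3.2 this way. */
	printf("frc(34286, 10000) ~= %f\n", toy_to_double(toy_frc_to_fixed(34286, 10000)));
	printf("frc(32, 10)       ~= %f\n", toy_to_double(toy_frc_to_fixed(32, 10)));
	printf("int(3)             = %f\n", toy_to_double(toy_int_to_fixed(3)));
	return 0;
}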
dceip->underlay_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35556, 10000); - dceip->underlay_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->underlay_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->underlay_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->graphics_vscaler_efficiency6_bit_per_component = - bw_frc_to_fixed(35, 10); - dceip->graphics_vscaler_efficiency8_bit_per_component = - bw_frc_to_fixed(34286, 10000); - dceip->graphics_vscaler_efficiency10_bit_per_component = - bw_frc_to_fixed(32, 10); - dceip->graphics_vscaler_efficiency12_bit_per_component = - bw_int_to_fixed(3); - dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); - dceip->max_dmif_buffer_allocated = 4; - dceip->graphics_dmif_size = 24576; - dceip->underlay_luma_dmif_size = 19456; - dceip->underlay_chroma_dmif_size = 23552; - dceip->pre_downscaler_enabled = true; - dceip->underlay_downscale_prefetch_enabled = false; - dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); - dceip->lb_size_per_component444 = bw_int_to_fixed(245952); - dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; - dceip->stutter_and_dram_clock_state_change_gated_before_cursor = - bw_int_to_fixed(1); - dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->underlay420_chroma_lb_size_per_component = - bw_int_to_fixed(164352); - dceip->underlay422_lb_size_per_component = bw_int_to_fixed( - 82176); - dceip->cursor_chunk_width = bw_int_to_fixed(64); - dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); - dceip->underlay_maximum_width_efficient_for_tiling = - bw_int_to_fixed(1920); - dceip->underlay_maximum_height_efficient_for_tiling = - bw_int_to_fixed(1080); - dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = - bw_frc_to_fixed(3, 10); - dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = - bw_int_to_fixed(25); - dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( - 2); - dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = - bw_int_to_fixed(128); - dceip->limit_excessive_outstanding_dmif_requests = true; - dceip->linear_mode_line_request_alternation_slice = - bw_int_to_fixed(64); - dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = - 32; - dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; - dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; - dceip->request_efficiency = bw_frc_to_fixed(8, 10); - dceip->dispclk_per_request = bw_int_to_fixed(2); - dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); - dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); - dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; - dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); - break; - default: - break; - } - *bw_dceip = *dceip; - *bw_vbios = *vbios; - - kfree(dceip); - kfree(vbios); -} - -/* - * Compare calculated (required) clocks against the clocks available at - * maximum voltage (max Performance Level). 
- */ -static bool is_display_configuration_supported( - const struct bw_calcs_vbios *vbios, - const struct dce_bw_output *calcs_output) -{ - uint32_t int_max_clk; - - int_max_clk = bw_fixed_to_int(vbios->high_voltage_max_dispclk); - int_max_clk *= 1000; /* MHz to kHz */ - if (calcs_output->dispclk_khz > int_max_clk) - return false; - - int_max_clk = bw_fixed_to_int(vbios->high_sclk); - int_max_clk *= 1000; /* MHz to kHz */ - if (calcs_output->sclk_khz > int_max_clk) - return false; - - return true; -} - -static void populate_initial_data( - const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data) -{ - int i, j; - int num_displays = 0; - - data->underlay_surface_type = bw_def_420; - data->panning_and_bezel_adjustment = bw_def_none; - data->graphics_lb_bpc = 10; - data->underlay_lb_bpc = 8; - data->underlay_tiling_mode = bw_def_tiled; - data->graphics_tiling_mode = bw_def_tiled; - data->underlay_micro_tile_mode = bw_def_display_micro_tiling; - data->graphics_micro_tile_mode = bw_def_display_micro_tiling; - data->increase_voltage_to_support_mclk_switch = true; - - /* Pipes with underlay first */ - for (i = 0; i < pipe_count; i++) { - if (!pipe[i].stream || !pipe[i].bottom_pipe) - continue; - - ASSERT(pipe[i].plane_state); - - if (num_displays == 0) { - if (!pipe[i].plane_state->visible) - data->d0_underlay_mode = bw_def_underlay_only; - else - data->d0_underlay_mode = bw_def_blend; - } else { - if (!pipe[i].plane_state->visible) - data->d1_underlay_mode = bw_def_underlay_only; - else - data->d1_underlay_mode = bw_def_blend; - } - - data->fbc_en[num_displays + 4] = false; - data->lpt_en[num_displays + 4] = false; - data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); - data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); - data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_100hz, 10000); - data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); - data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; - data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height); - data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps); - data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps); - data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value); - data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value); - switch (pipe[i].plane_state->rotation) { - case ROTATION_ANGLE_0: - data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); - break; - case ROTATION_ANGLE_90: - data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90); - break; - case ROTATION_ANGLE_180: - data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180); - break; - case ROTATION_ANGLE_270: - data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270); - break; - default: - break; - } - switch (pipe[i].plane_state->format) { - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - data->bytes_per_pixel[num_displays + 4] = 2; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: - case 
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - data->bytes_per_pixel[num_displays + 4] = 4; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - data->bytes_per_pixel[num_displays + 4] = 8; - break; - default: - data->bytes_per_pixel[num_displays + 4] = 4; - break; - } - data->interlace_mode[num_displays + 4] = false; - data->stereo_mode[num_displays + 4] = bw_def_mono; - - - for (j = 0; j < 2; j++) { - data->fbc_en[num_displays * 2 + j] = false; - data->lpt_en[num_displays * 2 + j] = false; - - data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height); - data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width); - data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed( - pipe[i].bottom_pipe->plane_state->plane_size.surface_pitch); - data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps); - data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps); - data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed( - pipe[i].bottom_pipe->plane_res.scl_data.ratios.horz.value); - data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed( - pipe[i].bottom_pipe->plane_res.scl_data.ratios.vert.value); - switch (pipe[i].bottom_pipe->plane_state->rotation) { - case ROTATION_ANGLE_0: - data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0); - break; - case ROTATION_ANGLE_90: - data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90); - break; - case ROTATION_ANGLE_180: - data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180); - break; - case ROTATION_ANGLE_270: - data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270); - break; - default: - break; - } - data->stereo_mode[num_displays * 2 + j] = bw_def_mono; - } - - num_displays++; - } - - /* Pipes without underlay after */ - for (i = 0; i < pipe_count; i++) { - unsigned int pixel_clock_100hz; - if (!pipe[i].stream || pipe[i].bottom_pipe) - continue; - - - data->fbc_en[num_displays + 4] = false; - data->lpt_en[num_displays + 4] = false; - data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); - data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); - pixel_clock_100hz = pipe[i].stream->timing.pix_clk_100hz; - if (pipe[i].stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) - pixel_clock_100hz *= 2; - data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_100hz, 10000); - if (pipe[i].plane_state) { - data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); - data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; - data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height); - data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps); - data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps); - data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value); - data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value); - switch (pipe[i].plane_state->rotation) { - case ROTATION_ANGLE_0: - 
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); - break; - case ROTATION_ANGLE_90: - data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90); - break; - case ROTATION_ANGLE_180: - data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180); - break; - case ROTATION_ANGLE_270: - data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270); - break; - default: - break; - } - switch (pipe[i].plane_state->format) { - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - data->bytes_per_pixel[num_displays + 4] = 2; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - data->bytes_per_pixel[num_displays + 4] = 4; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - data->bytes_per_pixel[num_displays + 4] = 8; - break; - default: - data->bytes_per_pixel[num_displays + 4] = 4; - break; - } - } else if (pipe[i].stream->dst.width != 0 && - pipe[i].stream->dst.height != 0 && - pipe[i].stream->src.width != 0 && - pipe[i].stream->src.height != 0) { - data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.width); - data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; - data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.height); - data->h_taps[num_displays + 4] = pipe[i].stream->src.width == pipe[i].stream->dst.width ? bw_int_to_fixed(1) : bw_int_to_fixed(2); - data->v_taps[num_displays + 4] = pipe[i].stream->src.height == pipe[i].stream->dst.height ? 
-				bw_int_to_fixed(1) : bw_int_to_fixed(2);
-			data->h_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.width, pipe[i].stream->dst.width);
-			data->v_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.height, pipe[i].stream->dst.height);
-			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
-			data->bytes_per_pixel[num_displays + 4] = 4;
-		} else {
-			data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable);
-			data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4];
-			data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_addressable);
-			data->h_taps[num_displays + 4] = bw_int_to_fixed(1);
-			data->v_taps[num_displays + 4] = bw_int_to_fixed(1);
-			data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
-			data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1);
-			data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0);
-			data->bytes_per_pixel[num_displays + 4] = 4;
-		}
-
-		data->interlace_mode[num_displays + 4] = false;
-		data->stereo_mode[num_displays + 4] = bw_def_mono;
-		num_displays++;
-	}
-
-	data->number_of_displays = num_displays;
-}
-
-static bool all_displays_in_sync(const struct pipe_ctx pipe[],
-	int pipe_count)
-{
-	const struct pipe_ctx *active_pipes[MAX_PIPES];
-	int i, num_active_pipes = 0;
-
-	for (i = 0; i < pipe_count; i++) {
-		if (!pipe[i].stream || pipe[i].top_pipe)
-			continue;
-
-		active_pipes[num_active_pipes++] = &pipe[i];
-	}
-
-	if (!num_active_pipes)
-		return false;
-
-	for (i = 1; i < num_active_pipes; ++i) {
-		if (!resource_are_streams_timing_synchronizable(
-				active_pipes[0]->stream, active_pipes[i]->stream)) {
-			return false;
-		}
-	}
-
-	return true;
-}
-
-/*
- * Return:
- *	true -	Display(s) configuration supported.
- *		In this case 'calcs_output' contains data for HW programming
- *	false -	Display(s) configuration not supported (not enough bandwidth).
- */
-bool bw_calcs(struct dc_context *ctx,
-	const struct bw_calcs_dceip *dceip,
-	const struct bw_calcs_vbios *vbios,
-	const struct pipe_ctx pipe[],
-	int pipe_count,
-	struct dce_bw_output *calcs_output)
-{
-	struct bw_calcs_data *data = kzalloc(sizeof(struct bw_calcs_data),
-			GFP_KERNEL);
-	if (!data)
-		return false;
-
-	populate_initial_data(pipe, pipe_count, data);
-
-	if (ctx->dc->config.multi_mon_pp_mclk_switch)
-		calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count);
-	else
-		calcs_output->all_displays_in_sync = false;
-
-	if (data->number_of_displays != 0) {
-		uint8_t yclk_lvl;
-		struct bw_fixed high_sclk = vbios->high_sclk;
-		struct bw_fixed mid1_sclk = vbios->mid1_sclk;
-		struct bw_fixed mid2_sclk = vbios->mid2_sclk;
-		struct bw_fixed mid3_sclk = vbios->mid3_sclk;
-		struct bw_fixed mid4_sclk = vbios->mid4_sclk;
-		struct bw_fixed mid5_sclk = vbios->mid5_sclk;
-		struct bw_fixed mid6_sclk = vbios->mid6_sclk;
-		struct bw_fixed low_sclk = vbios->low_sclk;
-		struct bw_fixed high_yclk = vbios->high_yclk;
-		struct bw_fixed mid_yclk = vbios->mid_yclk;
-		struct bw_fixed low_yclk = vbios->low_yclk;
-
-		if (ctx->dc->debug.bandwidth_calcs_trace) {
-			print_bw_calcs_dceip(ctx, dceip);
-			print_bw_calcs_vbios(ctx, vbios);
-			print_bw_calcs_data(ctx, data);
-		}
-		calculate_bandwidth(dceip, vbios, data);
-
-		yclk_lvl = data->y_clk_level;
-
-		calcs_output->nbp_state_change_enable =
-			data->nbp_state_change_enable;
-		calcs_output->cpuc_state_change_enable =
-			data->cpuc_state_change_enable;
-		calcs_output->cpup_state_change_enable =
-			data->cpup_state_change_enable;
-		calcs_output->stutter_mode_enable =
-			data->stutter_mode_enable;
-		calcs_output->dispclk_khz =
-			bw_fixed_to_int(bw_mul(data->dispclk,
-					bw_int_to_fixed(1000)));
-		calcs_output->blackout_recovery_time_us =
-			bw_fixed_to_int(data->blackout_recovery_time);
-		calcs_output->sclk_khz =
-			bw_fixed_to_int(bw_mul(data->required_sclk,
-					bw_int_to_fixed(1000)));
-		calcs_output->sclk_deep_sleep_khz =
-			bw_fixed_to_int(bw_mul(data->sclk_deep_sleep,
-					bw_int_to_fixed(1000)));
-		if (yclk_lvl == 0)
-			calcs_output->yclk_khz = bw_fixed_to_int(
-				bw_mul(low_yclk, bw_int_to_fixed(1000)));
-		else if (yclk_lvl == 1)
-			calcs_output->yclk_khz = bw_fixed_to_int(
-				bw_mul(mid_yclk, bw_int_to_fixed(1000)));
-		else
-			calcs_output->yclk_khz = bw_fixed_to_int(
-				bw_mul(high_yclk, bw_int_to_fixed(1000)));
-
-		/* units: nanosecond, 16bit storage.
*/ - - calcs_output->nbp_state_change_wm_ns[0].a_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[4], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[1].a_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[5], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[2].a_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[6], bw_int_to_fixed(1000))); - - if (ctx->dc->caps.max_slave_planes) { - calcs_output->nbp_state_change_wm_ns[3].a_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[0], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[4].a_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->nbp_state_change_wm_ns[3].a_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[7], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[4].a_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->nbp_state_change_wm_ns[5].a_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[9], bw_int_to_fixed(1000))); - - - - calcs_output->stutter_exit_wm_ns[0].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[1].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[2].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_exit_wm_ns[3].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[4].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_exit_wm_ns[3].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[4].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_exit_wm_ns[5].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->stutter_entry_wm_ns[0].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[1].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[2].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_entry_wm_ns[3].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_entry_wm_ns[3].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_entry_wm_ns[5].a_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->urgent_wm_ns[0].a_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[4], 
bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[1].a_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[5], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[2].a_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->urgent_wm_ns[3].a_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[0], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[4].a_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->urgent_wm_ns[3].a_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[7], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[4].a_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->urgent_wm_ns[5].a_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[9], bw_int_to_fixed(1000))); - - if (dceip->version != BW_CALCS_VERSION_CARRIZO) { - ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk; - ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk; - ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk; - calculate_bandwidth(dceip, vbios, data); - - calcs_output->nbp_state_change_wm_ns[0].b_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[4],bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[1].b_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[5], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[2].b_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[6], bw_int_to_fixed(1000))); - - if (ctx->dc->caps.max_slave_planes) { - calcs_output->nbp_state_change_wm_ns[3].b_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[0], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[4].b_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->nbp_state_change_wm_ns[3].b_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[7], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[4].b_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->nbp_state_change_wm_ns[5].b_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[9], bw_int_to_fixed(1000))); - - - - calcs_output->stutter_exit_wm_ns[0].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[1].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[2].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_exit_wm_ns[3].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[4].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_exit_wm_ns[3].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[4].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_exit_wm_ns[5].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[9], bw_int_to_fixed(1000))); - - 
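Every watermark assignment above follows the same pattern: the bw_fixed value produced by calculate_bandwidth() is multiplied by 1000 via bw_mul(..., bw_int_to_fixed(1000)) and truncated with bw_fixed_to_int(), consistent with microsecond results feeding the nanosecond, 16-bit fields noted earlier. The sketch below restates that conversion with plain doubles instead of bw_fixed; the helper name and the 16-bit clamp are illustrative assumptions, not code from the deleted file.

#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative stand-in for the repeated *_wm_ns conversion: scale a
 * microsecond watermark by 1000 and truncate to an integer nanosecond
 * count.  The clamp to UINT16_MAX is an added assumption based on the
 * "16bit storage" note; the original assignments do not clamp here.
 */
static uint16_t example_us_to_ns_mark(double watermark_us)
{
	double ns = watermark_us * 1000.0;	/* bw_mul(x, bw_int_to_fixed(1000)) */
	int64_t truncated = (int64_t)ns;	/* bw_fixed_to_int() truncates */

	if (truncated < 0)
		truncated = 0;
	if (truncated > UINT16_MAX)
		truncated = UINT16_MAX;

	return (uint16_t)truncated;
}

int main(void)
{
	/* e.g. a 12.7 us stutter-exit watermark becomes 12700 ns */
	printf("%u\n", example_us_to_ns_mark(12.7));
	return 0;
}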
calcs_output->stutter_entry_wm_ns[0].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[1].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[2].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_entry_wm_ns[3].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_entry_wm_ns[3].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_entry_wm_ns[5].b_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->urgent_wm_ns[0].b_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[4], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[1].b_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[5], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[2].b_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->urgent_wm_ns[3].b_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[0], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[4].b_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->urgent_wm_ns[3].b_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[7], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[4].b_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->urgent_wm_ns[5].b_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[9], bw_int_to_fixed(1000))); - - ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk; - ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk; - ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk; - ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk; - calculate_bandwidth(dceip, vbios, data); - - calcs_output->nbp_state_change_wm_ns[0].c_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[4], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[1].c_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[5], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[2].c_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->nbp_state_change_wm_ns[3].c_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[0], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[4].c_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->nbp_state_change_wm_ns[3].c_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[7], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[4].c_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->nbp_state_change_wm_ns[5].c_mark = - 
bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[9], bw_int_to_fixed(1000))); - - - calcs_output->stutter_exit_wm_ns[0].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[1].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[2].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_exit_wm_ns[3].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[4].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_exit_wm_ns[3].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[4].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_exit_wm_ns[5].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->stutter_entry_wm_ns[0].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[1].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[2].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_entry_wm_ns[3].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_entry_wm_ns[3].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_entry_wm_ns[5].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->urgent_wm_ns[0].c_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[4], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[1].c_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[5], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[2].c_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->urgent_wm_ns[3].c_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[0], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[4].c_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->urgent_wm_ns[3].c_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[7], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[4].c_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->urgent_wm_ns[5].c_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[9], bw_int_to_fixed(1000))); - } - - if (dceip->version == BW_CALCS_VERSION_CARRIZO) { - ((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk; - ((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk; - 
((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk; - ((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk; - ((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk; - ((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk; - ((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk; - ((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk; - ((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk; - } else { - ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk; - ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk; - ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk; - ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk; - } - - calculate_bandwidth(dceip, vbios, data); - - calcs_output->nbp_state_change_wm_ns[0].d_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[4], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[1].d_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[5], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[2].d_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->nbp_state_change_wm_ns[3].d_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[0], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[4].d_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->nbp_state_change_wm_ns[3].d_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[7], bw_int_to_fixed(1000))); - calcs_output->nbp_state_change_wm_ns[4].d_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->nbp_state_change_wm_ns[5].d_mark = - bw_fixed_to_int(bw_mul(data-> - nbp_state_change_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->stutter_exit_wm_ns[0].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[1].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[2].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_exit_wm_ns[3].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[4].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_exit_wm_ns[3].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_exit_wm_ns[4].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_exit_wm_ns[5].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_exit_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->stutter_entry_wm_ns[0].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[1].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[2].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_entry_wm_ns[3].d_mark = - bw_fixed_to_int(bw_mul(data-> - 
stutter_entry_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_entry_wm_ns[3].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_entry_wm_ns[5].d_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->urgent_wm_ns[0].d_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[4], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[1].d_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[5], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[2].d_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->urgent_wm_ns[3].d_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[0], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[4].d_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->urgent_wm_ns[3].d_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[7], bw_int_to_fixed(1000))); - calcs_output->urgent_wm_ns[4].d_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->urgent_wm_ns[5].d_mark = - bw_fixed_to_int(bw_mul(data-> - urgent_watermark[9], bw_int_to_fixed(1000))); - - ((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk; - ((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk; - ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk; - ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk; - ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk; - ((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk; - ((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk; - ((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk; - ((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk; - ((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk; - } else { - calcs_output->nbp_state_change_enable = true; - calcs_output->cpuc_state_change_enable = true; - calcs_output->cpup_state_change_enable = true; - calcs_output->stutter_mode_enable = true; - calcs_output->dispclk_khz = 0; - calcs_output->sclk_khz = 0; - } - - kfree(data); - - return is_display_configuration_supported(vbios, calcs_output); -} diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c deleted file mode 100644 index 41284e263325..000000000000 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.c +++ /dev/null @@ -1,1933 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. 
- * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dcn_calc_auto.h" -#include "dcn_calc_math.h" - -/* - * NOTE: - * This file is gcc-parseable HW gospel, coming straight from HW engineers. - * - * It doesn't adhere to Linux kernel style and sometimes will do things in odd - * ways. Unless there is something clearly wrong with it the code should - * remain as-is as it provides us with a guarantee from HW that it is correct. - */ - -/*REVISION#250*/ -void scaler_settings_calculation(struct dcn_bw_internal_vars *v) -{ - int k; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->allow_different_hratio_vratio == dcn_bw_yes) { - if (v->source_scan[k] == dcn_bw_hor) { - v->h_ratio[k] = v->viewport_width[k] / v->scaler_rec_out_width[k]; - v->v_ratio[k] = v->viewport_height[k] / v->scaler_recout_height[k]; - } - else { - v->h_ratio[k] = v->viewport_height[k] / v->scaler_rec_out_width[k]; - v->v_ratio[k] = v->viewport_width[k] / v->scaler_recout_height[k]; - } - } - else { - if (v->source_scan[k] == dcn_bw_hor) { - v->h_ratio[k] =dcn_bw_max2(v->viewport_width[k] / v->scaler_rec_out_width[k], v->viewport_height[k] / v->scaler_recout_height[k]); - } - else { - v->h_ratio[k] =dcn_bw_max2(v->viewport_height[k] / v->scaler_rec_out_width[k], v->viewport_width[k] / v->scaler_recout_height[k]); - } - v->v_ratio[k] = v->h_ratio[k]; - } - if (v->interlace_output[k] == 1.0) { - v->v_ratio[k] = 2.0 * v->v_ratio[k]; - } - if (v->underscan_output[k] == 1.0) { - v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor; - v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor; - } - } - /*scaler taps calculation*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->h_ratio[k] > 1.0) { - v->acceptable_quality_hta_ps =dcn_bw_min2(v->max_hscl_taps, 2.0 *dcn_bw_ceil2(v->h_ratio[k], 1.0)); - } - else if (v->h_ratio[k] < 1.0) { - v->acceptable_quality_hta_ps = 4.0; - } - else { - v->acceptable_quality_hta_ps = 1.0; - } - if (v->ta_pscalculation == dcn_bw_override) { - v->htaps[k] = v->override_hta_ps[k]; - } - else { - v->htaps[k] = v->acceptable_quality_hta_ps; - } - if (v->v_ratio[k] > 1.0) { - v->acceptable_quality_vta_ps =dcn_bw_min2(v->max_vscl_taps, 2.0 *dcn_bw_ceil2(v->v_ratio[k], 1.0)); - } - else if (v->v_ratio[k] < 1.0) { - v->acceptable_quality_vta_ps = 4.0; - } - else { - v->acceptable_quality_vta_ps = 1.0; - } - if (v->ta_pscalculation == dcn_bw_override) { - v->vtaps[k] = v->override_vta_ps[k]; - } - else { - v->vtaps[k] = v->acceptable_quality_vta_ps; - } - if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { - v->vta_pschroma[k] = 0.0; - v->hta_pschroma[k] = 0.0; - } - else { - if (v->ta_pscalculation == dcn_bw_override) { - v->vta_pschroma[k] = v->override_vta_pschroma[k]; - v->hta_pschroma[k] = v->override_hta_pschroma[k]; - } - else { - v->vta_pschroma[k] = v->acceptable_quality_vta_ps; - v->hta_pschroma[k] = v->acceptable_quality_hta_ps; - } - } - } -} - -void 
mode_support_and_system_configuration(struct dcn_bw_internal_vars *v) -{ - int i; - int j; - int k; - /*mode support, voltage state and soc configuration*/ - - /*scale ratio support check*/ - - v->scale_ratio_support = dcn_bw_yes; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->h_ratio[k] > v->max_hscl_ratio || v->v_ratio[k] > v->max_vscl_ratio || v->h_ratio[k] > v->htaps[k] || v->v_ratio[k] > v->vtaps[k] || (v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16 && (v->h_ratio[k] / 2.0 > v->hta_pschroma[k] || v->v_ratio[k] / 2.0 > v->vta_pschroma[k]))) { - v->scale_ratio_support = dcn_bw_no; - } - } - /*source format, pixel format and scan support check*/ - - v->source_format_pixel_and_scan_support = dcn_bw_yes; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if ((v->source_surface_mode[k] == dcn_bw_sw_linear && v->source_scan[k] != dcn_bw_hor) || ((v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_var_d || v->source_surface_mode[k] == dcn_bw_sw_var_d_x) && v->source_pixel_format[k] != dcn_bw_rgb_sub_64)) { - v->source_format_pixel_and_scan_support = dcn_bw_no; - } - } - /*bandwidth support check*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->source_scan[k] == dcn_bw_hor) { - v->swath_width_ysingle_dpp[k] = v->viewport_width[k]; - } - else { - v->swath_width_ysingle_dpp[k] = v->viewport_height[k]; - } - if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { - v->byte_per_pixel_in_dety[k] = 8.0; - v->byte_per_pixel_in_detc[k] = 0.0; - } - else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { - v->byte_per_pixel_in_dety[k] = 4.0; - v->byte_per_pixel_in_detc[k] = 0.0; - } - else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { - v->byte_per_pixel_in_dety[k] = 2.0; - v->byte_per_pixel_in_detc[k] = 0.0; - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { - v->byte_per_pixel_in_dety[k] = 1.0; - v->byte_per_pixel_in_detc[k] = 2.0; - } - else { - v->byte_per_pixel_in_dety[k] = 4.0f / 3.0f; - v->byte_per_pixel_in_detc[k] = 8.0f / 3.0f; - } - } - v->total_read_bandwidth_consumed_gbyte_per_second = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->read_bandwidth[k] = v->swath_width_ysingle_dpp[k] * (dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) * v->v_ratio[k] +dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0 * v->v_ratio[k] / 2) / (v->htotal[k] / v->pixel_clock[k]); - if (v->dcc_enable[k] == dcn_bw_yes) { - v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256); - } - if (v->pte_enable == dcn_bw_yes && v->source_scan[k] != dcn_bw_hor && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x)) { - v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 64); - } - else if (v->pte_enable == dcn_bw_yes && v->source_scan[k] == dcn_bw_hor && (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32) && (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == 
dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x)) { - v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256); - } - else if (v->pte_enable == dcn_bw_yes) { - v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 512); - } - v->total_read_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->read_bandwidth[k] / 1000.0; - } - v->total_write_bandwidth_consumed_gbyte_per_second = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) { - v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0; - } - else if (v->output[k] == dcn_bw_writeback) { - v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5; - } - else { - v->write_bandwidth[k] = 0.0; - } - v->total_write_bandwidth_consumed_gbyte_per_second = v->total_write_bandwidth_consumed_gbyte_per_second + v->write_bandwidth[k] / 1000.0; - } - v->total_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->total_write_bandwidth_consumed_gbyte_per_second; - v->dcc_enabled_in_any_plane = dcn_bw_no; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->dcc_enable[k] == dcn_bw_yes) { - v->dcc_enabled_in_any_plane = dcn_bw_yes; - } - } - for (i = 0; i <= number_of_states_plus_one; i++) { - v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0); - v->return_bw_per_state[i] = v->return_bw_todcn_per_state; - if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) { - v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency))); - } - v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); - if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) { - v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); - } - v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0); - if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) { - v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency))); - } - v->critical_point = 2.0 * v->return_bus_width * 
v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); - if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) { - v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); - } - } - for (i = 0; i <= number_of_states_plus_one; i++) { - if ((v->total_read_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->return_bw_per_state[i]) && (v->total_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0)) { - v->bandwidth_support[i] = dcn_bw_yes; - } - else { - v->bandwidth_support[i] = dcn_bw_no; - } - } - /*writeback latency support check*/ - - v->writeback_latency_support = dcn_bw_yes; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444 && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0 > (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) { - v->writeback_latency_support = dcn_bw_no; - } - else if (v->output[k] == dcn_bw_writeback && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) >dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) { - v->writeback_latency_support = dcn_bw_no; - } - } - /*re-ordering buffer support check*/ - - for (i = 0; i <= number_of_states_plus_one; i++) { - v->urgent_round_trip_and_out_of_order_latency_per_state[i] = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk_per_state[i] + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw_per_state[i]; - if ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / v->return_bw_per_state[i] > v->urgent_round_trip_and_out_of_order_latency_per_state[i]) { - v->rob_support[i] = dcn_bw_yes; - } - else { - v->rob_support[i] = dcn_bw_no; - } - } - /*display io support check*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->output[k] == dcn_bw_dp && v->dsc_capability == dcn_bw_yes) { - if (v->output_format[k] == dcn_bw_420) { - v->required_output_bw = v->pixel_clock[k] / 2.0; - } - else { - v->required_output_bw = v->pixel_clock[k]; - } - } - else if (v->output_format[k] == dcn_bw_420) { - v->required_output_bw = v->pixel_clock[k] * 3.0 / 2.0; - } - else { - v->required_output_bw = v->pixel_clock[k] * 3.0; - } - if (v->output[k] == dcn_bw_hdmi) { - v->required_phyclk[k] = v->required_output_bw; - switch (v->output_deep_color[k]) { - case dcn_bw_encoder_10bpc: - v->required_phyclk[k] = v->required_phyclk[k] * 5.0 / 4; - break; - case dcn_bw_encoder_12bpc: - v->required_phyclk[k] = v->required_phyclk[k] * 3.0 / 2; - break; - default: - break; - } - v->required_phyclk[k] = v->required_phyclk[k] / 3.0; - } - else if (v->output[k] == dcn_bw_dp) { - v->required_phyclk[k] = v->required_output_bw / 4.0; - } - else { - v->required_phyclk[k] = 0.0; - } - } - for (i = 0; i <= number_of_states_plus_one; i++) { - v->dio_support[i] = dcn_bw_yes; - for (k = 0; k <= 
v->number_of_active_planes - 1; k++) { - if (v->required_phyclk[k] > v->phyclk_per_state[i] || (v->output[k] == dcn_bw_hdmi && v->required_phyclk[k] > 600.0)) { - v->dio_support[i] = dcn_bw_no; - } - } - } - /*total available writeback support check*/ - - v->total_number_of_active_writeback = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->output[k] == dcn_bw_writeback) { - v->total_number_of_active_writeback = v->total_number_of_active_writeback + 1.0; - } - } - if (v->total_number_of_active_writeback <= v->max_num_writeback) { - v->total_available_writeback_support = dcn_bw_yes; - } - else { - v->total_available_writeback_support = dcn_bw_no; - } - /*maximum dispclk/dppclk support check*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->h_ratio[k] > 1.0) { - v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0)); - } - else { - v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); - } - if (v->byte_per_pixel_in_detc[k] == 0.0) { - v->pscl_factor_chroma[k] = 0.0; - v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], 1.0); - } - else { - if (v->h_ratio[k] / 2.0 > 1.0) { - v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0)); - } - else { - v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); - } - v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max5(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_factor_chroma[k], 1.0); - } - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->read256_block_height_y[k] = 1.0; - } - else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { - v->read256_block_height_y[k] = 4.0; - } - else { - v->read256_block_height_y[k] = 8.0; - } - v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k]; - v->read256_block_height_c[k] = 0.0; - v->read256_block_width_c[k] = 0.0; - } - else { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->read256_block_height_y[k] = 1.0; - v->read256_block_height_c[k] = 1.0; - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { - v->read256_block_height_y[k] = 16.0; - v->read256_block_height_c[k] = 8.0; - } - else { - v->read256_block_height_y[k] = 8.0; - v->read256_block_height_c[k] = 8.0; - } - v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k]; - v->read256_block_width_c[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->read256_block_height_c[k]; - } - if (v->source_scan[k] == dcn_bw_hor) { - v->max_swath_height_y[k] = v->read256_block_height_y[k]; - v->max_swath_height_c[k] = v->read256_block_height_c[k]; - } - else { - v->max_swath_height_y[k] = v->read256_block_width_y[k]; - v->max_swath_height_c[k] = v->read256_block_width_c[k]; 
- } - if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { - if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) { - v->min_swath_height_y[k] = v->max_swath_height_y[k]; - } - else { - v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0; - } - v->min_swath_height_c[k] = v->max_swath_height_c[k]; - } - else { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->min_swath_height_y[k] = v->max_swath_height_y[k]; - v->min_swath_height_c[k] = v->max_swath_height_c[k]; - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) { - v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0; - if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { - v->min_swath_height_c[k] = v->max_swath_height_c[k]; - } - else { - v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0; - } - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) { - v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0; - if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { - v->min_swath_height_y[k] = v->max_swath_height_y[k]; - } - else { - v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0; - } - } - else { - v->min_swath_height_y[k] = v->max_swath_height_y[k]; - v->min_swath_height_c[k] = v->max_swath_height_c[k]; - } - } - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->maximum_swath_width = 8192.0; - } - else { - v->maximum_swath_width = 5120.0; - } - v->number_of_dpp_required_for_det_size =dcn_bw_ceil2(v->swath_width_ysingle_dpp[k] /dcn_bw_min2(v->maximum_swath_width, v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / (v->byte_per_pixel_in_dety[k] * v->min_swath_height_y[k] + v->byte_per_pixel_in_detc[k] / 2.0 * v->min_swath_height_c[k])), 1.0); - if (v->byte_per_pixel_in_detc[k] == 0.0) { - v->number_of_dpp_required_for_lb_size =dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0); - } - else { - v->number_of_dpp_required_for_lb_size =dcn_bw_max2(dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0),dcn_bw_ceil2((v->vta_pschroma[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k] / 2.0, 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0)); - } - v->number_of_dpp_required_for_det_and_lb_size[k] =dcn_bw_max2(v->number_of_dpp_required_for_det_size, v->number_of_dpp_required_for_lb_size); - } - for (i = 0; i <= number_of_states_plus_one; i++) { - for (j = 0; j <= 1; j++) { - v->total_number_of_active_dpp[i][j] = 0.0; - v->required_dispclk[i][j] = 0.0; - v->dispclk_dppclk_support[i][j] = dcn_bw_yes; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - 
v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0); - if (v->odm_capability == dcn_bw_yes) { - v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k] / 2.0, v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0); - } - else { - v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0); - } - if (i < number_of_states) { - v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); - v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); - } - if (v->min_dispclk_using_single_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i]) && v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) { - v->no_of_dpp[i][j][k] = 1.0; - v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp); - } - else if (v->min_dispclk_using_dual_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) { - v->no_of_dpp[i][j][k] = 2.0; - v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp); - } - else { - v->no_of_dpp[i][j][k] = 2.0; - v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp); - v->dispclk_dppclk_support[i][j] = dcn_bw_no; - } - v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k]; - } - if (v->total_number_of_active_dpp[i][j] > v->max_num_dpp) { - v->total_number_of_active_dpp[i][j] = 0.0; - v->required_dispclk[i][j] = 0.0; - v->dispclk_dppclk_support[i][j] = dcn_bw_yes; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0); - v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0); - if (i < number_of_states) { - v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); - v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); - } - if (v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) { - v->no_of_dpp[i][j][k] = 1.0; - v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp); - if (v->min_dispclk_using_single_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) { - v->dispclk_dppclk_support[i][j] = dcn_bw_no; - } - } - else { - v->no_of_dpp[i][j][k] = 2.0; - v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp); - if (v->min_dispclk_using_dual_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) { - v->dispclk_dppclk_support[i][j] = dcn_bw_no; - } - } - v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k]; - } - } - } - } - /*viewport size check*/ - - v->viewport_size_support = dcn_bw_yes; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->number_of_dpp_required_for_det_and_lb_size[k] > 2.0) { - v->viewport_size_support = dcn_bw_no; - } - } - /*total available pipes support check*/ - - for (i = 0; i <= number_of_states_plus_one; i++) { - for (j = 0; j <= 1; j++) { - if 
(v->total_number_of_active_dpp[i][j] <= v->max_num_dpp) { - v->total_available_pipes_support[i][j] = dcn_bw_yes; - } - else { - v->total_available_pipes_support[i][j] = dcn_bw_no; - } - } - } - /*urgent latency support check*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - for (i = 0; i <= number_of_states_plus_one; i++) { - for (j = 0; j <= 1; j++) { - v->swath_width_yper_state[i][j][k] = v->swath_width_ysingle_dpp[k] / v->no_of_dpp[i][j][k]; - v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->max_swath_height_y[k]; - v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pixel_in_dety[k] * v->max_swath_height_y[k]; - if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { - v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256; - } - if (v->max_swath_height_c[k] > 0.0) { - v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k]; - } - v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k]; - if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { - v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256; - } - if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) { - v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k]; - v->swath_height_cper_state[i][j][k] = v->max_swath_height_c[k]; - } - else { - v->swath_height_yper_state[i][j][k] = v->min_swath_height_y[k]; - v->swath_height_cper_state[i][j][k] = v->min_swath_height_c[k]; - } - if (v->byte_per_pixel_in_detc[k] == 0.0) { - v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k]; - v->lines_in_det_chroma = 0.0; - } - else if (v->swath_height_yper_state[i][j][k] <= v->swath_height_cper_state[i][j][k]) { - v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k]; - v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_detc[k] / (v->swath_width_yper_state[i][j][k] / 2.0); - } - else { - v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k]; - v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0); - } - v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0); - v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0); - v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], 
v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]); - v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]); - if (v->byte_per_pixel_in_detc[k] == 0.0) { - v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]); - } - else { - v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k])); - } - } - } - } - for (i = 0; i <= number_of_states_plus_one; i++) { - for (j = 0; j <= 1; j++) { - v->urgent_latency_support[i][j] = dcn_bw_yes; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->urgent_latency_support_us_per_state[i][j][k] < v->urgent_latency / 1.0) { - v->urgent_latency_support[i][j] = dcn_bw_no; - } - } - } - } - /*prefetch check*/ - - for (i = 0; i <= number_of_states_plus_one; i++) { - for (j = 0; j <= 1; j++) { - v->total_number_of_dcc_active_dpp[i][j] = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->dcc_enable[k] == dcn_bw_yes) { - v->total_number_of_dcc_active_dpp[i][j] = v->total_number_of_dcc_active_dpp[i][j] + v->no_of_dpp[i][j][k]; - } - } - } - } - for (i = 0; i <= number_of_states_plus_one; i++) { - for (j = 0; j <= 1; j++) { - v->projected_dcfclk_deep_sleep = 8.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, v->pixel_clock[k] / 16.0); - if (v->byte_per_pixel_in_detc[k] == 0.0) { - if (v->v_ratio[k] <= 1.0) { - v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]); - } - else { - v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j)); - } - } - else { - if (v->v_ratio[k] <= 1.0) { - v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]); - } - else { - v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j)); - } - if (v->v_ratio[k] / 2.0 <= 1.0) { - v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->h_ratio[k] / 2.0 * v->pixel_clock[k] / 
v->no_of_dpp[i][j][k]); - } - else { - v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->pscl_factor_chroma[k] * v->required_dispclk[i][j] / (1 + j)); - } - } - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->dcc_enable[k] == dcn_bw_yes) { - v->meta_req_height_y = 8.0 * v->read256_block_height_y[k]; - v->meta_req_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->meta_req_height_y; - v->meta_surface_width_y =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0, v->meta_req_width_y) + v->meta_req_width_y; - v->meta_surface_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, v->meta_req_height_y) + v->meta_req_height_y; - if (v->pte_enable == dcn_bw_yes) { - v->meta_pte_bytes_per_frame_y = (dcn_bw_ceil2((v->meta_surface_width_y * v->meta_surface_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; - } - else { - v->meta_pte_bytes_per_frame_y = 0.0; - } - if (v->source_scan[k] == dcn_bw_hor) { - v->meta_row_bytes_y = v->meta_surface_width_y * v->meta_req_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0; - } - else { - v->meta_row_bytes_y = v->meta_surface_height_y * v->meta_req_width_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0; - } - } - else { - v->meta_pte_bytes_per_frame_y = 0.0; - v->meta_row_bytes_y = 0.0; - } - if (v->pte_enable == dcn_bw_yes) { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->macro_tile_block_size_bytes_y = 256.0; - v->macro_tile_block_height_y = 1.0; - } - else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { - v->macro_tile_block_size_bytes_y = 4096.0; - v->macro_tile_block_height_y = 4.0 * v->read256_block_height_y[k]; - } - else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { - v->macro_tile_block_size_bytes_y = 64.0 * 1024; - v->macro_tile_block_height_y = 16.0 * v->read256_block_height_y[k]; - } - else { - v->macro_tile_block_size_bytes_y = 256.0 * 1024; - v->macro_tile_block_height_y = 32.0 * v->read256_block_height_y[k]; - } - if (v->macro_tile_block_size_bytes_y <= 65536.0) { - v->data_pte_req_height_y = v->macro_tile_block_height_y; - } - else { - v->data_pte_req_height_y = 16.0 * v->read256_block_height_y[k]; - } - v->data_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->data_pte_req_height_y * 8; - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_y / (v->viewport_width[k] / v->no_of_dpp[i][j][k]), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_y, 1.0) + 1); - } - else if (v->source_scan[k] == dcn_bw_hor) { - v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0) / v->data_pte_req_width_y, 1.0) + 1); - } - else { - v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->data_pte_req_height_y, 1.0) 
+ 1); - } - } - else { - v->dpte_bytes_per_row_y = 0.0; - } - if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { - if (v->dcc_enable[k] == dcn_bw_yes) { - v->meta_req_height_c = 8.0 * v->read256_block_height_c[k]; - v->meta_req_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->meta_req_height_c; - v->meta_surface_width_c =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0, v->meta_req_width_c) + v->meta_req_width_c; - v->meta_surface_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, v->meta_req_height_c) + v->meta_req_height_c; - if (v->pte_enable == dcn_bw_yes) { - v->meta_pte_bytes_per_frame_c = (dcn_bw_ceil2((v->meta_surface_width_c * v->meta_surface_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; - } - else { - v->meta_pte_bytes_per_frame_c = 0.0; - } - if (v->source_scan[k] == dcn_bw_hor) { - v->meta_row_bytes_c = v->meta_surface_width_c * v->meta_req_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0; - } - else { - v->meta_row_bytes_c = v->meta_surface_height_c * v->meta_req_width_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0; - } - } - else { - v->meta_pte_bytes_per_frame_c = 0.0; - v->meta_row_bytes_c = 0.0; - } - if (v->pte_enable == dcn_bw_yes) { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->macro_tile_block_size_bytes_c = 256.0; - v->macro_tile_block_height_c = 1.0; - } - else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { - v->macro_tile_block_size_bytes_c = 4096.0; - v->macro_tile_block_height_c = 4.0 * v->read256_block_height_c[k]; - } - else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { - v->macro_tile_block_size_bytes_c = 64.0 * 1024; - v->macro_tile_block_height_c = 16.0 * v->read256_block_height_c[k]; - } - else { - v->macro_tile_block_size_bytes_c = 256.0 * 1024; - v->macro_tile_block_height_c = 32.0 * v->read256_block_height_c[k]; - } - v->macro_tile_block_width_c = v->macro_tile_block_size_bytes_c /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->macro_tile_block_height_c; - if (v->macro_tile_block_size_bytes_c <= 65536.0) { - v->data_pte_req_height_c = v->macro_tile_block_height_c; - } - else { - v->data_pte_req_height_c = 16.0 * v->read256_block_height_c[k]; - } - v->data_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->data_pte_req_height_c * 8; - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_c / (v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_c, 1.0) + 1); - } - else if (v->source_scan[k] == dcn_bw_hor) { - v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0) / v->data_pte_req_width_c, 1.0) + 1); - } - else { - v->dpte_bytes_per_row_c = 
64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->data_pte_req_height_c, 1.0) + 1); - } - } - else { - v->dpte_bytes_per_row_c = 0.0; - } - } - else { - v->dpte_bytes_per_row_c = 0.0; - v->meta_pte_bytes_per_frame_c = 0.0; - v->meta_row_bytes_c = 0.0; - } - v->dpte_bytes_per_row[k] = v->dpte_bytes_per_row_y + v->dpte_bytes_per_row_c; - v->meta_pte_bytes_per_frame[k] = v->meta_pte_bytes_per_frame_y + v->meta_pte_bytes_per_frame_c; - v->meta_row_bytes[k] = v->meta_row_bytes_y + v->meta_row_bytes_c; - v->v_init_y = (v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0; - v->prefill_y[k] =dcn_bw_floor2(v->v_init_y, 1.0); - v->max_num_sw_y[k] =dcn_bw_ceil2((v->prefill_y[k] - 1.0) / v->swath_height_yper_state[i][j][k], 1.0) + 1; - if (v->prefill_y[k] > 1.0) { - v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] - 2.0), v->swath_height_yper_state[i][j][k]); - } - else { - v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] + v->swath_height_yper_state[i][j][k] - 2.0), v->swath_height_yper_state[i][j][k]); - } - v->max_partial_sw_y =dcn_bw_max2(1.0, v->max_partial_sw_y); - v->prefetch_lines_y[k] = v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k] + v->max_partial_sw_y; - if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { - v->v_init_c = (v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0; - v->prefill_c[k] =dcn_bw_floor2(v->v_init_c, 1.0); - v->max_num_sw_c[k] =dcn_bw_ceil2((v->prefill_c[k] - 1.0) / v->swath_height_cper_state[i][j][k], 1.0) + 1; - if (v->prefill_c[k] > 1.0) { - v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] - 2.0), v->swath_height_cper_state[i][j][k]); - } - else { - v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] + v->swath_height_cper_state[i][j][k] - 2.0), v->swath_height_cper_state[i][j][k]); - } - v->max_partial_sw_c =dcn_bw_max2(1.0, v->max_partial_sw_c); - v->prefetch_lines_c[k] = v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k] + v->max_partial_sw_c; - } - else { - v->prefetch_lines_c[k] = 0.0; - } - v->dst_x_after_scaler = 90.0 * v->pixel_clock[k] / (v->required_dispclk[i][j] / (j + 1)) + 42.0 * v->pixel_clock[k] / v->required_dispclk[i][j]; - if (v->no_of_dpp[i][j][k] > 1.0) { - v->dst_x_after_scaler = v->dst_x_after_scaler + v->scaler_rec_out_width[k] / 2.0; - } - if (v->output_format[k] == dcn_bw_420) { - v->dst_y_after_scaler = 1.0; - } - else { - v->dst_y_after_scaler = 0.0; - } - v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep; - v->v_update_offset[k][j] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0); - v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]); - v->v_update_width[k][j] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k]; - v->v_ready_offset[k][j] = dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k]; - v->time_setup = (v->v_update_offset[k][j] + v->v_update_width[k][j] + v->v_ready_offset[k][j]) / v->pixel_clock[k]; - v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / 
v->return_bw_per_state[i]; - if (v->pte_enable == dcn_bw_yes) { - v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i]; - } - if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) { - v->maximum_vstartup = v->vtotal[k] - v->vactive[k] - 1.0; - } - else { - v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0; - } - - do { - v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]); - v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4; - v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]); - - if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { - v->time_for_meta_pte_without_immediate_flip = dcn_bw_max3( - v->meta_pte_bytes_frame[k] / v->prefetch_bw[k], - v->extra_latency, - v->htotal[k] / v->pixel_clock[k] / 4.0); - } else { - v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; - } - - if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) { - v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max3(( - v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], - v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, - v->extra_latency); - } else { - v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max2( - v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, - v->extra_latency - v->time_for_meta_pte_with_immediate_flip); - } - - v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; - v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; - v->maximum_vstartup = v->maximum_vstartup - 1; - - if (v->lines_for_meta_pte_without_immediate_flip[k] < 32.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0) - break; - - } while(1); - } - v->bw_available_for_immediate_flip = v->return_bw_per_state[i]; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->bw_available_for_immediate_flip = v->bw_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth[k], v->prefetch_bw[k]); - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->total_immediate_flip_bytes[k] = 0.0; - if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { - v->total_immediate_flip_bytes[k] = v->total_immediate_flip_bytes[k] + v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]; - } - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { - v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], 
v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); - } - else { - v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; - } - if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) { - v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency); - } - else { - v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip); - } - v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; - v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; - v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k]; - v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k]; - if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) { - v->v_ratio_pre_ywith_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip; - if ((v->swath_height_yper_state[i][j][k] > 4.0)) { - if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) { - v->v_ratio_pre_ywith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywith_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0)); - } - else { - v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0; - } - } - v->v_ratio_pre_cwith_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip; - if ((v->swath_height_cper_state[i][j][k] > 4.0)) { - if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) { - v->v_ratio_pre_cwith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwith_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0)); - } - else { - v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0; - } - } - v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / 
v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]); - } - else { - v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0; - v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0; - v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = 999999.0; - } - if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip > 0.0) { - v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip; - if ((v->swath_height_yper_state[i][j][k] > 4.0)) { - if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) { - v->v_ratio_pre_ywithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywithout_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0)); - } - else { - v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0; - } - } - v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip; - if ((v->swath_height_cper_state[i][j][k] > 4.0)) { - if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) { - v->v_ratio_pre_cwithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwithout_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0)); - } - else { - v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0; - } - } - v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]); - } - else { - v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0; - v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0; - v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = 999999.0; - } - } - v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { - v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k]) +dcn_bw_max2(v->meta_pte_bytes_per_frame[k] / (v->lines_for_meta_pte_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / (v->lines_for_meta_and_dpte_row_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k])); - } - else { - v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]); - } - } - 
v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = v->maximum_read_bandwidth_with_prefetch_without_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]); - } - v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes; - if (v->maximum_read_bandwidth_with_prefetch_with_immediate_flip > v->return_bw_per_state[i]) { - v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no; - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_with_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_with_immediate_flip[k] >= 16.0) { - v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no; - } - } - v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes; - if (v->maximum_read_bandwidth_with_prefetch_without_immediate_flip > v->return_bw_per_state[i]) { - v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no; - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_without_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_without_immediate_flip[k] >= 16.0) { - v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no; - } - } - } - } - for (i = 0; i <= number_of_states_plus_one; i++) { - for (j = 0; j <= 1; j++) { - v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if ((((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywith_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwith_immediate_flip[i][j][k] > 4.0)) || ((v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 || v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)))) { - v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no; - } - } - v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if ((v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)) { - v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no; - } - } - } - } - /*mode support, voltage state and soc configuration*/ - - for (i = number_of_states_plus_one; i >= 0; i--) { - for (j = 0; j <= 1; j++) { - if (v->scale_ratio_support == dcn_bw_yes && v->source_format_pixel_and_scan_support == dcn_bw_yes && v->viewport_size_support == dcn_bw_yes && v->bandwidth_support[i] == dcn_bw_yes && v->dio_support[i] == dcn_bw_yes && v->urgent_latency_support[i][j] == dcn_bw_yes && v->rob_support[i] == dcn_bw_yes && v->dispclk_dppclk_support[i][j] == dcn_bw_yes && v->total_available_pipes_support[i][j] == dcn_bw_yes && v->total_available_writeback_support == dcn_bw_yes && v->writeback_latency_support == dcn_bw_yes) { - if (v->prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes) { - v->mode_support_with_immediate_flip[i][j] = dcn_bw_yes; - } - else { - v->mode_support_with_immediate_flip[i][j] = dcn_bw_no; - } - if (v->prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes && 
v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes) { - v->mode_support_without_immediate_flip[i][j] = dcn_bw_yes; - } - else { - v->mode_support_without_immediate_flip[i][j] = dcn_bw_no; - } - } - else { - v->mode_support_with_immediate_flip[i][j] = dcn_bw_no; - v->mode_support_without_immediate_flip[i][j] = dcn_bw_no; - } - } - } - for (i = number_of_states_plus_one; i >= 0; i--) { - if ((i == number_of_states_plus_one || v->mode_support_with_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_with_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) { - v->voltage_level_with_immediate_flip = i; - } - } - for (i = number_of_states_plus_one; i >= 0; i--) { - if ((i == number_of_states_plus_one || v->mode_support_without_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_without_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) { - v->voltage_level_without_immediate_flip = i; - } - } - if (v->voltage_level_with_immediate_flip == number_of_states_plus_one) { - v->immediate_flip_supported = dcn_bw_no; - v->voltage_level = v->voltage_level_without_immediate_flip; - } - else { - v->immediate_flip_supported = dcn_bw_yes; - v->voltage_level = v->voltage_level_with_immediate_flip; - } - v->dcfclk = v->dcfclk_per_state[v->voltage_level]; - v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level]; - for (j = 0; j <= 1; j++) { - v->required_dispclk_per_ratio[j] = v->required_dispclk[v->voltage_level][j]; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->dpp_per_plane_per_ratio[j][k] = v->no_of_dpp[v->voltage_level][j][k]; - } - v->dispclk_dppclk_support_per_ratio[j] = v->dispclk_dppclk_support[v->voltage_level][j]; - } - v->max_phyclk = v->phyclk_per_state[v->voltage_level]; -} -void display_pipe_configuration(struct dcn_bw_internal_vars *v) -{ - int j; - int k; - /*display pipe configuration*/ - - for (j = 0; j <= 1; j++) { - v->total_number_of_active_dpp_per_ratio[j] = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->total_number_of_active_dpp_per_ratio[j] = v->total_number_of_active_dpp_per_ratio[j] + v->dpp_per_plane_per_ratio[j][k]; - } - } - if ((v->dispclk_dppclk_support_per_ratio[0] == dcn_bw_yes && v->dispclk_dppclk_support_per_ratio[1] == dcn_bw_no) || (v->dispclk_dppclk_support_per_ratio[0] == v->dispclk_dppclk_support_per_ratio[1] && (v->total_number_of_active_dpp_per_ratio[0] < v->total_number_of_active_dpp_per_ratio[1] || (((v->total_number_of_active_dpp_per_ratio[0] == v->total_number_of_active_dpp_per_ratio[1]) && v->required_dispclk_per_ratio[0] <= 0.5 * v->required_dispclk_per_ratio[1]))))) { - v->dispclk_dppclk_ratio = 1; - v->final_error_message = v->error_message[0]; - } - else { - v->dispclk_dppclk_ratio = 2; - v->final_error_message = v->error_message[1]; - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->dpp_per_plane[k] = v->dpp_per_plane_per_ratio[v->dispclk_dppclk_ratio - 1][k]; - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { - v->byte_per_pix_dety = 8.0; - v->byte_per_pix_detc = 0.0; - } - else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { - v->byte_per_pix_dety = 4.0; - v->byte_per_pix_detc = 0.0; - } - else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { - v->byte_per_pix_dety = 2.0; - v->byte_per_pix_detc = 0.0; - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { - v->byte_per_pix_dety = 1.0; - v->byte_per_pix_detc = 2.0; - } 
- else { - v->byte_per_pix_dety = 4.0f / 3.0f; - v->byte_per_pix_detc = 8.0f / 3.0f; - } - if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->read256_bytes_block_height_y = 1.0; - } - else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { - v->read256_bytes_block_height_y = 4.0; - } - else { - v->read256_bytes_block_height_y = 8.0; - } - v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y; - v->read256_bytes_block_height_c = 0.0; - v->read256_bytes_block_width_c = 0.0; - } - else { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->read256_bytes_block_height_y = 1.0; - v->read256_bytes_block_height_c = 1.0; - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { - v->read256_bytes_block_height_y = 16.0; - v->read256_bytes_block_height_c = 8.0; - } - else { - v->read256_bytes_block_height_y = 8.0; - v->read256_bytes_block_height_c = 8.0; - } - v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y; - v->read256_bytes_block_width_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->read256_bytes_block_height_c; - } - if (v->source_scan[k] == dcn_bw_hor) { - v->maximum_swath_height_y = v->read256_bytes_block_height_y; - v->maximum_swath_height_c = v->read256_bytes_block_height_c; - } - else { - v->maximum_swath_height_y = v->read256_bytes_block_width_y; - v->maximum_swath_height_c = v->read256_bytes_block_width_c; - } - if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { - if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) { - v->minimum_swath_height_y = v->maximum_swath_height_y; - } - else { - v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0; - } - v->minimum_swath_height_c = v->maximum_swath_height_c; - } - else { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->minimum_swath_height_y = v->maximum_swath_height_y; - v->minimum_swath_height_c = v->maximum_swath_height_c; - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) { - v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0; - if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { - v->minimum_swath_height_c = v->maximum_swath_height_c; - } - else { - v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0; - } - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) { - v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0; - if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { - v->minimum_swath_height_y = v->maximum_swath_height_y; - } - else { - v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0; - } - } - else { - v->minimum_swath_height_y = v->maximum_swath_height_y; - v->minimum_swath_height_c = 
v->maximum_swath_height_c; - } - } - if (v->source_scan[k] == dcn_bw_hor) { - v->swath_width = v->viewport_width[k] / v->dpp_per_plane[k]; - } - else { - v->swath_width = v->viewport_height[k] / v->dpp_per_plane[k]; - } - v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->maximum_swath_height_y; - v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pix_dety * v->maximum_swath_height_y; - if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { - v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256; - } - if (v->maximum_swath_height_c > 0.0) { - v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c; - } - v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c; - if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { - v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256; - } - if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) { - v->swath_height_y[k] = v->maximum_swath_height_y; - v->swath_height_c[k] = v->maximum_swath_height_c; - } - else { - v->swath_height_y[k] = v->minimum_swath_height_y; - v->swath_height_c[k] = v->minimum_swath_height_c; - } - if (v->swath_height_c[k] == 0.0) { - v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0; - v->det_buffer_size_c[k] = 0.0; - } - else if (v->swath_height_y[k] <= v->swath_height_c[k]) { - v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0; - v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0; - } - else { - v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0; - v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 3.0; - } - } -} -void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(struct dcn_bw_internal_vars *v) -{ - int k; - /*dispclk and dppclk calculation*/ - - v->dispclk_with_ramping = 0.0; - v->dispclk_without_ramping = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->h_ratio[k] > 1.0) { - v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0)); - } - else { - v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); - } - v->dppclk_using_single_dpp_luma = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_throughput[k], 1.0); - if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { - v->pscl_throughput_chroma[k] = 0.0; - v->dppclk_using_single_dpp = v->dppclk_using_single_dpp_luma; - } - else { - if (v->h_ratio[k] > 1.0) { - v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0)); - } - else { - v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); - } - v->dppclk_using_single_dpp_chroma = v->pixel_clock[k] *dcn_bw_max3(v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), 
v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_throughput_chroma[k], 1.0); - v->dppclk_using_single_dpp =dcn_bw_max2(v->dppclk_using_single_dpp_luma, v->dppclk_using_single_dpp_chroma); - } - if (v->odm_capable == dcn_bw_yes) { - v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0)); - v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0)); - } - else { - v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0)); - v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0)); - } - } - if (v->dispclk_without_ramping > v->max_dispclk[number_of_states]) { - v->dispclk = v->dispclk_without_ramping; - } - else if (v->dispclk_with_ramping > v->max_dispclk[number_of_states]) { - v->dispclk = v->max_dispclk[number_of_states]; - } - else { - v->dispclk = v->dispclk_with_ramping; - } - v->dppclk = v->dispclk / v->dispclk_dppclk_ratio; - /*urgent watermark*/ - - v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0); - v->dcc_enabled_any_plane = dcn_bw_no; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->dcc_enable[k] == dcn_bw_yes) { - v->dcc_enabled_any_plane = dcn_bw_yes; - } - } - v->return_bw = v->return_bandwidth_to_dcn; - if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) { - v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency))); - } - v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); - if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) { - v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); - } - v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0); - if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) { - v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + 
v->urgent_latency))); - } - v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); - if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) { - v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->source_scan[k] == dcn_bw_hor) { - v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k]; - } - else { - v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k]; - } - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { - v->byte_per_pixel_dety[k] = 8.0; - v->byte_per_pixel_detc[k] = 0.0; - } - else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { - v->byte_per_pixel_dety[k] = 4.0; - v->byte_per_pixel_detc[k] = 0.0; - } - else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { - v->byte_per_pixel_dety[k] = 2.0; - v->byte_per_pixel_detc[k] = 0.0; - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { - v->byte_per_pixel_dety[k] = 1.0; - v->byte_per_pixel_detc[k] = 2.0; - } - else { - v->byte_per_pixel_dety[k] = 4.0f / 3.0f; - v->byte_per_pixel_detc[k] = 8.0f / 3.0f; - } - } - v->total_data_read_bandwidth = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k]; - v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0; - v->total_data_read_bandwidth = v->total_data_read_bandwidth + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k]; - } - v->total_active_dpp = 0.0; - v->total_dcc_active_dpp = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->total_active_dpp = v->total_active_dpp + v->dpp_per_plane[k]; - if (v->dcc_enable[k] == dcn_bw_yes) { - v->total_dcc_active_dpp = v->total_dcc_active_dpp + v->dpp_per_plane[k]; - } - } - v->urgent_round_trip_and_out_of_order_latency = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw; - v->last_pixel_of_line_extra_watermark = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->v_ratio[k] <= 1.0) { - v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k]; - } - else { - v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk; - } - v->data_fabric_line_delivery_time_luma = v->swath_width_y[k] * v->swath_height_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->return_bw * v->read_bandwidth_plane_luma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth); - v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_luma - v->display_pipe_line_delivery_time_luma[k]); - if 
(v->byte_per_pixel_detc[k] == 0.0) { - v->display_pipe_line_delivery_time_chroma[k] = 0.0; - } - else { - if (v->v_ratio[k] / 2.0 <= 1.0) { - v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] / (v->h_ratio[k] / 2.0) / v->pixel_clock[k]; - } - else { - v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 / v->pscl_throughput_chroma[k] / v->dppclk; - } - v->data_fabric_line_delivery_time_chroma = v->swath_width_y[k] / 2.0 * v->swath_height_c[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->return_bw * v->read_bandwidth_plane_chroma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth); - v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_chroma - v->display_pipe_line_delivery_time_chroma[k]); - } - } - v->urgent_extra_latency = v->urgent_round_trip_and_out_of_order_latency + (v->total_active_dpp * v->pixel_chunk_size_in_kbyte + v->total_dcc_active_dpp * v->meta_chunk_size) * 1024.0 / v->return_bw; - if (v->pte_enable == dcn_bw_yes) { - v->urgent_extra_latency = v->urgent_extra_latency + v->total_active_dpp * v->pte_chunk_size * 1024.0 / v->return_bw; - } - v->urgent_watermark = v->urgent_latency + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency; - v->ptemeta_urgent_watermark = v->urgent_watermark + 2.0 * v->urgent_latency; - /*nb p-state/dram clock change watermark*/ - - v->dram_clock_change_watermark = v->dram_clock_change_latency + v->urgent_watermark; - v->total_active_writeback = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->output[k] == dcn_bw_writeback) { - v->total_active_writeback = v->total_active_writeback + 1.0; - } - } - if (v->total_active_writeback <= 1.0) { - v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency; - } - else { - v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency + v->writeback_chunk_size * 1024.0 / 32.0 / v->socclk; - } - /*stutter efficiency*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->lines_in_dety[k] = v->det_buffer_size_y[k] / v->byte_per_pixel_dety[k] / v->swath_width_y[k]; - v->lines_in_dety_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_dety[k], v->swath_height_y[k]); - v->full_det_buffering_time_y[k] = v->lines_in_dety_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k]; - if (v->byte_per_pixel_detc[k] > 0.0) { - v->lines_in_detc[k] = v->det_buffer_size_c[k] / v->byte_per_pixel_detc[k] / (v->swath_width_y[k] / 2.0); - v->lines_in_detc_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_detc[k], v->swath_height_c[k]); - v->full_det_buffering_time_c[k] = v->lines_in_detc_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0); - } - else { - v->lines_in_detc[k] = 0.0; - v->lines_in_detc_rounded_down_to_swath[k] = 0.0; - v->full_det_buffering_time_c[k] = 999999.0; - } - } - v->min_full_det_buffering_time = 999999.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->full_det_buffering_time_y[k] < v->min_full_det_buffering_time) { - v->min_full_det_buffering_time = v->full_det_buffering_time_y[k]; - v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k]; - } - if (v->full_det_buffering_time_c[k] < v->min_full_det_buffering_time) { - v->min_full_det_buffering_time = v->full_det_buffering_time_c[k]; - v->frame_time_for_min_full_det_buffering_time = 
v->vtotal[k] * v->htotal[k] / v->pixel_clock[k]; - } - } - v->average_read_bandwidth_gbyte_per_second = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->dcc_enable[k] == dcn_bw_yes) { - v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / v->dcc_rate[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / v->dcc_rate[k] / 1000.0; - } - else { - v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / 1000.0; - } - if (v->dcc_enable[k] == dcn_bw_yes) { - v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 256.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 256.0; - } - if (v->pte_enable == dcn_bw_yes) { - v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 512.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 512.0; - } - } - v->part_of_burst_that_fits_in_rob =dcn_bw_min2(v->min_full_det_buffering_time * v->total_data_read_bandwidth, v->rob_buffer_size_in_kbyte * 1024.0 * v->total_data_read_bandwidth / (v->average_read_bandwidth_gbyte_per_second * 1000.0)); - v->stutter_burst_time = v->part_of_burst_that_fits_in_rob * (v->average_read_bandwidth_gbyte_per_second * 1000.0) / v->total_data_read_bandwidth / v->return_bw + (v->min_full_det_buffering_time * v->total_data_read_bandwidth - v->part_of_burst_that_fits_in_rob) / (v->dcfclk * 64.0); - if (v->total_active_writeback == 0.0) { - v->stutter_efficiency_not_including_vblank = (1.0 - (v->sr_exit_time + v->stutter_burst_time) / v->min_full_det_buffering_time) * 100.0; - } - else { - v->stutter_efficiency_not_including_vblank = 0.0; - } - v->smallest_vblank = 999999.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) { - v->v_blank_time = (v->vtotal[k] - v->vactive[k]) * v->htotal[k] / v->pixel_clock[k]; - } - else { - v->v_blank_time = 0.0; - } - v->smallest_vblank =dcn_bw_min2(v->smallest_vblank, v->v_blank_time); - } - v->stutter_efficiency = (v->stutter_efficiency_not_including_vblank / 100.0 * (v->frame_time_for_min_full_det_buffering_time - v->smallest_vblank) + v->smallest_vblank) / v->frame_time_for_min_full_det_buffering_time * 100.0; - /*dcfclk deep sleep*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->byte_per_pixel_detc[k] > 0.0) { - v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 32.0 / v->display_pipe_line_delivery_time_luma[k], 1.1 * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 32.0 / v->display_pipe_line_delivery_time_chroma[k]); - } - else { - v->dcfclk_deep_sleep_per_plane[k] = 1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 64.0 / v->display_pipe_line_delivery_time_luma[k]; - } - v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(v->dcfclk_deep_sleep_per_plane[k], v->pixel_clock[k] / 16.0); - } - v->dcf_clk_deep_sleep = 8.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->dcf_clk_deep_sleep =dcn_bw_max2(v->dcf_clk_deep_sleep, v->dcfclk_deep_sleep_per_plane[k]); - } - /*stutter watermark*/ - - v->stutter_exit_watermark = v->sr_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency + 10.0 / 
v->dcf_clk_deep_sleep; - v->stutter_enter_plus_exit_watermark = v->sr_enter_plus_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency; - /*urgent latency supported*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->effective_det_plus_lb_lines_luma =dcn_bw_floor2(v->lines_in_dety[k] +dcn_bw_min2(v->lines_in_dety[k] * v->dppclk * v->byte_per_pixel_dety[k] * v->pscl_throughput[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_y[k]); - v->urgent_latency_support_us_luma = v->effective_det_plus_lb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_det_plus_lb_lines_luma * v->swath_width_y[k] * v->byte_per_pixel_dety[k] / (v->return_bw / v->dpp_per_plane[k]); - if (v->byte_per_pixel_detc[k] > 0.0) { - v->effective_det_plus_lb_lines_chroma =dcn_bw_floor2(v->lines_in_detc[k] +dcn_bw_min2(v->lines_in_detc[k] * v->dppclk * v->byte_per_pixel_detc[k] * v->pscl_throughput_chroma[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_c[k]); - v->urgent_latency_support_us_chroma = v->effective_det_plus_lb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_det_plus_lb_lines_chroma * (v->swath_width_y[k] / 2.0) * v->byte_per_pixel_detc[k] / (v->return_bw / v->dpp_per_plane[k]); - v->urgent_latency_support_us[k] =dcn_bw_min2(v->urgent_latency_support_us_luma, v->urgent_latency_support_us_chroma); - } - else { - v->urgent_latency_support_us[k] = v->urgent_latency_support_us_luma; - } - } - v->min_urgent_latency_support_us = 999999.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->min_urgent_latency_support_us =dcn_bw_min2(v->min_urgent_latency_support_us, v->urgent_latency_support_us[k]); - } - /*non-urgent latency tolerance*/ - - v->non_urgent_latency_tolerance = v->min_urgent_latency_support_us - v->urgent_watermark; - /*prefetch*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->block_height256_bytes_y = 1.0; - } - else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { - v->block_height256_bytes_y = 4.0; - } - else { - v->block_height256_bytes_y = 8.0; - } - v->block_height256_bytes_c = 0.0; - } - else { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->block_height256_bytes_y = 1.0; - v->block_height256_bytes_c = 1.0; - } - else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { - v->block_height256_bytes_y = 16.0; - v->block_height256_bytes_c = 8.0; - } - else { - v->block_height256_bytes_y = 8.0; - v->block_height256_bytes_c = 8.0; - } - } - if (v->dcc_enable[k] == dcn_bw_yes) { - v->meta_request_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (8.0 * v->block_height256_bytes_y); - v->meta_surf_width_y =dcn_bw_ceil2(v->swath_width_y[k] - 1.0, v->meta_request_width_y) + v->meta_request_width_y; - v->meta_surf_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, 8.0 * v->block_height256_bytes_y) + 8.0 * v->block_height256_bytes_y; - if (v->pte_enable == dcn_bw_yes) { - v->meta_pte_bytes_frame_y = (dcn_bw_ceil2((v->meta_surf_width_y * v->meta_surf_height_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; - } - else { - v->meta_pte_bytes_frame_y = 0.0; - } - if 
(v->source_scan[k] == dcn_bw_hor) { - v->meta_row_byte_y = v->meta_surf_width_y * 8.0 * v->block_height256_bytes_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0; - } - else { - v->meta_row_byte_y = v->meta_surf_height_y * v->meta_request_width_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0; - } - } - else { - v->meta_pte_bytes_frame_y = 0.0; - v->meta_row_byte_y = 0.0; - } - if (v->pte_enable == dcn_bw_yes) { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->macro_tile_size_byte_y = 256.0; - v->macro_tile_height_y = 1.0; - } - else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { - v->macro_tile_size_byte_y = 4096.0; - v->macro_tile_height_y = 4.0 * v->block_height256_bytes_y; - } - else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { - v->macro_tile_size_byte_y = 64.0 * 1024; - v->macro_tile_height_y = 16.0 * v->block_height256_bytes_y; - } - else { - v->macro_tile_size_byte_y = 256.0 * 1024; - v->macro_tile_height_y = 32.0 * v->block_height256_bytes_y; - } - if (v->macro_tile_size_byte_y <= 65536.0) { - v->pixel_pte_req_height_y = v->macro_tile_height_y; - } - else { - v->pixel_pte_req_height_y = 16.0 * v->block_height256_bytes_y; - } - v->pixel_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / v->pixel_pte_req_height_y * 8; - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_y / v->swath_width_y[k], 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1); - } - else if (v->source_scan[k] == dcn_bw_hor) { - v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1); - } - else { - v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->pixel_pte_req_height_y, 1.0) + 1); - } - } - else { - v->pixel_pte_bytes_per_row_y = 0.0; - } - if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { - if (v->dcc_enable[k] == dcn_bw_yes) { - v->meta_request_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (8.0 * v->block_height256_bytes_c); - v->meta_surf_width_c =dcn_bw_ceil2(v->swath_width_y[k] / 2.0 - 1.0, v->meta_request_width_c) + v->meta_request_width_c; - v->meta_surf_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, 8.0 * v->block_height256_bytes_c) + 8.0 * v->block_height256_bytes_c; - if (v->pte_enable == dcn_bw_yes) { - v->meta_pte_bytes_frame_c = (dcn_bw_ceil2((v->meta_surf_width_c * v->meta_surf_height_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; - } - else { - v->meta_pte_bytes_frame_c = 0.0; - } - if (v->source_scan[k] == dcn_bw_hor) { - v->meta_row_byte_c = v->meta_surf_width_c * 8.0 * v->block_height256_bytes_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0; - } - else { - v->meta_row_byte_c = v->meta_surf_height_c * v->meta_request_width_c 
*dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0; - } - } - else { - v->meta_pte_bytes_frame_c = 0.0; - v->meta_row_byte_c = 0.0; - } - if (v->pte_enable == dcn_bw_yes) { - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->macro_tile_size_bytes_c = 256.0; - v->macro_tile_height_c = 1.0; - } - else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { - v->macro_tile_size_bytes_c = 4096.0; - v->macro_tile_height_c = 4.0 * v->block_height256_bytes_c; - } - else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { - v->macro_tile_size_bytes_c = 64.0 * 1024; - v->macro_tile_height_c = 16.0 * v->block_height256_bytes_c; - } - else { - v->macro_tile_size_bytes_c = 256.0 * 1024; - v->macro_tile_height_c = 32.0 * v->block_height256_bytes_c; - } - if (v->macro_tile_size_bytes_c <= 65536.0) { - v->pixel_pte_req_height_c = v->macro_tile_height_c; - } - else { - v->pixel_pte_req_height_c = 16.0 * v->block_height256_bytes_c; - } - v->pixel_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / v->pixel_pte_req_height_c * 8; - if (v->source_surface_mode[k] == dcn_bw_sw_linear) { - v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_c / (v->swath_width_y[k] / 2.0), 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1); - } - else if (v->source_scan[k] == dcn_bw_hor) { - v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1); - } - else { - v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->pixel_pte_req_height_c, 1.0) + 1); - } - } - else { - v->pixel_pte_bytes_per_row_c = 0.0; - } - } - else { - v->pixel_pte_bytes_per_row_c = 0.0; - v->meta_pte_bytes_frame_c = 0.0; - v->meta_row_byte_c = 0.0; - } - v->pixel_pte_bytes_per_row[k] = v->pixel_pte_bytes_per_row_y + v->pixel_pte_bytes_per_row_c; - v->meta_pte_bytes_frame[k] = v->meta_pte_bytes_frame_y + v->meta_pte_bytes_frame_c; - v->meta_row_byte[k] = v->meta_row_byte_y + v->meta_row_byte_c; - v->v_init_pre_fill_y[k] =dcn_bw_floor2((v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0, 1.0); - v->max_num_swath_y[k] =dcn_bw_ceil2((v->v_init_pre_fill_y[k] - 1.0) / v->swath_height_y[k], 1.0) + 1; - if (v->v_init_pre_fill_y[k] > 1.0) { - v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] - 2.0), v->swath_height_y[k]); - } - else { - v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] + v->swath_height_y[k] - 2.0), v->swath_height_y[k]); - } - v->max_partial_swath_y =dcn_bw_max2(1.0, v->max_partial_swath_y); - v->prefetch_source_lines_y[k] = v->max_num_swath_y[k] * v->swath_height_y[k] + v->max_partial_swath_y; - if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { - v->v_init_pre_fill_c[k] =dcn_bw_floor2((v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0, 1.0); - 
v->max_num_swath_c[k] =dcn_bw_ceil2((v->v_init_pre_fill_c[k] - 1.0) / v->swath_height_c[k], 1.0) + 1; - if (v->v_init_pre_fill_c[k] > 1.0) { - v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] - 2.0), v->swath_height_c[k]); - } - else { - v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] + v->swath_height_c[k] - 2.0), v->swath_height_c[k]); - } - v->max_partial_swath_c =dcn_bw_max2(1.0, v->max_partial_swath_c); - } - else { - v->max_num_swath_c[k] = 0.0; - v->max_partial_swath_c = 0.0; - } - v->prefetch_source_lines_c[k] = v->max_num_swath_c[k] * v->swath_height_c[k] + v->max_partial_swath_c; - } - v->t_calc = 24.0 / v->dcf_clk_deep_sleep; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) { - v->max_vstartup_lines[k] = v->vtotal[k] - v->vactive[k] - 1.0; - } - else { - v->max_vstartup_lines[k] = v->v_sync_plus_back_porch[k] - 1.0; - } - } - v->next_prefetch_mode = 0.0; - do { - v->v_startup_lines = 13.0; - do { - v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_yes; - v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_no; - v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_no; - v->v_ratio_prefetch_more_than4 = dcn_bw_no; - v->destination_line_times_for_prefetch_less_than2 = dcn_bw_no; - v->prefetch_mode = v->next_prefetch_mode; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->dstx_after_scaler = 90.0 * v->pixel_clock[k] / v->dppclk + 42.0 * v->pixel_clock[k] / v->dispclk; - if (v->dpp_per_plane[k] > 1.0) { - v->dstx_after_scaler = v->dstx_after_scaler + v->scaler_rec_out_width[k] / 2.0; - } - if (v->output_format[k] == dcn_bw_420) { - v->dsty_after_scaler = 1.0; - } - else { - v->dsty_after_scaler = 0.0; - } - v->v_update_offset_pix[k] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0); - v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk); - v->v_update_width_pix[k] = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k]; - v->v_ready_offset_pix[k] = dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k]; - v->t_setup = (v->v_update_offset_pix[k] + v->v_update_width_pix[k] + v->v_ready_offset_pix[k]) / v->pixel_clock[k]; - v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]); - if (v->prefetch_mode == 0.0) { - v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency); - } - else if (v->prefetch_mode == 1.0) { - v->t_wait =dcn_bw_max2(v->sr_enter_plus_exit_time, v->urgent_latency); - } - else { - v->t_wait = v->urgent_latency; - } - v->destination_lines_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->v_startup[k] - v->t_wait / (v->htotal[k] / v->pixel_clock[k]) - (v->t_calc + v->t_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dsty_after_scaler + v->dstx_after_scaler / v->htotal[k]) + 0.125), 1.0) / 4; - if (v->destination_lines_for_prefetch[k] > 0.0) { - v->prefetch_bandwidth[k] = (v->meta_pte_bytes_frame[k] + 2.0 * v->meta_row_byte[k] + 2.0 * v->pixel_pte_bytes_per_row[k] + v->prefetch_source_lines_y[k] * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0)) / (v->destination_lines_for_prefetch[k] * 
v->htotal[k] / v->pixel_clock[k]); - } - else { - v->prefetch_bandwidth[k] = 999999.0; - } - } - v->bandwidth_available_for_immediate_flip = v->return_bw; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->bandwidth_available_for_immediate_flip = v->bandwidth_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->prefetch_bandwidth[k]); - } - v->tot_immediate_flip_bytes = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { - v->tot_immediate_flip_bytes = v->tot_immediate_flip_bytes + v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]; - } - } - v->max_rd_bandwidth = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { - if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { - v->time_for_fetching_meta_pte =dcn_bw_max5(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->meta_pte_bytes_frame[k] * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); - } - else { - v->time_for_fetching_meta_pte =dcn_bw_max3(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); - } - } - else { - v->time_for_fetching_meta_pte = v->htotal[k] / v->pixel_clock[k] / 4.0; - } - v->destination_lines_to_request_vm_inv_blank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_meta_pte / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; - if ((v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes)) { - if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { - v->time_for_fetching_row_in_vblank =dcn_bw_max5((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, 2.0 * v->urgent_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte); - } - else { - v->time_for_fetching_row_in_vblank =dcn_bw_max3((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte); - } - } - else { - v->time_for_fetching_row_in_vblank =dcn_bw_max2(v->urgent_extra_latency - v->time_for_fetching_meta_pte, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte); - } - v->destination_lines_to_request_row_in_vblank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_row_in_vblank / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; - v->lines_to_request_prefetch_pixel_data = v->destination_lines_for_prefetch[k] - v->destination_lines_to_request_vm_inv_blank[k] - v->destination_lines_to_request_row_in_vblank[k]; - if (v->lines_to_request_prefetch_pixel_data > 0.0) { - v->v_ratio_prefetch_y[k] = v->prefetch_source_lines_y[k] / 
v->lines_to_request_prefetch_pixel_data; - if ((v->swath_height_y[k] > 4.0)) { - if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_y[k] - 3.0) / 2.0) { - v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], v->max_num_swath_y[k] * v->swath_height_y[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_y[k] - 3.0) / 2.0)); - } - else { - v->v_ratio_prefetch_y[k] = 999999.0; - } - } - } - else { - v->v_ratio_prefetch_y[k] = 999999.0; - } - v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], 1.0); - if (v->lines_to_request_prefetch_pixel_data > 0.0) { - v->v_ratio_prefetch_c[k] = v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data; - if ((v->swath_height_c[k] > 4.0)) { - if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_c[k] - 3.0) / 2.0) { - v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], v->max_num_swath_c[k] * v->swath_height_c[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_c[k] - 3.0) / 2.0)); - } - else { - v->v_ratio_prefetch_c[k] = 999999.0; - } - } - } - else { - v->v_ratio_prefetch_c[k] = 999999.0; - } - v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], 1.0); - if (v->lines_to_request_prefetch_pixel_data > 0.0) { - v->required_prefetch_pix_data_bw = v->dpp_per_plane[k] * (v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 2.0) * v->swath_width_y[k] / (v->htotal[k] / v->pixel_clock[k]); - } - else { - v->required_prefetch_pix_data_bw = 999999.0; - } - v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->required_prefetch_pix_data_bw); - if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { - v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->meta_pte_bytes_frame[k] / (v->destination_lines_to_request_vm_inv_blank[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / (v->destination_lines_to_request_row_in_vblank[k] * v->htotal[k] / v->pixel_clock[k])); - } - if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) { - v->v_ratio_prefetch_more_than4 = dcn_bw_yes; - } - if (v->destination_lines_for_prefetch[k] < 2.0) { - v->destination_line_times_for_prefetch_less_than2 = dcn_bw_yes; - } - if (v->max_vstartup_lines[k] > v->v_startup_lines) { - if (v->required_prefetch_pix_data_bw > (v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k])) { - v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_no; - } - if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) { - v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_yes; - } - if (v->destination_lines_for_prefetch[k] < 2.0) { - v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_yes; - } - } - } - if (v->max_rd_bandwidth <= v->return_bw && v->v_ratio_prefetch_more_than4 == dcn_bw_no && v->destination_line_times_for_prefetch_less_than2 == dcn_bw_no) { - v->prefetch_mode_supported = dcn_bw_yes; - } - else { - v->prefetch_mode_supported = dcn_bw_no; - } - v->v_startup_lines = v->v_startup_lines + 1.0; - } while (!(v->prefetch_mode_supported == dcn_bw_yes || 
(v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw == dcn_bw_yes && v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 == dcn_bw_no && v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 == dcn_bw_no))); - v->next_prefetch_mode = v->next_prefetch_mode + 1.0; - } while (!(v->prefetch_mode_supported == dcn_bw_yes || v->prefetch_mode == 2.0)); - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->v_ratio_prefetch_y[k] <= 1.0) { - v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k]; - } - else { - v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk; - } - if (v->byte_per_pixel_detc[k] == 0.0) { - v->display_pipe_line_delivery_time_chroma_prefetch[k] = 0.0; - } - else { - if (v->v_ratio_prefetch_c[k] <= 1.0) { - v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k]; - } - else { - v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk; - } - } - } - /*min ttuv_blank*/ - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->prefetch_mode == 0.0) { - v->allow_dram_clock_change_during_vblank[k] = dcn_bw_yes; - v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes; - v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max3(v->dram_clock_change_watermark, v->stutter_enter_plus_exit_watermark, v->urgent_watermark); - } - else if (v->prefetch_mode == 1.0) { - v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no; - v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes; - v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max2(v->stutter_enter_plus_exit_watermark, v->urgent_watermark); - } - else { - v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no; - v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_no; - v->min_ttuv_blank[k] = v->t_calc + v->urgent_watermark; - } - } - /*nb p-state/dram clock change support*/ - - v->active_dp_ps = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->active_dp_ps = v->active_dp_ps + v->dpp_per_plane[k]; - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->lb_latency_hiding_source_lines_y =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0); - v->lb_latency_hiding_source_lines_c =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0); - v->effective_lb_latency_hiding_y = v->lb_latency_hiding_source_lines_y / v->v_ratio[k] * (v->htotal[k] / v->pixel_clock[k]); - v->effective_lb_latency_hiding_c = v->lb_latency_hiding_source_lines_c / (v->v_ratio[k] / 2.0) * (v->htotal[k] / v->pixel_clock[k]); - if (v->swath_width_y[k] > 2.0 * v->dpp_output_buffer_pixels) { - v->dpp_output_buffer_lines_y = v->dpp_output_buffer_pixels / v->swath_width_y[k]; - } - else if (v->swath_width_y[k] > v->dpp_output_buffer_pixels) { - v->dpp_output_buffer_lines_y = 0.5; - } - else { - v->dpp_output_buffer_lines_y = 1.0; - } - if (v->swath_width_y[k] / 2.0 > 2.0 * v->dpp_output_buffer_pixels) { - v->dpp_output_buffer_lines_c = v->dpp_output_buffer_pixels / (v->swath_width_y[k] / 2.0); - } - else if (v->swath_width_y[k] / 2.0 > 
v->dpp_output_buffer_pixels) { - v->dpp_output_buffer_lines_c = 0.5; - } - else { - v->dpp_output_buffer_lines_c = 1.0; - } - v->dppopp_buffering_y = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_y + v->opp_output_buffer_lines); - v->max_det_buffering_time_y = v->full_det_buffering_time_y[k] + (v->lines_in_dety[k] - v->lines_in_dety_rounded_down_to_swath[k]) / v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]); - v->active_dram_clock_change_latency_margin_y = v->dppopp_buffering_y + v->effective_lb_latency_hiding_y + v->max_det_buffering_time_y - v->dram_clock_change_watermark; - if (v->active_dp_ps > 1.0) { - v->active_dram_clock_change_latency_margin_y = v->active_dram_clock_change_latency_margin_y - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]); - } - if (v->byte_per_pixel_detc[k] > 0.0) { - v->dppopp_buffering_c = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_c + v->opp_output_buffer_lines); - v->max_det_buffering_time_c = v->full_det_buffering_time_c[k] + (v->lines_in_detc[k] - v->lines_in_detc_rounded_down_to_swath[k]) / v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]); - v->active_dram_clock_change_latency_margin_c = v->dppopp_buffering_c + v->effective_lb_latency_hiding_c + v->max_det_buffering_time_c - v->dram_clock_change_watermark; - if (v->active_dp_ps > 1.0) { - v->active_dram_clock_change_latency_margin_c = v->active_dram_clock_change_latency_margin_c - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]); - } - v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin_y, v->active_dram_clock_change_latency_margin_c); - } - else { - v->active_dram_clock_change_latency_margin[k] = v->active_dram_clock_change_latency_margin_y; - } - if (v->output_format[k] == dcn_bw_444) { - v->writeback_dram_clock_change_latency_margin = (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0) - v->writeback_dram_clock_change_watermark; - } - else { - v->writeback_dram_clock_change_latency_margin =dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k])) - v->writeback_dram_clock_change_watermark; - } - if (v->output[k] == dcn_bw_writeback) { - v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin[k], v->writeback_dram_clock_change_latency_margin); - } - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->allow_dram_clock_change_during_vblank[k] == dcn_bw_yes) { - v->v_blank_dram_clock_change_latency_margin[k] = (v->vtotal[k] - v->scaler_recout_height[k]) * (v->htotal[k] / v->pixel_clock[k]) -dcn_bw_max2(v->dram_clock_change_watermark, v->writeback_dram_clock_change_watermark); - } - else { - v->v_blank_dram_clock_change_latency_margin[k] = 0.0; - } - } - v->min_active_dram_clock_change_margin = 999999.0; - v->v_blank_of_min_active_dram_clock_change_margin = 999999.0; - v->second_min_active_dram_clock_change_margin = 999999.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->active_dram_clock_change_latency_margin[k] < v->min_active_dram_clock_change_margin) { - v->second_min_active_dram_clock_change_margin = v->min_active_dram_clock_change_margin; - v->min_active_dram_clock_change_margin = 
v->active_dram_clock_change_latency_margin[k]; - v->v_blank_of_min_active_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k]; - } - else if (v->active_dram_clock_change_latency_margin[k] < v->second_min_active_dram_clock_change_margin) { - v->second_min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k]; - } - } - v->min_vblank_dram_clock_change_margin = 999999.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->min_vblank_dram_clock_change_margin > v->v_blank_dram_clock_change_latency_margin[k]) { - v->min_vblank_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k]; - } - } - if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) { - v->dram_clock_change_margin =dcn_bw_max2(v->min_active_dram_clock_change_margin, v->min_vblank_dram_clock_change_margin); - } - else if (v->v_blank_of_min_active_dram_clock_change_margin > v->min_active_dram_clock_change_margin) { - v->dram_clock_change_margin =dcn_bw_min2(v->second_min_active_dram_clock_change_margin, v->v_blank_of_min_active_dram_clock_change_margin); - } - else { - v->dram_clock_change_margin = v->min_active_dram_clock_change_margin; - } - if (v->min_active_dram_clock_change_margin > 0.0) { - v->dram_clock_change_support = dcn_bw_supported_in_v_active; - } - else if (v->dram_clock_change_margin > 0.0) { - v->dram_clock_change_support = dcn_bw_supported_in_v_blank; - } - else { - v->dram_clock_change_support = dcn_bw_not_supported; - } - /*maximum bandwidth used*/ - - v->wr_bandwidth = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) { - v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0; - } - else if (v->output[k] == dcn_bw_writeback) { - v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5; - } - } - v->max_used_bw = v->max_rd_bandwidth + v->wr_bandwidth; -} diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h deleted file mode 100644 index ce35de79a6c7..000000000000 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_auto.h +++ /dev/null @@ -1,38 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. 
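/*
 * A minimal sketch of the DRAM clock change (p-state) support decision made
 * by the watermark calculation removed above, assuming the per-plane active
 * and vblank latency margins have already been computed.  The helper name,
 * enum and parameter layout below are illustrative only and are not part of
 * the deleted code.
 */
#include <stdbool.h>

enum pstate_support {
	PSTATE_SUPPORTED_IN_V_ACTIVE,
	PSTATE_SUPPORTED_IN_V_BLANK,
	PSTATE_NOT_SUPPORTED,
};

static enum pstate_support classify_pstate_support(const float *active_margin,
						   const float *vblank_margin,
						   int num_planes,
						   bool synchronized_vblank)
{
	float min_active = 999999.0f;		/* smallest active-state margin */
	float second_min_active = 999999.0f;	/* second smallest active margin */
	float vblank_of_min_active = 999999.0f;	/* vblank margin of that worst plane */
	float min_vblank = 999999.0f;		/* smallest vblank margin */
	float margin;
	int k;

	for (k = 0; k < num_planes; k++) {
		if (active_margin[k] < min_active) {
			second_min_active = min_active;
			min_active = active_margin[k];
			vblank_of_min_active = vblank_margin[k];
		} else if (active_margin[k] < second_min_active) {
			second_min_active = active_margin[k];
		}
		if (vblank_margin[k] < min_vblank)
			min_vblank = vblank_margin[k];
	}

	/*
	 * With synchronized vblanks (or a single plane) the switch can be
	 * hidden in either active or blanking time, so the better of the two
	 * margins counts; otherwise the worst-case plane must cover the
	 * switch itself, possibly falling back on the second worst plane's
	 * active margin.
	 */
	if (synchronized_vblank || num_planes == 1)
		margin = min_active > min_vblank ? min_active : min_vblank;
	else if (vblank_of_min_active > min_active)
		margin = second_min_active < vblank_of_min_active ?
			 second_min_active : vblank_of_min_active;
	else
		margin = min_active;

	if (min_active > 0.0f)
		return PSTATE_SUPPORTED_IN_V_ACTIVE;
	if (margin > 0.0f)
		return PSTATE_SUPPORTED_IN_V_BLANK;
	return PSTATE_NOT_SUPPORTED;
}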
- * - * Authors: AMD - * - */ - -#ifndef _DCN_CALC_AUTO_H_ -#define _DCN_CALC_AUTO_H_ - -#include "dc.h" -#include "dcn_calcs.h" - -void scaler_settings_calculation(struct dcn_bw_internal_vars *v); -void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v); -void display_pipe_configuration(struct dcn_bw_internal_vars *v); -void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation( - struct dcn_bw_internal_vars *v); - -#endif /* _DCN_CALC_AUTO_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c deleted file mode 100644 index 07d18e78de49..000000000000 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calc_math.c +++ /dev/null @@ -1,147 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dcn_calc_math.h" - -#define isNaN(number) ((number) != (number)) - -/* - * NOTE: - * This file is gcc-parseable HW gospel, coming straight from HW engineers. - * - * It doesn't adhere to Linux kernel style and sometimes will do things in odd - * ways. Unless there is something clearly wrong with it the code should - * remain as-is as it provides us with a guarantee from HW that it is correct. - */ - -float dcn_bw_mod(const float arg1, const float arg2) -{ - if (isNaN(arg1)) - return arg2; - if (isNaN(arg2)) - return arg1; - return arg1 - arg1 * ((int) (arg1 / arg2)); -} - -float dcn_bw_min2(const float arg1, const float arg2) -{ - if (isNaN(arg1)) - return arg2; - if (isNaN(arg2)) - return arg1; - return arg1 < arg2 ? arg1 : arg2; -} - -unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2) -{ - return arg1 > arg2 ? arg1 : arg2; -} -float dcn_bw_max2(const float arg1, const float arg2) -{ - if (isNaN(arg1)) - return arg2; - if (isNaN(arg2)) - return arg1; - return arg1 > arg2 ? arg1 : arg2; -} - -float dcn_bw_floor2(const float arg, const float significance) -{ - if (significance == 0) - return 0; - return ((int) (arg / significance)) * significance; -} -float dcn_bw_floor(const float arg) -{ - return ((int) (arg)); -} - -float dcn_bw_ceil(const float arg) -{ - float flr = dcn_bw_floor2(arg, 1); - - return flr + 0.00001 >= arg ? arg : flr + 1; -} - -float dcn_bw_ceil2(const float arg, const float significance) -{ - float flr = dcn_bw_floor2(arg, significance); - if (significance == 0) - return 0; - return flr + 0.00001 >= arg ? 
arg : flr + significance; -} - -float dcn_bw_max3(float v1, float v2, float v3) -{ - return v3 > dcn_bw_max2(v1, v2) ? v3 : dcn_bw_max2(v1, v2); -} - -float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5) -{ - return dcn_bw_max3(v1, v2, v3) > dcn_bw_max2(v4, v5) ? dcn_bw_max3(v1, v2, v3) : dcn_bw_max2(v4, v5); -} - -float dcn_bw_pow(float a, float exp) -{ - float temp; - /*ASSERT(exp == (int)exp);*/ - if ((int)exp == 0) - return 1; - temp = dcn_bw_pow(a, (int)(exp / 2)); - if (((int)exp % 2) == 0) { - return temp * temp; - } else { - if ((int)exp > 0) - return a * temp * temp; - else - return (temp * temp) / a; - } -} - -double dcn_bw_fabs(double a) -{ - if (a > 0) - return (a); - else - return (-a); -} - - -float dcn_bw_log(float a, float b) -{ - int * const exp_ptr = (int *)(&a); - int x = *exp_ptr; - const int log_2 = ((x >> 23) & 255) - 128; - x &= ~(255 << 23); - x += 127 << 23; - *exp_ptr = x; - - a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3; - - if (b > 2.00001 || b < 1.99999) - return (a + log_2) / dcn_bw_log(b, 2); - else - return (a + log_2); -} diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c deleted file mode 100644 index ec19678a0702..000000000000 --- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c +++ /dev/null @@ -1,1808 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * Copyright 2019 Raptor Engineering, LLC - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: AMD - * - */ - -#include "dm_services.h" -#include "dc.h" -#include "dcn_calcs.h" -#include "dcn_calc_auto.h" -#include "dal_asic_id.h" -#include "resource.h" -#include "dcn10/dcn10_resource.h" -#include "dcn10/dcn10_hubbub.h" -#include "dml/dml1_display_rq_dlg_calc.h" - -#include "dcn_calc_math.h" - -#define DC_LOGGER \ - dc->ctx->logger - -#define WM_SET_COUNT 4 -#define WM_A 0 -#define WM_B 1 -#define WM_C 2 -#define WM_D 3 - -/* - * NOTE: - * This file is gcc-parseable HW gospel, coming straight from HW engineers. - * - * It doesn't adhere to Linux kernel style and sometimes will do things in odd - * ways. Unless there is something clearly wrong with it the code should - * remain as-is as it provides us with a guarantee from HW that it is correct. - */ - -/* Defaults from spreadsheet rev#247. 
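/*
 * A short, self-contained restatement of the bit-level log2 approximation at
 * the core of dcn_bw_log() above, written with memcpy-based type punning and
 * extra comments purely for explanation; it covers only the base-2 case and
 * is not part of the removed file.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static float approx_log2(float a)
{
	uint32_t bits;
	int exponent;

	/* Reinterpret the IEEE-754 single-precision value as raw bits. */
	memcpy(&bits, &a, sizeof(bits));

	/* Biased exponent field minus 128, i.e. floor(log2(a)) - 1. */
	exponent = (int)((bits >> 23) & 255) - 128;

	/* Force the exponent field to 127 so a becomes the mantissa in [1, 2). */
	bits = (bits & ~(255u << 23)) + (127u << 23);
	memcpy(&a, &bits, sizeof(a));

	/* Quadratic fit of log2(m) + 1 on [1, 2): exact at m = 1 and m = 2. */
	a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3;

	return a + exponent;
}

int main(void)
{
	/* Agrees with log2 to within roughly 0.01 for the values shown. */
	printf("approx_log2(1.5)  = %f\n", approx_log2(1.5f));	/* ~0.583 */
	printf("approx_log2(10.0) = %f\n", approx_log2(10.0f));	/* ~3.31  */
	return 0;
}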
- * RV2 delta: dram_clock_change_latency, max_num_dpp - */ -const struct dcn_soc_bounding_box dcn10_soc_defaults = { - /* latencies */ - .sr_exit_time = 17, /*us*/ - .sr_enter_plus_exit_time = 19, /*us*/ - .urgent_latency = 4, /*us*/ - .dram_clock_change_latency = 17, /*us*/ - .write_back_latency = 12, /*us*/ - .percent_of_ideal_drambw_received_after_urg_latency = 80, /*%*/ - - /* below default clocks derived from STA target base on - * slow-slow corner + 10% margin with voltages aligned to FCLK. - * - * Use these value if fused value doesn't make sense as earlier - * part don't have correct value fused */ - /* default DCF CLK DPM on RV*/ - .dcfclkv_max0p9 = 655, /* MHz, = 3600/5.5 */ - .dcfclkv_nom0p8 = 626, /* MHz, = 3600/5.75 */ - .dcfclkv_mid0p72 = 600, /* MHz, = 3600/6, bypass */ - .dcfclkv_min0p65 = 300, /* MHz, = 3600/12, bypass */ - - /* default DISP CLK voltage state on RV */ - .max_dispclk_vmax0p9 = 1108, /* MHz, = 3600/3.25 */ - .max_dispclk_vnom0p8 = 1029, /* MHz, = 3600/3.5 */ - .max_dispclk_vmid0p72 = 960, /* MHz, = 3600/3.75 */ - .max_dispclk_vmin0p65 = 626, /* MHz, = 3600/5.75 */ - - /* default DPP CLK voltage state on RV */ - .max_dppclk_vmax0p9 = 720, /* MHz, = 3600/5 */ - .max_dppclk_vnom0p8 = 686, /* MHz, = 3600/5.25 */ - .max_dppclk_vmid0p72 = 626, /* MHz, = 3600/5.75 */ - .max_dppclk_vmin0p65 = 400, /* MHz, = 3600/9 */ - - /* default PHY CLK voltage state on RV */ - .phyclkv_max0p9 = 900, /*MHz*/ - .phyclkv_nom0p8 = 847, /*MHz*/ - .phyclkv_mid0p72 = 800, /*MHz*/ - .phyclkv_min0p65 = 600, /*MHz*/ - - /* BW depend on FCLK, MCLK, # of channels */ - /* dual channel BW */ - .fabric_and_dram_bandwidth_vmax0p9 = 38.4f, /*GB/s*/ - .fabric_and_dram_bandwidth_vnom0p8 = 34.133f, /*GB/s*/ - .fabric_and_dram_bandwidth_vmid0p72 = 29.866f, /*GB/s*/ - .fabric_and_dram_bandwidth_vmin0p65 = 12.8f, /*GB/s*/ - /* single channel BW - .fabric_and_dram_bandwidth_vmax0p9 = 19.2f, - .fabric_and_dram_bandwidth_vnom0p8 = 17.066f, - .fabric_and_dram_bandwidth_vmid0p72 = 14.933f, - .fabric_and_dram_bandwidth_vmin0p65 = 12.8f, - */ - - .number_of_channels = 2, - - .socclk = 208, /*MHz*/ - .downspreading = 0.5f, /*%*/ - .round_trip_ping_latency_cycles = 128, /*DCFCLK Cycles*/ - .urgent_out_of_order_return_per_channel = 256, /*bytes*/ - .vmm_page_size = 4096, /*bytes*/ - .return_bus_width = 64, /*bytes*/ - .max_request_size = 256, /*bytes*/ - - /* Depends on user class (client vs embedded, workstation, etc) */ - .percent_disp_bw_limit = 0.3f /*%*/ -}; - -const struct dcn_ip_params dcn10_ip_defaults = { - .rob_buffer_size_in_kbyte = 64, - .det_buffer_size_in_kbyte = 164, - .dpp_output_buffer_pixels = 2560, - .opp_output_buffer_lines = 1, - .pixel_chunk_size_in_kbyte = 8, - .pte_enable = dcn_bw_yes, - .pte_chunk_size = 2, /*kbytes*/ - .meta_chunk_size = 2, /*kbytes*/ - .writeback_chunk_size = 2, /*kbytes*/ - .odm_capability = dcn_bw_no, - .dsc_capability = dcn_bw_no, - .line_buffer_size = 589824, /*bit*/ - .max_line_buffer_lines = 12, - .is_line_buffer_bpp_fixed = dcn_bw_no, - .line_buffer_fixed_bpp = dcn_bw_na, - .writeback_luma_buffer_size = 12, /*kbytes*/ - .writeback_chroma_buffer_size = 8, /*kbytes*/ - .max_num_dpp = 4, - .max_num_writeback = 2, - .max_dchub_topscl_throughput = 4, /*pixels/dppclk*/ - .max_pscl_tolb_throughput = 2, /*pixels/dppclk*/ - .max_lb_tovscl_throughput = 4, /*pixels/dppclk*/ - .max_vscl_tohscl_throughput = 4, /*pixels/dppclk*/ - .max_hscl_ratio = 4, - .max_vscl_ratio = 4, - .max_hscl_taps = 8, - .max_vscl_taps = 8, - .pte_buffer_size_in_requests = 42, - 
.dispclk_ramping_margin = 1, /*%*/ - .under_scan_factor = 1.11f, - .max_inter_dcn_tile_repeaters = 8, - .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = dcn_bw_no, - .bug_forcing_luma_and_chroma_request_to_same_size_fixed = dcn_bw_no, - .dcfclk_cstate_latency = 10 /*TODO clone of something else? sr_enter_plus_exit_time?*/ -}; - -static enum dcn_bw_defs tl_sw_mode_to_bw_defs(enum swizzle_mode_values sw_mode) -{ - switch (sw_mode) { - case DC_SW_LINEAR: - return dcn_bw_sw_linear; - case DC_SW_4KB_S: - return dcn_bw_sw_4_kb_s; - case DC_SW_4KB_D: - return dcn_bw_sw_4_kb_d; - case DC_SW_64KB_S: - return dcn_bw_sw_64_kb_s; - case DC_SW_64KB_D: - return dcn_bw_sw_64_kb_d; - case DC_SW_VAR_S: - return dcn_bw_sw_var_s; - case DC_SW_VAR_D: - return dcn_bw_sw_var_d; - case DC_SW_64KB_S_T: - return dcn_bw_sw_64_kb_s_t; - case DC_SW_64KB_D_T: - return dcn_bw_sw_64_kb_d_t; - case DC_SW_4KB_S_X: - return dcn_bw_sw_4_kb_s_x; - case DC_SW_4KB_D_X: - return dcn_bw_sw_4_kb_d_x; - case DC_SW_64KB_S_X: - return dcn_bw_sw_64_kb_s_x; - case DC_SW_64KB_D_X: - return dcn_bw_sw_64_kb_d_x; - case DC_SW_VAR_S_X: - return dcn_bw_sw_var_s_x; - case DC_SW_VAR_D_X: - return dcn_bw_sw_var_d_x; - case DC_SW_256B_S: - case DC_SW_256_D: - case DC_SW_256_R: - case DC_SW_4KB_R: - case DC_SW_64KB_R: - case DC_SW_VAR_R: - case DC_SW_4KB_R_X: - case DC_SW_64KB_R_X: - case DC_SW_VAR_R_X: - default: - BREAK_TO_DEBUGGER(); /*not in formula*/ - return dcn_bw_sw_4_kb_s; - } -} - -static int tl_lb_bpp_to_int(enum lb_pixel_depth depth) -{ - switch (depth) { - case LB_PIXEL_DEPTH_18BPP: - return 18; - case LB_PIXEL_DEPTH_24BPP: - return 24; - case LB_PIXEL_DEPTH_30BPP: - return 30; - case LB_PIXEL_DEPTH_36BPP: - return 36; - default: - return 30; - } -} - -static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format format) -{ - switch (format) { - case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: - case SURFACE_PIXEL_FORMAT_GRPH_RGB565: - return dcn_bw_rgb_sub_16; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: - return dcn_bw_rgb_sub_32; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - return dcn_bw_rgb_sub_64; - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - return dcn_bw_yuv420_sub_8; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - return dcn_bw_yuv420_sub_10; - default: - return dcn_bw_rgb_sub_32; - } -} - -enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode) -{ - switch (sw_mode) { - /* for 4/8/16 high tiles */ - case DC_SW_LINEAR: - return dm_4k_tile; - case DC_SW_4KB_S: - case DC_SW_4KB_S_X: - return dm_4k_tile; - case DC_SW_64KB_S: - case DC_SW_64KB_S_X: - case DC_SW_64KB_S_T: - return dm_64k_tile; - case DC_SW_VAR_S: - case DC_SW_VAR_S_X: - return dm_256k_tile; - - /* For 64bpp 2 high tiles */ - case DC_SW_4KB_D: - case DC_SW_4KB_D_X: - return dm_4k_tile; - case DC_SW_64KB_D: - case DC_SW_64KB_D_X: - case DC_SW_64KB_D_T: - return dm_64k_tile; - case DC_SW_VAR_D: - case DC_SW_VAR_D_X: - return dm_256k_tile; - - case DC_SW_4KB_R: - case DC_SW_4KB_R_X: - return dm_4k_tile; - case DC_SW_64KB_R: - case DC_SW_64KB_R_X: - return dm_64k_tile; - case 
DC_SW_VAR_R: - case DC_SW_VAR_R_X: - return dm_256k_tile; - - /* Unsupported swizzle modes for dcn */ - case DC_SW_256B_S: - default: - ASSERT(0); /* Not supported */ - return 0; - } -} - -static void pipe_ctx_to_e2e_pipe_params ( - const struct pipe_ctx *pipe, - struct _vcs_dpi_display_pipe_params_st *input) -{ - input->src.is_hsplit = false; - - /* stereo can never be split */ - if (pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE || - pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) { - /* reset the split group if it was already considered split. */ - input->src.hsplit_grp = pipe->pipe_idx; - } else if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state) { - input->src.is_hsplit = true; - } else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state) { - input->src.is_hsplit = true; - } - - if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) { - /* - * this method requires us to always re-calculate watermark when dcc change - * between flip. - */ - input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0; - } else { - /* - * allow us to disable dcc on the fly without re-calculating WM - * - * extra overhead for DCC is quite small. for 1080p WM without - * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us) - */ - unsigned int bpe; - - input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs-> - dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0; - } - input->src.dcc_rate = 1; - input->src.meta_pitch = pipe->plane_state->dcc.meta_pitch; - input->src.source_scan = dm_horz; - input->src.sw_mode = pipe->plane_state->tiling_info.gfx9.swizzle; - - input->src.viewport_width = pipe->plane_res.scl_data.viewport.width; - input->src.viewport_height = pipe->plane_res.scl_data.viewport.height; - input->src.data_pitch = pipe->plane_res.scl_data.viewport.width; - input->src.data_pitch_c = pipe->plane_res.scl_data.viewport.width; - input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not curently stored */ - input->src.cur0_bpp = 32; - - input->src.macro_tile_size = swizzle_mode_to_macro_tile_size(pipe->plane_state->tiling_info.gfx9.swizzle); - - switch (pipe->plane_state->rotation) { - case ROTATION_ANGLE_0: - case ROTATION_ANGLE_180: - input->src.source_scan = dm_horz; - break; - case ROTATION_ANGLE_90: - case ROTATION_ANGLE_270: - input->src.source_scan = dm_vert; - break; - default: - ASSERT(0); /* Not supported */ - break; - } - - /* TODO: Fix pixel format mappings */ - switch (pipe->plane_state->format) { - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: - input->src.source_format = dm_420_8; - input->src.viewport_width_c = input->src.viewport_width / 2; - input->src.viewport_height_c = input->src.viewport_height / 2; - break; - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: - case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: - input->src.source_format = dm_420_10; - input->src.viewport_width_c = input->src.viewport_width / 2; - input->src.viewport_height_c = input->src.viewport_height / 2; - break; - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: - case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: - case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: - input->src.source_format = dm_444_64; - input->src.viewport_width_c = input->src.viewport_width; - input->src.viewport_height_c = input->src.viewport_height; - break; - case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: - 
input->src.source_format = dm_rgbe_alpha; - input->src.viewport_width_c = input->src.viewport_width; - input->src.viewport_height_c = input->src.viewport_height; - break; - default: - input->src.source_format = dm_444_32; - input->src.viewport_width_c = input->src.viewport_width; - input->src.viewport_height_c = input->src.viewport_height; - break; - } - - input->scale_taps.htaps = pipe->plane_res.scl_data.taps.h_taps; - input->scale_ratio_depth.hscl_ratio = pipe->plane_res.scl_data.ratios.horz.value/4294967296.0; - input->scale_ratio_depth.vscl_ratio = pipe->plane_res.scl_data.ratios.vert.value/4294967296.0; - input->scale_ratio_depth.vinit = pipe->plane_res.scl_data.inits.v.value/4294967296.0; - if (input->scale_ratio_depth.vinit < 1.0) - input->scale_ratio_depth.vinit = 1; - input->scale_taps.vtaps = pipe->plane_res.scl_data.taps.v_taps; - input->scale_taps.vtaps_c = pipe->plane_res.scl_data.taps.v_taps_c; - input->scale_taps.htaps_c = pipe->plane_res.scl_data.taps.h_taps_c; - input->scale_ratio_depth.hscl_ratio_c = pipe->plane_res.scl_data.ratios.horz_c.value/4294967296.0; - input->scale_ratio_depth.vscl_ratio_c = pipe->plane_res.scl_data.ratios.vert_c.value/4294967296.0; - input->scale_ratio_depth.vinit_c = pipe->plane_res.scl_data.inits.v_c.value/4294967296.0; - if (input->scale_ratio_depth.vinit_c < 1.0) - input->scale_ratio_depth.vinit_c = 1; - switch (pipe->plane_res.scl_data.lb_params.depth) { - case LB_PIXEL_DEPTH_30BPP: - input->scale_ratio_depth.lb_depth = 30; break; - case LB_PIXEL_DEPTH_36BPP: - input->scale_ratio_depth.lb_depth = 36; break; - default: - input->scale_ratio_depth.lb_depth = 24; break; - } - - - input->dest.vactive = pipe->stream->timing.v_addressable + pipe->stream->timing.v_border_top - + pipe->stream->timing.v_border_bottom; - - input->dest.recout_width = pipe->plane_res.scl_data.recout.width; - input->dest.recout_height = pipe->plane_res.scl_data.recout.height; - - input->dest.full_recout_width = pipe->plane_res.scl_data.recout.width; - input->dest.full_recout_height = pipe->plane_res.scl_data.recout.height; - - input->dest.htotal = pipe->stream->timing.h_total; - input->dest.hblank_start = input->dest.htotal - pipe->stream->timing.h_front_porch; - input->dest.hblank_end = input->dest.hblank_start - - pipe->stream->timing.h_addressable - - pipe->stream->timing.h_border_left - - pipe->stream->timing.h_border_right; - - input->dest.vtotal = pipe->stream->timing.v_total; - input->dest.vblank_start = input->dest.vtotal - pipe->stream->timing.v_front_porch; - input->dest.vblank_end = input->dest.vblank_start - - pipe->stream->timing.v_addressable - - pipe->stream->timing.v_border_bottom - - pipe->stream->timing.v_border_top; - input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_100hz/10000.0; - input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start; - input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; - input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; - input->dest.vupdate_width = pipe->pipe_dlg_param.vupdate_width; - -} - -static void dcn_bw_calc_rq_dlg_ttu( - const struct dc *dc, - const struct dcn_bw_internal_vars *v, - struct pipe_ctx *pipe, - int in_idx) -{ - struct display_mode_lib *dml = (struct display_mode_lib *)(&dc->dml); - struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs; - struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs; - struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs; - struct _vcs_dpi_display_rq_params_st *rq_param = &pipe->dml_rq_param; - struct 
_vcs_dpi_display_dlg_sys_params_st *dlg_sys_param = &pipe->dml_dlg_sys_param; - struct _vcs_dpi_display_e2e_pipe_params_st *input = &pipe->dml_input; - float total_active_bw = 0; - float total_prefetch_bw = 0; - int total_flip_bytes = 0; - int i; - - memset(dlg_regs, 0, sizeof(*dlg_regs)); - memset(ttu_regs, 0, sizeof(*ttu_regs)); - memset(rq_regs, 0, sizeof(*rq_regs)); - memset(rq_param, 0, sizeof(*rq_param)); - memset(dlg_sys_param, 0, sizeof(*dlg_sys_param)); - memset(input, 0, sizeof(*input)); - - for (i = 0; i < number_of_planes; i++) { - total_active_bw += v->read_bandwidth[i]; - total_prefetch_bw += v->prefetch_bandwidth[i]; - total_flip_bytes += v->total_immediate_flip_bytes[i]; - } - dlg_sys_param->total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw); - if (dlg_sys_param->total_flip_bw < 0.0) - dlg_sys_param->total_flip_bw = 0; - - dlg_sys_param->t_mclk_wm_us = v->dram_clock_change_watermark; - dlg_sys_param->t_sr_wm_us = v->stutter_enter_plus_exit_watermark; - dlg_sys_param->t_urg_wm_us = v->urgent_watermark; - dlg_sys_param->t_extra_us = v->urgent_extra_latency; - dlg_sys_param->deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep; - dlg_sys_param->total_flip_bytes = total_flip_bytes; - - pipe_ctx_to_e2e_pipe_params(pipe, &input->pipe); - input->clks_cfg.dcfclk_mhz = v->dcfclk; - input->clks_cfg.dispclk_mhz = v->dispclk; - input->clks_cfg.dppclk_mhz = v->dppclk; - input->clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; - input->clks_cfg.socclk_mhz = v->socclk; - input->clks_cfg.voltage = v->voltage_level; -// dc->dml.logger = pool->base.logger; - input->dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444; - input->dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? 
dm_hdmi : dm_dp; - //input[in_idx].dout.output_standard; - - /*todo: soc->sr_enter_plus_exit_time??*/ - dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep; - - dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src); - dml1_extract_rq_regs(dml, rq_regs, rq_param); - dml1_rq_dlg_get_dlg_params( - dml, - dlg_regs, - ttu_regs, - &rq_param->dlg, - dlg_sys_param, - input, - true, - true, - v->pte_enable == dcn_bw_yes, - pipe->plane_state->flip_immediate); -} - -static void split_stream_across_pipes( - struct resource_context *res_ctx, - const struct resource_pool *pool, - struct pipe_ctx *primary_pipe, - struct pipe_ctx *secondary_pipe) -{ - int pipe_idx = secondary_pipe->pipe_idx; - - if (!primary_pipe->plane_state) - return; - - *secondary_pipe = *primary_pipe; - - secondary_pipe->pipe_idx = pipe_idx; - secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; - secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; - if (primary_pipe->bottom_pipe) { - ASSERT(primary_pipe->bottom_pipe != secondary_pipe); - secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; - secondary_pipe->bottom_pipe->top_pipe = secondary_pipe; - } - primary_pipe->bottom_pipe = secondary_pipe; - secondary_pipe->top_pipe = primary_pipe; - - resource_build_scaling_params(primary_pipe); - resource_build_scaling_params(secondary_pipe); -} - -#if 0 -static void calc_wm_sets_and_perf_params( - struct dc_state *context, - struct dcn_bw_internal_vars *v) -{ - /* Calculate set A last to keep internal var state consistent for required config */ - if (v->voltage_level < 2) { - v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vnom0p8; - v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vnom0p8; - v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8; - dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - - context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = - v->stutter_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = - v->stutter_enter_plus_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = - v->dram_clock_change_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000; - - v->dcfclk_per_state[1] = v->dcfclkv_nom0p8; - v->dcfclk_per_state[0] = v->dcfclkv_nom0p8; - v->dcfclk = v->dcfclkv_nom0p8; - dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - - context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = - v->stutter_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = - v->stutter_enter_plus_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = - v->dram_clock_change_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000; - } - - if 
(v->voltage_level < 3) { - v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vmax0p9; - v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmax0p9; - v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmax0p9; - v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vmax0p9; - v->dcfclk_per_state[2] = v->dcfclkv_max0p9; - v->dcfclk_per_state[1] = v->dcfclkv_max0p9; - v->dcfclk_per_state[0] = v->dcfclkv_max0p9; - v->dcfclk = v->dcfclkv_max0p9; - dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - - context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = - v->stutter_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = - v->stutter_enter_plus_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = - v->dram_clock_change_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000; - } - - v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8; - v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72; - v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65; - v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level]; - v->dcfclk_per_state[2] = v->dcfclkv_nom0p8; - v->dcfclk_per_state[1] = v->dcfclkv_mid0p72; - v->dcfclk_per_state[0] = v->dcfclkv_min0p65; - v->dcfclk = v->dcfclk_per_state[v->voltage_level]; - dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = - v->stutter_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = - v->stutter_enter_plus_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = - v->dram_clock_change_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; - if (v->voltage_level >= 2) { - context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a; - context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; - } - if (v->voltage_level >= 3) - context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; -} -#endif - -static bool dcn_bw_apply_registry_override(struct dc *dc) -{ - bool updated = false; - - DC_FP_START(); - if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns - && dc->debug.sr_exit_time_ns) { - updated = true; - dc->dcn_soc->sr_exit_time = dc->debug.sr_exit_time_ns / 1000.0; - } - - if ((int)(dc->dcn_soc->sr_enter_plus_exit_time * 1000) - != dc->debug.sr_enter_plus_exit_time_ns - && dc->debug.sr_enter_plus_exit_time_ns) { - updated = true; - dc->dcn_soc->sr_enter_plus_exit_time = - dc->debug.sr_enter_plus_exit_time_ns / 1000.0; - } - - if ((int)(dc->dcn_soc->urgent_latency * 1000) != dc->debug.urgent_latency_ns - && dc->debug.urgent_latency_ns) { - updated = true; - dc->dcn_soc->urgent_latency = dc->debug.urgent_latency_ns / 1000.0; - } - - if ((int)(dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency * 1000) - != dc->debug.percent_of_ideal_drambw - && dc->debug.percent_of_ideal_drambw) { - updated = true; 
- dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency = - dc->debug.percent_of_ideal_drambw; - } - - if ((int)(dc->dcn_soc->dram_clock_change_latency * 1000) - != dc->debug.dram_clock_change_latency_ns - && dc->debug.dram_clock_change_latency_ns) { - updated = true; - dc->dcn_soc->dram_clock_change_latency = - dc->debug.dram_clock_change_latency_ns / 1000.0; - } - DC_FP_END(); - - return updated; -} - -static void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v) -{ - /* - * disable optional pipe split by lower dispclk bounding box - * at DPM0 - */ - v->max_dispclk[0] = v->max_dppclk_vmin0p65; -} - -static void hack_force_pipe_split(struct dcn_bw_internal_vars *v, - unsigned int pixel_rate_100hz) -{ - float pixel_rate_mhz = pixel_rate_100hz / 10000; - - /* - * force enabling pipe split by lower dpp clock for DPM0 to just - * below the specify pixel_rate, so bw calc would split pipe. - */ - if (pixel_rate_mhz < v->max_dppclk[0]) - v->max_dppclk[0] = pixel_rate_mhz; -} - -static void hack_bounding_box(struct dcn_bw_internal_vars *v, - struct dc_debug_options *dbg, - struct dc_state *context) -{ - int i; - - for (i = 0; i < MAX_PIPES; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - /** - * Workaround for avoiding pipe-split in cases where we'd split - * planes that are too small, resulting in splits that aren't - * valid for the scaler. - */ - if (pipe->plane_state && - (pipe->plane_state->dst_rect.width <= 16 || - pipe->plane_state->dst_rect.height <= 16 || - pipe->plane_state->src_rect.width <= 16 || - pipe->plane_state->src_rect.height <= 16)) { - hack_disable_optional_pipe_split(v); - return; - } - } - - if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) - hack_disable_optional_pipe_split(v); - - if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP && - context->stream_count >= 2) - hack_disable_optional_pipe_split(v); - - if (context->stream_count == 1 && - dbg->force_single_disp_pipe_split) - hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz); -} - -static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, - uint32_t hw_internal_rev, - uint32_t pci_revision_id) -{ - /* for low power RV2 variants, the highest voltage level we want is 0 */ - if ((chip_family == FAMILY_RV) && - ASICREV_IS_RAVEN2(hw_internal_rev)) - switch (pci_revision_id) { - case PRID_DALI_DE: - case PRID_DALI_DF: - case PRID_DALI_E3: - case PRID_DALI_E4: - case PRID_POLLOCK_94: - case PRID_POLLOCK_95: - case PRID_POLLOCK_E9: - case PRID_POLLOCK_EA: - case PRID_POLLOCK_EB: - return 0; - default: - break; - } - - /* we are ok with all levels */ - return 4; -} - -bool dcn10_validate_bandwidth( - struct dc *dc, - struct dc_state *context, - bool fast_validate) -{ - /* - * we want a breakdown of the various stages of validation, which the - * perf_trace macro doesn't support - */ - BW_VAL_TRACE_SETUP(); - - const struct resource_pool *pool = dc->res_pool; - struct dcn_bw_internal_vars *v = &context->dcn_bw_vars; - int i, input_idx, k; - int vesa_sync_start, asic_blank_end, asic_blank_start; - bool bw_limit_pass; - float bw_limit; - - PERFORMANCE_TRACE_START(); - - BW_VAL_TRACE_COUNT(); - - if (dcn_bw_apply_registry_override(dc)) - dcn_bw_sync_calcs_and_dml(dc); - - memset(v, 0, sizeof(*v)); - DC_FP_START(); - - v->sr_exit_time = dc->dcn_soc->sr_exit_time; - v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time; - v->urgent_latency = dc->dcn_soc->urgent_latency; - v->write_back_latency = dc->dcn_soc->write_back_latency; - 
v->percent_of_ideal_drambw_received_after_urg_latency = - dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency; - - v->dcfclkv_min0p65 = dc->dcn_soc->dcfclkv_min0p65; - v->dcfclkv_mid0p72 = dc->dcn_soc->dcfclkv_mid0p72; - v->dcfclkv_nom0p8 = dc->dcn_soc->dcfclkv_nom0p8; - v->dcfclkv_max0p9 = dc->dcn_soc->dcfclkv_max0p9; - - v->max_dispclk_vmin0p65 = dc->dcn_soc->max_dispclk_vmin0p65; - v->max_dispclk_vmid0p72 = dc->dcn_soc->max_dispclk_vmid0p72; - v->max_dispclk_vnom0p8 = dc->dcn_soc->max_dispclk_vnom0p8; - v->max_dispclk_vmax0p9 = dc->dcn_soc->max_dispclk_vmax0p9; - - v->max_dppclk_vmin0p65 = dc->dcn_soc->max_dppclk_vmin0p65; - v->max_dppclk_vmid0p72 = dc->dcn_soc->max_dppclk_vmid0p72; - v->max_dppclk_vnom0p8 = dc->dcn_soc->max_dppclk_vnom0p8; - v->max_dppclk_vmax0p9 = dc->dcn_soc->max_dppclk_vmax0p9; - - v->socclk = dc->dcn_soc->socclk; - - v->fabric_and_dram_bandwidth_vmin0p65 = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65; - v->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72; - v->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8; - v->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9; - - v->phyclkv_min0p65 = dc->dcn_soc->phyclkv_min0p65; - v->phyclkv_mid0p72 = dc->dcn_soc->phyclkv_mid0p72; - v->phyclkv_nom0p8 = dc->dcn_soc->phyclkv_nom0p8; - v->phyclkv_max0p9 = dc->dcn_soc->phyclkv_max0p9; - - v->downspreading = dc->dcn_soc->downspreading; - v->round_trip_ping_latency_cycles = dc->dcn_soc->round_trip_ping_latency_cycles; - v->urgent_out_of_order_return_per_channel = dc->dcn_soc->urgent_out_of_order_return_per_channel; - v->number_of_channels = dc->dcn_soc->number_of_channels; - v->vmm_page_size = dc->dcn_soc->vmm_page_size; - v->dram_clock_change_latency = dc->dcn_soc->dram_clock_change_latency; - v->return_bus_width = dc->dcn_soc->return_bus_width; - - v->rob_buffer_size_in_kbyte = dc->dcn_ip->rob_buffer_size_in_kbyte; - v->det_buffer_size_in_kbyte = dc->dcn_ip->det_buffer_size_in_kbyte; - v->dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels; - v->opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines; - v->pixel_chunk_size_in_kbyte = dc->dcn_ip->pixel_chunk_size_in_kbyte; - v->pte_enable = dc->dcn_ip->pte_enable; - v->pte_chunk_size = dc->dcn_ip->pte_chunk_size; - v->meta_chunk_size = dc->dcn_ip->meta_chunk_size; - v->writeback_chunk_size = dc->dcn_ip->writeback_chunk_size; - v->odm_capability = dc->dcn_ip->odm_capability; - v->dsc_capability = dc->dcn_ip->dsc_capability; - v->line_buffer_size = dc->dcn_ip->line_buffer_size; - v->is_line_buffer_bpp_fixed = dc->dcn_ip->is_line_buffer_bpp_fixed; - v->line_buffer_fixed_bpp = dc->dcn_ip->line_buffer_fixed_bpp; - v->max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines; - v->writeback_luma_buffer_size = dc->dcn_ip->writeback_luma_buffer_size; - v->writeback_chroma_buffer_size = dc->dcn_ip->writeback_chroma_buffer_size; - v->max_num_dpp = dc->dcn_ip->max_num_dpp; - v->max_num_writeback = dc->dcn_ip->max_num_writeback; - v->max_dchub_topscl_throughput = dc->dcn_ip->max_dchub_topscl_throughput; - v->max_pscl_tolb_throughput = dc->dcn_ip->max_pscl_tolb_throughput; - v->max_lb_tovscl_throughput = dc->dcn_ip->max_lb_tovscl_throughput; - v->max_vscl_tohscl_throughput = dc->dcn_ip->max_vscl_tohscl_throughput; - v->max_hscl_ratio = dc->dcn_ip->max_hscl_ratio; - v->max_vscl_ratio = dc->dcn_ip->max_vscl_ratio; - v->max_hscl_taps = dc->dcn_ip->max_hscl_taps; - v->max_vscl_taps = dc->dcn_ip->max_vscl_taps; - 
v->under_scan_factor = dc->dcn_ip->under_scan_factor; - v->pte_buffer_size_in_requests = dc->dcn_ip->pte_buffer_size_in_requests; - v->dispclk_ramping_margin = dc->dcn_ip->dispclk_ramping_margin; - v->max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters; - v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = - dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one; - v->bug_forcing_luma_and_chroma_request_to_same_size_fixed = - dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed; - - v->voltage[5] = dcn_bw_no_support; - v->voltage[4] = dcn_bw_v_max0p9; - v->voltage[3] = dcn_bw_v_max0p9; - v->voltage[2] = dcn_bw_v_nom0p8; - v->voltage[1] = dcn_bw_v_mid0p72; - v->voltage[0] = dcn_bw_v_min0p65; - v->fabric_and_dram_bandwidth_per_state[5] = v->fabric_and_dram_bandwidth_vmax0p9; - v->fabric_and_dram_bandwidth_per_state[4] = v->fabric_and_dram_bandwidth_vmax0p9; - v->fabric_and_dram_bandwidth_per_state[3] = v->fabric_and_dram_bandwidth_vmax0p9; - v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8; - v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72; - v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65; - v->dcfclk_per_state[5] = v->dcfclkv_max0p9; - v->dcfclk_per_state[4] = v->dcfclkv_max0p9; - v->dcfclk_per_state[3] = v->dcfclkv_max0p9; - v->dcfclk_per_state[2] = v->dcfclkv_nom0p8; - v->dcfclk_per_state[1] = v->dcfclkv_mid0p72; - v->dcfclk_per_state[0] = v->dcfclkv_min0p65; - v->max_dispclk[5] = v->max_dispclk_vmax0p9; - v->max_dispclk[4] = v->max_dispclk_vmax0p9; - v->max_dispclk[3] = v->max_dispclk_vmax0p9; - v->max_dispclk[2] = v->max_dispclk_vnom0p8; - v->max_dispclk[1] = v->max_dispclk_vmid0p72; - v->max_dispclk[0] = v->max_dispclk_vmin0p65; - v->max_dppclk[5] = v->max_dppclk_vmax0p9; - v->max_dppclk[4] = v->max_dppclk_vmax0p9; - v->max_dppclk[3] = v->max_dppclk_vmax0p9; - v->max_dppclk[2] = v->max_dppclk_vnom0p8; - v->max_dppclk[1] = v->max_dppclk_vmid0p72; - v->max_dppclk[0] = v->max_dppclk_vmin0p65; - v->phyclk_per_state[5] = v->phyclkv_max0p9; - v->phyclk_per_state[4] = v->phyclkv_max0p9; - v->phyclk_per_state[3] = v->phyclkv_max0p9; - v->phyclk_per_state[2] = v->phyclkv_nom0p8; - v->phyclk_per_state[1] = v->phyclkv_mid0p72; - v->phyclk_per_state[0] = v->phyclkv_min0p65; - v->synchronized_vblank = dcn_bw_no; - v->ta_pscalculation = dcn_bw_override; - v->allow_different_hratio_vratio = dcn_bw_yes; - - for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - if (!pipe->stream) - continue; - /* skip all but first of split pipes */ - if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) - continue; - - v->underscan_output[input_idx] = false; /* taken care of in recout already*/ - v->interlace_output[input_idx] = false; - - v->htotal[input_idx] = pipe->stream->timing.h_total; - v->vtotal[input_idx] = pipe->stream->timing.v_total; - v->vactive[input_idx] = pipe->stream->timing.v_addressable + - pipe->stream->timing.v_border_top + pipe->stream->timing.v_border_bottom; - v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total - - v->vactive[input_idx] - - pipe->stream->timing.v_front_porch; - v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_100hz/10000.0; - if (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) - v->pixel_clock[input_idx] *= 2; - if (!pipe->plane_state) { - v->dcc_enable[input_idx] = dcn_bw_yes; 
- v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32; - v->source_surface_mode[input_idx] = dcn_bw_sw_4_kb_s; - v->lb_bit_per_pixel[input_idx] = 30; - v->viewport_width[input_idx] = pipe->stream->timing.h_addressable; - v->viewport_height[input_idx] = pipe->stream->timing.v_addressable; - /* - * for cases where we have no plane, we want to validate up to 1080p - * source size because here we are only interested in if the output - * timing is supported or not. if we cannot support native resolution - * of the high res display, we still want to support lower res up scale - * to native - */ - if (v->viewport_width[input_idx] > 1920) - v->viewport_width[input_idx] = 1920; - if (v->viewport_height[input_idx] > 1080) - v->viewport_height[input_idx] = 1080; - v->scaler_rec_out_width[input_idx] = v->viewport_width[input_idx]; - v->scaler_recout_height[input_idx] = v->viewport_height[input_idx]; - v->override_hta_ps[input_idx] = 1; - v->override_vta_ps[input_idx] = 1; - v->override_hta_pschroma[input_idx] = 1; - v->override_vta_pschroma[input_idx] = 1; - v->source_scan[input_idx] = dcn_bw_hor; - - } else { - v->viewport_height[input_idx] = pipe->plane_res.scl_data.viewport.height; - v->viewport_width[input_idx] = pipe->plane_res.scl_data.viewport.width; - v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width; - v->scaler_recout_height[input_idx] = pipe->plane_res.scl_data.recout.height; - if (pipe->bottom_pipe && pipe->bottom_pipe->plane_state == pipe->plane_state) { - if (pipe->plane_state->rotation % 2 == 0) { - int viewport_end = pipe->plane_res.scl_data.viewport.width - + pipe->plane_res.scl_data.viewport.x; - int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.width - + pipe->bottom_pipe->plane_res.scl_data.viewport.x; - - if (viewport_end > viewport_b_end) - v->viewport_width[input_idx] = viewport_end - - pipe->bottom_pipe->plane_res.scl_data.viewport.x; - else - v->viewport_width[input_idx] = viewport_b_end - - pipe->plane_res.scl_data.viewport.x; - } else { - int viewport_end = pipe->plane_res.scl_data.viewport.height - + pipe->plane_res.scl_data.viewport.y; - int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.height - + pipe->bottom_pipe->plane_res.scl_data.viewport.y; - - if (viewport_end > viewport_b_end) - v->viewport_height[input_idx] = viewport_end - - pipe->bottom_pipe->plane_res.scl_data.viewport.y; - else - v->viewport_height[input_idx] = viewport_b_end - - pipe->plane_res.scl_data.viewport.y; - } - v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width - + pipe->bottom_pipe->plane_res.scl_data.recout.width; - } - - if (pipe->plane_state->rotation % 2 == 0) { - ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value - || v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]); - ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value - || v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]); - } else { - ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value - || v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]); - ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value - || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]); - } - - if (dc->debug.optimized_watermark) { - /* - * this method requires us to always re-calculate watermark when dcc change - * between flip. - */ - v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? 
dcn_bw_yes : dcn_bw_no; - } else { - /* - * allow us to disable dcc on the fly without re-calculating WM - * - * extra overhead for DCC is quite small. for 1080p WM without - * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us) - */ - unsigned int bpe; - - v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format( - pipe->plane_state->format, &bpe) ? dcn_bw_yes : dcn_bw_no; - } - - v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs( - pipe->plane_state->format); - v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs( - pipe->plane_state->tiling_info.gfx9.swizzle); - v->lb_bit_per_pixel[input_idx] = tl_lb_bpp_to_int(pipe->plane_res.scl_data.lb_params.depth); - v->override_hta_ps[input_idx] = pipe->plane_res.scl_data.taps.h_taps; - v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps; - v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c; - v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c; - /* - * Spreadsheet doesn't handle taps_c is one properly, - * need to force Chroma to always be scaled to pass - * bandwidth validation. - */ - if (v->override_hta_pschroma[input_idx] == 1) - v->override_hta_pschroma[input_idx] = 2; - if (v->override_vta_pschroma[input_idx] == 1) - v->override_vta_pschroma[input_idx] = 2; - v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor; - } - if (v->is_line_buffer_bpp_fixed == dcn_bw_yes) - v->lb_bit_per_pixel[input_idx] = v->line_buffer_fixed_bpp; - v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/ - v->output_format[input_idx] = pipe->stream->timing.pixel_encoding == - PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444; - v->output[input_idx] = pipe->stream->signal == - SIGNAL_TYPE_HDMI_TYPE_A ? 
dcn_bw_hdmi : dcn_bw_dp; - v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc; - if (v->output[input_idx] == dcn_bw_hdmi) { - switch (pipe->stream->timing.display_color_depth) { - case COLOR_DEPTH_101010: - v->output_deep_color[input_idx] = dcn_bw_encoder_10bpc; - break; - case COLOR_DEPTH_121212: - v->output_deep_color[input_idx] = dcn_bw_encoder_12bpc; - break; - case COLOR_DEPTH_161616: - v->output_deep_color[input_idx] = dcn_bw_encoder_16bpc; - break; - default: - break; - } - } - - input_idx++; - } - v->number_of_active_planes = input_idx; - - scaler_settings_calculation(v); - - hack_bounding_box(v, &dc->debug, context); - - mode_support_and_system_configuration(v); - - /* Unhack dppclk: dont bother with trying to pipe split if we cannot maintain dpm0 */ - if (v->voltage_level != 0 - && context->stream_count == 1 - && dc->debug.force_single_disp_pipe_split) { - v->max_dppclk[0] = v->max_dppclk_vmin0p65; - mode_support_and_system_configuration(v); - } - - if (v->voltage_level == 0 && - (dc->debug.sr_exit_time_dpm0_ns - || dc->debug.sr_enter_plus_exit_time_dpm0_ns)) { - - if (dc->debug.sr_enter_plus_exit_time_dpm0_ns) - v->sr_enter_plus_exit_time = - dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f; - if (dc->debug.sr_exit_time_dpm0_ns) - v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f; - context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time; - context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time; - mode_support_and_system_configuration(v); - } - - display_pipe_configuration(v); - - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->source_scan[k] == dcn_bw_hor) - v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k]; - else - v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k]; - } - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { - v->byte_per_pixel_dety[k] = 8.0; - v->byte_per_pixel_detc[k] = 0.0; - } else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { - v->byte_per_pixel_dety[k] = 4.0; - v->byte_per_pixel_detc[k] = 0.0; - } else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { - v->byte_per_pixel_dety[k] = 2.0; - v->byte_per_pixel_detc[k] = 0.0; - } else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { - v->byte_per_pixel_dety[k] = 1.0; - v->byte_per_pixel_detc[k] = 2.0; - } else { - v->byte_per_pixel_dety[k] = 4.0f / 3.0f; - v->byte_per_pixel_detc[k] = 8.0f / 3.0f; - } - } - - v->total_data_read_bandwidth = 0.0; - for (k = 0; k <= v->number_of_active_planes - 1; k++) { - v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] * - dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k]; - v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] * - dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0; - v->total_data_read_bandwidth = v->total_data_read_bandwidth + - v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k]; - } - - BW_VAL_TRACE_END_VOLTAGE_LEVEL(); - - if (v->voltage_level != number_of_states_plus_one && !fast_validate) { - float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; - - if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) - bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65; - else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72) - bw_consumed = v->fabric_and_dram_bandwidth_vmid0p72; - else if (bw_consumed < 
v->fabric_and_dram_bandwidth_vnom0p8) - bw_consumed = v->fabric_and_dram_bandwidth_vnom0p8; - else - bw_consumed = v->fabric_and_dram_bandwidth_vmax0p9; - - if (bw_consumed < v->fabric_and_dram_bandwidth) - if (dc->debug.voltage_align_fclk) - bw_consumed = v->fabric_and_dram_bandwidth; - - display_pipe_configuration(v); - /*calc_wm_sets_and_perf_params(context, v);*/ - /* Only 1 set is used by dcn since no noticeable - * performance improvement was measured and due to hw bug DEGVIDCN10-254 - */ - dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); - - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = - v->stutter_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = - v->stutter_enter_plus_exit_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = - v->dram_clock_change_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; - context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a; - context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; - context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; - - context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / - (ddr4_dram_factor_single_Channel * v->number_of_channels)); - if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) - context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32); - - context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000); - context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000); - - context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000); - if (dc->debug.max_disp_clk == true) - context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000); - - if (context->bw_ctx.bw.dcn.clk.dispclk_khz < - dc->debug.min_disp_clk_khz) { - context->bw_ctx.bw.dcn.clk.dispclk_khz = - dc->debug.min_disp_clk_khz; - } - - context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / - v->dispclk_dppclk_ratio; - context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level]; - switch (v->voltage_level) { - case 0: - context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = - (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000); - break; - case 1: - context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = - (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000); - break; - case 2: - context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = - (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000); - break; - default: - context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = - (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000); - break; - } - - BW_VAL_TRACE_END_WATERMARKS(); - - for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { - struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; - - /* skip inactive pipe */ - if (!pipe->stream) - continue; - /* skip all but first of split pipes */ - if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) - continue; - - pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx]; - pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx]; - pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx]; - pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx]; - - pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total; - 
pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total; - vesa_sync_start = pipe->stream->timing.v_addressable + - pipe->stream->timing.v_border_bottom + - pipe->stream->timing.v_front_porch; - - asic_blank_end = (pipe->stream->timing.v_total - - vesa_sync_start - - pipe->stream->timing.v_border_top) - * (pipe->stream->timing.flags.INTERLACE ? 1 : 0); - - asic_blank_start = asic_blank_end + - (pipe->stream->timing.v_border_top + - pipe->stream->timing.v_addressable + - pipe->stream->timing.v_border_bottom) - * (pipe->stream->timing.flags.INTERLACE ? 1 : 0); - - pipe->pipe_dlg_param.vblank_start = asic_blank_start; - pipe->pipe_dlg_param.vblank_end = asic_blank_end; - - if (pipe->plane_state) { - struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; - - pipe->plane_state->update_flags.bits.full_update = 1; - - if (v->dpp_per_plane[input_idx] == 2 || - ((pipe->stream->view_format == - VIEW_3D_FORMAT_SIDE_BY_SIDE || - pipe->stream->view_format == - VIEW_3D_FORMAT_TOP_AND_BOTTOM) && - (pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_TOP_AND_BOTTOM || - pipe->stream->timing.timing_3d_format == - TIMING_3D_FORMAT_SIDE_BY_SIDE))) { - if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { - /* update previously split pipe */ - hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx]; - hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx]; - hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx]; - hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx]; - - hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total; - hsplit_pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total; - hsplit_pipe->pipe_dlg_param.vblank_start = pipe->pipe_dlg_param.vblank_start; - hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end; - } else { - /* pipe not split previously needs split */ - hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe); - ASSERT(hsplit_pipe); - split_stream_across_pipes(&context->res_ctx, pool, pipe, hsplit_pipe); - } - - dcn_bw_calc_rq_dlg_ttu(dc, v, hsplit_pipe, input_idx); - } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { - /* merge previously split pipe */ - pipe->bottom_pipe = hsplit_pipe->bottom_pipe; - if (hsplit_pipe->bottom_pipe) - hsplit_pipe->bottom_pipe->top_pipe = pipe; - hsplit_pipe->plane_state = NULL; - hsplit_pipe->stream = NULL; - hsplit_pipe->top_pipe = NULL; - hsplit_pipe->bottom_pipe = NULL; - /* Clear plane_res and stream_res */ - memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res)); - memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res)); - resource_build_scaling_params(pipe); - } - /* for now important to do this after pipe split for building e2e params */ - dcn_bw_calc_rq_dlg_ttu(dc, v, pipe, input_idx); - } - - input_idx++; - } - } else if (v->voltage_level == number_of_states_plus_one) { - BW_VAL_TRACE_SKIP(fail); - } else if (fast_validate) { - BW_VAL_TRACE_SKIP(fast); - } - - if (v->voltage_level == 0) { - context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = - dc->dcn_soc->sr_enter_plus_exit_time; - context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time; - } - - /* - * BW limit is set to prevent display from impacting other system functions - */ - - bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9; - bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit; - - DC_FP_END(); - - 
PERFORMANCE_TRACE_END(); - BW_VAL_TRACE_FINISH(); - - if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level( - dc->ctx->asic_id.chip_family, - dc->ctx->asic_id.hw_internal_rev, - dc->ctx->asic_id.pci_revision_id)) - return true; - else - return false; -} - -static unsigned int dcn_find_normalized_clock_vdd_Level( - const struct dc *dc, - enum dm_pp_clock_type clocks_type, - int clocks_in_khz) -{ - int vdd_level = dcn_bw_v_min0p65; - - if (clocks_in_khz == 0)/*todo some clock not in the considerations*/ - return vdd_level; - - switch (clocks_type) { - case DM_PP_CLOCK_TYPE_DISPLAY_CLK: - if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) { - vdd_level = dcn_bw_v_max0p91; - BREAK_TO_DEBUGGER(); - } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) { - vdd_level = dcn_bw_v_max0p9; - } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) { - vdd_level = dcn_bw_v_nom0p8; - } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) { - vdd_level = dcn_bw_v_mid0p72; - } else - vdd_level = dcn_bw_v_min0p65; - break; - case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: - if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) { - vdd_level = dcn_bw_v_max0p91; - BREAK_TO_DEBUGGER(); - } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) { - vdd_level = dcn_bw_v_max0p9; - } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) { - vdd_level = dcn_bw_v_nom0p8; - } else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) { - vdd_level = dcn_bw_v_mid0p72; - } else - vdd_level = dcn_bw_v_min0p65; - break; - - case DM_PP_CLOCK_TYPE_DPPCLK: - if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) { - vdd_level = dcn_bw_v_max0p91; - BREAK_TO_DEBUGGER(); - } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) { - vdd_level = dcn_bw_v_max0p9; - } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) { - vdd_level = dcn_bw_v_nom0p8; - } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) { - vdd_level = dcn_bw_v_mid0p72; - } else - vdd_level = dcn_bw_v_min0p65; - break; - - case DM_PP_CLOCK_TYPE_MEMORY_CLK: - { - unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels); - - if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) { - vdd_level = dcn_bw_v_max0p91; - BREAK_TO_DEBUGGER(); - } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) { - vdd_level = dcn_bw_v_max0p9; - } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) { - vdd_level = dcn_bw_v_nom0p8; - } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) { - vdd_level = dcn_bw_v_mid0p72; - } else - vdd_level = dcn_bw_v_min0p65; - } - break; - - case DM_PP_CLOCK_TYPE_DCFCLK: - if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) { - vdd_level = dcn_bw_v_max0p91; - BREAK_TO_DEBUGGER(); - } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) { - vdd_level = dcn_bw_v_max0p9; - } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) { - vdd_level = dcn_bw_v_nom0p8; - } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) { - vdd_level = dcn_bw_v_mid0p72; - } else - vdd_level = dcn_bw_v_min0p65; - break; - - default: - break; - } - return vdd_level; -} - -unsigned int dcn_find_dcfclk_suits_all( - const struct dc *dc, - struct dc_clocks *clocks) -{ - unsigned vdd_level, vdd_level_temp; - unsigned dcf_clk; - - /*find a common supported voltage level*/ - vdd_level = 
dcn_find_normalized_clock_vdd_Level( - dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz); - vdd_level_temp = dcn_find_normalized_clock_vdd_Level( - dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz); - - vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); - vdd_level_temp = dcn_find_normalized_clock_vdd_Level( - dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz); - vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); - - vdd_level_temp = dcn_find_normalized_clock_vdd_Level( - dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz); - vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); - vdd_level_temp = dcn_find_normalized_clock_vdd_Level( - dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz); - - /*find that level conresponding dcfclk*/ - vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); - if (vdd_level == dcn_bw_v_max0p91) { - BREAK_TO_DEBUGGER(); - dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000; - } else if (vdd_level == dcn_bw_v_max0p9) - dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000; - else if (vdd_level == dcn_bw_v_nom0p8) - dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000; - else if (vdd_level == dcn_bw_v_mid0p72) - dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000; - else - dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000; - - DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk); - return dcf_clk; -} - -static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks) -{ - int i; - - if (clks->num_levels == 0) - return false; - - for (i = 0; i < clks->num_levels; i++) - /* Ensure that the result is sane */ - if (clks->data[i].clocks_in_khz == 0) - return false; - - return true; -} - -void dcn_bw_update_from_pplib(struct dc *dc) -{ - struct dc_context *ctx = dc->ctx; - struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; - bool res; - unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx; - - /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ - res = dm_pp_get_clock_levels_by_type_with_voltage( - ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); - - DC_FP_START(); - - if (res) - res = verify_clock_values(&fclks); - - if (res) { - ASSERT(fclks.num_levels); - - vmin0p65_idx = 0; - vmid0p72_idx = fclks.num_levels - - (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1)); - vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 
2 : 1); - vmax0p9_idx = fclks.num_levels - 1; - - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = - 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = - dc->dcn_soc->number_of_channels * - (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = - dc->dcn_soc->number_of_channels * - (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = - dc->dcn_soc->number_of_channels * - (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0) - * ddr4_dram_factor_single_Channel / 1000.0; - } else - BREAK_TO_DEBUGGER(); - - DC_FP_END(); - - res = dm_pp_get_clock_levels_by_type_with_voltage( - ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); - - DC_FP_START(); - - if (res) - res = verify_clock_values(&dcfclks); - - if (res && dcfclks.num_levels >= 3) { - dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0; - dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0; - dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0; - dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0; - } else - BREAK_TO_DEBUGGER(); - - DC_FP_END(); -} - -void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) -{ - struct pp_smu_funcs_rv *pp = NULL; - struct pp_smu_wm_range_sets ranges = {0}; - int min_fclk_khz, min_dcfclk_khz, socclk_khz; - const int overdrive = 5000000; /* 5 GHz to cover Overdrive */ - - if (dc->res_pool->pp_smu) - pp = &dc->res_pool->pp_smu->rv_funcs; - if (!pp || !pp->set_wm_ranges) - return; - - DC_FP_START(); - min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32; - min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000; - socclk_khz = dc->dcn_soc->socclk * 1000; - DC_FP_END(); - - /* Now notify PPLib/SMU about which Watermarks sets they should select - * depending on DPM state they are in. And update BW MGR GFX Engine and - * Memory clock member variables for Watermarks calculations for each - * Watermark Set. Only one watermark set for dcn1 due to hw bug DEGVIDCN10-254. 
- */ - /* SOCCLK does not affect anytihng but writeback for DCN so for now we dont - * care what the value is, hence min to overdrive level - */ - ranges.num_reader_wm_sets = WM_SET_COUNT; - ranges.num_writer_wm_sets = WM_SET_COUNT; - ranges.reader_wm_sets[0].wm_inst = WM_A; - ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000; - ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; - ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000; - ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; - ranges.writer_wm_sets[0].wm_inst = WM_A; - ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000; - ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; - ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000; - ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; - - if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) { - ranges.reader_wm_sets[0].wm_inst = WM_A; - ranges.reader_wm_sets[0].min_drain_clk_mhz = 300; - ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000; - ranges.reader_wm_sets[0].min_fill_clk_mhz = 800; - ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000; - ranges.writer_wm_sets[0].wm_inst = WM_A; - ranges.writer_wm_sets[0].min_fill_clk_mhz = 200; - ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000; - ranges.writer_wm_sets[0].min_drain_clk_mhz = 800; - ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000; - } - - ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0]; - ranges.reader_wm_sets[1].wm_inst = WM_B; - - ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0]; - ranges.reader_wm_sets[2].wm_inst = WM_C; - - ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0]; - ranges.reader_wm_sets[3].wm_inst = WM_D; - - /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ - pp->set_wm_ranges(&pp->pp_smu, &ranges); -} - -void dcn_bw_sync_calcs_and_dml(struct dc *dc) -{ - DC_FP_START(); - DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n" - "sr_enter_plus_exit_time: %f ns\n" - "urgent_latency: %f ns\n" - "write_back_latency: %f ns\n" - "percent_of_ideal_drambw_received_after_urg_latency: %f %%\n" - "max_request_size: %d bytes\n" - "dcfclkv_max0p9: %f kHz\n" - "dcfclkv_nom0p8: %f kHz\n" - "dcfclkv_mid0p72: %f kHz\n" - "dcfclkv_min0p65: %f kHz\n" - "max_dispclk_vmax0p9: %f kHz\n" - "max_dispclk_vnom0p8: %f kHz\n" - "max_dispclk_vmid0p72: %f kHz\n" - "max_dispclk_vmin0p65: %f kHz\n" - "max_dppclk_vmax0p9: %f kHz\n" - "max_dppclk_vnom0p8: %f kHz\n" - "max_dppclk_vmid0p72: %f kHz\n" - "max_dppclk_vmin0p65: %f kHz\n" - "socclk: %f kHz\n" - "fabric_and_dram_bandwidth_vmax0p9: %f MB/s\n" - "fabric_and_dram_bandwidth_vnom0p8: %f MB/s\n" - "fabric_and_dram_bandwidth_vmid0p72: %f MB/s\n" - "fabric_and_dram_bandwidth_vmin0p65: %f MB/s\n" - "phyclkv_max0p9: %f kHz\n" - "phyclkv_nom0p8: %f kHz\n" - "phyclkv_mid0p72: %f kHz\n" - "phyclkv_min0p65: %f kHz\n" - "downspreading: %f %%\n" - "round_trip_ping_latency_cycles: %d DCFCLK Cycles\n" - "urgent_out_of_order_return_per_channel: %d Bytes\n" - "number_of_channels: %d\n" - "vmm_page_size: %d Bytes\n" - "dram_clock_change_latency: %f ns\n" - "return_bus_width: %d Bytes\n", - dc->dcn_soc->sr_exit_time * 1000, - dc->dcn_soc->sr_enter_plus_exit_time * 1000, - dc->dcn_soc->urgent_latency * 1000, - dc->dcn_soc->write_back_latency * 1000, - dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency, - dc->dcn_soc->max_request_size, - dc->dcn_soc->dcfclkv_max0p9 * 1000, - dc->dcn_soc->dcfclkv_nom0p8 * 1000, - dc->dcn_soc->dcfclkv_mid0p72 * 1000, - dc->dcn_soc->dcfclkv_min0p65 
* 1000, - dc->dcn_soc->max_dispclk_vmax0p9 * 1000, - dc->dcn_soc->max_dispclk_vnom0p8 * 1000, - dc->dcn_soc->max_dispclk_vmid0p72 * 1000, - dc->dcn_soc->max_dispclk_vmin0p65 * 1000, - dc->dcn_soc->max_dppclk_vmax0p9 * 1000, - dc->dcn_soc->max_dppclk_vnom0p8 * 1000, - dc->dcn_soc->max_dppclk_vmid0p72 * 1000, - dc->dcn_soc->max_dppclk_vmin0p65 * 1000, - dc->dcn_soc->socclk * 1000, - dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000, - dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000, - dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000, - dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000, - dc->dcn_soc->phyclkv_max0p9 * 1000, - dc->dcn_soc->phyclkv_nom0p8 * 1000, - dc->dcn_soc->phyclkv_mid0p72 * 1000, - dc->dcn_soc->phyclkv_min0p65 * 1000, - dc->dcn_soc->downspreading * 100, - dc->dcn_soc->round_trip_ping_latency_cycles, - dc->dcn_soc->urgent_out_of_order_return_per_channel, - dc->dcn_soc->number_of_channels, - dc->dcn_soc->vmm_page_size, - dc->dcn_soc->dram_clock_change_latency * 1000, - dc->dcn_soc->return_bus_width); - DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %f\n" - "det_buffer_size_in_kbyte: %f\n" - "dpp_output_buffer_pixels: %f\n" - "opp_output_buffer_lines: %f\n" - "pixel_chunk_size_in_kbyte: %f\n" - "pte_enable: %d\n" - "pte_chunk_size: %d kbytes\n" - "meta_chunk_size: %d kbytes\n" - "writeback_chunk_size: %d kbytes\n" - "odm_capability: %d\n" - "dsc_capability: %d\n" - "line_buffer_size: %d bits\n" - "max_line_buffer_lines: %d\n" - "is_line_buffer_bpp_fixed: %d\n" - "line_buffer_fixed_bpp: %d\n" - "writeback_luma_buffer_size: %d kbytes\n" - "writeback_chroma_buffer_size: %d kbytes\n" - "max_num_dpp: %d\n" - "max_num_writeback: %d\n" - "max_dchub_topscl_throughput: %d pixels/dppclk\n" - "max_pscl_tolb_throughput: %d pixels/dppclk\n" - "max_lb_tovscl_throughput: %d pixels/dppclk\n" - "max_vscl_tohscl_throughput: %d pixels/dppclk\n" - "max_hscl_ratio: %f\n" - "max_vscl_ratio: %f\n" - "max_hscl_taps: %d\n" - "max_vscl_taps: %d\n" - "pte_buffer_size_in_requests: %d\n" - "dispclk_ramping_margin: %f %%\n" - "under_scan_factor: %f %%\n" - "max_inter_dcn_tile_repeaters: %d\n" - "can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n" - "bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n" - "dcfclk_cstate_latency: %d\n", - dc->dcn_ip->rob_buffer_size_in_kbyte, - dc->dcn_ip->det_buffer_size_in_kbyte, - dc->dcn_ip->dpp_output_buffer_pixels, - dc->dcn_ip->opp_output_buffer_lines, - dc->dcn_ip->pixel_chunk_size_in_kbyte, - dc->dcn_ip->pte_enable, - dc->dcn_ip->pte_chunk_size, - dc->dcn_ip->meta_chunk_size, - dc->dcn_ip->writeback_chunk_size, - dc->dcn_ip->odm_capability, - dc->dcn_ip->dsc_capability, - dc->dcn_ip->line_buffer_size, - dc->dcn_ip->max_line_buffer_lines, - dc->dcn_ip->is_line_buffer_bpp_fixed, - dc->dcn_ip->line_buffer_fixed_bpp, - dc->dcn_ip->writeback_luma_buffer_size, - dc->dcn_ip->writeback_chroma_buffer_size, - dc->dcn_ip->max_num_dpp, - dc->dcn_ip->max_num_writeback, - dc->dcn_ip->max_dchub_topscl_throughput, - dc->dcn_ip->max_pscl_tolb_throughput, - dc->dcn_ip->max_lb_tovscl_throughput, - dc->dcn_ip->max_vscl_tohscl_throughput, - dc->dcn_ip->max_hscl_ratio, - dc->dcn_ip->max_vscl_ratio, - dc->dcn_ip->max_hscl_taps, - dc->dcn_ip->max_vscl_taps, - dc->dcn_ip->pte_buffer_size_in_requests, - dc->dcn_ip->dispclk_ramping_margin, - dc->dcn_ip->under_scan_factor * 100, - dc->dcn_ip->max_inter_dcn_tile_repeaters, - dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one, - 
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed, - dc->dcn_ip->dcfclk_cstate_latency); - - dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time; - dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time; - dc->dml.soc.urgent_latency_us = dc->dcn_soc->urgent_latency; - dc->dml.soc.writeback_latency_us = dc->dcn_soc->write_back_latency; - dc->dml.soc.ideal_dram_bw_after_urgent_percent = - dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency; - dc->dml.soc.max_request_size_bytes = dc->dcn_soc->max_request_size; - dc->dml.soc.downspread_percent = dc->dcn_soc->downspreading; - dc->dml.soc.round_trip_ping_latency_dcfclk_cycles = - dc->dcn_soc->round_trip_ping_latency_cycles; - dc->dml.soc.urgent_out_of_order_return_per_channel_bytes = - dc->dcn_soc->urgent_out_of_order_return_per_channel; - dc->dml.soc.num_chans = dc->dcn_soc->number_of_channels; - dc->dml.soc.vmm_page_size_bytes = dc->dcn_soc->vmm_page_size; - dc->dml.soc.dram_clock_change_latency_us = dc->dcn_soc->dram_clock_change_latency; - dc->dml.soc.return_bus_width_bytes = dc->dcn_soc->return_bus_width; - - dc->dml.ip.rob_buffer_size_kbytes = dc->dcn_ip->rob_buffer_size_in_kbyte; - dc->dml.ip.det_buffer_size_kbytes = dc->dcn_ip->det_buffer_size_in_kbyte; - dc->dml.ip.dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels; - dc->dml.ip.opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines; - dc->dml.ip.pixel_chunk_size_kbytes = dc->dcn_ip->pixel_chunk_size_in_kbyte; - dc->dml.ip.pte_enable = dc->dcn_ip->pte_enable == dcn_bw_yes; - dc->dml.ip.pte_chunk_size_kbytes = dc->dcn_ip->pte_chunk_size; - dc->dml.ip.meta_chunk_size_kbytes = dc->dcn_ip->meta_chunk_size; - dc->dml.ip.writeback_chunk_size_kbytes = dc->dcn_ip->writeback_chunk_size; - dc->dml.ip.line_buffer_size_bits = dc->dcn_ip->line_buffer_size; - dc->dml.ip.max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines; - dc->dml.ip.IsLineBufferBppFixed = dc->dcn_ip->is_line_buffer_bpp_fixed == dcn_bw_yes; - dc->dml.ip.LineBufferFixedBpp = dc->dcn_ip->line_buffer_fixed_bpp; - dc->dml.ip.writeback_luma_buffer_size_kbytes = dc->dcn_ip->writeback_luma_buffer_size; - dc->dml.ip.writeback_chroma_buffer_size_kbytes = dc->dcn_ip->writeback_chroma_buffer_size; - dc->dml.ip.max_num_dpp = dc->dcn_ip->max_num_dpp; - dc->dml.ip.max_num_wb = dc->dcn_ip->max_num_writeback; - dc->dml.ip.max_dchub_pscl_bw_pix_per_clk = dc->dcn_ip->max_dchub_topscl_throughput; - dc->dml.ip.max_pscl_lb_bw_pix_per_clk = dc->dcn_ip->max_pscl_tolb_throughput; - dc->dml.ip.max_lb_vscl_bw_pix_per_clk = dc->dcn_ip->max_lb_tovscl_throughput; - dc->dml.ip.max_vscl_hscl_bw_pix_per_clk = dc->dcn_ip->max_vscl_tohscl_throughput; - dc->dml.ip.max_hscl_ratio = dc->dcn_ip->max_hscl_ratio; - dc->dml.ip.max_vscl_ratio = dc->dcn_ip->max_vscl_ratio; - dc->dml.ip.max_hscl_taps = dc->dcn_ip->max_hscl_taps; - dc->dml.ip.max_vscl_taps = dc->dcn_ip->max_vscl_taps; - /*pte_buffer_size_in_requests missing in dml*/ - dc->dml.ip.dispclk_ramp_margin_percent = dc->dcn_ip->dispclk_ramping_margin; - dc->dml.ip.underscan_factor = dc->dcn_ip->under_scan_factor; - dc->dml.ip.max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters; - dc->dml.ip.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = - dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes; - dc->dml.ip.bug_forcing_LC_req_same_size_fixed = - dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes; - dc->dml.ip.dcfclk_cstate_latency = 
dc->dcn_ip->dcfclk_cstate_latency; - DC_FP_END(); -} diff --git a/drivers/gpu/drm/amd/display/dc/dml/Makefile b/drivers/gpu/drm/amd/display/dc/dml/Makefile index 06910b1f5965..b16c492593e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dml/Makefile @@ -74,6 +74,9 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dcn302/dcn302_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_ccflags) +CFLAGS_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_ccflags) -Wno-tautological-compare CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_vba.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn2x/dcn2x.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn20/display_mode_vba_20.o := $(dml_rcflags) @@ -94,10 +97,14 @@ CFLAGS_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_ccflags) CFLAGS_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_ccflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dml1_display_rq_dlg_calc.o := $(dml_rcflags) CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_rq_dlg_helpers.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calcs.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_auto.o := $(dml_rcflags) +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/calcs/dcn_calc_math.o := $(dml_rcflags) -DML = display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o \ +DML = calcs/dce_calcs.o calcs/custom_float.o calcs/bw_fixed.o ifdef CONFIG_DRM_AMD_DC_DCN +DML += display_mode_lib.o display_rq_dlg_helpers.o dml1_display_rq_dlg_calc.o DML += dcn20/dcn20_fpu.o DML += display_mode_vba.o dcn20/display_rq_dlg_calc_20.o dcn20/display_mode_vba_20.o DML += dcn20/display_rq_dlg_calc_20v2.o dcn20/display_mode_vba_20v2.o @@ -107,6 +114,7 @@ DML += dcn31/display_mode_vba_31.o dcn31/display_rq_dlg_calc_31.o DML += dcn301/dcn301_fpu.o DML += dcn302/dcn302_fpu.o DML += dsc/rc_calc_fpu.o +DML += calcs/dcn_calcs.o calcs/dcn_calc_math.o calcs/dcn_calc_auto.o endif AMD_DAL_DML = $(addprefix $(AMDDALPATH)/dc/dml/,$(DML)) diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c new file mode 100644 index 000000000000..6ca288fb5fb9 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/bw_fixed.c @@ -0,0 +1,191 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ +#include "dm_services.h" +#include "bw_fixed.h" + + +#define MIN_I64 \ + (int64_t)(-(1LL << 63)) + +#define MAX_I64 \ + (int64_t)((1ULL << 63) - 1) + +#define FRACTIONAL_PART_MASK \ + ((1ULL << BW_FIXED_BITS_PER_FRACTIONAL_PART) - 1) + +#define GET_FRACTIONAL_PART(x) \ + (FRACTIONAL_PART_MASK & (x)) + +static uint64_t abs_i64(int64_t arg) +{ + if (arg >= 0) + return (uint64_t)(arg); + else + return (uint64_t)(-arg); +} + +struct bw_fixed bw_int_to_fixed_nonconst(int64_t value) +{ + struct bw_fixed res; + ASSERT(value < BW_FIXED_MAX_I32 && value > BW_FIXED_MIN_I32); + res.value = value << BW_FIXED_BITS_PER_FRACTIONAL_PART; + return res; +} + +struct bw_fixed bw_frc_to_fixed(int64_t numerator, int64_t denominator) +{ + struct bw_fixed res; + bool arg1_negative = numerator < 0; + bool arg2_negative = denominator < 0; + uint64_t arg1_value; + uint64_t arg2_value; + uint64_t remainder; + + /* determine integer part */ + uint64_t res_value; + + ASSERT(denominator != 0); + + arg1_value = abs_i64(numerator); + arg2_value = abs_i64(denominator); + res_value = div64_u64_rem(arg1_value, arg2_value, &remainder); + + ASSERT(res_value <= BW_FIXED_MAX_I32); + + /* determine fractional part */ + { + uint32_t i = BW_FIXED_BITS_PER_FRACTIONAL_PART; + + do + { + remainder <<= 1; + + res_value <<= 1; + + if (remainder >= arg2_value) + { + res_value |= 1; + remainder -= arg2_value; + } + } while (--i != 0); + } + + /* round up LSB */ + { + uint64_t summand = (remainder << 1) >= arg2_value; + + ASSERT(res_value <= MAX_I64 - summand); + + res_value += summand; + } + + res.value = (int64_t)(res_value); + + if (arg1_negative ^ arg2_negative) + res.value = -res.value; + return res; +} + +struct bw_fixed bw_floor2( + const struct bw_fixed arg, + const struct bw_fixed significance) +{ + struct bw_fixed result; + int64_t multiplicand; + + multiplicand = div64_s64(arg.value, abs_i64(significance.value)); + result.value = abs_i64(significance.value) * multiplicand; + ASSERT(abs_i64(result.value) <= abs_i64(arg.value)); + return result; +} + +struct bw_fixed bw_ceil2( + const struct bw_fixed arg, + const struct bw_fixed significance) +{ + struct bw_fixed result; + int64_t multiplicand; + + multiplicand = div64_s64(arg.value, abs_i64(significance.value)); + result.value = abs_i64(significance.value) * multiplicand; + if (abs_i64(result.value) < abs_i64(arg.value)) { + if (arg.value < 0) + result.value -= abs_i64(significance.value); + else + result.value += abs_i64(significance.value); + } + return result; +} + +struct bw_fixed bw_mul(const struct bw_fixed arg1, const struct bw_fixed arg2) +{ + struct bw_fixed res; + + bool arg1_negative = arg1.value < 0; + bool arg2_negative = arg2.value < 0; + + uint64_t arg1_value = abs_i64(arg1.value); + uint64_t arg2_value = abs_i64(arg2.value); + + uint64_t arg1_int = BW_FIXED_GET_INTEGER_PART(arg1_value); + uint64_t arg2_int = BW_FIXED_GET_INTEGER_PART(arg2_value); + + uint64_t arg1_fra = GET_FRACTIONAL_PART(arg1_value); + uint64_t arg2_fra = GET_FRACTIONAL_PART(arg2_value); + + uint64_t tmp; + + res.value = arg1_int * arg2_int; + + ASSERT(res.value <= BW_FIXED_MAX_I32); + + res.value <<= BW_FIXED_BITS_PER_FRACTIONAL_PART; + + tmp = arg1_int * arg2_fra; + + ASSERT(tmp <= 
(uint64_t)(MAX_I64 - res.value)); + + res.value += tmp; + + tmp = arg2_int * arg1_fra; + + ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value)); + + res.value += tmp; + + tmp = arg1_fra * arg2_fra; + + tmp = (tmp >> BW_FIXED_BITS_PER_FRACTIONAL_PART) + + (tmp >= (uint64_t)(bw_frc_to_fixed(1, 2).value)); + + ASSERT(tmp <= (uint64_t)(MAX_I64 - res.value)); + + res.value += tmp; + + if (arg1_negative ^ arg2_negative) + res.value = -res.value; + return res; +} + diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/calcs_logger.h b/drivers/gpu/drm/amd/display/dc/dml/calcs/calcs_logger.h new file mode 100644 index 000000000000..62435bfc274d --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/calcs_logger.h @@ -0,0 +1,578 @@ +/* + * Copyright 2018 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
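The bw_fixed helpers above implement a signed 64-bit fixed-point type so the bandwidth formulas can run without kernel floating point: bw_frc_to_fixed() does a bit-by-bit long division to build the fractional bits (with a final round of the LSB), and bw_mul() splits each operand into integer and fractional halves and sums the three partial products, which keeps every intermediate within 64 bits for the ranges the ASSERTs allow. The standalone sketch below mirrors that multiplication scheme; the 24-bit fractional width and the fx_* helper names are assumptions made for illustration only, the real width comes from BW_FIXED_BITS_PER_FRACTIONAL_PART in bw_fixed.h.

/*
 * Minimal userspace sketch of the fixed-point multiply used by bw_mul():
 * the product is assembled from int*int, int*frac and frac*frac partial
 * products so that no intermediate needs more than 64 bits.  Assumes a
 * 24-bit fractional part purely for illustration.
 */
#include <stdint.h>
#include <stdio.h>

#define FRAC_BITS 24
#define FRAC_MASK ((1ULL << FRAC_BITS) - 1)

static int64_t fx_from_int(int64_t v) { return v << FRAC_BITS; }
static double fx_to_double(int64_t v) { return (double)v / (double)(1ULL << FRAC_BITS); }

static int64_t fx_mul(int64_t a, int64_t b)
{
	uint64_t ua = a < 0 ? -(uint64_t)a : (uint64_t)a;
	uint64_t ub = b < 0 ? -(uint64_t)b : (uint64_t)b;
	uint64_t ai = ua >> FRAC_BITS, af = ua & FRAC_MASK;
	uint64_t bi = ub >> FRAC_BITS, bf = ub & FRAC_MASK;
	uint64_t r = (ai * bi) << FRAC_BITS;	/* integer * integer */

	r += ai * bf + bi * af;			/* integer * fraction cross terms */
	r += (af * bf) >> FRAC_BITS;		/* fraction * fraction, truncated
						 * here (bw_mul() also rounds)  */
	return ((a < 0) ^ (b < 0)) ? -(int64_t)r : (int64_t)r;
}

int main(void)
{
	int64_t ratio = fx_from_int(3) / 2;	/* 1.5 in fixed point */
	int64_t clk = fx_from_int(594);		/* e.g. a 594 MHz clock */

	printf("1.5 * 594 = %f\n", fx_to_double(fx_mul(ratio, clk)));	/* 891.0 */
	return 0;
}

The same split is why bw_mul() asserts that the integer parts stay below BW_FIXED_MAX_I32 before shifting the int*int term back up.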
+ * + * Authors: AMD + * + */ + +#ifndef _CALCS_CALCS_LOGGER_H_ +#define _CALCS_CALCS_LOGGER_H_ +#define DC_LOGGER ctx->logger + +static void print_bw_calcs_dceip(struct dc_context *ctx, const struct bw_calcs_dceip *dceip) +{ + + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_dceip"); + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_calcs_version version %d", dceip->version); + DC_LOG_BANDWIDTH_CALCS(" [bool] large_cursor: %d", dceip->large_cursor); + DC_LOG_BANDWIDTH_CALCS(" [bool] dmif_pipe_en_fbc_chunk_tracker: %d", dceip->dmif_pipe_en_fbc_chunk_tracker); + DC_LOG_BANDWIDTH_CALCS(" [bool] display_write_back_supported: %d", dceip->display_write_back_supported); + DC_LOG_BANDWIDTH_CALCS(" [bool] argb_compression_support: %d", dceip->argb_compression_support); + DC_LOG_BANDWIDTH_CALCS(" [bool] pre_downscaler_enabled: %d", dceip->pre_downscaler_enabled); + DC_LOG_BANDWIDTH_CALCS(" [bool] underlay_downscale_prefetch_enabled: %d", + dceip->underlay_downscale_prefetch_enabled); + DC_LOG_BANDWIDTH_CALCS(" [bool] graphics_lb_nodownscaling_multi_line_prefetching: %d", + dceip->graphics_lb_nodownscaling_multi_line_prefetching); + DC_LOG_BANDWIDTH_CALCS(" [bool] limit_excessive_outstanding_dmif_requests: %d", + dceip->limit_excessive_outstanding_dmif_requests); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_max_outstanding_group_num: %d", + dceip->cursor_max_outstanding_group_num); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lines_interleaved_into_lb: %d", dceip->lines_interleaved_into_lb); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] low_power_tiling_mode: %d", dceip->low_power_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_width: %d", dceip->chunk_width); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_graphics_pipes: %d", dceip->number_of_graphics_pipes); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_pipes: %d", dceip->number_of_underlay_pipes); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_dmif_buffer_allocated: %d", dceip->max_dmif_buffer_allocated); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_dmif_size: %d", dceip->graphics_dmif_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_luma_dmif_size: %d", dceip->underlay_luma_dmif_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_chroma_dmif_size: %d", dceip->underlay_chroma_dmif_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_lines_of_pte_prefetching_in_linear_mode: %d", + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_luma_mcifwr_buffer_size: %d", + dceip->display_write_back420_luma_mcifwr_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] display_write_back420_chroma_mcifwr_buffer_size: %d", + dceip->display_write_back420_chroma_mcifwr_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] scatter_gather_pte_request_rows_in_tiling_mode: %d", + dceip->scatter_gather_pte_request_rows_in_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency10_bit_per_component: %d", + bw_fixed_to_int(dceip->underlay_vscaler_efficiency10_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_vscaler_efficiency12_bit_per_component: %d", + bw_fixed_to_int(dceip->underlay_vscaler_efficiency12_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency6_bit_per_component: %d", + 
bw_fixed_to_int(dceip->graphics_vscaler_efficiency6_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency8_bit_per_component: %d", + bw_fixed_to_int(dceip->graphics_vscaler_efficiency8_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency10_bit_per_component: %d", + bw_fixed_to_int(dceip->graphics_vscaler_efficiency10_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] graphics_vscaler_efficiency12_bit_per_component: %d", + bw_fixed_to_int(dceip->graphics_vscaler_efficiency12_bit_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] alpha_vscaler_efficiency: %d", + bw_fixed_to_int(dceip->alpha_vscaler_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_write_pixels_per_dispclk: %d", + bw_fixed_to_int(dceip->lb_write_pixels_per_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component444: %d", + bw_fixed_to_int(dceip->lb_size_per_component444)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_and_dram_clock_state_change_gated_before_cursor: %d", + bw_fixed_to_int(dceip->stutter_and_dram_clock_state_change_gated_before_cursor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_luma_lb_size_per_component: %d", + bw_fixed_to_int(dceip->underlay420_luma_lb_size_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay420_chroma_lb_size_per_component: %d", + bw_fixed_to_int(dceip->underlay420_chroma_lb_size_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay422_lb_size_per_component: %d", + bw_fixed_to_int(dceip->underlay422_lb_size_per_component)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_chunk_width: %d", bw_fixed_to_int(dceip->cursor_chunk_width)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_dcp_buffer_lines: %d", + bw_fixed_to_int(dceip->cursor_dcp_buffer_lines)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_width_efficient_for_tiling: %d", + bw_fixed_to_int(dceip->underlay_maximum_width_efficient_for_tiling)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_height_efficient_for_tiling: %d", + bw_fixed_to_int(dceip->underlay_maximum_height_efficient_for_tiling)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display: %d", + bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation: %d", + bw_fixed_to_int(dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_outstanding_pte_request_limit: %d", + bw_fixed_to_int(dceip->minimum_outstanding_pte_request_limit)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_total_outstanding_pte_requests_allowed_by_saw: %d", + bw_fixed_to_int(dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] linear_mode_line_request_alternation_slice: %d", + bw_fixed_to_int(dceip->linear_mode_line_request_alternation_slice)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_efficiency: %d", bw_fixed_to_int(dceip->request_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_per_request: %d", bw_fixed_to_int(dceip->dispclk_per_request)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_ramping_factor: %d", + bw_fixed_to_int(dceip->dispclk_ramping_factor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_throughput_factor: %d", + bw_fixed_to_int(dceip->display_pipe_throughput_factor)); + 
DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_all_surfaces_burst_time: %d", + bw_fixed_to_int(dceip->mcifwr_all_surfaces_burst_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_request_buffer_size: %d", + bw_fixed_to_int(dceip->dmif_request_buffer_size)); + + +} + +static void print_bw_calcs_vbios(struct dc_context *ctx, const struct bw_calcs_vbios *vbios) +{ + + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_vbios vbios"); + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines memory_type: %d", vbios->memory_type); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] dram_channel_width_in_bits: %d", vbios->dram_channel_width_in_bits); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", vbios->number_of_dram_channels); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_banks: %d", vbios->number_of_dram_banks); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_yclk: %d", bw_fixed_to_int(vbios->low_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_yclk: %d", bw_fixed_to_int(vbios->mid_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_yclk: %d", bw_fixed_to_int(vbios->high_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_sclk: %d", bw_fixed_to_int(vbios->low_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid1_sclk: %d", bw_fixed_to_int(vbios->mid1_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid2_sclk: %d", bw_fixed_to_int(vbios->mid2_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid3_sclk: %d", bw_fixed_to_int(vbios->mid3_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid4_sclk: %d", bw_fixed_to_int(vbios->mid4_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid5_sclk: %d", bw_fixed_to_int(vbios->mid5_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid6_sclk: %d", bw_fixed_to_int(vbios->mid6_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_sclk: %d", bw_fixed_to_int(vbios->high_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_dispclk: %d", + bw_fixed_to_int(vbios->low_voltage_max_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_dispclk;: %d", + bw_fixed_to_int(vbios->mid_voltage_max_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_dispclk;: %d", + bw_fixed_to_int(vbios->high_voltage_max_dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] low_voltage_max_phyclk: %d", + bw_fixed_to_int(vbios->low_voltage_max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mid_voltage_max_phyclk: %d", + bw_fixed_to_int(vbios->mid_voltage_max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] high_voltage_max_phyclk: %d", + bw_fixed_to_int(vbios->high_voltage_max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_return_bus_width: %d", bw_fixed_to_int(vbios->data_return_bus_width)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] trc: %d", bw_fixed_to_int(vbios->trc)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency: %d", bw_fixed_to_int(vbios->dmifmc_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_exit_latency: %d", + bw_fixed_to_int(vbios->stutter_self_refresh_exit_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_self_refresh_entry_latency: %d", + bw_fixed_to_int(vbios->stutter_self_refresh_entry_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_latency: %d", + bw_fixed_to_int(vbios->nbp_state_change_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
mcifwrmc_urgent_latency: %d", + bw_fixed_to_int(vbios->mcifwrmc_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable: %d", vbios->scatter_gather_enable); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] down_spread_percentage: %d", + bw_fixed_to_int(vbios->down_spread_percentage)); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] cursor_width: %d", vbios->cursor_width); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] average_compression_rate: %d", vbios->average_compression_rate); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_request_slots_gmc_reserves_for_dmif_per_channel: %d", + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration: %d", bw_fixed_to_int(vbios->blackout_duration)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_blackout_recovery_time: %d", + bw_fixed_to_int(vbios->maximum_blackout_recovery_time)); + + +} + +static void print_bw_calcs_data(struct dc_context *ctx, struct bw_calcs_data *data) +{ + + int i, j, k; + + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS("struct bw_calcs_data data"); + DC_LOG_BANDWIDTH_CALCS("#####################################################################"); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_displays: %d", data->number_of_displays); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_surface_type: %d", data->underlay_surface_type); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines panning_and_bezel_adjustment: %d", + data->panning_and_bezel_adjustment); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_tiling_mode: %d", data->graphics_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] graphics_lb_bpc: %d", data->graphics_lb_bpc); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] underlay_lb_bpc: %d", data->underlay_lb_bpc); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_tiling_mode: %d", data->underlay_tiling_mode); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d0_underlay_mode: %d", data->d0_underlay_mode); + DC_LOG_BANDWIDTH_CALCS(" [bool] d1_display_write_back_dwb_enable: %d", data->d1_display_write_back_dwb_enable); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines d1_underlay_mode: %d", data->d1_underlay_mode); + DC_LOG_BANDWIDTH_CALCS(" [bool] cpup_state_change_enable: %d", data->cpup_state_change_enable); + DC_LOG_BANDWIDTH_CALCS(" [bool] cpuc_state_change_enable: %d", data->cpuc_state_change_enable); + DC_LOG_BANDWIDTH_CALCS(" [bool] nbp_state_change_enable: %d", data->nbp_state_change_enable); + DC_LOG_BANDWIDTH_CALCS(" [bool] stutter_mode_enable: %d", data->stutter_mode_enable); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] y_clk_level: %d", data->y_clk_level); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] sclk_level: %d", data->sclk_level); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_underlay_surfaces: %d", data->number_of_underlay_surfaces); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_wrchannels: %d", data->number_of_dram_wrchannels); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] chunk_request_delay: %d", data->chunk_request_delay); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] number_of_dram_channels: %d", data->number_of_dram_channels); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines underlay_micro_tile_mode: %d", data->underlay_micro_tile_mode); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines graphics_micro_tile_mode: %d", data->graphics_micro_tile_mode); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] max_phyclk: %d", bw_fixed_to_int(data->max_phyclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_efficiency: %d", 
bw_fixed_to_int(data->dram_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_surface_type: %d", + bw_fixed_to_int(data->src_width_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_surface_type: %d", + bw_fixed_to_int(data->src_height_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_surface_type: %d", + bw_fixed_to_int(data->hsr_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_surface_type: %d", bw_fixed_to_int(data->vsr_after_surface_type)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width_after_rotation: %d", + bw_fixed_to_int(data->src_width_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height_after_rotation: %d", + bw_fixed_to_int(data->src_height_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_rotation: %d", bw_fixed_to_int(data->hsr_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_rotation: %d", bw_fixed_to_int(data->vsr_after_rotation)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_pixels: %d", bw_fixed_to_int(data->source_height_pixels)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr_after_stereo: %d", bw_fixed_to_int(data->hsr_after_stereo)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr_after_stereo: %d", bw_fixed_to_int(data->vsr_after_stereo)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_in_lb: %d", bw_fixed_to_int(data->source_width_in_lb)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_line_pitch: %d", bw_fixed_to_int(data->lb_line_pitch)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] underlay_maximum_source_efficient_for_tiling: %d", + bw_fixed_to_int(data->underlay_maximum_source_efficient_for_tiling)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] num_lines_at_frame_start: %d", + bw_fixed_to_int(data->num_lines_at_frame_start)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dmif_size_in_time: %d", bw_fixed_to_int(data->min_dmif_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_mcifwr_size_in_time: %d", + bw_fixed_to_int(data->min_mcifwr_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_dmif_size: %d", + bw_fixed_to_int(data->total_requests_for_dmif_size)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] peak_pte_request_to_eviction_ratio_limiting: %d", + bw_fixed_to_int(data->peak_pte_request_to_eviction_ratio_limiting)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_pte_per_pte_request: %d", + bw_fixed_to_int(data->useful_pte_per_pte_request)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_rows: %d", + bw_fixed_to_int(data->scatter_gather_pte_request_rows)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_row_height: %d", + bw_fixed_to_int(data->scatter_gather_row_height)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_vblank: %d", + bw_fixed_to_int(data->scatter_gather_pte_requests_in_vblank)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] inefficient_linear_pitch_in_bytes: %d", + bw_fixed_to_int(data->inefficient_linear_pitch_in_bytes)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_data: %d", bw_fixed_to_int(data->cursor_total_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_total_request_groups: %d", + bw_fixed_to_int(data->cursor_total_request_groups)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_requests: %d", + bw_fixed_to_int(data->scatter_gather_total_pte_requests)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_total_pte_request_groups: %d", + bw_fixed_to_int(data->scatter_gather_total_pte_request_groups)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
tile_width_in_pixels: %d", bw_fixed_to_int(data->tile_width_in_pixels)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_number_of_data_request_page_close_open: %d", + bw_fixed_to_int(data->dmif_total_number_of_data_request_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_number_of_data_request_page_close_open: %d", + bw_fixed_to_int(data->mcifwr_total_number_of_data_request_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_page_close_open: %d", + bw_fixed_to_int(data->bytes_per_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_total_page_close_open_time: %d", + bw_fixed_to_int(data->mcifwr_total_page_close_open_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_requests_for_adjusted_dmif_size: %d", + bw_fixed_to_int(data->total_requests_for_adjusted_dmif_size)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_trips: %d", + bw_fixed_to_int(data->total_dmifmc_urgent_trips)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dmifmc_urgent_latency: %d", + bw_fixed_to_int(data->total_dmifmc_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_data: %d", + bw_fixed_to_int(data->total_display_reads_required_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_reads_required_dram_access_data: %d", + bw_fixed_to_int(data->total_display_reads_required_dram_access_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_data: %d", + bw_fixed_to_int(data->total_display_writes_required_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_display_writes_required_dram_access_data: %d", + bw_fixed_to_int(data->total_display_writes_required_dram_access_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_data: %d", + bw_fixed_to_int(data->display_reads_required_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_required_dram_access_data: %d", + bw_fixed_to_int(data->display_reads_required_dram_access_data)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_total_page_close_open_time: %d", + bw_fixed_to_int(data->dmif_total_page_close_open_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_cursor_memory_interface_buffer_size_in_time: %d", + bw_fixed_to_int(data->min_cursor_memory_interface_buffer_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_read_buffer_size_in_time: %d", + bw_fixed_to_int(data->min_read_buffer_size_in_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer: %d", + bw_fixed_to_int(data->display_reads_time_for_data_transfer)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_writes_time_for_data_transfer: %d", + bw_fixed_to_int(data->display_writes_time_for_data_transfer)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_dram_bandwidth: %d", + bw_fixed_to_int(data->dmif_required_dram_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_dram_bandwidth: %d", + bw_fixed_to_int(data->mcifwr_required_dram_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dmifmc_urgent_latency_for_page_close_open: %d", + bw_fixed_to_int(data->required_dmifmc_urgent_latency_for_page_close_open)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_mcifmcwr_urgent_latency: %d", + bw_fixed_to_int(data->required_mcifmcwr_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_dram_bandwidth_gbyte_per_second: %d", + bw_fixed_to_int(data->required_dram_bandwidth_gbyte_per_second)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_bandwidth: %d", bw_fixed_to_int(data->dram_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] 
dmif_required_sclk: %d", bw_fixed_to_int(data->dmif_required_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_required_sclk: %d", bw_fixed_to_int(data->mcifwr_required_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] required_sclk: %d", bw_fixed_to_int(data->required_sclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] downspread_factor: %d", bw_fixed_to_int(data->downspread_factor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scaler_efficiency: %d", bw_fixed_to_int(data->v_scaler_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scaler_limits_factor: %d", bw_fixed_to_int(data->scaler_limits_factor)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_pipe_pixel_throughput: %d", + bw_fixed_to_int(data->display_pipe_pixel_throughput)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping: %d", + bw_fixed_to_int(data->total_dispclk_required_with_ramping)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping: %d", + bw_fixed_to_int(data->total_dispclk_required_without_ramping)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_read_request_bandwidth: %d", + bw_fixed_to_int(data->total_read_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_write_request_bandwidth: %d", + bw_fixed_to_int(data->total_write_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_total_read_request_bandwidth: %d", + bw_fixed_to_int(data->dispclk_required_for_total_read_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_with_ramping_with_request_bandwidth: %d", + bw_fixed_to_int(data->total_dispclk_required_with_ramping_with_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_dispclk_required_without_ramping_with_request_bandwidth: %d", + bw_fixed_to_int(data->total_dispclk_required_without_ramping_with_request_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk: %d", bw_fixed_to_int(data->dispclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_recovery_time: %d", bw_fixed_to_int(data->blackout_recovery_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_pixels_per_data_fifo_entry: %d", + bw_fixed_to_int(data->min_pixels_per_data_fifo_entry)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] sclk_deep_sleep: %d", bw_fixed_to_int(data->sclk_deep_sleep)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] chunk_request_time: %d", bw_fixed_to_int(data->chunk_request_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_request_time: %d", bw_fixed_to_int(data->cursor_request_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] line_source_pixels_transfer_time: %d", + bw_fixed_to_int(data->line_source_pixels_transfer_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifdram_access_efficiency: %d", + bw_fixed_to_int(data->dmifdram_access_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwrdram_access_efficiency: %d", + bw_fixed_to_int(data->mcifwrdram_access_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth_no_compression: %d", + bw_fixed_to_int(data->total_average_bandwidth_no_compression)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_average_bandwidth: %d", + bw_fixed_to_int(data->total_average_bandwidth)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] total_stutter_cycle_duration: %d", + bw_fixed_to_int(data->total_stutter_cycle_duration)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_burst_time: %d", bw_fixed_to_int(data->stutter_burst_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] time_in_self_refresh: %d", bw_fixed_to_int(data->time_in_self_refresh)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_efficiency: 
%d", bw_fixed_to_int(data->stutter_efficiency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] worst_number_of_trips_to_memory: %d", + bw_fixed_to_int(data->worst_number_of_trips_to_memory)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] immediate_flip_time: %d", bw_fixed_to_int(data->immediate_flip_time)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_dmif_clients: %d", + bw_fixed_to_int(data->latency_for_non_dmif_clients)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_for_non_mcifwr_clients: %d", + bw_fixed_to_int(data->latency_for_non_mcifwr_clients)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmifmc_urgent_latency_supported_in_high_sclk_and_yclk: %d", + bw_fixed_to_int(data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_margin: %d", + bw_fixed_to_int(data->nbp_state_dram_speed_change_margin)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_reads_time_for_data_transfer_and_urgent_latency: %d", + bw_fixed_to_int(data->display_reads_time_for_data_transfer_and_urgent_latency)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_margin: %d", + bw_fixed_to_int(data->dram_speed_change_margin)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_vblank_dram_speed_change_margin: %d", + bw_fixed_to_int(data->min_vblank_dram_speed_change_margin)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_stutter_refresh_duration: %d", + bw_fixed_to_int(data->min_stutter_refresh_duration)); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_stutter_dmif_buffer_size: %d", data->total_stutter_dmif_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] total_bytes_requested: %d", data->total_bytes_requested); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] min_stutter_dmif_buffer_size: %d", data->min_stutter_dmif_buffer_size); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] num_stutter_bursts: %d", data->num_stutter_bursts); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_nbp_state_dram_speed_change_latency_supported: %d", + bw_fixed_to_int(data->v_blank_nbp_state_dram_speed_change_latency_supported)); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_dram_speed_change_latency_supported: %d", + bw_fixed_to_int(data->nbp_state_dram_speed_change_latency_supported)); + + for (i = 0; i < maximum_number_of_surfaces; i++) { + DC_LOG_BANDWIDTH_CALCS(" [bool] fbc_en[%d]:%d\n", i, data->fbc_en[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] lpt_en[%d]:%d", i, data->lpt_en[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] displays_match_flag[%d]:%d", i, data->displays_match_flag[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] use_alpha[%d]:%d", i, data->use_alpha[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] orthogonal_rotation[%d]:%d", i, data->orthogonal_rotation[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] enable[%d]:%d", i, data->enable[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] access_one_channel_only[%d]:%d", i, data->access_one_channel_only[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] scatter_gather_enable_for_pipe[%d]:%d", + i, data->scatter_gather_enable_for_pipe[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] interlace_mode[%d]:%d", + i, data->interlace_mode[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] display_pstate_change_enable[%d]:%d", + i, data->display_pstate_change_enable[i]); + DC_LOG_BANDWIDTH_CALCS(" [bool] line_buffer_prefetch[%d]:%d", i, data->line_buffer_prefetch[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] bytes_per_pixel[%d]:%d", i, data->bytes_per_pixel[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] max_chunks_non_fbc_mode[%d]:%d", + i, data->max_chunks_non_fbc_mode[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] lb_bpc[%d]:%d", i, data->lb_bpc[i]); 
+ DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bpphdmi[%d]:%d", i, data->output_bpphdmi[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr[%d]:%d", i, data->output_bppdp4_lane_hbr[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr2[%d]:%d", + i, data->output_bppdp4_lane_hbr2[i]); + DC_LOG_BANDWIDTH_CALCS(" [uint32_t] output_bppdp4_lane_hbr3[%d]:%d", + i, data->output_bppdp4_lane_hbr3[i]); + DC_LOG_BANDWIDTH_CALCS(" [enum] bw_defines stereo_mode[%d]:%d", i, data->stereo_mode[i]); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_buffer_transfer_time[%d]:%d", + i, bw_fixed_to_int(data->dmif_buffer_transfer_time[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] displays_with_same_mode[%d]:%d", + i, bw_fixed_to_int(data->displays_with_same_mode[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_dmif_buffer_size[%d]:%d", + i, bw_fixed_to_int(data->stutter_dmif_buffer_size[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_refresh_duration[%d]:%d", + i, bw_fixed_to_int(data->stutter_refresh_duration[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_exit_watermark[%d]:%d", + i, bw_fixed_to_int(data->stutter_exit_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_entry_watermark[%d]:%d", + i, bw_fixed_to_int(data->stutter_entry_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_total[%d]:%d", i, bw_fixed_to_int(data->h_total[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_total[%d]:%d", i, bw_fixed_to_int(data->v_total[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixel_rate[%d]:%d", i, bw_fixed_to_int(data->pixel_rate[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_width[%d]:%d", i, bw_fixed_to_int(data->src_width[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels[%d]:%d", + i, bw_fixed_to_int(data->pitch_in_pixels[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pitch_in_pixels_after_surface_type[%d]:%d", + i, bw_fixed_to_int(data->pitch_in_pixels_after_surface_type[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_height[%d]:%d", i, bw_fixed_to_int(data->src_height[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scale_ratio[%d]:%d", i, bw_fixed_to_int(data->scale_ratio[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_taps[%d]:%d", i, bw_fixed_to_int(data->h_taps[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_taps[%d]:%d", i, bw_fixed_to_int(data->v_taps[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] h_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->h_scale_ratio[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_scale_ratio[%d]:%d", i, bw_fixed_to_int(data->v_scale_ratio[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] rotation_angle[%d]:%d", + i, bw_fixed_to_int(data->rotation_angle[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] compression_rate[%d]:%d", + i, bw_fixed_to_int(data->compression_rate[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] hsr[%d]:%d", i, bw_fixed_to_int(data->hsr[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] vsr[%d]:%d", i, bw_fixed_to_int(data->vsr[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_rounded_up_to_chunks[%d]:%d", + i, bw_fixed_to_int(data->source_width_rounded_up_to_chunks[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_width_pixels[%d]:%d", + i, bw_fixed_to_int(data->source_width_pixels[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] source_height_rounded_up_to_chunks[%d]:%d", + i, bw_fixed_to_int(data->source_height_rounded_up_to_chunks[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] display_bandwidth[%d]:%d", + i, bw_fixed_to_int(data->display_bandwidth[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] request_bandwidth[%d]:%d", + i, 
bw_fixed_to_int(data->request_bandwidth[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] bytes_per_request[%d]:%d", + i, bw_fixed_to_int(data->bytes_per_request[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] useful_bytes_per_request[%d]:%d", + i, bw_fixed_to_int(data->useful_bytes_per_request[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lines_interleaved_in_mem_access[%d]:%d", + i, bw_fixed_to_int(data->lines_interleaved_in_mem_access[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] latency_hiding_lines[%d]:%d", + i, bw_fixed_to_int(data->latency_hiding_lines[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions[%d]:%d", + i, bw_fixed_to_int(data->lb_partitions[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_partitions_max[%d]:%d", + i, bw_fixed_to_int(data->lb_partitions_max[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_with_ramping[%d]:%d", + i, bw_fixed_to_int(data->dispclk_required_with_ramping[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_without_ramping[%d]:%d", + i, bw_fixed_to_int(data->dispclk_required_without_ramping[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] data_buffer_size[%d]:%d", + i, bw_fixed_to_int(data->data_buffer_size[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] outstanding_chunk_request_limit[%d]:%d", + i, bw_fixed_to_int(data->outstanding_chunk_request_limit[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] urgent_watermark[%d]:%d", + i, bw_fixed_to_int(data->urgent_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] nbp_state_change_watermark[%d]:%d", + i, bw_fixed_to_int(data->nbp_state_change_watermark[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_filter_init[%d]:%d", i, bw_fixed_to_int(data->v_filter_init[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] stutter_cycle_duration[%d]:%d", + i, bw_fixed_to_int(data->stutter_cycle_duration[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth[%d]:%d", + i, bw_fixed_to_int(data->average_bandwidth[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] average_bandwidth_no_compression[%d]:%d", + i, bw_fixed_to_int(data->average_bandwidth_no_compression[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_request_limit[%d]:%d", + i, bw_fixed_to_int(data->scatter_gather_pte_request_limit[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_size_per_component[%d]:%d", + i, bw_fixed_to_int(data->lb_size_per_component[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] memory_chunk_size_in_bytes[%d]:%d", + i, bw_fixed_to_int(data->memory_chunk_size_in_bytes[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pipe_chunk_size_in_bytes[%d]:%d", + i, bw_fixed_to_int(data->pipe_chunk_size_in_bytes[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] number_of_trips_to_memory_for_getting_apte_row[%d]:%d", + i, bw_fixed_to_int(data->number_of_trips_to_memory_for_getting_apte_row[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size[%d]:%d", + i, bw_fixed_to_int(data->adjusted_data_buffer_size[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] adjusted_data_buffer_size_in_memory[%d]:%d", + i, bw_fixed_to_int(data->adjusted_data_buffer_size_in_memory[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pixels_per_data_fifo_entry[%d]:%d", + i, bw_fixed_to_int(data->pixels_per_data_fifo_entry[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_pte_requests_in_row[%d]:%d", + i, bw_fixed_to_int(data->scatter_gather_pte_requests_in_row[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] pte_request_per_chunk[%d]:%d", + i, bw_fixed_to_int(data->pte_request_per_chunk[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_width[%d]:%d", + i, 
bw_fixed_to_int(data->scatter_gather_page_width[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] scatter_gather_page_height[%d]:%d", + i, bw_fixed_to_int(data->scatter_gather_page_height[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_beginning_of_frame[%d]:%d", + i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_beginning_of_frame[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] lb_lines_in_per_line_out_in_middle_of_frame[%d]:%d", + i, bw_fixed_to_int(data->lb_lines_in_per_line_out_in_middle_of_frame[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_width_pixels[%d]:%d", + i, bw_fixed_to_int(data->cursor_width_pixels[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding[%d]:%d", + i, bw_fixed_to_int(data->minimum_latency_hiding[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding[%d]:%d", + i, bw_fixed_to_int(data->maximum_latency_hiding[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] minimum_latency_hiding_with_cursor[%d]:%d", + i, bw_fixed_to_int(data->minimum_latency_hiding_with_cursor[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] maximum_latency_hiding_with_cursor[%d]:%d", + i, bw_fixed_to_int(data->maximum_latency_hiding_with_cursor[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_first_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_pixels_for_first_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_pixels_for_last_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_pixels_for_last_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_first_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_data_for_first_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] src_data_for_last_output_pixel[%d]:%d", + i, bw_fixed_to_int(data->src_data_for_last_output_pixel[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] active_time[%d]:%d", i, bw_fixed_to_int(data->active_time[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] horizontal_blank_and_chunk_granularity_factor[%d]:%d", + i, bw_fixed_to_int(data->horizontal_blank_and_chunk_granularity_factor[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] cursor_latency_hiding[%d]:%d", + i, bw_fixed_to_int(data->cursor_latency_hiding[i])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] v_blank_dram_speed_change_margin[%d]:%d", + i, bw_fixed_to_int(data->v_blank_dram_speed_change_margin[i])); + } + + for (i = 0; i < maximum_number_of_surfaces; i++) { + for (j = 0; j < 3; j++) { + for (k = 0; k < 8; k++) { + + DC_LOG_BANDWIDTH_CALCS("\n [bw_fixed] line_source_transfer_time[%d][%d][%d]:%d", + i, j, k, bw_fixed_to_int(data->line_source_transfer_time[i][j][k])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dram_speed_change_line_source_transfer_time[%d][%d][%d]:%d", + i, j, k, + bw_fixed_to_int(data->dram_speed_change_line_source_transfer_time[i][j][k])); + } + } + } + + for (i = 0; i < 3; i++) { + for (j = 0; j < 8; j++) { + + DC_LOG_BANDWIDTH_CALCS("\n [uint32_t] num_displays_with_margin[%d][%d]:%d", + i, j, data->num_displays_with_margin[i][j]); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_burst_time[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dmif_burst_time[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] mcifwr_burst_time[%d][%d]:%d", + i, j, bw_fixed_to_int(data->mcifwr_burst_time[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] min_dram_speed_change_margin[%d][%d]:%d", + i, j, bw_fixed_to_int(data->min_dram_speed_change_margin[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_dram_speed_change[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dispclk_required_for_dram_speed_change[i][j])); + 
DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] blackout_duration_margin[%d][%d]:%d", + i, j, bw_fixed_to_int(data->blackout_duration_margin[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_duration[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_duration[i][j])); + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dispclk_required_for_blackout_recovery[%d][%d]:%d", + i, j, bw_fixed_to_int(data->dispclk_required_for_blackout_recovery[i][j])); + } + } + + for (i = 0; i < 6; i++) { + DC_LOG_BANDWIDTH_CALCS(" [bw_fixed] dmif_required_sclk_for_urgent_latency[%d]:%d", + i, bw_fixed_to_int(data->dmif_required_sclk_for_urgent_latency[i])); + } +} +; + +#endif /* _CALCS_CALCS_LOGGER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c new file mode 100644 index 000000000000..31d167bc548f --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/custom_float.c @@ -0,0 +1,197 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ +#include "dm_services.h" +#include "custom_float.h" + + +static bool build_custom_float( + struct fixed31_32 value, + const struct custom_float_format *format, + bool *negative, + uint32_t *mantissa, + uint32_t *exponenta) +{ + uint32_t exp_offset = (1 << (format->exponenta_bits - 1)) - 1; + + const struct fixed31_32 mantissa_constant_plus_max_fraction = + dc_fixpt_from_fraction( + (1LL << (format->mantissa_bits + 1)) - 1, + 1LL << format->mantissa_bits); + + struct fixed31_32 mantiss; + + if (dc_fixpt_eq( + value, + dc_fixpt_zero)) { + *negative = false; + *mantissa = 0; + *exponenta = 0; + return true; + } + + if (dc_fixpt_lt( + value, + dc_fixpt_zero)) { + *negative = format->sign; + value = dc_fixpt_neg(value); + } else { + *negative = false; + } + + if (dc_fixpt_lt( + value, + dc_fixpt_one)) { + uint32_t i = 1; + + do { + value = dc_fixpt_shl(value, 1); + ++i; + } while (dc_fixpt_lt( + value, + dc_fixpt_one)); + + --i; + + if (exp_offset <= i) { + *mantissa = 0; + *exponenta = 0; + return true; + } + + *exponenta = exp_offset - i; + } else if (dc_fixpt_le( + mantissa_constant_plus_max_fraction, + value)) { + uint32_t i = 1; + + do { + value = dc_fixpt_shr(value, 1); + ++i; + } while (dc_fixpt_lt( + mantissa_constant_plus_max_fraction, + value)); + + *exponenta = exp_offset + i - 1; + } else { + *exponenta = exp_offset; + } + + mantiss = dc_fixpt_sub( + value, + dc_fixpt_one); + + if (dc_fixpt_lt( + mantiss, + dc_fixpt_zero) || + dc_fixpt_lt( + dc_fixpt_one, + mantiss)) + mantiss = dc_fixpt_zero; + else + mantiss = dc_fixpt_shl( + mantiss, + format->mantissa_bits); + + *mantissa = dc_fixpt_floor(mantiss); + + return true; +} + +static bool setup_custom_float( + const struct custom_float_format *format, + bool negative, + uint32_t mantissa, + uint32_t exponenta, + uint32_t *result) +{ + uint32_t i = 0; + uint32_t j = 0; + + uint32_t value = 0; + + /* verification code: + * once calculation is ok we can remove it + */ + + const uint32_t mantissa_mask = + (1 << (format->mantissa_bits + 1)) - 1; + + const uint32_t exponenta_mask = + (1 << (format->exponenta_bits + 1)) - 1; + + if (mantissa & ~mantissa_mask) { + BREAK_TO_DEBUGGER(); + mantissa = mantissa_mask; + } + + if (exponenta & ~exponenta_mask) { + BREAK_TO_DEBUGGER(); + exponenta = exponenta_mask; + } + + /* end of verification code */ + + while (i < format->mantissa_bits) { + uint32_t mask = 1 << i; + + if (mantissa & mask) + value |= mask; + + ++i; + } + + while (j < format->exponenta_bits) { + uint32_t mask = 1 << j; + + if (exponenta & mask) + value |= mask << i; + + ++j; + } + + if (negative && format->sign) + value |= 1 << (i + j); + + *result = value; + + return true; +} + +bool convert_to_custom_float_format( + struct fixed31_32 value, + const struct custom_float_format *format, + uint32_t *result) +{ + uint32_t mantissa; + uint32_t exponenta; + bool negative; + + return build_custom_float( + value, format, &negative, &mantissa, &exponenta) && + setup_custom_float( + format, negative, mantissa, exponenta, result); +} + + diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c new file mode 100644 index 000000000000..ff5bb152ef49 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c @@ -0,0 +1,3625 @@ +/* + * Copyright 2015 Advanced Micro Devices, Inc. 
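build_custom_float() and setup_custom_float() above turn a fixed31_32 value into the reduced-precision float layout that some display hardware registers expect: the value is normalised until it lies in roughly [1, 2), the shift count becomes an exponent biased by 2^(exponent_bits - 1) - 1, and the remaining fraction is scaled into the mantissa field. The sketch below reproduces that packing with plain doubles; the 6-bit exponent / 12-bit mantissa format and the pack_custom_float() name are invented for the example, since the real widths arrive in the struct custom_float_format the caller supplies, and overflow/underflow clamping is omitted.

/*
 * Userspace sketch of the custom-float packing: normalise, bias the
 * exponent, scale the fraction into the mantissa, then OR the fields
 * together the way setup_custom_float() does bit by bit.
 */
#include <math.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_custom_float(double v, int exp_bits, int man_bits)
{
	uint32_t sign = v < 0.0;
	uint32_t bias = (1u << (exp_bits - 1)) - 1;
	double m = fabs(v);
	int e = 0;

	if (m == 0.0)
		return 0;

	while (m >= 2.0) {	/* normalise large values down */
		m /= 2.0;
		e++;
	}
	while (m < 1.0) {	/* normalise small values up */
		m *= 2.0;
		e--;
	}

	/* fraction of the normalised value becomes the mantissa field */
	return (uint32_t)((m - 1.0) * (double)(1u << man_bits)) |
	       ((uint32_t)(e + (int)bias) << man_bits) |
	       (sign << (man_bits + exp_bits));
}

int main(void)
{
	/* 1.5  -> mantissa 0x800, biased exponent 31 -> 0x1f800 */
	printf("1.5  -> 0x%x\n", (unsigned int)pack_custom_float(1.5, 6, 12));
	/* 0.25 -> mantissa 0x000, biased exponent 29 -> 0x1d000 */
	printf("0.25 -> 0x%x\n", (unsigned int)pack_custom_float(0.25, 6, 12));
	return 0;
}

The worked values match what the kernel routine produces for the same widths: 1.5 needs no shifting, so its exponent is just the bias, while 0.25 is shifted up twice and lands two steps below it.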
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include <linux/slab.h>
+
+#include "resource.h"
+#include "dm_services.h"
+#include "dce_calcs.h"
+#include "dc.h"
+#include "core_types.h"
+#include "dal_asic_id.h"
+#include "calcs_logger.h"
+
+/*
+ * NOTE:
+ * This file is gcc-parseable HW gospel, coming straight from HW engineers.
+ *
+ * It doesn't adhere to Linux kernel style and sometimes will do things in odd
+ * ways. Unless there is something clearly wrong with it the code should
+ * remain as-is as it provides us with a guarantee from HW that it is correct.
+ */
+
+/*******************************************************************************
+ * Private Functions
+ ******************************************************************************/
+
+static enum bw_calcs_version bw_calcs_version_from_asic_id(struct hw_asic_id asic_id)
+{
+ switch (asic_id.chip_family) {
+
+ case FAMILY_CZ:
+ if (ASIC_REV_IS_STONEY(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_STONEY;
+ return BW_CALCS_VERSION_CARRIZO;
+
+ case FAMILY_VI:
+ if (ASIC_REV_IS_POLARIS12_V(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS12;
+ if (ASIC_REV_IS_POLARIS10_P(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS10;
+ if (ASIC_REV_IS_POLARIS11_M(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_POLARIS11;
+ if (ASIC_REV_IS_VEGAM(asic_id.hw_internal_rev))
+ return BW_CALCS_VERSION_VEGAM;
+ return BW_CALCS_VERSION_INVALID;
+
+ case FAMILY_AI:
+ return BW_CALCS_VERSION_VEGA10;
+
+ default:
+ return BW_CALCS_VERSION_INVALID;
+ }
+}
+
+static void calculate_bandwidth(
+ const struct bw_calcs_dceip *dceip,
+ const struct bw_calcs_vbios *vbios,
+ struct bw_calcs_data *data)
+
+{
+ const int32_t pixels_per_chunk = 512;
+ const int32_t high = 2;
+ const int32_t mid = 1;
+ const int32_t low = 0;
+ const uint32_t s_low = 0;
+ const uint32_t s_mid1 = 1;
+ const uint32_t s_mid2 = 2;
+ const uint32_t s_mid3 = 3;
+ const uint32_t s_mid4 = 4;
+ const uint32_t s_mid5 = 5;
+ const uint32_t s_mid6 = 6;
+ const uint32_t s_high = 7;
+ const uint32_t dmif_chunk_buff_margin = 1;
+
+ uint32_t max_chunks_fbc_mode;
+ int32_t num_cursor_lines;
+
+ int32_t i, j, k;
+ struct bw_fixed *yclk;
+ struct bw_fixed *sclk;
+ bool d0_underlay_enable;
+ bool d1_underlay_enable;
+ bool fbc_enabled;
+ bool lpt_enabled;
+ enum bw_defines sclk_message;
+ enum bw_defines yclk_message;
+ enum bw_defines *tiling_mode;
+ enum bw_defines *surface_type;
+ enum bw_defines voltage;
+ enum
bw_defines pipe_check; + enum bw_defines hsr_check; + enum bw_defines vsr_check; + enum bw_defines lb_size_check; + enum bw_defines fbc_check; + enum bw_defines rotation_check; + enum bw_defines mode_check; + enum bw_defines nbp_state_change_enable_blank; + /*initialize variables*/ + int32_t number_of_displays_enabled = 0; + int32_t number_of_displays_enabled_with_margin = 0; + int32_t number_of_aligned_displays_with_no_margin = 0; + + yclk = kcalloc(3, sizeof(*yclk), GFP_KERNEL); + if (!yclk) + return; + + sclk = kcalloc(8, sizeof(*sclk), GFP_KERNEL); + if (!sclk) + goto free_yclk; + + tiling_mode = kcalloc(maximum_number_of_surfaces, sizeof(*tiling_mode), GFP_KERNEL); + if (!tiling_mode) + goto free_sclk; + + surface_type = kcalloc(maximum_number_of_surfaces, sizeof(*surface_type), GFP_KERNEL); + if (!surface_type) + goto free_tiling_mode; + + yclk[low] = vbios->low_yclk; + yclk[mid] = vbios->mid_yclk; + yclk[high] = vbios->high_yclk; + sclk[s_low] = vbios->low_sclk; + sclk[s_mid1] = vbios->mid1_sclk; + sclk[s_mid2] = vbios->mid2_sclk; + sclk[s_mid3] = vbios->mid3_sclk; + sclk[s_mid4] = vbios->mid4_sclk; + sclk[s_mid5] = vbios->mid5_sclk; + sclk[s_mid6] = vbios->mid6_sclk; + sclk[s_high] = vbios->high_sclk; + /*''''''''''''''''''*/ + /* surface assignment:*/ + /* 0: d0 underlay or underlay luma*/ + /* 1: d0 underlay chroma*/ + /* 2: d1 underlay or underlay luma*/ + /* 3: d1 underlay chroma*/ + /* 4: d0 graphics*/ + /* 5: d1 graphics*/ + /* 6: d2 graphics*/ + /* 7: d3 graphics, same mode as d2*/ + /* 8: d4 graphics, same mode as d2*/ + /* 9: d5 graphics, same mode as d2*/ + /* ...*/ + /* maximum_number_of_surfaces-2: d1 display_write_back420 luma*/ + /* maximum_number_of_surfaces-1: d1 display_write_back420 chroma*/ + /* underlay luma and chroma surface parameters from spreadsheet*/ + + + + + if (data->d0_underlay_mode == bw_def_none) + d0_underlay_enable = false; + else + d0_underlay_enable = true; + if (data->d1_underlay_mode == bw_def_none) + d1_underlay_enable = false; + else + d1_underlay_enable = true; + data->number_of_underlay_surfaces = d0_underlay_enable + d1_underlay_enable; + switch (data->underlay_surface_type) { + case bw_def_420: + surface_type[0] = bw_def_underlay420_luma; + surface_type[2] = bw_def_underlay420_luma; + data->bytes_per_pixel[0] = 1; + data->bytes_per_pixel[2] = 1; + surface_type[1] = bw_def_underlay420_chroma; + surface_type[3] = bw_def_underlay420_chroma; + data->bytes_per_pixel[1] = 2; + data->bytes_per_pixel[3] = 2; + data->lb_size_per_component[0] = dceip->underlay420_luma_lb_size_per_component; + data->lb_size_per_component[1] = dceip->underlay420_chroma_lb_size_per_component; + data->lb_size_per_component[2] = dceip->underlay420_luma_lb_size_per_component; + data->lb_size_per_component[3] = dceip->underlay420_chroma_lb_size_per_component; + break; + case bw_def_422: + surface_type[0] = bw_def_underlay422; + surface_type[2] = bw_def_underlay422; + data->bytes_per_pixel[0] = 2; + data->bytes_per_pixel[2] = 2; + data->lb_size_per_component[0] = dceip->underlay422_lb_size_per_component; + data->lb_size_per_component[2] = dceip->underlay422_lb_size_per_component; + break; + default: + surface_type[0] = bw_def_underlay444; + surface_type[2] = bw_def_underlay444; + data->bytes_per_pixel[0] = 4; + data->bytes_per_pixel[2] = 4; + data->lb_size_per_component[0] = dceip->lb_size_per_component444; + data->lb_size_per_component[2] = dceip->lb_size_per_component444; + break; + } + if (d0_underlay_enable) { + switch (data->underlay_surface_type) { + case bw_def_420: 
+ data->enable[0] = 1; + data->enable[1] = 1; + break; + default: + data->enable[0] = 1; + data->enable[1] = 0; + break; + } + } + else { + data->enable[0] = 0; + data->enable[1] = 0; + } + if (d1_underlay_enable) { + switch (data->underlay_surface_type) { + case bw_def_420: + data->enable[2] = 1; + data->enable[3] = 1; + break; + default: + data->enable[2] = 1; + data->enable[3] = 0; + break; + } + } + else { + data->enable[2] = 0; + data->enable[3] = 0; + } + data->use_alpha[0] = 0; + data->use_alpha[1] = 0; + data->use_alpha[2] = 0; + data->use_alpha[3] = 0; + data->scatter_gather_enable_for_pipe[0] = vbios->scatter_gather_enable; + data->scatter_gather_enable_for_pipe[1] = vbios->scatter_gather_enable; + data->scatter_gather_enable_for_pipe[2] = vbios->scatter_gather_enable; + data->scatter_gather_enable_for_pipe[3] = vbios->scatter_gather_enable; + /*underlay0 same and graphics display pipe0*/ + data->interlace_mode[0] = data->interlace_mode[4]; + data->interlace_mode[1] = data->interlace_mode[4]; + /*underlay1 same and graphics display pipe1*/ + data->interlace_mode[2] = data->interlace_mode[5]; + data->interlace_mode[3] = data->interlace_mode[5]; + /*underlay0 same and graphics display pipe0*/ + data->h_total[0] = data->h_total[4]; + data->v_total[0] = data->v_total[4]; + data->h_total[1] = data->h_total[4]; + data->v_total[1] = data->v_total[4]; + /*underlay1 same and graphics display pipe1*/ + data->h_total[2] = data->h_total[5]; + data->v_total[2] = data->v_total[5]; + data->h_total[3] = data->h_total[5]; + data->v_total[3] = data->v_total[5]; + /*underlay0 same and graphics display pipe0*/ + data->pixel_rate[0] = data->pixel_rate[4]; + data->pixel_rate[1] = data->pixel_rate[4]; + /*underlay1 same and graphics display pipe1*/ + data->pixel_rate[2] = data->pixel_rate[5]; + data->pixel_rate[3] = data->pixel_rate[5]; + if ((data->underlay_tiling_mode == bw_def_array_linear_general || data->underlay_tiling_mode == bw_def_array_linear_aligned)) { + tiling_mode[0] = bw_def_linear; + tiling_mode[1] = bw_def_linear; + tiling_mode[2] = bw_def_linear; + tiling_mode[3] = bw_def_linear; + } + else { + tiling_mode[0] = bw_def_landscape; + tiling_mode[1] = bw_def_landscape; + tiling_mode[2] = bw_def_landscape; + tiling_mode[3] = bw_def_landscape; + } + data->lb_bpc[0] = data->underlay_lb_bpc; + data->lb_bpc[1] = data->underlay_lb_bpc; + data->lb_bpc[2] = data->underlay_lb_bpc; + data->lb_bpc[3] = data->underlay_lb_bpc; + data->compression_rate[0] = bw_int_to_fixed(1); + data->compression_rate[1] = bw_int_to_fixed(1); + data->compression_rate[2] = bw_int_to_fixed(1); + data->compression_rate[3] = bw_int_to_fixed(1); + data->access_one_channel_only[0] = 0; + data->access_one_channel_only[1] = 0; + data->access_one_channel_only[2] = 0; + data->access_one_channel_only[3] = 0; + data->cursor_width_pixels[0] = bw_int_to_fixed(0); + data->cursor_width_pixels[1] = bw_int_to_fixed(0); + data->cursor_width_pixels[2] = bw_int_to_fixed(0); + data->cursor_width_pixels[3] = bw_int_to_fixed(0); + /* graphics surface parameters from spreadsheet*/ + fbc_enabled = false; + lpt_enabled = false; + for (i = 4; i <= maximum_number_of_surfaces - 3; i++) { + if (i < data->number_of_displays + 4) { + if (i == 4 && data->d0_underlay_mode == bw_def_underlay_only) { + data->enable[i] = 0; + data->use_alpha[i] = 0; + } + else if (i == 4 && data->d0_underlay_mode == bw_def_blend) { + data->enable[i] = 1; + data->use_alpha[i] = 1; + } + else if (i == 4) { + data->enable[i] = 1; + data->use_alpha[i] = 0; + } + else if (i == 
5 && data->d1_underlay_mode == bw_def_underlay_only) { + data->enable[i] = 0; + data->use_alpha[i] = 0; + } + else if (i == 5 && data->d1_underlay_mode == bw_def_blend) { + data->enable[i] = 1; + data->use_alpha[i] = 1; + } + else { + data->enable[i] = 1; + data->use_alpha[i] = 0; + } + } + else { + data->enable[i] = 0; + data->use_alpha[i] = 0; + } + data->scatter_gather_enable_for_pipe[i] = vbios->scatter_gather_enable; + surface_type[i] = bw_def_graphics; + data->lb_size_per_component[i] = dceip->lb_size_per_component444; + if (data->graphics_tiling_mode == bw_def_array_linear_general || data->graphics_tiling_mode == bw_def_array_linear_aligned) { + tiling_mode[i] = bw_def_linear; + } + else { + tiling_mode[i] = bw_def_tiled; + } + data->lb_bpc[i] = data->graphics_lb_bpc; + if ((data->fbc_en[i] == 1 && (dceip->argb_compression_support || data->d0_underlay_mode != bw_def_blended))) { + data->compression_rate[i] = bw_int_to_fixed(vbios->average_compression_rate); + data->access_one_channel_only[i] = data->lpt_en[i]; + } + else { + data->compression_rate[i] = bw_int_to_fixed(1); + data->access_one_channel_only[i] = 0; + } + if (data->fbc_en[i] == 1) { + fbc_enabled = true; + if (data->lpt_en[i] == 1) { + lpt_enabled = true; + } + } + data->cursor_width_pixels[i] = bw_int_to_fixed(vbios->cursor_width); + } + /* display_write_back420*/ + data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 2] = 0; + data->scatter_gather_enable_for_pipe[maximum_number_of_surfaces - 1] = 0; + if (data->d1_display_write_back_dwb_enable == 1) { + data->enable[maximum_number_of_surfaces - 2] = 1; + data->enable[maximum_number_of_surfaces - 1] = 1; + } + else { + data->enable[maximum_number_of_surfaces - 2] = 0; + data->enable[maximum_number_of_surfaces - 1] = 0; + } + surface_type[maximum_number_of_surfaces - 2] = bw_def_display_write_back420_luma; + surface_type[maximum_number_of_surfaces - 1] = bw_def_display_write_back420_chroma; + data->lb_size_per_component[maximum_number_of_surfaces - 2] = dceip->underlay420_luma_lb_size_per_component; + data->lb_size_per_component[maximum_number_of_surfaces - 1] = dceip->underlay420_chroma_lb_size_per_component; + data->bytes_per_pixel[maximum_number_of_surfaces - 2] = 1; + data->bytes_per_pixel[maximum_number_of_surfaces - 1] = 2; + data->interlace_mode[maximum_number_of_surfaces - 2] = data->interlace_mode[5]; + data->interlace_mode[maximum_number_of_surfaces - 1] = data->interlace_mode[5]; + data->h_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->h_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->v_taps[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->v_taps[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->rotation_angle[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0); + data->rotation_angle[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0); + tiling_mode[maximum_number_of_surfaces - 2] = bw_def_linear; + tiling_mode[maximum_number_of_surfaces - 1] = bw_def_linear; + data->lb_bpc[maximum_number_of_surfaces - 2] = 8; + data->lb_bpc[maximum_number_of_surfaces - 1] = 8; + data->compression_rate[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->compression_rate[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->access_one_channel_only[maximum_number_of_surfaces - 2] = 0; + data->access_one_channel_only[maximum_number_of_surfaces - 1] = 0; + /*assume display pipe1 has dwb enabled*/ + data->h_total[maximum_number_of_surfaces - 2] = data->h_total[5]; + 
data->h_total[maximum_number_of_surfaces - 1] = data->h_total[5]; + data->v_total[maximum_number_of_surfaces - 2] = data->v_total[5]; + data->v_total[maximum_number_of_surfaces - 1] = data->v_total[5]; + data->pixel_rate[maximum_number_of_surfaces - 2] = data->pixel_rate[5]; + data->pixel_rate[maximum_number_of_surfaces - 1] = data->pixel_rate[5]; + data->src_width[maximum_number_of_surfaces - 2] = data->src_width[5]; + data->src_width[maximum_number_of_surfaces - 1] = data->src_width[5]; + data->src_height[maximum_number_of_surfaces - 2] = data->src_height[5]; + data->src_height[maximum_number_of_surfaces - 1] = data->src_height[5]; + data->pitch_in_pixels[maximum_number_of_surfaces - 2] = data->src_width[5]; + data->pitch_in_pixels[maximum_number_of_surfaces - 1] = data->src_width[5]; + data->h_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->h_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->v_scale_ratio[maximum_number_of_surfaces - 2] = bw_int_to_fixed(1); + data->v_scale_ratio[maximum_number_of_surfaces - 1] = bw_int_to_fixed(1); + data->stereo_mode[maximum_number_of_surfaces - 2] = bw_def_mono; + data->stereo_mode[maximum_number_of_surfaces - 1] = bw_def_mono; + data->cursor_width_pixels[maximum_number_of_surfaces - 2] = bw_int_to_fixed(0); + data->cursor_width_pixels[maximum_number_of_surfaces - 1] = bw_int_to_fixed(0); + data->use_alpha[maximum_number_of_surfaces - 2] = 0; + data->use_alpha[maximum_number_of_surfaces - 1] = 0; + /*mode check calculations:*/ + /* mode within dce ip capabilities*/ + /* fbc*/ + /* hsr*/ + /* vsr*/ + /* lb size*/ + /*effective scaling source and ratios:*/ + /*for graphics, non-stereo, non-interlace surfaces when the size of the source and destination are the same, only one tap is used*/ + /*420 chroma has half the width, height, horizontal and vertical scaling ratios than luma*/ + /*rotating a graphic or underlay surface swaps the width, height, horizontal and vertical scaling ratios*/ + /*in top-bottom stereo mode there is 2:1 vertical downscaling for each eye*/ + /*in side-by-side stereo mode there is 2:1 horizontal downscaling for each eye*/ + /*in interlace mode there is 2:1 vertical downscaling for each field*/ + /*in panning or bezel adjustment mode the source width has an extra 128 pixels*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_equ(data->h_scale_ratio[i], bw_int_to_fixed(1)) && bw_equ(data->v_scale_ratio[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics && data->stereo_mode[i] == bw_def_mono && data->interlace_mode[i] == 0) { + data->h_taps[i] = bw_int_to_fixed(1); + data->v_taps[i] = bw_int_to_fixed(1); + } + if (surface_type[i] == bw_def_display_write_back420_chroma || surface_type[i] == bw_def_underlay420_chroma) { + data->pitch_in_pixels_after_surface_type[i] = bw_div(data->pitch_in_pixels[i], bw_int_to_fixed(2)); + data->src_width_after_surface_type = bw_div(data->src_width[i], bw_int_to_fixed(2)); + data->src_height_after_surface_type = bw_div(data->src_height[i], bw_int_to_fixed(2)); + data->hsr_after_surface_type = bw_div(data->h_scale_ratio[i], bw_int_to_fixed(2)); + data->vsr_after_surface_type = bw_div(data->v_scale_ratio[i], bw_int_to_fixed(2)); + } + else { + data->pitch_in_pixels_after_surface_type[i] = data->pitch_in_pixels[i]; + data->src_width_after_surface_type = data->src_width[i]; + data->src_height_after_surface_type = data->src_height[i]; + data->hsr_after_surface_type = data->h_scale_ratio[i]; + 
data->vsr_after_surface_type = data->v_scale_ratio[i]; + } + if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->src_width_after_rotation = data->src_height_after_surface_type; + data->src_height_after_rotation = data->src_width_after_surface_type; + data->hsr_after_rotation = data->vsr_after_surface_type; + data->vsr_after_rotation = data->hsr_after_surface_type; + } + else { + data->src_width_after_rotation = data->src_width_after_surface_type; + data->src_height_after_rotation = data->src_height_after_surface_type; + data->hsr_after_rotation = data->hsr_after_surface_type; + data->vsr_after_rotation = data->vsr_after_surface_type; + } + switch (data->stereo_mode[i]) { + case bw_def_top_bottom: + data->source_width_pixels[i] = data->src_width_after_rotation; + data->source_height_pixels = bw_mul(bw_int_to_fixed(2), data->src_height_after_rotation); + data->hsr_after_stereo = data->hsr_after_rotation; + data->vsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->vsr_after_rotation); + break; + case bw_def_side_by_side: + data->source_width_pixels[i] = bw_mul(bw_int_to_fixed(2), data->src_width_after_rotation); + data->source_height_pixels = data->src_height_after_rotation; + data->hsr_after_stereo = bw_mul(bw_int_to_fixed(1), data->hsr_after_rotation); + data->vsr_after_stereo = data->vsr_after_rotation; + break; + default: + data->source_width_pixels[i] = data->src_width_after_rotation; + data->source_height_pixels = data->src_height_after_rotation; + data->hsr_after_stereo = data->hsr_after_rotation; + data->vsr_after_stereo = data->vsr_after_rotation; + break; + } + data->hsr[i] = data->hsr_after_stereo; + if (data->interlace_mode[i]) { + data->vsr[i] = bw_mul(data->vsr_after_stereo, bw_int_to_fixed(2)); + } + else { + data->vsr[i] = data->vsr_after_stereo; + } + if (data->panning_and_bezel_adjustment != bw_def_none) { + data->source_width_rounded_up_to_chunks[i] = bw_add(bw_floor2(bw_sub(data->source_width_pixels[i], bw_int_to_fixed(1)), bw_int_to_fixed(128)), bw_int_to_fixed(256)); + } + else { + data->source_width_rounded_up_to_chunks[i] = bw_ceil2(data->source_width_pixels[i], bw_int_to_fixed(128)); + } + data->source_height_rounded_up_to_chunks[i] = data->source_height_pixels; + } + } + /*mode support checks:*/ + /*the number of graphics and underlay pipes is limited by the ip support*/ + /*maximum horizontal and vertical scale ratio is 4, and should not exceed the number of taps*/ + /*for downscaling with the pre-downscaler, the horizontal scale ratio must be more than the ceiling of one quarter of the number of taps*/ + /*the pre-downscaler reduces the line buffer source by the horizontal scale ratio*/ + /*the number of lines in the line buffer has to exceed the number of vertical taps*/ + /*the size of the line in the line buffer is the product of the source width and the bits per component, rounded up to a multiple of 48*/ + /*the size of the line in the line buffer in the case of 10 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/ + /*the size of the line in the line buffer in the case of 8 bit per component is the product of the source width rounded up to multiple of 8 and 30.023438 / 3, rounded up to a multiple of 48*/ + /*frame buffer compression is not supported with stereo mode, rotation, or non- 888 
formats*/ + /*rotation is not supported with linear of stereo modes*/ + if (dceip->number_of_graphics_pipes >= data->number_of_displays && dceip->number_of_underlay_pipes >= data->number_of_underlay_surfaces && !(dceip->display_write_back_supported == 0 && data->d1_display_write_back_dwb_enable == 1)) { + pipe_check = bw_def_ok; + } + else { + pipe_check = bw_def_notok; + } + hsr_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_neq(data->hsr[i], bw_int_to_fixed(1))) { + if (bw_mtn(data->hsr[i], bw_int_to_fixed(4))) { + hsr_check = bw_def_hsr_mtn_4; + } + else { + if (bw_mtn(data->hsr[i], data->h_taps[i])) { + hsr_check = bw_def_hsr_mtn_h_taps; + } + else { + if (dceip->pre_downscaler_enabled == 1 && bw_mtn(data->hsr[i], bw_int_to_fixed(1)) && bw_leq(data->hsr[i], bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)))) { + hsr_check = bw_def_ceiling__h_taps_div_4___meq_hsr; + } + } + } + } + } + } + vsr_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_neq(data->vsr[i], bw_int_to_fixed(1))) { + if (bw_mtn(data->vsr[i], bw_int_to_fixed(4))) { + vsr_check = bw_def_vsr_mtn_4; + } + else { + if (bw_mtn(data->vsr[i], data->v_taps[i])) { + vsr_check = bw_def_vsr_mtn_v_taps; + } + } + } + } + } + lb_size_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1)))) { + data->source_width_in_lb = bw_div(data->source_width_pixels[i], data->hsr[i]); + } + else { + data->source_width_in_lb = data->source_width_pixels[i]; + } + switch (data->lb_bpc[i]) { + case 8: + data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(2401171875ul, 100000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48)); + break; + case 10: + data->lb_line_pitch = bw_ceil2(bw_mul(bw_div(bw_frc_to_fixed(300234375, 10000000), bw_int_to_fixed(3)), bw_ceil2(data->source_width_in_lb, bw_int_to_fixed(8))), bw_int_to_fixed(48)); + break; + default: + data->lb_line_pitch = bw_ceil2(bw_mul(bw_int_to_fixed(data->lb_bpc[i]), data->source_width_in_lb), bw_int_to_fixed(48)); + break; + } + data->lb_partitions[i] = bw_floor2(bw_div(data->lb_size_per_component[i], data->lb_line_pitch), bw_int_to_fixed(1)); + /*clamp the partitions to the maxium number supported by the lb*/ + if ((surface_type[i] != bw_def_graphics || dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1)) { + data->lb_partitions_max[i] = bw_int_to_fixed(10); + } + else { + data->lb_partitions_max[i] = bw_int_to_fixed(7); + } + data->lb_partitions[i] = bw_min2(data->lb_partitions_max[i], data->lb_partitions[i]); + if (bw_mtn(bw_add(data->v_taps[i], bw_int_to_fixed(1)), data->lb_partitions[i])) { + lb_size_check = bw_def_notok; + } + } + } + fbc_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i] && data->fbc_en[i] == 1 && (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)) || data->stereo_mode[i] != bw_def_mono || data->bytes_per_pixel[i] != 4)) { + fbc_check = bw_def_invalid_rotation_or_bpp_or_stereo; + } + } + rotation_check = bw_def_ok; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) && 
(tiling_mode[i] == bw_def_linear || data->stereo_mode[i] != bw_def_mono)) { + rotation_check = bw_def_invalid_linear_or_stereo_mode; + } + } + } + if (pipe_check == bw_def_ok && hsr_check == bw_def_ok && vsr_check == bw_def_ok && lb_size_check == bw_def_ok && fbc_check == bw_def_ok && rotation_check == bw_def_ok) { + mode_check = bw_def_ok; + } + else { + mode_check = bw_def_notok; + } + /*number of memory channels for write-back client*/ + data->number_of_dram_wrchannels = vbios->number_of_dram_channels; + data->number_of_dram_channels = vbios->number_of_dram_channels; + /*modify number of memory channels if lpt mode is enabled*/ + /* low power tiling mode register*/ + /* 0 = use channel 0*/ + /* 1 = use channel 0 and 1*/ + /* 2 = use channel 0,1,2,3*/ + if ((fbc_enabled == 1 && lpt_enabled == 1)) { + if (vbios->memory_type == bw_def_hbm) + data->dram_efficiency = bw_frc_to_fixed(5, 10); + else + data->dram_efficiency = bw_int_to_fixed(1); + + + if (dceip->low_power_tiling_mode == 0) { + data->number_of_dram_channels = 1; + } + else if (dceip->low_power_tiling_mode == 1) { + data->number_of_dram_channels = 2; + } + else if (dceip->low_power_tiling_mode == 2) { + data->number_of_dram_channels = 4; + } + else { + data->number_of_dram_channels = 1; + } + } + else { + if (vbios->memory_type == bw_def_hbm) + data->dram_efficiency = bw_frc_to_fixed(5, 10); + else + data->dram_efficiency = bw_frc_to_fixed(8, 10); + } + /*memory request size and latency hiding:*/ + /*request size is normally 64 byte, 2-line interleaved, with full latency hiding*/ + /*the display write-back requests are single line*/ + /*for tiled graphics surfaces, or undelay surfaces with width higher than the maximum size for full efficiency, request size is 32 byte in 8 and 16 bpp or if the rotation is orthogonal to the tiling grain. 
only half is useful of the bytes in the request size in 8 bpp or in 32 bpp if the rotation is orthogonal to the tiling grain.*/ + /*for undelay surfaces with width lower than the maximum size for full efficiency, requests are 4-line interleaved in 16bpp if the rotation is parallel to the tiling grain, and 8-line interleaved with 4-line latency hiding in 8bpp or if the rotation is orthogonal to the tiling grain.*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270)))) { + if ((i < 4)) { + /*underlay portrait tiling mode is not supported*/ + data->orthogonal_rotation[i] = 1; + } + else { + /*graphics portrait tiling mode*/ + if (data->graphics_micro_tile_mode == bw_def_rotated_micro_tiling) { + data->orthogonal_rotation[i] = 0; + } + else { + data->orthogonal_rotation[i] = 1; + } + } + } + else { + if ((i < 4)) { + /*underlay landscape tiling mode is only supported*/ + if (data->underlay_micro_tile_mode == bw_def_display_micro_tiling) { + data->orthogonal_rotation[i] = 0; + } + else { + data->orthogonal_rotation[i] = 1; + } + } + else { + /*graphics landscape tiling mode*/ + if (data->graphics_micro_tile_mode == bw_def_display_micro_tiling) { + data->orthogonal_rotation[i] = 0; + } + else { + data->orthogonal_rotation[i] = 1; + } + } + } + if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(90)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(270))) { + data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_height_efficient_for_tiling; + } + else { + data->underlay_maximum_source_efficient_for_tiling = dceip->underlay_maximum_width_efficient_for_tiling; + } + if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) { + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(1); + data->latency_hiding_lines[i] = bw_int_to_fixed(1); + } + else if (tiling_mode[i] == bw_def_linear) { + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + } + else { + if (surface_type[i] == bw_def_graphics || (bw_mtn(data->source_width_rounded_up_to_chunks[i], bw_ceil2(data->underlay_maximum_source_efficient_for_tiling, bw_int_to_fixed(256))))) { + switch (data->bytes_per_pixel[i]) { + case 8: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + if (data->orthogonal_rotation[i]) { + data->bytes_per_request[i] = bw_int_to_fixed(32); + data->useful_bytes_per_request[i] = bw_int_to_fixed(32); + } + else { + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); + } + break; + case 4: + if (data->orthogonal_rotation[i]) { + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + data->bytes_per_request[i] = bw_int_to_fixed(32); + data->useful_bytes_per_request[i] = bw_int_to_fixed(16); + } + else { + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); 
+ } + break; + case 2: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + data->bytes_per_request[i] = bw_int_to_fixed(32); + data->useful_bytes_per_request[i] = bw_int_to_fixed(32); + break; + default: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + data->bytes_per_request[i] = bw_int_to_fixed(32); + data->useful_bytes_per_request[i] = bw_int_to_fixed(16); + break; + } + } + else { + data->bytes_per_request[i] = bw_int_to_fixed(64); + data->useful_bytes_per_request[i] = bw_int_to_fixed(64); + if (data->orthogonal_rotation[i]) { + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8); + data->latency_hiding_lines[i] = bw_int_to_fixed(4); + } + else { + switch (data->bytes_per_pixel[i]) { + case 4: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(2); + data->latency_hiding_lines[i] = bw_int_to_fixed(2); + break; + case 2: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(4); + data->latency_hiding_lines[i] = bw_int_to_fixed(4); + break; + default: + data->lines_interleaved_in_mem_access[i] = bw_int_to_fixed(8); + data->latency_hiding_lines[i] = bw_int_to_fixed(4); + break; + } + } + } + } + } + } + /*requested peak bandwidth:*/ + /*the peak request-per-second bandwidth is the product of the maximum source lines in per line out in the beginning*/ + /*and in the middle of the frame, the ratio of the source width to the line time, the ratio of line interleaving*/ + /*in memory to lines of latency hiding, and the ratio of bytes per pixel to useful bytes per request.*/ + /**/ + /*if the dmif data buffer size holds more than vta_ps worth of source lines, then only vsr is used.*/ + /*the peak bandwidth is the peak request-per-second bandwidth times the request size.*/ + /**/ + /*the line buffer lines in per line out in the beginning of the frame is the vertical filter initialization value*/ + /*rounded up to even and divided by the line times for initialization, which is normally three.*/ + /*the line buffer lines in per line out in the middle of the frame is at least one, or the vertical scale ratio,*/ + /*rounded up to line pairs if not doing line buffer prefetching.*/ + /**/ + /*the non-prefetching rounding up of the vertical scale ratio can also be done up to 1 (for a 0,2 pattern), 4/3 (for a 0,2,2 pattern),*/ + /*6/4 (for a 0,2,2,2 pattern), or 3 (for a 2,4 pattern).*/ + /**/ + /*the scaler vertical filter initialization value is calculated by the hardware as the floor of the average of the*/ + /*vertical scale ratio and the number of vertical taps increased by one. 
add one more for possible odd line*/ + /*panning/bezel adjustment mode.*/ + /**/ + /*for the bottom interlace field an extra 50% of the vertical scale ratio is considered for this calculation.*/ + /*in top-bottom stereo mode software has to set the filter initialization value manually and explicitly limit it to 4.*/ + /*furthermore, there is only one line time for initialization.*/ + /**/ + /*line buffer prefetching is done when the number of lines in the line buffer exceeds the number of taps plus*/ + /*the ceiling of the vertical scale ratio.*/ + /**/ + /*multi-line buffer prefetching is only done in the graphics pipe when the scaler is disabled or when upscaling and the vsr <= 0.8.'*/ + /**/ + /*the horizontal blank and chunk granularity factor is indirectly used indicate the interval of time required to transfer the source pixels.*/ + /*the denominator of this term represents the total number of destination output pixels required for the input source pixels.*/ + /*it applies when the lines in per line out is not 2 or 4. it does not apply when there is a line buffer between the scl and blnd.*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->v_filter_init[i] = bw_floor2(bw_div((bw_add(bw_add(bw_add(bw_int_to_fixed(1), data->v_taps[i]), data->vsr[i]), bw_mul(bw_mul(bw_int_to_fixed(data->interlace_mode[i]), bw_frc_to_fixed(5, 10)), data->vsr[i]))), bw_int_to_fixed(2)), bw_int_to_fixed(1)); + if (data->panning_and_bezel_adjustment == bw_def_any_lines) { + data->v_filter_init[i] = bw_add(data->v_filter_init[i], bw_int_to_fixed(1)); + } + if (data->stereo_mode[i] == bw_def_top_bottom) { + data->v_filter_init[i] = bw_min2(data->v_filter_init[i], bw_int_to_fixed(4)); + } + if (data->stereo_mode[i] == bw_def_top_bottom) { + data->num_lines_at_frame_start = bw_int_to_fixed(1); + } + else { + data->num_lines_at_frame_start = bw_int_to_fixed(3); + } + if ((bw_mtn(data->vsr[i], bw_int_to_fixed(1)) && surface_type[i] == bw_def_graphics) || data->panning_and_bezel_adjustment == bw_def_any_lines) { + data->line_buffer_prefetch[i] = 0; + } + else if ((((dceip->underlay_downscale_prefetch_enabled == 1 && surface_type[i] != bw_def_graphics) || surface_type[i] == bw_def_graphics) && (bw_mtn(data->lb_partitions[i], bw_add(data->v_taps[i], bw_ceil2(data->vsr[i], bw_int_to_fixed(1))))))) { + data->line_buffer_prefetch[i] = 1; + } + else { + data->line_buffer_prefetch[i] = 0; + } + data->lb_lines_in_per_line_out_in_beginning_of_frame[i] = bw_div(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->num_lines_at_frame_start); + if (data->line_buffer_prefetch[i] == 1) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_max2(bw_int_to_fixed(1), data->vsr[i]); + } + else if (bw_leq(data->vsr[i], bw_int_to_fixed(1))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(1); + } else if (bw_leq(data->vsr[i], + bw_frc_to_fixed(4, 3))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(4), bw_int_to_fixed(3)); + } else if (bw_leq(data->vsr[i], + bw_frc_to_fixed(6, 4))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_div(bw_int_to_fixed(6), bw_int_to_fixed(4)); + } + else if (bw_leq(data->vsr[i], bw_int_to_fixed(2))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(2); + } + else if (bw_leq(data->vsr[i], bw_int_to_fixed(3))) { + data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(3); + } + else { + 
data->lb_lines_in_per_line_out_in_middle_of_frame[i] = bw_int_to_fixed(4); + } + if (data->line_buffer_prefetch[i] == 1 || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(2)) || bw_equ(data->lb_lines_in_per_line_out_in_middle_of_frame[i], bw_int_to_fixed(4))) { + data->horizontal_blank_and_chunk_granularity_factor[i] = bw_int_to_fixed(1); + } + else { + data->horizontal_blank_and_chunk_granularity_factor[i] = bw_div(data->h_total[i], (bw_div((bw_add(data->h_total[i], bw_div((bw_sub(data->source_width_pixels[i], bw_int_to_fixed(dceip->chunk_width))), data->hsr[i]))), bw_int_to_fixed(2)))); + } + data->request_bandwidth[i] = bw_div(bw_mul(bw_div(bw_mul(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], data->lb_lines_in_per_line_out_in_middle_of_frame[i]), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), bw_int_to_fixed(data->bytes_per_pixel[i])), data->useful_bytes_per_request[i]), data->lines_interleaved_in_mem_access[i]), data->latency_hiding_lines[i]); + data->display_bandwidth[i] = bw_mul(data->request_bandwidth[i], data->bytes_per_request[i]); + } + } + /*outstanding chunk request limit*/ + /*if underlay buffer sharing is enabled, the data buffer size for underlay in 422 or 444 is the sum of the luma and chroma data buffer sizes.*/ + /*underlay buffer sharing mode is only permitted in orthogonal rotation modes.*/ + /**/ + /*if there is only one display enabled, the dmif data buffer size for the graphics surface is increased by concatenating the adjacent buffers.*/ + /**/ + /*the memory chunk size in bytes is 1024 for the writeback, and 256 times the memory line interleaving and the bytes per pixel for graphics*/ + /*and underlay.*/ + /**/ + /*the pipe chunk size uses 2 for line interleaving, except for the write back, in which case it is 1.*/ + /*graphics and underlay data buffer size is adjusted (limited) using the outstanding chunk request limit if there is more than one*/ + /*display enabled or if the dmif request buffer is not large enough for the total data buffer size.*/ + /*the outstanding chunk request limit is the ceiling of the adjusted data buffer size divided by the chunk size in bytes*/ + /*the adjusted data buffer size is the product of the display bandwidth and the minimum effective data buffer size in terms of time,*/ + /*rounded up to the chunk size in bytes, but should not exceed the original data buffer size*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((dceip->dmif_pipe_en_fbc_chunk_tracker + 3 == i && fbc_enabled == 0 && tiling_mode[i] != bw_def_linear)) { + data->max_chunks_non_fbc_mode[i] = 128 - dmif_chunk_buff_margin; + } + else { + data->max_chunks_non_fbc_mode[i] = 16 - dmif_chunk_buff_margin; + } + } + if (data->fbc_en[i] == 1) { + max_chunks_fbc_mode = 128 - dmif_chunk_buff_margin; + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + switch (surface_type[i]) { + case bw_def_display_write_back420_luma: + data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_luma_mcifwr_buffer_size); + break; + case bw_def_display_write_back420_chroma: + data->data_buffer_size[i] = bw_int_to_fixed(dceip->display_write_back420_chroma_mcifwr_buffer_size); + break; + case bw_def_underlay420_luma: + data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size); + break; + case bw_def_underlay420_chroma: + data->data_buffer_size[i] = 
bw_div(bw_int_to_fixed(dceip->underlay_chroma_dmif_size), bw_int_to_fixed(2)); + break; + case bw_def_underlay422:case bw_def_underlay444: + if (data->orthogonal_rotation[i] == 0) { + data->data_buffer_size[i] = bw_int_to_fixed(dceip->underlay_luma_dmif_size); + } + else { + data->data_buffer_size[i] = bw_add(bw_int_to_fixed(dceip->underlay_luma_dmif_size), bw_int_to_fixed(dceip->underlay_chroma_dmif_size)); + } + break; + default: + if (data->fbc_en[i] == 1) { + /*data_buffer_size(i) = max_dmif_buffer_allocated * graphics_dmif_size*/ + if (data->number_of_displays == 1) { + data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size))); + } + else { + data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(max_chunks_fbc_mode), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size)); + } + } + else { + /*the effective dmif buffer size in non-fbc mode is limited by the 16 entry chunk tracker*/ + if (data->number_of_displays == 1) { + data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_mul(bw_int_to_fixed(dceip->max_dmif_buffer_allocated), bw_int_to_fixed(dceip->graphics_dmif_size))); + } + else { + data->data_buffer_size[i] = bw_min2(bw_mul(bw_mul(bw_int_to_fixed(data->max_chunks_non_fbc_mode[i]), bw_int_to_fixed(pixels_per_chunk)), bw_int_to_fixed(data->bytes_per_pixel[i])), bw_int_to_fixed(dceip->graphics_dmif_size)); + } + } + break; + } + if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) { + data->memory_chunk_size_in_bytes[i] = bw_int_to_fixed(1024); + data->pipe_chunk_size_in_bytes[i] = bw_int_to_fixed(1024); + } + else { + data->memory_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), data->lines_interleaved_in_mem_access[i]), bw_int_to_fixed(data->bytes_per_pixel[i])); + data->pipe_chunk_size_in_bytes[i] = bw_mul(bw_mul(bw_int_to_fixed(dceip->chunk_width), bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_int_to_fixed(data->bytes_per_pixel[i])); + } + } + } + data->min_dmif_size_in_time = bw_int_to_fixed(9999); + data->min_mcifwr_size_in_time = bw_int_to_fixed(9999); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_dmif_size_in_time)) { + data->min_dmif_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]); + } + } + else { + if (bw_ltn(bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]), data->min_mcifwr_size_in_time)) { + data->min_mcifwr_size_in_time = bw_div(bw_div(bw_mul(data->data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]), data->display_bandwidth[i]); + } + } + } + } + data->total_requests_for_dmif_size = bw_int_to_fixed(0); + for (i 
= 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i] && surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->total_requests_for_dmif_size = bw_add(data->total_requests_for_dmif_size, bw_div(data->data_buffer_size[i], data->useful_bytes_per_request[i])); + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma && dceip->limit_excessive_outstanding_dmif_requests && (data->number_of_displays > 1 || bw_mtn(data->total_requests_for_dmif_size, dceip->dmif_request_buffer_size))) { + data->adjusted_data_buffer_size[i] = bw_min2(data->data_buffer_size[i], bw_ceil2(bw_mul(data->min_dmif_size_in_time, data->display_bandwidth[i]), data->memory_chunk_size_in_bytes[i])); + } + else { + data->adjusted_data_buffer_size[i] = data->data_buffer_size[i]; + } + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (data->number_of_displays == 1 && data->number_of_underlay_surfaces == 0) { + /*set maximum chunk limit if only one graphic pipe is enabled*/ + data->outstanding_chunk_request_limit[i] = bw_int_to_fixed(127); + } + else { + data->outstanding_chunk_request_limit[i] = bw_ceil2(bw_div(data->adjusted_data_buffer_size[i], data->pipe_chunk_size_in_bytes[i]), bw_int_to_fixed(1)); + /*clamp maximum chunk limit in the graphic display pipe*/ + if (i >= 4) { + data->outstanding_chunk_request_limit[i] = bw_max2(bw_int_to_fixed(127), data->outstanding_chunk_request_limit[i]); + } + } + } + } + /*outstanding pte request limit*/ + /*in tiling mode with no rotation the sg pte requests are 8 useful pt_es, the sg row height is the page height and the sg page width x height is 64x64 for 8bpp, 64x32 for 16 bpp, 32x32 for 32 bpp*/ + /*in tiling mode with rotation the sg pte requests are only one useful pte, and the sg row height is also the page height, but the sg page width and height are swapped*/ + /*in linear mode the pte requests are 8 useful pt_es, the sg page width is 4096 divided by the bytes per pixel, the sg page height is 1, but there is just one row whose height is the lines of pte prefetching*/ + /*the outstanding pte request limit is obtained by multiplying the outstanding chunk request limit by the peak pte request to eviction limiting ratio, rounding up to integer, multiplying by the pte requests per chunk, and rounding up to integer again*/ + /*if not using peak pte request to eviction limiting, the outstanding pte request limit is the pte requests in the vblank*/ + /*the pte requests in the vblank is the product of the number of pte request rows times the number of pte requests in a row*/ + /*the number of pte requests in a row is the quotient of the source width divided by 256, multiplied by the pte requests per chunk, rounded up to even, multiplied by the scatter-gather row height and divided by the scatter-gather page height*/ + /*the pte requests per chunk is 256 divided by the scatter-gather page width and the useful pt_es per pte request*/ + if (data->number_of_displays > 1 || (bw_neq(data->rotation_angle[4], bw_int_to_fixed(0)) && bw_neq(data->rotation_angle[4], bw_int_to_fixed(180)))) { + data->peak_pte_request_to_eviction_ratio_limiting = dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display; + } + else { + data->peak_pte_request_to_eviction_ratio_limiting = 
dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation; + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) { + if (tiling_mode[i] == bw_def_linear) { + data->useful_pte_per_pte_request = bw_int_to_fixed(8); + data->scatter_gather_page_width[i] = bw_div(bw_int_to_fixed(4096), bw_int_to_fixed(data->bytes_per_pixel[i])); + data->scatter_gather_page_height[i] = bw_int_to_fixed(1); + data->scatter_gather_pte_request_rows = bw_int_to_fixed(1); + data->scatter_gather_row_height = bw_int_to_fixed(dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode); + } + else if (bw_equ(data->rotation_angle[i], bw_int_to_fixed(0)) || bw_equ(data->rotation_angle[i], bw_int_to_fixed(180))) { + data->useful_pte_per_pte_request = bw_int_to_fixed(8); + switch (data->bytes_per_pixel[i]) { + case 4: + data->scatter_gather_page_width[i] = bw_int_to_fixed(32); + data->scatter_gather_page_height[i] = bw_int_to_fixed(32); + break; + case 2: + data->scatter_gather_page_width[i] = bw_int_to_fixed(64); + data->scatter_gather_page_height[i] = bw_int_to_fixed(32); + break; + default: + data->scatter_gather_page_width[i] = bw_int_to_fixed(64); + data->scatter_gather_page_height[i] = bw_int_to_fixed(64); + break; + } + data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode); + data->scatter_gather_row_height = data->scatter_gather_page_height[i]; + } + else { + data->useful_pte_per_pte_request = bw_int_to_fixed(1); + switch (data->bytes_per_pixel[i]) { + case 4: + data->scatter_gather_page_width[i] = bw_int_to_fixed(32); + data->scatter_gather_page_height[i] = bw_int_to_fixed(32); + break; + case 2: + data->scatter_gather_page_width[i] = bw_int_to_fixed(32); + data->scatter_gather_page_height[i] = bw_int_to_fixed(64); + break; + default: + data->scatter_gather_page_width[i] = bw_int_to_fixed(64); + data->scatter_gather_page_height[i] = bw_int_to_fixed(64); + break; + } + data->scatter_gather_pte_request_rows = bw_int_to_fixed(dceip->scatter_gather_pte_request_rows_in_tiling_mode); + data->scatter_gather_row_height = data->scatter_gather_page_height[i]; + } + data->pte_request_per_chunk[i] = bw_div(bw_div(bw_int_to_fixed(dceip->chunk_width), data->scatter_gather_page_width[i]), data->useful_pte_per_pte_request); + data->scatter_gather_pte_requests_in_row[i] = bw_div(bw_mul(bw_ceil2(bw_mul(bw_div(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(dceip->chunk_width)), data->pte_request_per_chunk[i]), bw_int_to_fixed(1)), data->scatter_gather_row_height), data->scatter_gather_page_height[i]); + data->scatter_gather_pte_requests_in_vblank = bw_mul(data->scatter_gather_pte_request_rows, data->scatter_gather_pte_requests_in_row[i]); + if (bw_equ(data->peak_pte_request_to_eviction_ratio_limiting, bw_int_to_fixed(0))) { + data->scatter_gather_pte_request_limit[i] = data->scatter_gather_pte_requests_in_vblank; + } + else { + data->scatter_gather_pte_request_limit[i] = bw_max2(dceip->minimum_outstanding_pte_request_limit, bw_min2(data->scatter_gather_pte_requests_in_vblank, bw_ceil2(bw_mul(bw_mul(bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->memory_chunk_size_in_bytes[i]), data->pte_request_per_chunk[i]), data->peak_pte_request_to_eviction_ratio_limiting), bw_int_to_fixed(1)))); + } + } + } + /*pitch padding recommended for efficiency in linear mode*/ + /*in linear mode graphics or underlay with scatter gather, a 
pitch that is a multiple of the channel interleave (256 bytes) times the channel-bank rotation is not efficient*/ + /*if that is the case it is recommended to pad the pitch by at least 256 pixels*/ + data->inefficient_linear_pitch_in_bytes = bw_mul(bw_mul(bw_int_to_fixed(256), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)); + + /*pixel transfer time*/ + /*the dmif and mcifwr yclk(pclk) required is the one that allows the transfer of all pipe's data buffer size in memory in the time for data transfer*/ + /*for dmif, pte and cursor requests have to be included.*/ + /*the dram data requirement is doubled when the data request size in bytes is less than the dram channel width times the burst size (8)*/ + /*the dram data requirement is also multiplied by the number of channels in the case of low power tiling*/ + /*the page close-open time is determined by trc and the number of page close-opens*/ + /*in tiled mode graphics or underlay with scatter-gather enabled the bytes per page close-open is the product of the memory line interleave times the maximum of the scatter-gather page width and the product of the tile width (8 pixels) times the number of channels times the number of banks.*/ + /*in linear mode graphics or underlay with scatter-gather enabled and inefficient pitch, the bytes per page close-open is the line request alternation slice, because different lines are in completely different 4k address bases.*/ + /*otherwise, the bytes page close-open is the chunk size because that is the arbitration slice.*/ + /*pte requests are grouped by pte requests per chunk if that is more than 1. each group costs a page close-open time for dmif reads*/ + /*cursor requests outstanding are limited to a group of two source lines. 
each group costs a page close-open time for dmif reads*/ + /*the display reads and writes time for data transfer is the minimum data or cursor buffer size in time minus the mc urgent latency*/ + /*the mc urgent latency is experienced more than one time if the number of dmif requests in the data buffer exceeds the request buffer size plus the request slots reserved for dmif in the dram channel arbiter queues*/ + /*the dispclk required is the maximum for all surfaces of the maximum of the source pixels for first output pixel times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, and the source pixels for last output pixel, times the throughput factor, divided by the pixels per dispclk, and divided by the minimum latency hiding minus the dram speed/p-state change latency minus the burst time, plus the active time.*/ + /*the data burst time is the maximum of the total page close-open time, total dmif/mcifwr buffer size in memory divided by the dram bandwidth, and the total dmif/mcifwr buffer size in memory divided by the 32 byte sclk data bus bandwidth, each multiplied by its efficiency.*/ + /*the source line transfer time is the maximum for all surfaces of the maximum of the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the fist pixel, and the burst time plus the urgent latency times the floor of the data required divided by the buffer size for the last pixel plus the active time.*/ + /*the source pixels for the first output pixel is 512 if the scaler vertical filter initialization value is greater than 2, and it is 4 times the source width if it is greater than 4.*/ + /*the source pixels for the last output pixel is the source width times the scaler vertical filter initialization value rounded up to even*/ + /*the source data for these pixels is the number of pixels times the bytes per pixel times the bytes per request divided by the useful bytes per request.*/ + data->cursor_total_data = bw_int_to_fixed(0); + data->cursor_total_request_groups = bw_int_to_fixed(0); + data->scatter_gather_total_pte_requests = bw_int_to_fixed(0); + data->scatter_gather_total_pte_request_groups = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->cursor_total_data = bw_add(data->cursor_total_data, bw_mul(bw_mul(bw_int_to_fixed(2), data->cursor_width_pixels[i]), bw_int_to_fixed(4))); + if (dceip->large_cursor == 1) { + data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_int_to_fixed((dceip->cursor_max_outstanding_group_num + 1))); + } + else { + data->cursor_total_request_groups = bw_add(data->cursor_total_request_groups, bw_ceil2(bw_div(data->cursor_width_pixels[i], dceip->cursor_chunk_width), bw_int_to_fixed(1))); + } + if (data->scatter_gather_enable_for_pipe[i]) { + data->scatter_gather_total_pte_requests = bw_add(data->scatter_gather_total_pte_requests, data->scatter_gather_pte_request_limit[i]); + data->scatter_gather_total_pte_request_groups = bw_add(data->scatter_gather_total_pte_request_groups, bw_ceil2(bw_div(data->scatter_gather_pte_request_limit[i], bw_ceil2(data->pte_request_per_chunk[i], bw_int_to_fixed(1))), bw_int_to_fixed(1))); + } + } + } + data->tile_width_in_pixels = bw_int_to_fixed(8); + data->dmif_total_number_of_data_request_page_close_open = bw_int_to_fixed(0); + data->mcifwr_total_number_of_data_request_page_close_open = 
bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] != bw_def_linear) { + data->bytes_per_page_close_open = bw_mul(data->lines_interleaved_in_mem_access[i], bw_max2(bw_mul(bw_mul(bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->tile_width_in_pixels), bw_int_to_fixed(vbios->number_of_dram_banks)), bw_int_to_fixed(data->number_of_dram_channels)), bw_mul(bw_int_to_fixed(data->bytes_per_pixel[i]), data->scatter_gather_page_width[i]))); + } + else if (data->scatter_gather_enable_for_pipe[i] == 1 && tiling_mode[i] == bw_def_linear && bw_equ(bw_mod((bw_mul(data->pitch_in_pixels_after_surface_type[i], bw_int_to_fixed(data->bytes_per_pixel[i]))), data->inefficient_linear_pitch_in_bytes), bw_int_to_fixed(0))) { + data->bytes_per_page_close_open = dceip->linear_mode_line_request_alternation_slice; + } + else { + data->bytes_per_page_close_open = data->memory_chunk_size_in_bytes[i]; + } + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->dmif_total_number_of_data_request_page_close_open = bw_add(data->dmif_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open)); + } + else { + data->mcifwr_total_number_of_data_request_page_close_open = bw_add(data->mcifwr_total_number_of_data_request_page_close_open, bw_div(bw_ceil2(data->adjusted_data_buffer_size[i], data->memory_chunk_size_in_bytes[i]), data->bytes_per_page_close_open)); + } + } + } + data->dmif_total_page_close_open_time = bw_div(bw_mul((bw_add(bw_add(data->dmif_total_number_of_data_request_page_close_open, data->scatter_gather_total_pte_request_groups), data->cursor_total_request_groups)), vbios->trc), bw_int_to_fixed(1000)); + data->mcifwr_total_page_close_open_time = bw_div(bw_mul(data->mcifwr_total_number_of_data_request_page_close_open, vbios->trc), bw_int_to_fixed(1000)); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->adjusted_data_buffer_size_in_memory[i] = bw_div(bw_mul(data->adjusted_data_buffer_size[i], data->bytes_per_request[i]), data->useful_bytes_per_request[i]); + } + } + data->total_requests_for_adjusted_dmif_size = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->total_requests_for_adjusted_dmif_size = bw_add(data->total_requests_for_adjusted_dmif_size, bw_div(data->adjusted_data_buffer_size[i], data->useful_bytes_per_request[i])); + } + } + } + data->total_dmifmc_urgent_trips = bw_ceil2(bw_div(data->total_requests_for_adjusted_dmif_size, (bw_add(dceip->dmif_request_buffer_size, bw_int_to_fixed(vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel * data->number_of_dram_channels)))), bw_int_to_fixed(1)); + data->total_dmifmc_urgent_latency = bw_mul(vbios->dmifmc_urgent_latency, data->total_dmifmc_urgent_trips); + data->total_display_reads_required_data = bw_int_to_fixed(0); + data->total_display_reads_required_dram_access_data = bw_int_to_fixed(0); + data->total_display_writes_required_data = bw_int_to_fixed(0); + data->total_display_writes_required_dram_access_data = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if 
(surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->display_reads_required_data = data->adjusted_data_buffer_size_in_memory[i]; + /*for hbm memories, each channel is split into 2 pseudo-channels that are each 64 bits in width. each*/ + /*pseudo-channel may be read independently of one another.*/ + /*the read burst length (bl) for hbm memories is 4, so each read command will access 32 bytes of data.*/ + /*the 64 or 32 byte sized data is stored in one pseudo-channel.*/ + /*it will take 4 memclk cycles or 8 yclk cycles to fetch 64 bytes of data from the hbm memory (2 read commands).*/ + /*it will take 2 memclk cycles or 4 yclk cycles to fetch 32 bytes of data from the hbm memory (1 read command).*/ + /*for gddr5/ddr4 memories, there is additional overhead if the size of the request is smaller than 64 bytes.*/ + /*the read burst length (bl) for gddr5/ddr4 memories is 8, regardless of the size of the data request.*/ + /*therefore it will require 8 cycles to fetch 64 or 32 bytes of data from the memory.*/ + /*the memory efficiency will be 50% for the 32 byte sized data.*/ + if (vbios->memory_type == bw_def_hbm) { + data->display_reads_required_dram_access_data = data->adjusted_data_buffer_size_in_memory[i]; + } + else { + data->display_reads_required_dram_access_data = bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed((8 * vbios->dram_channel_width_in_bits / 8)), data->bytes_per_request[i]), bw_int_to_fixed(1))); + } + data->total_display_reads_required_data = bw_add(data->total_display_reads_required_data, data->display_reads_required_data); + data->total_display_reads_required_dram_access_data = bw_add(data->total_display_reads_required_dram_access_data, data->display_reads_required_dram_access_data); + } + else { + data->total_display_writes_required_data = bw_add(data->total_display_writes_required_data, data->adjusted_data_buffer_size_in_memory[i]); + data->total_display_writes_required_dram_access_data = bw_add(data->total_display_writes_required_dram_access_data, bw_mul(data->adjusted_data_buffer_size_in_memory[i], bw_ceil2(bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits), data->bytes_per_request[i]), bw_int_to_fixed(1)))); + } + } + } + data->total_display_reads_required_data = bw_add(bw_add(data->total_display_reads_required_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64))); + data->total_display_reads_required_dram_access_data = bw_add(bw_add(data->total_display_reads_required_dram_access_data, data->cursor_total_data), bw_mul(data->scatter_gather_total_pte_requests, bw_int_to_fixed(64))); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(4))) { + data->src_pixels_for_first_output_pixel[i] = bw_mul(bw_int_to_fixed(4), data->source_width_rounded_up_to_chunks[i]); + } + else { + if (bw_mtn(data->v_filter_init[i], bw_int_to_fixed(2))) { + data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(512); + } + else { + data->src_pixels_for_first_output_pixel[i] = bw_int_to_fixed(0); + } + } + data->src_data_for_first_output_pixel[i] = bw_div(bw_mul(bw_mul(data->src_pixels_for_first_output_pixel[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]); + data->src_pixels_for_last_output_pixel[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], 
bw_int_to_fixed(dceip->lines_interleaved_into_lb)), bw_mul(bw_ceil2(data->vsr[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->horizontal_blank_and_chunk_granularity_factor[i]))); + data->src_data_for_last_output_pixel[i] = bw_div(bw_mul(bw_mul(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_max2(bw_ceil2(data->v_filter_init[i], bw_int_to_fixed(dceip->lines_interleaved_into_lb)), data->lines_interleaved_in_mem_access[i])), bw_int_to_fixed(data->bytes_per_pixel[i])), data->bytes_per_request[i]), data->useful_bytes_per_request[i]); + data->active_time[i] = bw_div(bw_div(data->source_width_rounded_up_to_chunks[i], data->hsr[i]), data->pixel_rate[i]); + } + } + for (i = 0; i <= 2; i++) { + for (j = 0; j <= 7; j++) { + data->dmif_burst_time[i][j] = bw_max3(data->dmif_total_page_close_open_time, bw_div(data->total_display_reads_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))), bw_div(data->total_display_reads_required_data, (bw_mul(bw_mul(sclk[j], vbios->data_return_bus_width), bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100))))); + if (data->d1_display_write_back_dwb_enable == 1) { + data->mcifwr_burst_time[i][j] = bw_max3(data->mcifwr_total_page_close_open_time, bw_div(data->total_display_writes_required_dram_access_data, (bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[i]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_wrchannels)))), bw_div(data->total_display_writes_required_data, (bw_mul(sclk[j], vbios->data_return_bus_width)))); + } + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + for (j = 0; j <= 2; j++) { + for (k = 0; k <= 7; k++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + /*time to transfer data from the dmif buffer to the lb. since the mc to dmif transfer time overlaps*/ + /*with the dmif to lb transfer time, only time to transfer the last chunk is considered.*/ + data->dmif_buffer_transfer_time[i] = bw_mul(data->source_width_rounded_up_to_chunks[i], (bw_div(dceip->lb_write_pixels_per_dispclk, (bw_div(vbios->low_voltage_max_dispclk, dceip->display_pipe_throughput_factor))))); + data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_add(bw_mul((bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->dmif_buffer_transfer_time[i]), data->active_time[i])); + /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/ + /*after the mclk switch sequence and not incur an urgent latency penalty. it is assumed that the gmc/arb can hold up to 256 requests*/ + /*per memory channel. 
if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/ + /*immediately serviced without a gap in the urgent requests.*/ + /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/ + if (surface_type[i] == bw_def_graphics) { + switch (data->lb_bpc[i]) { + case 6: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component; + break; + case 8: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component; + break; + case 10: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component; + break; + default: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component; + break; + } + if (data->use_alpha[i] == 1) { + data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency); + } + } + else { + switch (data->lb_bpc[i]) { + case 6: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component; + break; + case 8: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component; + break; + case 10: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component; + break; + default: + data->v_scaler_efficiency = bw_int_to_fixed(3); + break; + } + } + if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) { + data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i])); + } + else { + data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1)))); + } + data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_mul(bw_int_to_fixed(2), bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(bw_mul(data->bytes_per_request[i], data->pixel_rate[i]), data->scaler_limits_factor), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->dmif_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])))))); + } + else { + data->line_source_transfer_time[i][j][k] = bw_max2(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), bw_sub(bw_mul((bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[j][k])), bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i])); + /*during an mclk switch the requests from the dce ip are stored in the gmc/arb. these requests should be serviced immediately*/ + /*after the mclk switch sequence and not incur an urgent latency penalty. 
it is assumed that the gmc/arb can hold up to 256 requests*/ + /*per memory channel. if the dce ip is urgent after the mclk switch sequence, all pending requests and subsequent requests should be*/ + /*immediately serviced without a gap in the urgent requests.*/ + /*the latency incurred would be the time to issue the requests and return the data for the first or last output pixel.*/ + data->dram_speed_change_line_source_transfer_time[i][j][k] = bw_max2((bw_add((bw_div(data->src_data_for_first_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_first_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1)))))), (bw_add((bw_div(data->src_data_for_last_output_pixel[i], bw_min2(bw_mul(data->bytes_per_request[i], sclk[k]), bw_div(bw_mul(data->bytes_per_request[i], vbios->low_voltage_max_dispclk), bw_int_to_fixed(2))))), (bw_sub(bw_mul(data->mcifwr_burst_time[j][k], bw_floor2(bw_div(data->src_data_for_last_output_pixel[i], data->adjusted_data_buffer_size_in_memory[i]), bw_int_to_fixed(1))), data->active_time[i]))))); + } + } + } + } + } + /*cpu c-state and p-state change enable*/ + /*for cpu p-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration*/ + /*for cpu c-state change to be possible for a yclk(pclk) and sclk level the dispclk required has to be enough for the blackout duration and recovery*/ + /*condition for the blackout duration:*/ + /* minimum latency hiding > blackout duration + dmif burst time + line source transfer time*/ + /*condition for the blackout recovery:*/ + /* recovery time > dmif burst time + 2 * urgent latency*/ + /* recovery time > (display bw * blackout duration + (2 * urgent latency + dmif burst time)*dispclk - dmif size )*/ + /* / (dispclk - display bw)*/ + /*the minimum latency hiding is the minimum for all pipes of one screen line time, plus one more line time if doing lb prefetch, plus the dmif data buffer size equivalent in time, minus the urgent latency.*/ + /*the minimum latency hiding is further limited by the cursor. 
the cursor latency hiding is the number of lines of the cursor buffer, minus one if the downscaling is less than two, or minus three if it is more*/ + + /*initialize variables*/ + number_of_displays_enabled = 0; + number_of_displays_enabled_with_margin = 0; + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k]) { + number_of_displays_enabled = number_of_displays_enabled + 1; + } + data->display_pstate_change_enable[k] = 0; + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((bw_equ(dceip->stutter_and_dram_clock_state_change_gated_before_cursor, bw_int_to_fixed(0)) && bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0)))) { + if (bw_ltn(data->vsr[i], bw_int_to_fixed(2))) { + data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(1))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]); + } + else { + data->cursor_latency_hiding[i] = bw_div(bw_div(bw_mul((bw_sub(dceip->cursor_dcp_buffer_lines, bw_int_to_fixed(3))), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]); + } + } + else { + data->cursor_latency_hiding[i] = bw_int_to_fixed(9999); + } + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (dceip->graphics_lb_nodownscaling_multi_line_prefetching == 1 && (bw_equ(data->vsr[i], bw_int_to_fixed(1)) || (bw_leq(data->vsr[i], bw_frc_to_fixed(8, 10)) && bw_leq(data->v_taps[i], bw_int_to_fixed(2)) && data->lb_bpc[i] == 8)) && surface_type[i] == bw_def_graphics) { + if (number_of_displays_enabled > 2) + data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(2)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); + else + data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_sub(data->lb_partitions[i], bw_int_to_fixed(1)), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); + } + else { + data->minimum_latency_hiding[i] = bw_sub(bw_div(bw_mul((bw_div((bw_add(bw_int_to_fixed(1 + data->line_buffer_prefetch[i]), bw_div(bw_div(data->data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_pixels[i]))), data->vsr[i])), data->h_total[i]), data->pixel_rate[i]), data->total_dmifmc_urgent_latency); + } + data->minimum_latency_hiding_with_cursor[i] = bw_min2(data->minimum_latency_hiding[i], data->cursor_latency_hiding[i]); + } + } + for (i = 0; i <= 2; i++) { + for (j = 0; j <= 7; j++) { + data->blackout_duration_margin[i][j] = bw_int_to_fixed(9999); + data->dispclk_required_for_blackout_duration[i][j] = bw_int_to_fixed(0); + data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(0); + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0))) { + if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { + data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->line_source_transfer_time[k][i][j])); + 
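
The blackout-duration margin assembled in this hunk is easier to follow with the fixed-point plumbing stripped away. The stand-alone sketch below only restates the latency-hiding and margin arithmetic described in the comments above; every variable name and input value is invented for the example, and plain doubles stand in for the bw_* fixed-point helpers used by the driver.

#include <stdio.h>

int main(void)
{
	/* hypothetical single-pipe parameters (pixels, Hz, seconds) */
	double h_total = 2200.0;             /* pixels per line incl. blanking */
	double pixel_rate = 148.5e6;         /* pixel clock, Hz                */
	double vsr = 1.0;                    /* vertical scale ratio           */
	double cursor_buffer_lines = 4.0;    /* lines held in the cursor buffer */
	double lb_prefetch_lines = 1.0;      /* extra line when lb prefetch is on */
	double data_buffer_lines = 16.0;     /* dmif data buffer size in source lines */
	double urgent_latency = 4.0e-6;      /* total dmif/mc urgent latency   */
	double blackout_duration = 10.0e-6;
	double dmif_burst_time = 3.0e-6;
	double line_source_transfer_time = 5.0e-6;

	double line_time = h_total / pixel_rate;

	/* cursor buffer hides (lines - 1) line times when downscaling < 2x */
	double cursor_latency_hiding =
		(cursor_buffer_lines - 1.0) * line_time / vsr;

	/* one screen line, one prefetch line and the data buffer, less urgency */
	double minimum_latency_hiding =
		(1.0 + lb_prefetch_lines + data_buffer_lines) / vsr * line_time
		- urgent_latency;

	double hiding = minimum_latency_hiding < cursor_latency_hiding ?
			minimum_latency_hiding : cursor_latency_hiding;

	/* positive margin: the blackout fits inside the hidden latency */
	double margin = hiding - blackout_duration - dmif_burst_time
			- line_source_transfer_time;

	printf("latency hiding %.1f us, blackout margin %.1f us\n",
	       hiding * 1e6, margin * 1e6);
	return 0;
}
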
data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->active_time[k])))); + if (bw_leq(vbios->maximum_blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))) { + data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999); + } + else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) { + data->dispclk_required_for_blackout_recovery[i][j] = bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, bw_sub(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k])); + } + } + else { + data->blackout_duration_margin[i][j] = bw_min2(data->blackout_duration_margin[i][j], bw_sub(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->line_source_transfer_time[k][i][j])); + data->dispclk_required_for_blackout_duration[i][j] = bw_max3(data->dispclk_required_for_blackout_duration[i][j], bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->minimum_latency_hiding_with_cursor[k], vbios->blackout_duration), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); + if (bw_ltn(vbios->maximum_blackout_recovery_time, bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]))) { + data->dispclk_required_for_blackout_recovery[i][j] = bw_int_to_fixed(9999); + } + else if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j])))))) { + data->dispclk_required_for_blackout_recovery[i][j] = 
bw_max2(data->dispclk_required_for_blackout_recovery[i][j], bw_div(bw_mul(bw_div(bw_div((bw_sub(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, vbios->maximum_blackout_recovery_time))), data->adjusted_data_buffer_size[k])), bw_int_to_fixed(data->bytes_per_pixel[k])), (bw_sub(vbios->maximum_blackout_recovery_time, (bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[i][j]))))), data->latency_hiding_lines[k]), data->lines_interleaved_in_mem_access[k])); + } + } + } + } + } + } + if (bw_mtn(data->blackout_duration_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[high][s_high], vbios->high_voltage_max_dispclk)) { + data->cpup_state_change_enable = bw_def_yes; + if (bw_ltn(data->dispclk_required_for_blackout_recovery[high][s_high], vbios->high_voltage_max_dispclk)) { + data->cpuc_state_change_enable = bw_def_yes; + } + else { + data->cpuc_state_change_enable = bw_def_no; + } + } + else { + data->cpup_state_change_enable = bw_def_no; + data->cpuc_state_change_enable = bw_def_no; + } + /*nb p-state change enable*/ + /*for dram speed/p-state change to be possible for a yclk(pclk) and sclk level there has to be positive margin and the dispclk required has to be*/ + /*below the maximum.*/ + /*the dram speed/p-state change margin is the minimum for all surfaces of the maximum latency hiding minus the dram speed/p-state change latency,*/ + /*minus the dmif burst time, minus the source line transfer time*/ + /*the maximum latency hiding is the minimum latency hiding plus one source line used for de-tiling in the line buffer, plus half the urgent latency*/ + /*if stutter and dram clock state change are gated before cursor then the cursor latency hiding does not limit stutter or dram clock state change*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + /*maximum_latency_hiding(i) = minimum_latency_hiding(i) + 1 / vsr(i) **/ + /* h_total(i) / pixel_rate(i) + 0.5 * total_dmifmc_urgent_latency*/ + data->maximum_latency_hiding[i] = bw_add(data->minimum_latency_hiding[i], + bw_mul(bw_frc_to_fixed(5, 10), data->total_dmifmc_urgent_latency)); + data->maximum_latency_hiding_with_cursor[i] = bw_min2(data->maximum_latency_hiding[i], data->cursor_latency_hiding[i]); + } + } + for (i = 0; i <= 2; i++) { + for (j = 0; j <= 7; j++) { + data->min_dram_speed_change_margin[i][j] = bw_int_to_fixed(9999); + data->dram_speed_change_margin = bw_int_to_fixed(9999); + data->dispclk_required_for_dram_speed_change[i][j] = bw_int_to_fixed(0); + data->num_displays_with_margin[i][j] = 0; + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k]) { + if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { + data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]); + if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { + /*determine the minimum dram clock change margin for each set of clock frequencies*/ + data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); + /*compute the maximum clock frequuency required for the 
dram clock change at each set of clock frequencies*/ + data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->active_time[k])))); + if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { + data->display_pstate_change_enable[k] = 1; + data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; + data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]); + } + } + } + else { + data->dram_speed_change_margin = bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->mcifwr_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]); + if ((bw_mtn(data->dram_speed_change_margin, bw_int_to_fixed(0)) && bw_ltn(data->dram_speed_change_margin, bw_int_to_fixed(9999)))) { + /*determine the minimum dram clock change margin for each display pipe*/ + data->min_dram_speed_change_margin[i][j] = bw_min2(data->min_dram_speed_change_margin[i][j], data->dram_speed_change_margin); + /*compute the maximum clock frequuency required for the dram clock change at each set of clock frequencies*/ + data->dispclk_required_for_dram_speed_change_pipe[i][j] = bw_max2(bw_div(bw_div(bw_mul(data->src_pixels_for_first_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]))), bw_div(bw_div(bw_mul(data->src_pixels_for_last_output_pixel[k], dceip->display_pipe_throughput_factor), dceip->lb_write_pixels_per_dispclk), (bw_add(bw_sub(bw_sub(bw_sub(bw_sub(data->maximum_latency_hiding_with_cursor[k], vbios->nbp_state_change_latency), data->dmif_burst_time[i][j]), data->dram_speed_change_line_source_transfer_time[k][i][j]), data->mcifwr_burst_time[i][j]), data->active_time[k])))); + if ((bw_ltn(data->dispclk_required_for_dram_speed_change_pipe[i][j], vbios->high_voltage_max_dispclk))) { + data->display_pstate_change_enable[k] = 1; + data->num_displays_with_margin[i][j] = data->num_displays_with_margin[i][j] + 1; + data->dispclk_required_for_dram_speed_change[i][j] = bw_max2(data->dispclk_required_for_dram_speed_change[i][j], data->dispclk_required_for_dram_speed_change_pipe[i][j]); + } + } + } + } + } + } + } + /*determine the number of displays with margin to switch in the v_active region*/ + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k] == 1 && data->display_pstate_change_enable[k] == 1) { + number_of_displays_enabled_with_margin = number_of_displays_enabled_with_margin + 
1; + } + } + /*determine the number of displays that don't have any dram clock change margin, but*/ + /*have the same resolution. these displays can switch in a common vblank region if*/ + /*their frames are aligned.*/ + data->min_vblank_dram_speed_change_margin = bw_int_to_fixed(9999); + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k]) { + if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { + data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]); + data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]); + } + else { + data->v_blank_dram_speed_change_margin[k] = bw_sub(bw_sub(bw_sub(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[k], bw_sub(bw_div(data->src_height[k], data->v_scale_ratio[k]), bw_int_to_fixed(4)))), data->h_total[k]), data->pixel_rate[k]), vbios->nbp_state_change_latency), data->dmif_burst_time[low][s_low]), data->mcifwr_burst_time[low][s_low]), data->dram_speed_change_line_source_transfer_time[k][low][s_low]); + data->min_vblank_dram_speed_change_margin = bw_min2(data->min_vblank_dram_speed_change_margin, data->v_blank_dram_speed_change_margin[k]); + } + } + } + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + data->displays_with_same_mode[i] = bw_int_to_fixed(0); + if (data->enable[i] == 1 && data->display_pstate_change_enable[i] == 0 && bw_mtn(data->v_blank_dram_speed_change_margin[i], bw_int_to_fixed(0))) { + for (j = 0; j <= maximum_number_of_surfaces - 1; j++) { + if ((i == j || data->display_synchronization_enabled) && (data->enable[j] == 1 && bw_equ(data->source_width_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[j]) && bw_equ(data->source_height_rounded_up_to_chunks[i], data->source_height_rounded_up_to_chunks[j]) && bw_equ(data->vsr[i], data->vsr[j]) && bw_equ(data->hsr[i], data->hsr[j]) && bw_equ(data->pixel_rate[i], data->pixel_rate[j]))) { + data->displays_with_same_mode[i] = bw_add(data->displays_with_same_mode[i], bw_int_to_fixed(1)); + } + } + } + } + /*compute the maximum number of aligned displays with no margin*/ + number_of_aligned_displays_with_no_margin = 0; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + number_of_aligned_displays_with_no_margin = bw_fixed_to_int(bw_max2(bw_int_to_fixed(number_of_aligned_displays_with_no_margin), data->displays_with_same_mode[i])); + } + /*dram clock change is possible, if all displays have positive margin except for one display or a group of*/ + /*aligned displays with the same timing.*/ + /*the display(s) with the negative margin can be switched in the v_blank region while the other*/ + /*displays are in v_blank or v_active.*/ + if (number_of_displays_enabled_with_margin > 0 && (number_of_displays_enabled_with_margin + number_of_aligned_displays_with_no_margin) == number_of_displays_enabled && bw_mtn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[high][s_high], bw_int_to_fixed(9999)) && bw_ltn(data->dispclk_required_for_dram_speed_change[high][s_high], vbios->high_voltage_max_dispclk)) { + data->nbp_state_change_enable = bw_def_yes; + 
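
The dram clock (nb p-state) change decision encoded by the condition just evaluated reduces to a handful of counts and comparisons. The sketch below restates it as a small predicate; the function name, parameters and sample numbers are all made up for illustration and do not come from the driver.

#include <stdbool.h>
#include <stdio.h>

/* illustrative only: names and values are invented, and the driver
 * additionally treats a margin of 9999 as "no margin computed" */
static bool nbp_state_change_possible(int displays_enabled,
				      int displays_with_margin,
				      int aligned_displays_no_margin,
				      double min_switch_margin_us,
				      double dispclk_required_mhz,
				      double max_dispclk_mhz)
{
	return displays_with_margin > 0 &&
	       displays_with_margin + aligned_displays_no_margin ==
							displays_enabled &&
	       min_switch_margin_us > 0.0 &&
	       dispclk_required_mhz < max_dispclk_mhz;
}

int main(void)
{
	/* example: two of three displays have v_active margin, the third is
	 * part of the aligned, same-timing group that can switch in v_blank */
	bool ok = nbp_state_change_possible(3, 2, 1, 12.5, 540.0, 1200.0);

	printf("nbp state change in v_active: %s\n",
	       ok ? "possible" : "not possible");
	return 0;
}
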
} + else { + data->nbp_state_change_enable = bw_def_no; + } + /*dram clock change is possible only in vblank if all displays are aligned and have no margin*/ + if (number_of_aligned_displays_with_no_margin == number_of_displays_enabled) { + nbp_state_change_enable_blank = bw_def_yes; + } + else { + nbp_state_change_enable_blank = bw_def_no; + } + + /*average bandwidth*/ + /*the average bandwidth with no compression is the vertical active time is the source width times the bytes per pixel divided by the line time, multiplied by the vertical scale ratio and the ratio of bytes per request divided by the useful bytes per request.*/ + /*the average bandwidth with compression is the same, divided by the compression ratio*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->average_bandwidth_no_compression[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(data->source_width_rounded_up_to_chunks[i], bw_int_to_fixed(data->bytes_per_pixel[i])), (bw_div(data->h_total[i], data->pixel_rate[i]))), data->vsr[i]), data->bytes_per_request[i]), data->useful_bytes_per_request[i]); + data->average_bandwidth[i] = bw_div(data->average_bandwidth_no_compression[i], data->compression_rate[i]); + } + } + data->total_average_bandwidth_no_compression = bw_int_to_fixed(0); + data->total_average_bandwidth = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->total_average_bandwidth_no_compression = bw_add(data->total_average_bandwidth_no_compression, data->average_bandwidth_no_compression[i]); + data->total_average_bandwidth = bw_add(data->total_average_bandwidth, data->average_bandwidth[i]); + } + } + + /*required yclk(pclk)*/ + /*yclk requirement only makes sense if the dmif and mcifwr data total page close-open time is less than the time for data transfer and the total pte requests fit in the scatter-gather saw queque size*/ + /*if that is the case, the yclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/low yclk(pclk) is chosen accordingly*/ + /*high yclk(pclk) has to be selected when dram speed/p-state change is not possible.*/ + data->min_cursor_memory_interface_buffer_size_in_time = bw_int_to_fixed(9999); + /* number of cursor lines stored in the cursor data return buffer*/ + num_cursor_lines = 0; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_mtn(data->cursor_width_pixels[i], bw_int_to_fixed(0))) { + /*compute number of cursor lines stored in data return buffer*/ + if (bw_leq(data->cursor_width_pixels[i], bw_int_to_fixed(64)) && dceip->large_cursor == 1) { + num_cursor_lines = 4; + } + else { + num_cursor_lines = 2; + } + data->min_cursor_memory_interface_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, bw_div(bw_mul(bw_div(bw_int_to_fixed(num_cursor_lines), data->vsr[i]), data->h_total[i]), data->pixel_rate[i])); + } + } + } + /*compute minimum time to read one chunk from the dmif buffer*/ + if (number_of_displays_enabled > 2) { + data->chunk_request_delay = 0; + } + else { + data->chunk_request_delay = bw_fixed_to_int(bw_div(bw_int_to_fixed(512), vbios->high_voltage_max_dispclk)); + } + data->min_read_buffer_size_in_time = bw_min2(data->min_cursor_memory_interface_buffer_size_in_time, data->min_dmif_size_in_time); + data->display_reads_time_for_data_transfer = bw_sub(bw_sub(data->min_read_buffer_size_in_time, data->total_dmifmc_urgent_latency), bw_int_to_fixed(data->chunk_request_delay)); + 
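
The average-bandwidth expression in this hunk also reads more clearly without the fixed-point helpers. Here is a stand-alone restatement with plain doubles; the example values (a 4 bytes-per-pixel, 3840-wide surface with no compression) are arbitrary and only illustrate the formula given in the comment above.

#include <stdio.h>

int main(void)
{
	double source_width = 3840.0;      /* pixels, rounded up to chunks   */
	double bytes_per_pixel = 4.0;
	double h_total = 4400.0;           /* total pixels per line          */
	double pixel_rate = 594.0e6;       /* Hz                             */
	double vsr = 1.0;                  /* vertical scale ratio           */
	double bytes_per_request = 64.0;
	double useful_bytes_per_request = 64.0;
	double compression_rate = 1.0;     /* 1.0 = no compression           */

	double line_time = h_total / pixel_rate;

	/* bytes fetched per line time, scaled by vsr and request efficiency */
	double avg_bw_no_compression = source_width * bytes_per_pixel
			/ line_time * vsr
			* bytes_per_request / useful_bytes_per_request;
	double avg_bw = avg_bw_no_compression / compression_rate;

	printf("average bandwidth: %.2f GB/s (%.2f GB/s uncompressed)\n",
	       avg_bw / 1e9, avg_bw_no_compression / 1e9);
	return 0;
}
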
data->display_writes_time_for_data_transfer = bw_sub(data->min_mcifwr_size_in_time, vbios->mcifwrmc_urgent_latency); + data->dmif_required_dram_bandwidth = bw_div(data->total_display_reads_required_dram_access_data, data->display_reads_time_for_data_transfer); + data->mcifwr_required_dram_bandwidth = bw_div(data->total_display_writes_required_dram_access_data, data->display_writes_time_for_data_transfer); + data->required_dmifmc_urgent_latency_for_page_close_open = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_total_page_close_open_time)), data->total_dmifmc_urgent_trips); + data->required_mcifmcwr_urgent_latency = bw_sub(data->min_mcifwr_size_in_time, data->mcifwr_total_page_close_open_time); + if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) { + data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999); + yclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size; + data->y_clk_level = high; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) { + data->required_dram_bandwidth_gbyte_per_second = bw_int_to_fixed(9999); + yclk_message = bw_def_exceeded_allowed_page_close_open; + data->y_clk_level = high; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else { + data->required_dram_bandwidth_gbyte_per_second = bw_div(bw_max2(data->dmif_required_dram_bandwidth, data->mcifwr_required_dram_bandwidth), bw_int_to_fixed(1000)); + if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[low]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels))) + && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[low][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[low][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[low][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[low][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[low][s_high] == 
number_of_displays_enabled_with_margin))) { + yclk_message = bw_fixed_to_int(vbios->low_yclk); + data->y_clk_level = low; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[low]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[mid]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels))) + && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels))) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[mid][s_high], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[mid][s_high], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[mid][s_high], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[mid][s_high], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[mid][s_high] == number_of_displays_enabled_with_margin))) { + yclk_message = bw_fixed_to_int(vbios->mid_yclk); + data->y_clk_level = mid; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[mid]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation, 100),yclk[high]),bw_div(bw_int_to_fixed(vbios->dram_channel_width_in_bits),bw_int_to_fixed(8))),bw_int_to_fixed(vbios->number_of_dram_channels))) + && bw_ltn(bw_mul(data->required_dram_bandwidth_gbyte_per_second, bw_int_to_fixed(1000)), bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)))) { + yclk_message = bw_fixed_to_int(vbios->high_yclk); + data->y_clk_level = high; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + else { + yclk_message = bw_def_exceeded_allowed_maximum_bw; + data->y_clk_level = high; + data->dram_bandwidth = bw_mul(bw_div(bw_mul(bw_mul(data->dram_efficiency, yclk[high]), bw_int_to_fixed(vbios->dram_channel_width_in_bits)), bw_int_to_fixed(8)), bw_int_to_fixed(data->number_of_dram_channels)); + } + } + /*required sclk*/ + /*sclk requirement only makes sense if the total pte 
requests fit in the scatter-gather saw queque size*/ + /*if that is the case, the sclk requirement is the maximum of the ones required by dmif and mcifwr, and the high/mid/low sclk is chosen accordingly, unless that choice results in foresaking dram speed/nb p-state change.*/ + /*the dmif and mcifwr sclk required is the one that allows the transfer of all pipe's data buffer size through the sclk bus in the time for data transfer*/ + /*for dmif, pte and cursor requests have to be included.*/ + data->dmif_required_sclk = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))); + data->mcifwr_required_sclk = bw_div(bw_div(data->total_display_writes_required_data, data->display_writes_time_for_data_transfer), vbios->data_return_bus_width); + if (bw_mtn(data->scatter_gather_total_pte_requests, dceip->maximum_total_outstanding_pte_requests_allowed_by_saw)) { + data->required_sclk = bw_int_to_fixed(9999); + sclk_message = bw_def_exceeded_allowed_outstanding_pte_req_queue_size; + data->sclk_level = s_high; + } + else if (bw_mtn(vbios->dmifmc_urgent_latency, data->required_dmifmc_urgent_latency_for_page_close_open) || bw_mtn(vbios->mcifwrmc_urgent_latency, data->required_mcifmcwr_urgent_latency)) { + data->required_sclk = bw_int_to_fixed(9999); + sclk_message = bw_def_exceeded_allowed_page_close_open; + data->sclk_level = s_high; + } + else { + data->required_sclk = bw_max2(data->dmif_required_sclk, data->mcifwr_required_sclk); + if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[low]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_low]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_low], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_low], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_low], vbios->low_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_low] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_low; + data->sclk_level = s_low; + data->required_sclk = vbios->low_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[mid]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid1]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && 
bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid1], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid1], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid1], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid1] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid1; + data->required_sclk = vbios->mid1_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid2]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid2]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid2], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid2], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid2], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid2] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid2; + data->required_sclk = vbios->mid2_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid3]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid3]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid3], vbios->high_voltage_max_dispclk))) && 
(!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid3], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid3], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid3] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid3; + data->required_sclk = vbios->mid3_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid4]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid4]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid4], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid4], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid4], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid4] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid4; + data->required_sclk = vbios->mid4_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid5]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid5]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid5], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid5], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid5], vbios->mid_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid5] == number_of_displays_enabled_with_margin))) { + 
sclk_message = bw_def_mid; + data->sclk_level = s_mid5; + data->required_sclk = vbios->mid5_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_mid6]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_mid6]) && (data->cpup_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (data->cpuc_state_change_enable == bw_def_no || (bw_mtn(data->blackout_duration_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->dispclk_required_for_blackout_duration[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && bw_ltn(data->dispclk_required_for_blackout_recovery[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk))) && (!data->increase_voltage_to_support_mclk_switch || data->nbp_state_change_enable == bw_def_no || (bw_mtn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(0)) && bw_ltn(data->min_dram_speed_change_margin[data->y_clk_level][s_mid6], bw_int_to_fixed(9999)) && bw_leq(data->dispclk_required_for_dram_speed_change[data->y_clk_level][s_mid6], vbios->high_voltage_max_dispclk) && data->num_displays_with_margin[data->y_clk_level][s_mid6] == number_of_displays_enabled_with_margin))) { + sclk_message = bw_def_mid; + data->sclk_level = s_mid6; + data->required_sclk = vbios->mid6_sclk; + } + else if (bw_ltn(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_high])) { + sclk_message = bw_def_high; + data->sclk_level = s_high; + data->required_sclk = vbios->high_sclk; + } + else if (bw_meq(data->total_average_bandwidth_no_compression, bw_mul(bw_mul(bw_frc_to_fixed(dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation, 100),sclk[s_high]),vbios->data_return_bus_width)) + && bw_ltn(data->required_sclk, sclk[s_high])) { + sclk_message = bw_def_high; + data->sclk_level = s_high; + data->required_sclk = vbios->high_sclk; + } + else { + sclk_message = bw_def_exceeded_allowed_maximum_sclk; + data->sclk_level = s_high; + /*required_sclk = high_sclk*/ + } + } + /*dispclk*/ + /*if dispclk is set to the maximum, ramping is not required. 
dispclk required without ramping is less than the dispclk required with ramping.*/ + /*if dispclk required without ramping is more than the maximum dispclk, that is the dispclk required, and the mode is not supported*/ + /*if that does not happen, but dispclk required with ramping is more than the maximum dispclk, dispclk required is just the maximum dispclk*/ + /*if that does not happen either, dispclk required is the dispclk required with ramping.*/ + /*dispclk required without ramping is the maximum of the one required for display pipe pixel throughput, for scaler throughput, for total read request thrrougput and for dram/np p-state change if enabled.*/ + /*the display pipe pixel throughput is the maximum of lines in per line out in the beginning of the frame and lines in per line out in the middle of the frame multiplied by the horizontal blank and chunk granularity factor, altogether multiplied by the ratio of the source width to the line time, divided by the line buffer pixels per dispclk throughput, and multiplied by the display pipe throughput factor.*/ + /*the horizontal blank and chunk granularity factor is the ratio of the line time divided by the line time minus half the horizontal blank and chunk time. it applies when the lines in per line out is not 2 or 4.*/ + /*the dispclk required for scaler throughput is the product of the pixel rate and the scaling limits factor.*/ + /*the dispclk required for total read request throughput is the product of the peak request-per-second bandwidth and the dispclk cycles per request, divided by the request efficiency.*/ + /*for the dispclk required with ramping, instead of multiplying just the pipe throughput by the display pipe throughput factor, we multiply the scaler and pipe throughput by the ramping factor.*/ + /*the scaling limits factor is the product of the horizontal scale ratio, and the ratio of the vertical taps divided by the scaler efficiency clamped to at least 1.*/ + /*the scaling limits factor itself it also clamped to at least 1*/ + /*if doing downscaling with the pre-downscaler enabled, the horizontal scale ratio should not be considered above (use "1")*/ + data->downspread_factor = bw_add(bw_int_to_fixed(1), bw_div(vbios->down_spread_percentage, bw_int_to_fixed(100))); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] == bw_def_graphics) { + switch (data->lb_bpc[i]) { + case 6: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency6_bit_per_component; + break; + case 8: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency8_bit_per_component; + break; + case 10: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency10_bit_per_component; + break; + default: + data->v_scaler_efficiency = dceip->graphics_vscaler_efficiency12_bit_per_component; + break; + } + if (data->use_alpha[i] == 1) { + data->v_scaler_efficiency = bw_min2(data->v_scaler_efficiency, dceip->alpha_vscaler_efficiency); + } + } + else { + switch (data->lb_bpc[i]) { + case 6: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency6_bit_per_component; + break; + case 8: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency8_bit_per_component; + break; + case 10: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency10_bit_per_component; + break; + default: + data->v_scaler_efficiency = dceip->underlay_vscaler_efficiency12_bit_per_component; + break; + } + } + if (dceip->pre_downscaler_enabled && bw_mtn(data->hsr[i], bw_int_to_fixed(1))) { + 
data->scaler_limits_factor = bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_div(data->source_width_rounded_up_to_chunks[i], data->h_total[i])); + } + else { + data->scaler_limits_factor = bw_max3(bw_int_to_fixed(1), bw_ceil2(bw_div(data->h_taps[i], bw_int_to_fixed(4)), bw_int_to_fixed(1)), bw_mul(data->hsr[i], bw_max2(bw_div(data->v_taps[i], data->v_scaler_efficiency), bw_int_to_fixed(1)))); + } + data->display_pipe_pixel_throughput = bw_div(bw_div(bw_mul(bw_max2(data->lb_lines_in_per_line_out_in_beginning_of_frame[i], bw_mul(data->lb_lines_in_per_line_out_in_middle_of_frame[i], data->horizontal_blank_and_chunk_granularity_factor[i])), data->source_width_rounded_up_to_chunks[i]), (bw_div(data->h_total[i], data->pixel_rate[i]))), dceip->lb_write_pixels_per_dispclk); + data->dispclk_required_without_ramping[i] = bw_mul(data->downspread_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), bw_mul(dceip->display_pipe_throughput_factor, data->display_pipe_pixel_throughput))); + data->dispclk_required_with_ramping[i] = bw_mul(dceip->dispclk_ramping_factor, bw_max2(bw_mul(data->pixel_rate[i], data->scaler_limits_factor), data->display_pipe_pixel_throughput)); + } + } + data->total_dispclk_required_with_ramping = bw_int_to_fixed(0); + data->total_dispclk_required_without_ramping = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_ltn(data->total_dispclk_required_with_ramping, data->dispclk_required_with_ramping[i])) { + data->total_dispclk_required_with_ramping = data->dispclk_required_with_ramping[i]; + } + if (bw_ltn(data->total_dispclk_required_without_ramping, data->dispclk_required_without_ramping[i])) { + data->total_dispclk_required_without_ramping = data->dispclk_required_without_ramping[i]; + } + } + } + data->total_read_request_bandwidth = bw_int_to_fixed(0); + data->total_write_request_bandwidth = bw_int_to_fixed(0); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->total_read_request_bandwidth = bw_add(data->total_read_request_bandwidth, data->request_bandwidth[i]); + } + else { + data->total_write_request_bandwidth = bw_add(data->total_write_request_bandwidth, data->request_bandwidth[i]); + } + } + } + data->dispclk_required_for_total_read_request_bandwidth = bw_div(bw_mul(data->total_read_request_bandwidth, dceip->dispclk_per_request), dceip->request_efficiency); + data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping, data->dispclk_required_for_total_read_request_bandwidth); + data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping, data->dispclk_required_for_total_read_request_bandwidth); + if (data->cpuc_state_change_enable == bw_def_yes) { + data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]); + data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max3(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level], 
data->dispclk_required_for_blackout_recovery[data->y_clk_level][data->sclk_level]); + } + if (data->cpup_state_change_enable == bw_def_yes) { + data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); + data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_blackout_duration[data->y_clk_level][data->sclk_level]); + } + if (data->nbp_state_change_enable == bw_def_yes && data->increase_voltage_to_support_mclk_switch) { + data->total_dispclk_required_with_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_with_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); + data->total_dispclk_required_without_ramping_with_request_bandwidth = bw_max2(data->total_dispclk_required_without_ramping_with_request_bandwidth, data->dispclk_required_for_dram_speed_change[data->y_clk_level][data->sclk_level]); + } + if (bw_ltn(data->total_dispclk_required_with_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) { + data->dispclk = data->total_dispclk_required_with_ramping_with_request_bandwidth; + } + else if (bw_ltn(data->total_dispclk_required_without_ramping_with_request_bandwidth, vbios->high_voltage_max_dispclk)) { + data->dispclk = vbios->high_voltage_max_dispclk; + } + else { + data->dispclk = data->total_dispclk_required_without_ramping_with_request_bandwidth; + } + /* required core voltage*/ + /* the core voltage required is low if sclk, yclk(pclk)and dispclk are within the low limits*/ + /* otherwise, the core voltage required is medium if yclk (pclk) is within the low limit and sclk and dispclk are within the medium limit*/ + /* otherwise, the core voltage required is high if the three clocks are within the high limits*/ + /* otherwise, or if the mode is not supported, core voltage requirement is not applicable*/ + if (pipe_check == bw_def_notok) { + voltage = bw_def_na; + } + else if (mode_check == bw_def_notok) { + voltage = bw_def_notok; + } + else if (bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) && sclk_message == bw_def_low && bw_ltn(data->dispclk, vbios->low_voltage_max_dispclk)) { + voltage = bw_def_0_72; + } + else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid) && bw_ltn(data->dispclk, vbios->mid_voltage_max_dispclk)) { + voltage = bw_def_0_8; + } + else if ((bw_equ(bw_int_to_fixed(yclk_message), vbios->low_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->mid_yclk) || bw_equ(bw_int_to_fixed(yclk_message), vbios->high_yclk)) && (sclk_message == bw_def_low || sclk_message == bw_def_mid || sclk_message == bw_def_high) && bw_leq(data->dispclk, vbios->high_voltage_max_dispclk)) { + if ((data->nbp_state_change_enable == bw_def_no && nbp_state_change_enable_blank == bw_def_no)) { + voltage = bw_def_high_no_nbp_state_change; + } + else { + voltage = bw_def_0_9; + } + } + else { + voltage = bw_def_notok; + } + if (voltage == bw_def_0_72) { + data->max_phyclk = vbios->low_voltage_max_phyclk; + } + else if (voltage == bw_def_0_8) { + data->max_phyclk = vbios->mid_voltage_max_phyclk; + } + else { + data->max_phyclk = vbios->high_voltage_max_phyclk; + } + /*required blackout recovery 
time*/ + data->blackout_recovery_time = bw_int_to_fixed(0); + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + if (data->enable[k] && bw_mtn(vbios->blackout_duration, bw_int_to_fixed(0)) && data->cpup_state_change_enable == bw_def_yes) { + if (surface_type[k] != bw_def_display_write_back420_luma && surface_type[k] != bw_def_display_write_back420_chroma) { + data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])); + if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])))))) { + data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_mul(bw_int_to_fixed(2), data->total_dmifmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]))))); + } + } + else { + data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])); + if (bw_ltn(data->adjusted_data_buffer_size[k], bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), (bw_add(vbios->blackout_duration, bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])))))) { + data->blackout_recovery_time = bw_max2(data->blackout_recovery_time, bw_div((bw_add(bw_mul(bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]), vbios->blackout_duration), bw_sub(bw_div(bw_mul(bw_mul(bw_mul((bw_add(bw_add(bw_mul(bw_int_to_fixed(2), vbios->mcifwrmc_urgent_latency), data->dmif_burst_time[data->y_clk_level][data->sclk_level]), data->mcifwr_burst_time[data->y_clk_level][data->sclk_level])), data->dispclk), bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), data->adjusted_data_buffer_size[k]))), (bw_sub(bw_div(bw_mul(bw_mul(data->dispclk, bw_int_to_fixed(data->bytes_per_pixel[k])), data->lines_interleaved_in_mem_access[k]), data->latency_hiding_lines[k]), bw_div(bw_mul(data->display_bandwidth[k], data->useful_bytes_per_request[k]), data->bytes_per_request[k]))))); + } + } + } + } + /*sclk deep sleep*/ + /*during self-refresh, sclk can be reduced to dispclk divided by the minimum pixels in the data fifo entry, with 15% margin, but shoudl not be set to less than the request bandwidth.*/ + /*the data fifo entry is 16 pixels for the writeback, 64 bytes/bytes_per_pixel for the graphics, 16 pixels for the 
parallel rotation underlay,*/ + /*and 16 bytes/bytes_per_pixel for the orthogonal rotation underlay.*/ + /*in parallel mode (underlay pipe), the data read from the dmifv buffer is variable and based on the pixel depth (8 bpp - 16 bytes, 16 bpp - 32 bytes, 32 bpp - 64 bytes)*/ + /*in orthogonal mode (underlay pipe), the data read from the dmifv buffer is fixed at 16 bytes.*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (surface_type[i] == bw_def_display_write_back420_luma || surface_type[i] == bw_def_display_write_back420_chroma) { + data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16); + } + else if (surface_type[i] == bw_def_graphics) { + data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(64), bw_int_to_fixed(data->bytes_per_pixel[i])); + } + else if (data->orthogonal_rotation[i] == 0) { + data->pixels_per_data_fifo_entry[i] = bw_int_to_fixed(16); + } + else { + data->pixels_per_data_fifo_entry[i] = bw_div(bw_int_to_fixed(16), bw_int_to_fixed(data->bytes_per_pixel[i])); + } + } + } + data->min_pixels_per_data_fifo_entry = bw_int_to_fixed(9999); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_mtn(data->min_pixels_per_data_fifo_entry, data->pixels_per_data_fifo_entry[i])) { + data->min_pixels_per_data_fifo_entry = data->pixels_per_data_fifo_entry[i]; + } + } + } + data->sclk_deep_sleep = bw_max2(bw_div(bw_mul(data->dispclk, bw_frc_to_fixed(115, 100)), data->min_pixels_per_data_fifo_entry), data->total_read_request_bandwidth); + /*urgent, stutter and nb-p_state watermark*/ + /*the urgent watermark is the maximum of the urgent trip time plus the pixel transfer time, the urgent trip times to get data for the first pixel, and the urgent trip times to get data for the last pixel.*/ + /*the stutter exit watermark is the self refresh exit time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel. 
it does not apply to the writeback.*/ + /*the nb p-state change watermark is the dram speed/p-state change time plus the maximum of the data burst time plus the pixel transfer time, the data burst times to get data for the first pixel, and the data burst times to get data for the last pixel.*/ + /*the pixel transfer time is the maximum of the time to transfer the source pixels required for the first output pixel, and the time to transfer the pixels for the last output pixel minus the active line time.*/ + /*blackout_duration is added to the urgent watermark*/ + data->chunk_request_time = bw_int_to_fixed(0); + data->cursor_request_time = bw_int_to_fixed(0); + /*compute total time to request one chunk from each active display pipe*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->chunk_request_time = bw_add(data->chunk_request_time, (bw_div((bw_div(bw_int_to_fixed(pixels_per_chunk * data->bytes_per_pixel[i]), data->useful_bytes_per_request[i])), bw_min2(sclk[data->sclk_level], bw_div(data->dispclk, bw_int_to_fixed(2)))))); + } + } + /*compute total time to request cursor data*/ + data->cursor_request_time = (bw_div(data->cursor_total_data, (bw_mul(bw_int_to_fixed(32), sclk[data->sclk_level])))); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->line_source_pixels_transfer_time = bw_max2(bw_div(bw_div(data->src_pixels_for_first_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), bw_sub(bw_div(bw_div(data->src_pixels_for_last_output_pixel[i], dceip->lb_write_pixels_per_dispclk), (bw_div(data->dispclk, dceip->display_pipe_throughput_factor))), data->active_time[i])); + if (surface_type[i] != bw_def_display_write_back420_luma && surface_type[i] != bw_def_display_write_back420_chroma) { + data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time); + data->stutter_exit_watermark[i] = bw_add(bw_sub(vbios->stutter_self_refresh_exit_latency, data->total_dmifmc_urgent_latency), data->urgent_watermark[i]); + data->stutter_entry_watermark[i] = bw_add(bw_sub(bw_add(vbios->stutter_self_refresh_exit_latency, vbios->stutter_self_refresh_entry_latency), data->total_dmifmc_urgent_latency), data->urgent_watermark[i]); + /*unconditionally remove black out time from the nb p_state watermark*/ + if (data->display_pstate_change_enable[i] == 1) { + data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level])); + } + else { + /*maximize the watermark to force the switch in the vb_lank region of the frame*/ + data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000); + } + } + else { + data->urgent_watermark[i] = bw_add(bw_add(bw_add(bw_add(bw_add(vbios->mcifwrmc_urgent_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->line_source_transfer_time[i][data->y_clk_level][data->sclk_level])), vbios->blackout_duration), data->chunk_request_time), data->cursor_request_time); + 
data->stutter_exit_watermark[i] = bw_int_to_fixed(0); + data->stutter_entry_watermark[i] = bw_int_to_fixed(0); + if (data->display_pstate_change_enable[i] == 1) { + data->nbp_state_change_watermark[i] = bw_add(bw_add(vbios->nbp_state_change_latency, data->mcifwr_burst_time[data->y_clk_level][data->sclk_level]), bw_max2(data->line_source_pixels_transfer_time, data->dram_speed_change_line_source_transfer_time[i][data->y_clk_level][data->sclk_level])); + } + else { + /*maximize the watermark to force the switch in the vb_lank region of the frame*/ + data->nbp_state_change_watermark[i] = bw_int_to_fixed(131000); + } + } + } + } + /*stutter mode enable*/ + /*in the multi-display case the stutter exit or entry watermark cannot exceed the minimum latency hiding capabilities of the*/ + /*display pipe.*/ + data->stutter_mode_enable = data->cpuc_state_change_enable; + if (data->number_of_displays > 1) { + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if ((bw_mtn(data->stutter_exit_watermark[i], data->minimum_latency_hiding[i]) || bw_mtn(data->stutter_entry_watermark[i], data->minimum_latency_hiding[i]))) { + data->stutter_mode_enable = bw_def_no; + } + } + } + } + /*performance metrics*/ + /* display read access efficiency (%)*/ + /* display write back access efficiency (%)*/ + /* stutter efficiency (%)*/ + /* extra underlay pitch recommended for efficiency (pixels)*/ + /* immediate flip time (us)*/ + /* latency for other clients due to urgent display read (us)*/ + /* latency for other clients due to urgent display write (us)*/ + /* average bandwidth consumed by display (no compression) (gb/s)*/ + /* required dram bandwidth (gb/s)*/ + /* required sclk (m_hz)*/ + /* required rd urgent latency (us)*/ + /* nb p-state change margin (us)*/ + /*dmif and mcifwr dram access efficiency*/ + /*is the ratio between the ideal dram access time (which is the data buffer size in memory divided by the dram bandwidth), and the actual time which is the total page close-open time. but it cannot exceed the dram efficiency provided by the memory subsystem*/ + data->dmifdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_reads_required_dram_access_data, data->dram_bandwidth), data->dmif_total_page_close_open_time), bw_int_to_fixed(1)); + if (bw_mtn(data->total_display_writes_required_dram_access_data, bw_int_to_fixed(0))) { + data->mcifwrdram_access_efficiency = bw_min2(bw_div(bw_div(data->total_display_writes_required_dram_access_data, data->dram_bandwidth), data->mcifwr_total_page_close_open_time), bw_int_to_fixed(1)); + } + else { + data->mcifwrdram_access_efficiency = bw_int_to_fixed(0); + } + /*stutter efficiency*/ + /*the stutter efficiency is the frame-average time in self-refresh divided by the frame-average stutter cycle duration. 
only applies if the display write-back is not enabled.*/ + /*the frame-average stutter cycle used is the minimum for all pipes of the frame-average data buffer size in time, times the compression rate*/ + /*the frame-average time in self-refresh is the stutter cycle minus the self refresh exit latency and the burst time*/ + /*the stutter cycle is the dmif buffer size reduced by the excess of the stutter exit watermark over the lb size in time.*/ + /*the burst time is the data needed during the stutter cycle divided by the available bandwidth*/ + /*compute the time read all the data from the dmif buffer to the lb (dram refresh period)*/ + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->stutter_refresh_duration[i] = bw_sub(bw_mul(bw_div(bw_div(bw_mul(bw_div(bw_div(data->adjusted_data_buffer_size[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]), bw_max2(bw_int_to_fixed(0), bw_sub(data->stutter_exit_watermark[i], bw_div(bw_mul((bw_sub(data->lb_partitions[i], bw_int_to_fixed(1))), data->h_total[i]), data->pixel_rate[i])))); + data->stutter_dmif_buffer_size[i] = bw_div(bw_mul(bw_mul(bw_div(bw_mul(bw_mul(data->stutter_refresh_duration[i], bw_int_to_fixed(data->bytes_per_pixel[i])), data->source_width_rounded_up_to_chunks[i]), data->h_total[i]), data->vsr[i]), data->pixel_rate[i]), data->compression_rate[i]); + } + } + data->min_stutter_refresh_duration = bw_int_to_fixed(9999); + data->total_stutter_dmif_buffer_size = 0; + data->total_bytes_requested = 0; + data->min_stutter_dmif_buffer_size = 9999; + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + if (bw_mtn(data->min_stutter_refresh_duration, data->stutter_refresh_duration[i])) { + data->min_stutter_refresh_duration = data->stutter_refresh_duration[i]; + data->total_bytes_requested = bw_fixed_to_int(bw_add(bw_int_to_fixed(data->total_bytes_requested), (bw_mul(bw_mul(data->source_height_rounded_up_to_chunks[i], data->source_width_rounded_up_to_chunks[i]), bw_int_to_fixed(data->bytes_per_pixel[i]))))); + data->min_stutter_dmif_buffer_size = bw_fixed_to_int(data->stutter_dmif_buffer_size[i]); + } + data->total_stutter_dmif_buffer_size = bw_fixed_to_int(bw_add(data->stutter_dmif_buffer_size[i], bw_int_to_fixed(data->total_stutter_dmif_buffer_size))); + } + } + data->stutter_burst_time = bw_div(bw_int_to_fixed(data->total_stutter_dmif_buffer_size), bw_mul(sclk[data->sclk_level], vbios->data_return_bus_width)); + data->num_stutter_bursts = data->total_bytes_requested / data->min_stutter_dmif_buffer_size; + data->total_stutter_cycle_duration = bw_add(bw_add(data->min_stutter_refresh_duration, vbios->stutter_self_refresh_exit_latency), data->stutter_burst_time); + data->time_in_self_refresh = data->min_stutter_refresh_duration; + if (data->d1_display_write_back_dwb_enable == 1) { + data->stutter_efficiency = bw_int_to_fixed(0); + } + else if (bw_ltn(data->time_in_self_refresh, bw_int_to_fixed(0))) { + data->stutter_efficiency = bw_int_to_fixed(0); + } + else { + /*compute stutter efficiency assuming 60 hz refresh rate*/ + data->stutter_efficiency = bw_max2(bw_int_to_fixed(0), bw_mul((bw_sub(bw_int_to_fixed(1), (bw_div(bw_mul((bw_add(vbios->stutter_self_refresh_exit_latency, data->stutter_burst_time)), bw_int_to_fixed(data->num_stutter_bursts)), bw_frc_to_fixed(166666667, 10000))))), bw_int_to_fixed(100))); + } + /*immediate flip time*/ + /*if scatter 
gather is enabled, the immediate flip takes a number of urgent memory trips equivalent to the pte requests in a row divided by the pte request limit.*/ + /*otherwise, it may take just one urgent memory trip*/ + data->worst_number_of_trips_to_memory = bw_int_to_fixed(1); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i] && data->scatter_gather_enable_for_pipe[i] == 1) { + data->number_of_trips_to_memory_for_getting_apte_row[i] = bw_ceil2(bw_div(data->scatter_gather_pte_requests_in_row[i], data->scatter_gather_pte_request_limit[i]), bw_int_to_fixed(1)); + if (bw_ltn(data->worst_number_of_trips_to_memory, data->number_of_trips_to_memory_for_getting_apte_row[i])) { + data->worst_number_of_trips_to_memory = data->number_of_trips_to_memory_for_getting_apte_row[i]; + } + } + } + data->immediate_flip_time = bw_mul(data->worst_number_of_trips_to_memory, data->total_dmifmc_urgent_latency); + /*worst latency for other clients*/ + /*it is the urgent latency plus the urgent burst time*/ + data->latency_for_non_dmif_clients = bw_add(data->total_dmifmc_urgent_latency, data->dmif_burst_time[data->y_clk_level][data->sclk_level]); + if (data->d1_display_write_back_dwb_enable == 1) { + data->latency_for_non_mcifwr_clients = bw_add(vbios->mcifwrmc_urgent_latency, dceip->mcifwr_all_surfaces_burst_time); + } + else { + data->latency_for_non_mcifwr_clients = bw_int_to_fixed(0); + } + /*dmif mc urgent latency supported in high sclk and yclk*/ + data->dmifmc_urgent_latency_supported_in_high_sclk_and_yclk = bw_div((bw_sub(data->min_read_buffer_size_in_time, data->dmif_burst_time[high][s_high])), data->total_dmifmc_urgent_trips); + /*dram speed/p-state change margin*/ + /*in the multi-display case the nb p-state change watermark cannot exceed the average lb size plus the dmif size or the cursor dcp buffer size*/ + data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999); + data->nbp_state_dram_speed_change_latency_supported = bw_int_to_fixed(99999); + for (i = 0; i <= maximum_number_of_surfaces - 1; i++) { + if (data->enable[i]) { + data->nbp_state_dram_speed_change_latency_supported = bw_min2(data->nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(data->maximum_latency_hiding_with_cursor[i], data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency)); + data->v_blank_nbp_state_dram_speed_change_latency_supported = bw_min2(data->v_blank_nbp_state_dram_speed_change_latency_supported, bw_add(bw_sub(bw_div(bw_mul((bw_sub(data->v_total[i], bw_sub(bw_div(data->src_height[i], data->v_scale_ratio[i]), bw_int_to_fixed(4)))), data->h_total[i]), data->pixel_rate[i]), data->nbp_state_change_watermark[i]), vbios->nbp_state_change_latency)); + } + } + /*sclk required vs urgent latency*/ + for (i = 1; i <= 5; i++) { + data->display_reads_time_for_data_transfer_and_urgent_latency = bw_sub(data->min_read_buffer_size_in_time, bw_mul(data->total_dmifmc_urgent_trips, bw_int_to_fixed(i))); + if (pipe_check == bw_def_ok && (bw_mtn(data->display_reads_time_for_data_transfer_and_urgent_latency, data->dmif_total_page_close_open_time))) { + data->dmif_required_sclk_for_urgent_latency[i] = bw_div(bw_div(data->total_display_reads_required_data, data->display_reads_time_for_data_transfer_and_urgent_latency), (bw_mul(vbios->data_return_bus_width, bw_frc_to_fixed(dceip->percent_of_ideal_port_bw_received_after_urgent_latency, 100)))); + } + else { + data->dmif_required_sclk_for_urgent_latency[i] = bw_int_to_fixed(bw_def_na); + } + } + /*output link bit per pixel 
supported*/ + for (k = 0; k <= maximum_number_of_surfaces - 1; k++) { + data->output_bpphdmi[k] = bw_def_na; + data->output_bppdp4_lane_hbr[k] = bw_def_na; + data->output_bppdp4_lane_hbr2[k] = bw_def_na; + data->output_bppdp4_lane_hbr3[k] = bw_def_na; + if (data->enable[k]) { + data->output_bpphdmi[k] = bw_fixed_to_int(bw_mul(bw_div(bw_min2(bw_int_to_fixed(600), data->max_phyclk), data->pixel_rate[k]), bw_int_to_fixed(24))); + if (bw_meq(data->max_phyclk, bw_int_to_fixed(270))) { + data->output_bppdp4_lane_hbr[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(270), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8))); + } + if (bw_meq(data->max_phyclk, bw_int_to_fixed(540))) { + data->output_bppdp4_lane_hbr2[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(540), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8))); + } + if (bw_meq(data->max_phyclk, bw_int_to_fixed(810))) { + data->output_bppdp4_lane_hbr3[k] = bw_fixed_to_int(bw_mul(bw_div(bw_mul(bw_int_to_fixed(810), bw_int_to_fixed(4)), data->pixel_rate[k]), bw_int_to_fixed(8))); + } + } + } + + kfree(surface_type); +free_tiling_mode: + kfree(tiling_mode); +free_yclk: + kfree(yclk); +free_sclk: + kfree(sclk); +} + +/******************************************************************************* + * Public functions + ******************************************************************************/ +void bw_calcs_init(struct bw_calcs_dceip *bw_dceip, + struct bw_calcs_vbios *bw_vbios, + struct hw_asic_id asic_id) +{ + struct bw_calcs_dceip *dceip; + struct bw_calcs_vbios *vbios; + + enum bw_calcs_version version = bw_calcs_version_from_asic_id(asic_id); + + dceip = kzalloc(sizeof(*dceip), GFP_KERNEL); + if (!dceip) + return; + + vbios = kzalloc(sizeof(*vbios), GFP_KERNEL); + if (!vbios) { + kfree(dceip); + return; + } + + dceip->version = version; + + switch (version) { + case BW_CALCS_VERSION_CARRIZO: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 64; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(1600); + vbios->mid_yclk = bw_int_to_fixed(1600); + vbios->low_yclk = bw_frc_to_fixed(66666, 100); + vbios->low_sclk = bw_int_to_fixed(200); + vbios->mid1_sclk = bw_int_to_fixed(300); + vbios->mid2_sclk = bw_int_to_fixed(300); + vbios->mid3_sclk = bw_int_to_fixed(300); + vbios->mid4_sclk = bw_int_to_fixed(300); + vbios->mid5_sclk = bw_int_to_fixed(300); + vbios->mid6_sclk = bw_int_to_fixed(300); + vbios->high_sclk = bw_frc_to_fixed(62609, 100); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(352); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(643); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(50); + vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); + vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(153, 10); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_frc_to_fixed(19649, 1000); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = true; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + 
vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 3; + dceip->number_of_underlay_pipes = 1; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = false; + dceip->argb_compression_support = false; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 2; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(82176); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = false; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(0); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = 
bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); /* todo: this is a bug*/ + break; + case BW_CALCS_VERSION_POLARIS10: + /* TODO: Treat VEGAM the same as P10 for now + * Need to tune the para for VEGAM if needed */ + case BW_CALCS_VERSION_VEGAM: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 32; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(6000); + vbios->mid_yclk = bw_int_to_fixed(3200); + vbios->low_yclk = bw_int_to_fixed(1000); + vbios->low_sclk = bw_int_to_fixed(300); + vbios->mid1_sclk = bw_int_to_fixed(400); + vbios->mid2_sclk = bw_int_to_fixed(500); + vbios->mid3_sclk = bw_int_to_fixed(600); + vbios->mid4_sclk = bw_int_to_fixed(700); + vbios->mid5_sclk = bw_int_to_fixed(800); + vbios->mid6_sclk = bw_int_to_fixed(974); + vbios->high_sclk = bw_int_to_fixed(1154); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(459); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(48); + vbios->dmifmc_urgent_latency = bw_int_to_fixed(3); + vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_int_to_fixed(45); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = true; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 6; + dceip->number_of_underlay_pipes = 0; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = false; + dceip->argb_compression_support = true; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + 
dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 4; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(245952); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(1); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + case BW_CALCS_VERSION_POLARIS11: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 32; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(6000); + vbios->mid_yclk = bw_int_to_fixed(3200); + vbios->low_yclk = bw_int_to_fixed(1000); + vbios->low_sclk = bw_int_to_fixed(300); + vbios->mid1_sclk = bw_int_to_fixed(400); + vbios->mid2_sclk = bw_int_to_fixed(500); + vbios->mid3_sclk = bw_int_to_fixed(600); + vbios->mid4_sclk = bw_int_to_fixed(700); + vbios->mid5_sclk = bw_int_to_fixed(800); + vbios->mid6_sclk = bw_int_to_fixed(974); + vbios->high_sclk = bw_int_to_fixed(1154); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(459); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(48); + if (vbios->number_of_dram_channels == 2) // 64-bit + vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); + else + vbios->dmifmc_urgent_latency = 
bw_int_to_fixed(3); + vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_int_to_fixed(45); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = true; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 5; + dceip->number_of_underlay_pipes = 0; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = false; + dceip->argb_compression_support = true; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 4; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(245952); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(1); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + 
dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + case BW_CALCS_VERSION_POLARIS12: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 32; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(6000); + vbios->mid_yclk = bw_int_to_fixed(3200); + vbios->low_yclk = bw_int_to_fixed(1000); + vbios->low_sclk = bw_int_to_fixed(678); + vbios->mid1_sclk = bw_int_to_fixed(864); + vbios->mid2_sclk = bw_int_to_fixed(900); + vbios->mid3_sclk = bw_int_to_fixed(920); + vbios->mid4_sclk = bw_int_to_fixed(940); + vbios->mid5_sclk = bw_int_to_fixed(960); + vbios->mid6_sclk = bw_int_to_fixed(980); + vbios->high_sclk = bw_int_to_fixed(1049); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(459); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(654); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(1108); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(48); + if (vbios->number_of_dram_channels == 2) // 64-bit + vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); + else + vbios->dmifmc_urgent_latency = bw_int_to_fixed(3); + vbios->stutter_self_refresh_exit_latency = bw_int_to_fixed(5); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_int_to_fixed(250); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = false; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 5; + dceip->number_of_underlay_pipes = 0; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = true; + dceip->argb_compression_support = true; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + 
dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 4; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(245952); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(1); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + case BW_CALCS_VERSION_STONEY: + vbios->memory_type = bw_def_gddr5; + vbios->dram_channel_width_in_bits = 64; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 8; + vbios->high_yclk = bw_int_to_fixed(1866); + vbios->mid_yclk = bw_int_to_fixed(1866); + vbios->low_yclk = bw_int_to_fixed(1333); + vbios->low_sclk = bw_int_to_fixed(200); + vbios->mid1_sclk = bw_int_to_fixed(600); + vbios->mid2_sclk = bw_int_to_fixed(600); + vbios->mid3_sclk = bw_int_to_fixed(600); + vbios->mid4_sclk = bw_int_to_fixed(600); + vbios->mid5_sclk = bw_int_to_fixed(600); + vbios->mid6_sclk = bw_int_to_fixed(600); + vbios->high_sclk = bw_int_to_fixed(800); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(352); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(467); + vbios->high_voltage_max_dispclk = 
bw_int_to_fixed(643); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(50); + vbios->dmifmc_urgent_latency = bw_int_to_fixed(4); + vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(158, 10); + vbios->stutter_self_refresh_entry_latency = bw_int_to_fixed(0); + vbios->nbp_state_change_latency = bw_frc_to_fixed(2008, 100); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = true; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 256; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(768); + dceip->dmif_pipe_en_fbc_chunk_tracker = false; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 2; + dceip->number_of_underlay_pipes = 1; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = false; + dceip->argb_compression_support = true; + dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 2; + dceip->graphics_dmif_size = 12288; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = true; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(82176); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = false; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(0); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + 
bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + case BW_CALCS_VERSION_VEGA10: + vbios->memory_type = bw_def_hbm; + vbios->dram_channel_width_in_bits = 128; + vbios->number_of_dram_channels = asic_id.vram_width / vbios->dram_channel_width_in_bits; + vbios->number_of_dram_banks = 16; + vbios->high_yclk = bw_int_to_fixed(2400); + vbios->mid_yclk = bw_int_to_fixed(1700); + vbios->low_yclk = bw_int_to_fixed(1000); + vbios->low_sclk = bw_int_to_fixed(300); + vbios->mid1_sclk = bw_int_to_fixed(350); + vbios->mid2_sclk = bw_int_to_fixed(400); + vbios->mid3_sclk = bw_int_to_fixed(500); + vbios->mid4_sclk = bw_int_to_fixed(600); + vbios->mid5_sclk = bw_int_to_fixed(700); + vbios->mid6_sclk = bw_int_to_fixed(760); + vbios->high_sclk = bw_int_to_fixed(776); + vbios->low_voltage_max_dispclk = bw_int_to_fixed(460); + vbios->mid_voltage_max_dispclk = bw_int_to_fixed(670); + vbios->high_voltage_max_dispclk = bw_int_to_fixed(1133); + vbios->low_voltage_max_phyclk = bw_int_to_fixed(540); + vbios->mid_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->high_voltage_max_phyclk = bw_int_to_fixed(810); + vbios->data_return_bus_width = bw_int_to_fixed(32); + vbios->trc = bw_int_to_fixed(48); + vbios->dmifmc_urgent_latency = bw_int_to_fixed(3); + vbios->stutter_self_refresh_exit_latency = bw_frc_to_fixed(75, 10); + vbios->stutter_self_refresh_entry_latency = bw_frc_to_fixed(19, 10); + vbios->nbp_state_change_latency = bw_int_to_fixed(39); + vbios->mcifwrmc_urgent_latency = bw_int_to_fixed(10); + vbios->scatter_gather_enable = false; + vbios->down_spread_percentage = bw_frc_to_fixed(5, 10); + vbios->cursor_width = 32; + vbios->average_compression_rate = 4; + vbios->number_of_request_slots_gmc_reserves_for_dmif_per_channel = 8; + vbios->blackout_duration = bw_int_to_fixed(0); /* us */ + vbios->maximum_blackout_recovery_time = bw_int_to_fixed(0); + + dceip->max_average_percent_of_ideal_port_bw_display_can_use_in_normal_system_operation = 100; + dceip->max_average_percent_of_ideal_drambw_display_can_use_in_normal_system_operation = 100; + dceip->percent_of_ideal_port_bw_received_after_urgent_latency = 100; + dceip->large_cursor = false; + dceip->dmif_request_buffer_size = bw_int_to_fixed(2304); + dceip->dmif_pipe_en_fbc_chunk_tracker = true; + dceip->cursor_max_outstanding_group_num = 1; + dceip->lines_interleaved_into_lb = 2; + dceip->chunk_width = 256; + dceip->number_of_graphics_pipes = 6; + dceip->number_of_underlay_pipes = 0; + dceip->low_power_tiling_mode = 0; + dceip->display_write_back_supported = true; + dceip->argb_compression_support = true; + 
dceip->underlay_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35556, 10000); + dceip->underlay_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->underlay_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->underlay_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->graphics_vscaler_efficiency6_bit_per_component = + bw_frc_to_fixed(35, 10); + dceip->graphics_vscaler_efficiency8_bit_per_component = + bw_frc_to_fixed(34286, 10000); + dceip->graphics_vscaler_efficiency10_bit_per_component = + bw_frc_to_fixed(32, 10); + dceip->graphics_vscaler_efficiency12_bit_per_component = + bw_int_to_fixed(3); + dceip->alpha_vscaler_efficiency = bw_int_to_fixed(3); + dceip->max_dmif_buffer_allocated = 4; + dceip->graphics_dmif_size = 24576; + dceip->underlay_luma_dmif_size = 19456; + dceip->underlay_chroma_dmif_size = 23552; + dceip->pre_downscaler_enabled = true; + dceip->underlay_downscale_prefetch_enabled = false; + dceip->lb_write_pixels_per_dispclk = bw_int_to_fixed(1); + dceip->lb_size_per_component444 = bw_int_to_fixed(245952); + dceip->graphics_lb_nodownscaling_multi_line_prefetching = true; + dceip->stutter_and_dram_clock_state_change_gated_before_cursor = + bw_int_to_fixed(1); + dceip->underlay420_luma_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->underlay420_chroma_lb_size_per_component = + bw_int_to_fixed(164352); + dceip->underlay422_lb_size_per_component = bw_int_to_fixed( + 82176); + dceip->cursor_chunk_width = bw_int_to_fixed(64); + dceip->cursor_dcp_buffer_lines = bw_int_to_fixed(4); + dceip->underlay_maximum_width_efficient_for_tiling = + bw_int_to_fixed(1920); + dceip->underlay_maximum_height_efficient_for_tiling = + bw_int_to_fixed(1080); + dceip->peak_pte_request_to_eviction_ratio_limiting_multiple_displays_or_single_rotated_display = + bw_frc_to_fixed(3, 10); + dceip->peak_pte_request_to_eviction_ratio_limiting_single_display_no_rotation = + bw_int_to_fixed(25); + dceip->minimum_outstanding_pte_request_limit = bw_int_to_fixed( + 2); + dceip->maximum_total_outstanding_pte_requests_allowed_by_saw = + bw_int_to_fixed(128); + dceip->limit_excessive_outstanding_dmif_requests = true; + dceip->linear_mode_line_request_alternation_slice = + bw_int_to_fixed(64); + dceip->scatter_gather_lines_of_pte_prefetching_in_linear_mode = + 32; + dceip->display_write_back420_luma_mcifwr_buffer_size = 12288; + dceip->display_write_back420_chroma_mcifwr_buffer_size = 8192; + dceip->request_efficiency = bw_frc_to_fixed(8, 10); + dceip->dispclk_per_request = bw_int_to_fixed(2); + dceip->dispclk_ramping_factor = bw_frc_to_fixed(105, 100); + dceip->display_pipe_throughput_factor = bw_frc_to_fixed(105, 100); + dceip->scatter_gather_pte_request_rows_in_tiling_mode = 2; + dceip->mcifwr_all_surfaces_burst_time = bw_int_to_fixed(0); + break; + default: + break; + } + *bw_dceip = *dceip; + *bw_vbios = *vbios; + + kfree(dceip); + kfree(vbios); +} + +/* + * Compare calculated (required) clocks against the clocks available at + * maximum voltage (max Performance Level). 
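+ * The vbios limits are stored as bw_fixed MHz values, so each one is
+ * converted to an integer and scaled by 1000 (MHz to kHz) before being
+ * compared against the calculated dispclk_khz/sclk_khz requirements.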
+ */ +static bool is_display_configuration_supported( + const struct bw_calcs_vbios *vbios, + const struct dce_bw_output *calcs_output) +{ + uint32_t int_max_clk; + + int_max_clk = bw_fixed_to_int(vbios->high_voltage_max_dispclk); + int_max_clk *= 1000; /* MHz to kHz */ + if (calcs_output->dispclk_khz > int_max_clk) + return false; + + int_max_clk = bw_fixed_to_int(vbios->high_sclk); + int_max_clk *= 1000; /* MHz to kHz */ + if (calcs_output->sclk_khz > int_max_clk) + return false; + + return true; +} + +static void populate_initial_data( + const struct pipe_ctx pipe[], int pipe_count, struct bw_calcs_data *data) +{ + int i, j; + int num_displays = 0; + + data->underlay_surface_type = bw_def_420; + data->panning_and_bezel_adjustment = bw_def_none; + data->graphics_lb_bpc = 10; + data->underlay_lb_bpc = 8; + data->underlay_tiling_mode = bw_def_tiled; + data->graphics_tiling_mode = bw_def_tiled; + data->underlay_micro_tile_mode = bw_def_display_micro_tiling; + data->graphics_micro_tile_mode = bw_def_display_micro_tiling; + data->increase_voltage_to_support_mclk_switch = true; + + /* Pipes with underlay first */ + for (i = 0; i < pipe_count; i++) { + if (!pipe[i].stream || !pipe[i].bottom_pipe) + continue; + + ASSERT(pipe[i].plane_state); + + if (num_displays == 0) { + if (!pipe[i].plane_state->visible) + data->d0_underlay_mode = bw_def_underlay_only; + else + data->d0_underlay_mode = bw_def_blend; + } else { + if (!pipe[i].plane_state->visible) + data->d1_underlay_mode = bw_def_underlay_only; + else + data->d1_underlay_mode = bw_def_blend; + } + + data->fbc_en[num_displays + 4] = false; + data->lpt_en[num_displays + 4] = false; + data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); + data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); + data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->timing.pix_clk_100hz, 10000); + data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); + data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; + data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height); + data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps); + data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps); + data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value); + data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value); + switch (pipe[i].plane_state->rotation) { + case ROTATION_ANGLE_0: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); + break; + case ROTATION_ANGLE_90: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90); + break; + case ROTATION_ANGLE_180: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180); + break; + case ROTATION_ANGLE_270: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270); + break; + default: + break; + } + switch (pipe[i].plane_state->format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + data->bytes_per_pixel[num_displays + 4] = 2; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + case 
SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + data->bytes_per_pixel[num_displays + 4] = 4; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + data->bytes_per_pixel[num_displays + 4] = 8; + break; + default: + data->bytes_per_pixel[num_displays + 4] = 4; + break; + } + data->interlace_mode[num_displays + 4] = false; + data->stereo_mode[num_displays + 4] = bw_def_mono; + + + for (j = 0; j < 2; j++) { + data->fbc_en[num_displays * 2 + j] = false; + data->lpt_en[num_displays * 2 + j] = false; + + data->src_height[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.height); + data->src_width[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.viewport.width); + data->pitch_in_pixels[num_displays * 2 + j] = bw_int_to_fixed( + pipe[i].bottom_pipe->plane_state->plane_size.surface_pitch); + data->h_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.h_taps); + data->v_taps[num_displays * 2 + j] = bw_int_to_fixed(pipe[i].bottom_pipe->plane_res.scl_data.taps.v_taps); + data->h_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed( + pipe[i].bottom_pipe->plane_res.scl_data.ratios.horz.value); + data->v_scale_ratio[num_displays * 2 + j] = fixed31_32_to_bw_fixed( + pipe[i].bottom_pipe->plane_res.scl_data.ratios.vert.value); + switch (pipe[i].bottom_pipe->plane_state->rotation) { + case ROTATION_ANGLE_0: + data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(0); + break; + case ROTATION_ANGLE_90: + data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(90); + break; + case ROTATION_ANGLE_180: + data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(180); + break; + case ROTATION_ANGLE_270: + data->rotation_angle[num_displays * 2 + j] = bw_int_to_fixed(270); + break; + default: + break; + } + data->stereo_mode[num_displays * 2 + j] = bw_def_mono; + } + + num_displays++; + } + + /* Pipes without underlay after */ + for (i = 0; i < pipe_count; i++) { + unsigned int pixel_clock_100hz; + if (!pipe[i].stream || pipe[i].bottom_pipe) + continue; + + + data->fbc_en[num_displays + 4] = false; + data->lpt_en[num_displays + 4] = false; + data->h_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_total); + data->v_total[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_total); + pixel_clock_100hz = pipe[i].stream->timing.pix_clk_100hz; + if (pipe[i].stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + pixel_clock_100hz *= 2; + data->pixel_rate[num_displays + 4] = bw_frc_to_fixed(pixel_clock_100hz, 10000); + if (pipe[i].plane_state) { + data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.width); + data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; + data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.viewport.height); + data->h_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.h_taps); + data->v_taps[num_displays + 4] = bw_int_to_fixed(pipe[i].plane_res.scl_data.taps.v_taps); + data->h_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.horz.value); + data->v_scale_ratio[num_displays + 4] = fixed31_32_to_bw_fixed(pipe[i].plane_res.scl_data.ratios.vert.value); + switch (pipe[i].plane_state->rotation) { + case ROTATION_ANGLE_0: + 
data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); + break; + case ROTATION_ANGLE_90: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(90); + break; + case ROTATION_ANGLE_180: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(180); + break; + case ROTATION_ANGLE_270: + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(270); + break; + default: + break; + } + switch (pipe[i].plane_state->format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + data->bytes_per_pixel[num_displays + 4] = 2; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + data->bytes_per_pixel[num_displays + 4] = 4; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + data->bytes_per_pixel[num_displays + 4] = 8; + break; + default: + data->bytes_per_pixel[num_displays + 4] = 4; + break; + } + } else if (pipe[i].stream->dst.width != 0 && + pipe[i].stream->dst.height != 0 && + pipe[i].stream->src.width != 0 && + pipe[i].stream->src.height != 0) { + data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.width); + data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; + data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->src.height); + data->h_taps[num_displays + 4] = pipe[i].stream->src.width == pipe[i].stream->dst.width ? bw_int_to_fixed(1) : bw_int_to_fixed(2); + data->v_taps[num_displays + 4] = pipe[i].stream->src.height == pipe[i].stream->dst.height ? 
bw_int_to_fixed(1) : bw_int_to_fixed(2); + data->h_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.width, pipe[i].stream->dst.width); + data->v_scale_ratio[num_displays + 4] = bw_frc_to_fixed(pipe[i].stream->src.height, pipe[i].stream->dst.height); + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); + data->bytes_per_pixel[num_displays + 4] = 4; + } else { + data->src_width[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.h_addressable); + data->pitch_in_pixels[num_displays + 4] = data->src_width[num_displays + 4]; + data->src_height[num_displays + 4] = bw_int_to_fixed(pipe[i].stream->timing.v_addressable); + data->h_taps[num_displays + 4] = bw_int_to_fixed(1); + data->v_taps[num_displays + 4] = bw_int_to_fixed(1); + data->h_scale_ratio[num_displays + 4] = bw_int_to_fixed(1); + data->v_scale_ratio[num_displays + 4] = bw_int_to_fixed(1); + data->rotation_angle[num_displays + 4] = bw_int_to_fixed(0); + data->bytes_per_pixel[num_displays + 4] = 4; + } + + data->interlace_mode[num_displays + 4] = false; + data->stereo_mode[num_displays + 4] = bw_def_mono; + num_displays++; + } + + data->number_of_displays = num_displays; +} + +static bool all_displays_in_sync(const struct pipe_ctx pipe[], + int pipe_count) +{ + const struct pipe_ctx *active_pipes[MAX_PIPES]; + int i, num_active_pipes = 0; + + for (i = 0; i < pipe_count; i++) { + if (!pipe[i].stream || pipe[i].top_pipe) + continue; + + active_pipes[num_active_pipes++] = &pipe[i]; + } + + if (!num_active_pipes) + return false; + + for (i = 1; i < num_active_pipes; ++i) { + if (!resource_are_streams_timing_synchronizable( + active_pipes[0]->stream, active_pipes[i]->stream)) { + return false; + } + } + + return true; +} + +/* + * Return: + * true - Display(s) configuration supported. + * In this case 'calcs_output' contains data for HW programming + * false - Display(s) configuration not supported (not enough bandwidth). 
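+ *
+ * The pipe contexts are first flattened into bw_calcs_data, then
+ * calculate_bandwidth() is run with the nominal vbios clocks to fill
+ * the A-set watermarks. The vbios sclk/yclk levels are then temporarily
+ * overridden to derive the B and C sets (skipped on Carrizo) and the
+ * D set, and are restored afterwards. With no active displays the
+ * dispclk/sclk requirements are simply reported as zero.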
+ */ +bool bw_calcs(struct dc_context *ctx, + const struct bw_calcs_dceip *dceip, + const struct bw_calcs_vbios *vbios, + const struct pipe_ctx pipe[], + int pipe_count, + struct dce_bw_output *calcs_output) +{ + struct bw_calcs_data *data = kzalloc(sizeof(struct bw_calcs_data), + GFP_KERNEL); + if (!data) + return false; + + populate_initial_data(pipe, pipe_count, data); + + if (ctx->dc->config.multi_mon_pp_mclk_switch) + calcs_output->all_displays_in_sync = all_displays_in_sync(pipe, pipe_count); + else + calcs_output->all_displays_in_sync = false; + + if (data->number_of_displays != 0) { + uint8_t yclk_lvl; + struct bw_fixed high_sclk = vbios->high_sclk; + struct bw_fixed mid1_sclk = vbios->mid1_sclk; + struct bw_fixed mid2_sclk = vbios->mid2_sclk; + struct bw_fixed mid3_sclk = vbios->mid3_sclk; + struct bw_fixed mid4_sclk = vbios->mid4_sclk; + struct bw_fixed mid5_sclk = vbios->mid5_sclk; + struct bw_fixed mid6_sclk = vbios->mid6_sclk; + struct bw_fixed low_sclk = vbios->low_sclk; + struct bw_fixed high_yclk = vbios->high_yclk; + struct bw_fixed mid_yclk = vbios->mid_yclk; + struct bw_fixed low_yclk = vbios->low_yclk; + + if (ctx->dc->debug.bandwidth_calcs_trace) { + print_bw_calcs_dceip(ctx, dceip); + print_bw_calcs_vbios(ctx, vbios); + print_bw_calcs_data(ctx, data); + } + calculate_bandwidth(dceip, vbios, data); + + yclk_lvl = data->y_clk_level; + + calcs_output->nbp_state_change_enable = + data->nbp_state_change_enable; + calcs_output->cpuc_state_change_enable = + data->cpuc_state_change_enable; + calcs_output->cpup_state_change_enable = + data->cpup_state_change_enable; + calcs_output->stutter_mode_enable = + data->stutter_mode_enable; + calcs_output->dispclk_khz = + bw_fixed_to_int(bw_mul(data->dispclk, + bw_int_to_fixed(1000))); + calcs_output->blackout_recovery_time_us = + bw_fixed_to_int(data->blackout_recovery_time); + calcs_output->sclk_khz = + bw_fixed_to_int(bw_mul(data->required_sclk, + bw_int_to_fixed(1000))); + calcs_output->sclk_deep_sleep_khz = + bw_fixed_to_int(bw_mul(data->sclk_deep_sleep, + bw_int_to_fixed(1000))); + if (yclk_lvl == 0) + calcs_output->yclk_khz = bw_fixed_to_int( + bw_mul(low_yclk, bw_int_to_fixed(1000))); + else if (yclk_lvl == 1) + calcs_output->yclk_khz = bw_fixed_to_int( + bw_mul(mid_yclk, bw_int_to_fixed(1000))); + else + calcs_output->yclk_khz = bw_fixed_to_int( + bw_mul(high_yclk, bw_int_to_fixed(1000))); + + /* units: nanosecond, 16bit storage. 
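+ * Each bw_fixed watermark is multiplied by 1000 below to yield
+ * nanoseconds. Calculated watermark entries 4-6 and 9 always feed
+ * wm_ns[0]-[2] and wm_ns[5]; wm_ns[3]/wm_ns[4] take entries 0-1 when
+ * slave (underlay) planes are supported, otherwise entries 7-8.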
*/ + + calcs_output->nbp_state_change_wm_ns[0].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[4], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[1].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[5], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[2].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[6], bw_int_to_fixed(1000))); + + if (ctx->dc->caps.max_slave_planes) { + calcs_output->nbp_state_change_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[0], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->nbp_state_change_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[7], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->nbp_state_change_wm_ns[5].a_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[9], bw_int_to_fixed(1000))); + + + + calcs_output->stutter_exit_wm_ns[0].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[1].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[2].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_exit_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_exit_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_exit_wm_ns[5].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->stutter_entry_wm_ns[0].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].a_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->urgent_wm_ns[0].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[4], 
bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[1].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[5], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[2].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->urgent_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[0], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->urgent_wm_ns[3].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[7], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->urgent_wm_ns[5].a_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[9], bw_int_to_fixed(1000))); + + if (dceip->version != BW_CALCS_VERSION_CARRIZO) { + ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk; + calculate_bandwidth(dceip, vbios, data); + + calcs_output->nbp_state_change_wm_ns[0].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[4],bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[1].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[5], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[2].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[6], bw_int_to_fixed(1000))); + + if (ctx->dc->caps.max_slave_planes) { + calcs_output->nbp_state_change_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[0], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->nbp_state_change_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[7], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->nbp_state_change_wm_ns[5].b_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[9], bw_int_to_fixed(1000))); + + + + calcs_output->stutter_exit_wm_ns[0].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[1].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[2].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_exit_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_exit_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_exit_wm_ns[5].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[9], bw_int_to_fixed(1000))); + + 
calcs_output->stutter_entry_wm_ns[0].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].b_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->urgent_wm_ns[0].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[4], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[1].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[5], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[2].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->urgent_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[0], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->urgent_wm_ns[3].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[7], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->urgent_wm_ns[5].b_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[9], bw_int_to_fixed(1000))); + + ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk; + ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk; + calculate_bandwidth(dceip, vbios, data); + + calcs_output->nbp_state_change_wm_ns[0].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[4], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[1].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[5], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[2].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->nbp_state_change_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[0], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->nbp_state_change_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[7], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->nbp_state_change_wm_ns[5].c_mark = + 
bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[9], bw_int_to_fixed(1000))); + + + calcs_output->stutter_exit_wm_ns[0].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[1].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[2].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_exit_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_exit_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_exit_wm_ns[5].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->stutter_entry_wm_ns[0].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].c_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->urgent_wm_ns[0].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[4], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[1].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[5], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[2].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->urgent_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[0], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->urgent_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[7], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->urgent_wm_ns[5].c_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[9], bw_int_to_fixed(1000))); + } + + if (dceip->version == BW_CALCS_VERSION_CARRIZO) { + ((struct bw_calcs_vbios *)vbios)->low_yclk = high_yclk; + ((struct bw_calcs_vbios *)vbios)->mid_yclk = high_yclk; + 
((struct bw_calcs_vbios *)vbios)->low_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid3_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid4_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid5_sclk = high_sclk; + ((struct bw_calcs_vbios *)vbios)->mid6_sclk = high_sclk; + } else { + ((struct bw_calcs_vbios *)vbios)->low_yclk = mid_yclk; + ((struct bw_calcs_vbios *)vbios)->low_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid3_sclk; + } + + calculate_bandwidth(dceip, vbios, data); + + calcs_output->nbp_state_change_wm_ns[0].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[4], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[1].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[5], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[2].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->nbp_state_change_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[0], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->nbp_state_change_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[7], bw_int_to_fixed(1000))); + calcs_output->nbp_state_change_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->nbp_state_change_wm_ns[5].d_mark = + bw_fixed_to_int(bw_mul(data-> + nbp_state_change_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->stutter_exit_wm_ns[0].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[1].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[2].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_exit_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_exit_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_exit_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_exit_wm_ns[5].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_exit_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->stutter_entry_wm_ns[0].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[5], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + 
stutter_entry_watermark[0], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[7], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].d_mark = + bw_fixed_to_int(bw_mul(data-> + stutter_entry_watermark[9], bw_int_to_fixed(1000))); + + calcs_output->urgent_wm_ns[0].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[4], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[1].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[5], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[2].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->urgent_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[0], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[1], bw_int_to_fixed(1000))); + } else { + calcs_output->urgent_wm_ns[3].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[7], bw_int_to_fixed(1000))); + calcs_output->urgent_wm_ns[4].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[8], bw_int_to_fixed(1000))); + } + calcs_output->urgent_wm_ns[5].d_mark = + bw_fixed_to_int(bw_mul(data-> + urgent_watermark[9], bw_int_to_fixed(1000))); + + ((struct bw_calcs_vbios *)vbios)->low_yclk = low_yclk; + ((struct bw_calcs_vbios *)vbios)->mid_yclk = mid_yclk; + ((struct bw_calcs_vbios *)vbios)->low_sclk = low_sclk; + ((struct bw_calcs_vbios *)vbios)->mid1_sclk = mid1_sclk; + ((struct bw_calcs_vbios *)vbios)->mid2_sclk = mid2_sclk; + ((struct bw_calcs_vbios *)vbios)->mid3_sclk = mid3_sclk; + ((struct bw_calcs_vbios *)vbios)->mid4_sclk = mid4_sclk; + ((struct bw_calcs_vbios *)vbios)->mid5_sclk = mid5_sclk; + ((struct bw_calcs_vbios *)vbios)->mid6_sclk = mid6_sclk; + ((struct bw_calcs_vbios *)vbios)->high_sclk = high_sclk; + } else { + calcs_output->nbp_state_change_enable = true; + calcs_output->cpuc_state_change_enable = true; + calcs_output->cpup_state_change_enable = true; + calcs_output->stutter_mode_enable = true; + calcs_output->dispclk_khz = 0; + calcs_output->sclk_khz = 0; + } + + kfree(data); + + return is_display_configuration_supported(vbios, calcs_output); +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c new file mode 100644 index 000000000000..41284e263325 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.c @@ -0,0 +1,1933 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. 
+ * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dcn_calc_auto.h" +#include "dcn_calc_math.h" + +/* + * NOTE: + * This file is gcc-parseable HW gospel, coming straight from HW engineers. + * + * It doesn't adhere to Linux kernel style and sometimes will do things in odd + * ways. Unless there is something clearly wrong with it the code should + * remain as-is as it provides us with a guarantee from HW that it is correct. + */ + +/*REVISION#250*/ +void scaler_settings_calculation(struct dcn_bw_internal_vars *v) +{ + int k; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->allow_different_hratio_vratio == dcn_bw_yes) { + if (v->source_scan[k] == dcn_bw_hor) { + v->h_ratio[k] = v->viewport_width[k] / v->scaler_rec_out_width[k]; + v->v_ratio[k] = v->viewport_height[k] / v->scaler_recout_height[k]; + } + else { + v->h_ratio[k] = v->viewport_height[k] / v->scaler_rec_out_width[k]; + v->v_ratio[k] = v->viewport_width[k] / v->scaler_recout_height[k]; + } + } + else { + if (v->source_scan[k] == dcn_bw_hor) { + v->h_ratio[k] =dcn_bw_max2(v->viewport_width[k] / v->scaler_rec_out_width[k], v->viewport_height[k] / v->scaler_recout_height[k]); + } + else { + v->h_ratio[k] =dcn_bw_max2(v->viewport_height[k] / v->scaler_rec_out_width[k], v->viewport_width[k] / v->scaler_recout_height[k]); + } + v->v_ratio[k] = v->h_ratio[k]; + } + if (v->interlace_output[k] == 1.0) { + v->v_ratio[k] = 2.0 * v->v_ratio[k]; + } + if (v->underscan_output[k] == 1.0) { + v->h_ratio[k] = v->h_ratio[k] * v->under_scan_factor; + v->v_ratio[k] = v->v_ratio[k] * v->under_scan_factor; + } + } + /*scaler taps calculation*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->h_ratio[k] > 1.0) { + v->acceptable_quality_hta_ps =dcn_bw_min2(v->max_hscl_taps, 2.0 *dcn_bw_ceil2(v->h_ratio[k], 1.0)); + } + else if (v->h_ratio[k] < 1.0) { + v->acceptable_quality_hta_ps = 4.0; + } + else { + v->acceptable_quality_hta_ps = 1.0; + } + if (v->ta_pscalculation == dcn_bw_override) { + v->htaps[k] = v->override_hta_ps[k]; + } + else { + v->htaps[k] = v->acceptable_quality_hta_ps; + } + if (v->v_ratio[k] > 1.0) { + v->acceptable_quality_vta_ps =dcn_bw_min2(v->max_vscl_taps, 2.0 *dcn_bw_ceil2(v->v_ratio[k], 1.0)); + } + else if (v->v_ratio[k] < 1.0) { + v->acceptable_quality_vta_ps = 4.0; + } + else { + v->acceptable_quality_vta_ps = 1.0; + } + if (v->ta_pscalculation == dcn_bw_override) { + v->vtaps[k] = v->override_vta_ps[k]; + } + else { + v->vtaps[k] = v->acceptable_quality_vta_ps; + } + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->vta_pschroma[k] = 0.0; + v->hta_pschroma[k] = 0.0; + } + else { + if (v->ta_pscalculation == dcn_bw_override) { + v->vta_pschroma[k] = v->override_vta_pschroma[k]; + v->hta_pschroma[k] = v->override_hta_pschroma[k]; + } + else { + v->vta_pschroma[k] = v->acceptable_quality_vta_ps; + v->hta_pschroma[k] = v->acceptable_quality_hta_ps; + } + } + } +} + +void 
mode_support_and_system_configuration(struct dcn_bw_internal_vars *v) +{ + int i; + int j; + int k; + /*mode support, voltage state and soc configuration*/ + + /*scale ratio support check*/ + + v->scale_ratio_support = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->h_ratio[k] > v->max_hscl_ratio || v->v_ratio[k] > v->max_vscl_ratio || v->h_ratio[k] > v->htaps[k] || v->v_ratio[k] > v->vtaps[k] || (v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16 && (v->h_ratio[k] / 2.0 > v->hta_pschroma[k] || v->v_ratio[k] / 2.0 > v->vta_pschroma[k]))) { + v->scale_ratio_support = dcn_bw_no; + } + } + /*source format, pixel format and scan support check*/ + + v->source_format_pixel_and_scan_support = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->source_surface_mode[k] == dcn_bw_sw_linear && v->source_scan[k] != dcn_bw_hor) || ((v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x || v->source_surface_mode[k] == dcn_bw_sw_var_d || v->source_surface_mode[k] == dcn_bw_sw_var_d_x) && v->source_pixel_format[k] != dcn_bw_rgb_sub_64)) { + v->source_format_pixel_and_scan_support = dcn_bw_no; + } + } + /*bandwidth support check*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_scan[k] == dcn_bw_hor) { + v->swath_width_ysingle_dpp[k] = v->viewport_width[k]; + } + else { + v->swath_width_ysingle_dpp[k] = v->viewport_height[k]; + } + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->byte_per_pixel_in_dety[k] = 8.0; + v->byte_per_pixel_in_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { + v->byte_per_pixel_in_dety[k] = 4.0; + v->byte_per_pixel_in_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->byte_per_pixel_in_dety[k] = 2.0; + v->byte_per_pixel_in_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->byte_per_pixel_in_dety[k] = 1.0; + v->byte_per_pixel_in_detc[k] = 2.0; + } + else { + v->byte_per_pixel_in_dety[k] = 4.0f / 3.0f; + v->byte_per_pixel_in_detc[k] = 8.0f / 3.0f; + } + } + v->total_read_bandwidth_consumed_gbyte_per_second = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->read_bandwidth[k] = v->swath_width_ysingle_dpp[k] * (dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) * v->v_ratio[k] +dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0 * v->v_ratio[k] / 2) / (v->htotal[k] / v->pixel_clock[k]); + if (v->dcc_enable[k] == dcn_bw_yes) { + v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256); + } + if (v->pte_enable == dcn_bw_yes && v->source_scan[k] != dcn_bw_hor && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x)) { + v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 64); + } + else if (v->pte_enable == dcn_bw_yes && v->source_scan[k] == dcn_bw_hor && (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32) && (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == 
dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x)) { + v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 256); + } + else if (v->pte_enable == dcn_bw_yes) { + v->read_bandwidth[k] = v->read_bandwidth[k] * (1 + 1 / 512); + } + v->total_read_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->read_bandwidth[k] / 1000.0; + } + v->total_write_bandwidth_consumed_gbyte_per_second = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) { + v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0; + } + else if (v->output[k] == dcn_bw_writeback) { + v->write_bandwidth[k] = v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5; + } + else { + v->write_bandwidth[k] = 0.0; + } + v->total_write_bandwidth_consumed_gbyte_per_second = v->total_write_bandwidth_consumed_gbyte_per_second + v->write_bandwidth[k] / 1000.0; + } + v->total_bandwidth_consumed_gbyte_per_second = v->total_read_bandwidth_consumed_gbyte_per_second + v->total_write_bandwidth_consumed_gbyte_per_second; + v->dcc_enabled_in_any_plane = dcn_bw_no; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->dcc_enabled_in_any_plane = dcn_bw_yes; + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0); + v->return_bw_per_state[i] = v->return_bw_todcn_per_state; + if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) { + v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency))); + } + v->critical_point = 2.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); + if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) { + v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); + } + v->return_bw_todcn_per_state =dcn_bw_min2(v->return_bus_width * v->dcfclk_per_state[i], v->fabric_and_dram_bandwidth_per_state[i] * 1000.0); + if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->return_bw_todcn_per_state > v->dcfclk_per_state[i] * v->return_bus_width / 4.0) { + v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], v->return_bw_todcn_per_state * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bw_todcn_per_state - v->dcfclk_per_state[i] * v->return_bus_width / 4.0) + v->urgent_latency))); + } + v->critical_point = 2.0 * v->return_bus_width * 
v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); + if (v->dcc_enabled_in_any_plane == dcn_bw_yes && v->critical_point > 1.0 && v->critical_point < 4.0) { + v->return_bw_per_state[i] =dcn_bw_min2(v->return_bw_per_state[i], dcn_bw_pow(4.0 * v->return_bw_todcn_per_state * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk_per_state[i] * v->urgent_latency / (v->return_bw_todcn_per_state * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + if ((v->total_read_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->return_bw_per_state[i]) && (v->total_bandwidth_consumed_gbyte_per_second * 1000.0 <= v->fabric_and_dram_bandwidth_per_state[i] * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0)) { + v->bandwidth_support[i] = dcn_bw_yes; + } + else { + v->bandwidth_support[i] = dcn_bw_no; + } + } + /*writeback latency support check*/ + + v->writeback_latency_support = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444 && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0 > (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) { + v->writeback_latency_support = dcn_bw_no; + } + else if (v->output[k] == dcn_bw_writeback && v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) >dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / v->write_back_latency) { + v->writeback_latency_support = dcn_bw_no; + } + } + /*re-ordering buffer support check*/ + + for (i = 0; i <= number_of_states_plus_one; i++) { + v->urgent_round_trip_and_out_of_order_latency_per_state[i] = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk_per_state[i] + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw_per_state[i]; + if ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / v->return_bw_per_state[i] > v->urgent_round_trip_and_out_of_order_latency_per_state[i]) { + v->rob_support[i] = dcn_bw_yes; + } + else { + v->rob_support[i] = dcn_bw_no; + } + } + /*display io support check*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_dp && v->dsc_capability == dcn_bw_yes) { + if (v->output_format[k] == dcn_bw_420) { + v->required_output_bw = v->pixel_clock[k] / 2.0; + } + else { + v->required_output_bw = v->pixel_clock[k]; + } + } + else if (v->output_format[k] == dcn_bw_420) { + v->required_output_bw = v->pixel_clock[k] * 3.0 / 2.0; + } + else { + v->required_output_bw = v->pixel_clock[k] * 3.0; + } + if (v->output[k] == dcn_bw_hdmi) { + v->required_phyclk[k] = v->required_output_bw; + switch (v->output_deep_color[k]) { + case dcn_bw_encoder_10bpc: + v->required_phyclk[k] = v->required_phyclk[k] * 5.0 / 4; + break; + case dcn_bw_encoder_12bpc: + v->required_phyclk[k] = v->required_phyclk[k] * 3.0 / 2; + break; + default: + break; + } + v->required_phyclk[k] = v->required_phyclk[k] / 3.0; + } + else if (v->output[k] == dcn_bw_dp) { + v->required_phyclk[k] = v->required_output_bw / 4.0; + } + else { + v->required_phyclk[k] = 0.0; + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + v->dio_support[i] = dcn_bw_yes; + for (k = 0; k <= 
v->number_of_active_planes - 1; k++) { + if (v->required_phyclk[k] > v->phyclk_per_state[i] || (v->output[k] == dcn_bw_hdmi && v->required_phyclk[k] > 600.0)) { + v->dio_support[i] = dcn_bw_no; + } + } + } + /*total available writeback support check*/ + + v->total_number_of_active_writeback = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback) { + v->total_number_of_active_writeback = v->total_number_of_active_writeback + 1.0; + } + } + if (v->total_number_of_active_writeback <= v->max_num_writeback) { + v->total_available_writeback_support = dcn_bw_yes; + } + else { + v->total_available_writeback_support = dcn_bw_no; + } + /*maximum dispclk/dppclk support check*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->h_ratio[k] > 1.0) { + v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0)); + } + else { + v->pscl_factor[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); + } + if (v->byte_per_pixel_in_detc[k] == 0.0) { + v->pscl_factor_chroma[k] = 0.0; + v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], 1.0); + } + else { + if (v->h_ratio[k] / 2.0 > 1.0) { + v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0)); + } + else { + v->pscl_factor_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); + } + v->min_dppclk_using_single_dpp[k] = v->pixel_clock[k] *dcn_bw_max5(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_factor[k], v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_factor_chroma[k], 1.0); + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->read256_block_height_y[k] = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->read256_block_height_y[k] = 4.0; + } + else { + v->read256_block_height_y[k] = 8.0; + } + v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k]; + v->read256_block_height_c[k] = 0.0; + v->read256_block_width_c[k] = 0.0; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->read256_block_height_y[k] = 1.0; + v->read256_block_height_c[k] = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->read256_block_height_y[k] = 16.0; + v->read256_block_height_c[k] = 8.0; + } + else { + v->read256_block_height_y[k] = 8.0; + v->read256_block_height_c[k] = 8.0; + } + v->read256_block_width_y[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->read256_block_height_y[k]; + v->read256_block_width_c[k] = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->read256_block_height_c[k]; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->max_swath_height_y[k] = v->read256_block_height_y[k]; + v->max_swath_height_c[k] = v->read256_block_height_c[k]; + } + else { + v->max_swath_height_y[k] = v->read256_block_width_y[k]; + v->max_swath_height_c[k] = v->read256_block_width_c[k]; 
+ } + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) { + v->min_swath_height_y[k] = v->max_swath_height_y[k]; + } + else { + v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0; + } + v->min_swath_height_c[k] = v->max_swath_height_c[k]; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->min_swath_height_y[k] = v->max_swath_height_y[k]; + v->min_swath_height_c[k] = v->max_swath_height_c[k]; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) { + v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0; + if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { + v->min_swath_height_c[k] = v->max_swath_height_c[k]; + } + else { + v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0; + } + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) { + v->min_swath_height_c[k] = v->max_swath_height_c[k] / 2.0; + if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { + v->min_swath_height_y[k] = v->max_swath_height_y[k]; + } + else { + v->min_swath_height_y[k] = v->max_swath_height_y[k] / 2.0; + } + } + else { + v->min_swath_height_y[k] = v->max_swath_height_y[k]; + v->min_swath_height_c[k] = v->max_swath_height_c[k]; + } + } + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->maximum_swath_width = 8192.0; + } + else { + v->maximum_swath_width = 5120.0; + } + v->number_of_dpp_required_for_det_size =dcn_bw_ceil2(v->swath_width_ysingle_dpp[k] /dcn_bw_min2(v->maximum_swath_width, v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / (v->byte_per_pixel_in_dety[k] * v->min_swath_height_y[k] + v->byte_per_pixel_in_detc[k] / 2.0 * v->min_swath_height_c[k])), 1.0); + if (v->byte_per_pixel_in_detc[k] == 0.0) { + v->number_of_dpp_required_for_lb_size =dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0); + } + else { + v->number_of_dpp_required_for_lb_size =dcn_bw_max2(dcn_bw_ceil2((v->vtaps[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k], 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] /dcn_bw_max2(v->h_ratio[k], 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0),dcn_bw_ceil2((v->vta_pschroma[k] +dcn_bw_max2(dcn_bw_ceil2(v->v_ratio[k] / 2.0, 1.0) - 2, 0.0)) * v->swath_width_ysingle_dpp[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0) * v->lb_bit_per_pixel[k] / v->line_buffer_size, 1.0)); + } + v->number_of_dpp_required_for_det_and_lb_size[k] =dcn_bw_max2(v->number_of_dpp_required_for_det_size, v->number_of_dpp_required_for_lb_size); + } + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->total_number_of_active_dpp[i][j] = 0.0; + v->required_dispclk[i][j] = 0.0; + v->dispclk_dppclk_support[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + 
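+ /* Prefer a single DPP per plane; split the plane across two DPPs
+ * when the single-DPP DISPCLK requirement exceeds this state's
+ * dispclk/dppclk limits or the DET/LB sizing already demands more
+ * than one pipe. */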
v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0); + if (v->odm_capability == dcn_bw_yes) { + v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k] / 2.0, v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0); + } + else { + v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0); + } + if (i < number_of_states) { + v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); + v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); + } + if (v->min_dispclk_using_single_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i]) && v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) { + v->no_of_dpp[i][j][k] = 1.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp); + } + else if (v->min_dispclk_using_dual_dpp <=dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) { + v->no_of_dpp[i][j][k] = 2.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp); + } + else { + v->no_of_dpp[i][j][k] = 2.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp); + v->dispclk_dppclk_support[i][j] = dcn_bw_no; + } + v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k]; + } + if (v->total_number_of_active_dpp[i][j] > v->max_num_dpp) { + v->total_number_of_active_dpp[i][j] = 0.0; + v->required_dispclk[i][j] = 0.0; + v->dispclk_dppclk_support[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->min_dispclk_using_single_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] * (j + 1)) * (1.0 + v->downspreading / 100.0); + v->min_dispclk_using_dual_dpp =dcn_bw_max2(v->pixel_clock[k], v->min_dppclk_using_single_dpp[k] / 2.0 * (j + 1)) * (1.0 + v->downspreading / 100.0); + if (i < number_of_states) { + v->min_dispclk_using_single_dpp = v->min_dispclk_using_single_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); + v->min_dispclk_using_dual_dpp = v->min_dispclk_using_dual_dpp * (1.0 + v->dispclk_ramping_margin / 100.0); + } + if (v->number_of_dpp_required_for_det_and_lb_size[k] <= 1.0) { + v->no_of_dpp[i][j][k] = 1.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_single_dpp); + if (v->min_dispclk_using_single_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) { + v->dispclk_dppclk_support[i][j] = dcn_bw_no; + } + } + else { + v->no_of_dpp[i][j][k] = 2.0; + v->required_dispclk[i][j] =dcn_bw_max2(v->required_dispclk[i][j], v->min_dispclk_using_dual_dpp); + if (v->min_dispclk_using_dual_dpp >dcn_bw_min2(v->max_dispclk[i], (j + 1) * v->max_dppclk[i])) { + v->dispclk_dppclk_support[i][j] = dcn_bw_no; + } + } + v->total_number_of_active_dpp[i][j] = v->total_number_of_active_dpp[i][j] + v->no_of_dpp[i][j][k]; + } + } + } + } + /*viewport size check*/ + + v->viewport_size_support = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->number_of_dpp_required_for_det_and_lb_size[k] > 2.0) { + v->viewport_size_support = dcn_bw_no; + } + } + /*total available pipes support check*/ + + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + if 
(v->total_number_of_active_dpp[i][j] <= v->max_num_dpp) { + v->total_available_pipes_support[i][j] = dcn_bw_yes; + } + else { + v->total_available_pipes_support[i][j] = dcn_bw_no; + } + } + } + /*urgent latency support check*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->swath_width_yper_state[i][j][k] = v->swath_width_ysingle_dpp[k] / v->no_of_dpp[i][j][k]; + v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->max_swath_height_y[k]; + v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pixel_in_dety[k] * v->max_swath_height_y[k]; + if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { + v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256; + } + if (v->max_swath_height_c[k] > 0.0) { + v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->max_swath_height_c[k]; + } + v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width_yper_state[i][j][k] / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pixel_in_detc[k] * v->max_swath_height_c[k]; + if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { + v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256; + } + if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) { + v->swath_height_yper_state[i][j][k] = v->max_swath_height_y[k]; + v->swath_height_cper_state[i][j][k] = v->max_swath_height_c[k]; + } + else { + v->swath_height_yper_state[i][j][k] = v->min_swath_height_y[k]; + v->swath_height_cper_state[i][j][k] = v->min_swath_height_c[k]; + } + if (v->byte_per_pixel_in_detc[k] == 0.0) { + v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k]; + v->lines_in_det_chroma = 0.0; + } + else if (v->swath_height_yper_state[i][j][k] <= v->swath_height_cper_state[i][j][k]) { + v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k]; + v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 2.0 / v->byte_per_pixel_in_detc[k] / (v->swath_width_yper_state[i][j][k] / 2.0); + } + else { + v->lines_in_det_luma = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0 / v->byte_per_pixel_in_dety[k] / v->swath_width_yper_state[i][j][k]; + v->lines_in_det_chroma = v->det_buffer_size_in_kbyte * 1024.0 / 3.0 / v->byte_per_pixel_in_dety[k] / (v->swath_width_yper_state[i][j][k] / 2.0); + } + v->effective_lb_latency_hiding_source_lines_luma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0); + v->effective_lb_latency_hiding_source_lines_chroma =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_yper_state[i][j][k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0); + v->effective_detlb_lines_luma =dcn_bw_floor2(v->lines_in_det_luma +dcn_bw_min2(v->lines_in_det_luma * v->required_dispclk[i][j] * v->byte_per_pixel_in_dety[k] * v->pscl_factor[k] / v->return_bw_per_state[i], 
v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_yper_state[i][j][k]); + v->effective_detlb_lines_chroma =dcn_bw_floor2(v->lines_in_det_chroma +dcn_bw_min2(v->lines_in_det_chroma * v->required_dispclk[i][j] * v->byte_per_pixel_in_detc[k] * v->pscl_factor_chroma[k] / v->return_bw_per_state[i], v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_cper_state[i][j][k]); + if (v->byte_per_pixel_in_detc[k] == 0.0) { + v->urgent_latency_support_us_per_state[i][j][k] = v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]); + } + else { + v->urgent_latency_support_us_per_state[i][j][k] =dcn_bw_min2(v->effective_detlb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_detlb_lines_luma * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k]), v->effective_detlb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_detlb_lines_chroma * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / (v->return_bw_per_state[i] / v->no_of_dpp[i][j][k])); + } + } + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->urgent_latency_support[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->urgent_latency_support_us_per_state[i][j][k] < v->urgent_latency / 1.0) { + v->urgent_latency_support[i][j] = dcn_bw_no; + } + } + } + } + /*prefetch check*/ + + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->total_number_of_dcc_active_dpp[i][j] = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->total_number_of_dcc_active_dpp[i][j] = v->total_number_of_dcc_active_dpp[i][j] + v->no_of_dpp[i][j][k]; + } + } + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->projected_dcfclk_deep_sleep = 8.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, v->pixel_clock[k] / 16.0); + if (v->byte_per_pixel_in_detc[k] == 0.0) { + if (v->v_ratio[k] <= 1.0) { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]); + } + else { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 64.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j)); + } + } + else { + if (v->v_ratio[k] <= 1.0) { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->h_ratio[k] * v->pixel_clock[k] / v->no_of_dpp[i][j][k]); + } + else { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 32.0 * v->pscl_factor[k] * v->required_dispclk[i][j] / (1 + j)); + } + if (v->v_ratio[k] / 2.0 <= 1.0) { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->h_ratio[k] / 2.0 * v->pixel_clock[k] / 
v->no_of_dpp[i][j][k]); + } + else { + v->projected_dcfclk_deep_sleep =dcn_bw_max2(v->projected_dcfclk_deep_sleep, 1.1 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 32.0 * v->pscl_factor_chroma[k] * v->required_dispclk[i][j] / (1 + j)); + } + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->meta_req_height_y = 8.0 * v->read256_block_height_y[k]; + v->meta_req_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->meta_req_height_y; + v->meta_surface_width_y =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0, v->meta_req_width_y) + v->meta_req_width_y; + v->meta_surface_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, v->meta_req_height_y) + v->meta_req_height_y; + if (v->pte_enable == dcn_bw_yes) { + v->meta_pte_bytes_per_frame_y = (dcn_bw_ceil2((v->meta_surface_width_y * v->meta_surface_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; + } + else { + v->meta_pte_bytes_per_frame_y = 0.0; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->meta_row_bytes_y = v->meta_surface_width_y * v->meta_req_height_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0; + } + else { + v->meta_row_bytes_y = v->meta_surface_height_y * v->meta_req_width_y *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / 256.0; + } + } + else { + v->meta_pte_bytes_per_frame_y = 0.0; + v->meta_row_bytes_y = 0.0; + } + if (v->pte_enable == dcn_bw_yes) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->macro_tile_block_size_bytes_y = 256.0; + v->macro_tile_block_height_y = 1.0; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { + v->macro_tile_block_size_bytes_y = 4096.0; + v->macro_tile_block_height_y = 4.0 * v->read256_block_height_y[k]; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { + v->macro_tile_block_size_bytes_y = 64.0 * 1024; + v->macro_tile_block_height_y = 16.0 * v->read256_block_height_y[k]; + } + else { + v->macro_tile_block_size_bytes_y = 256.0 * 1024; + v->macro_tile_block_height_y = 32.0 * v->read256_block_height_y[k]; + } + if (v->macro_tile_block_size_bytes_y <= 65536.0) { + v->data_pte_req_height_y = v->macro_tile_block_height_y; + } + else { + v->data_pte_req_height_y = 16.0 * v->read256_block_height_y[k]; + } + v->data_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) / v->data_pte_req_height_y * 8; + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_y / (v->viewport_width[k] / v->no_of_dpp[i][j][k]), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_y, 1.0) + 1); + } + else if (v->source_scan[k] == dcn_bw_hor) { + v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] - 1.0) / v->data_pte_req_width_y, 1.0) + 1); + } + else { + v->dpte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->data_pte_req_height_y, 1.0) 
+ 1); + } + } + else { + v->dpte_bytes_per_row_y = 0.0; + } + if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->meta_req_height_c = 8.0 * v->read256_block_height_c[k]; + v->meta_req_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->meta_req_height_c; + v->meta_surface_width_c =dcn_bw_ceil2(v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0, v->meta_req_width_c) + v->meta_req_width_c; + v->meta_surface_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, v->meta_req_height_c) + v->meta_req_height_c; + if (v->pte_enable == dcn_bw_yes) { + v->meta_pte_bytes_per_frame_c = (dcn_bw_ceil2((v->meta_surface_width_c * v->meta_surface_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; + } + else { + v->meta_pte_bytes_per_frame_c = 0.0; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->meta_row_bytes_c = v->meta_surface_width_c * v->meta_req_height_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0; + } + else { + v->meta_row_bytes_c = v->meta_surface_height_c * v->meta_req_width_c *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 256.0; + } + } + else { + v->meta_pte_bytes_per_frame_c = 0.0; + v->meta_row_bytes_c = 0.0; + } + if (v->pte_enable == dcn_bw_yes) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->macro_tile_block_size_bytes_c = 256.0; + v->macro_tile_block_height_c = 1.0; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { + v->macro_tile_block_size_bytes_c = 4096.0; + v->macro_tile_block_height_c = 4.0 * v->read256_block_height_c[k]; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { + v->macro_tile_block_size_bytes_c = 64.0 * 1024; + v->macro_tile_block_height_c = 16.0 * v->read256_block_height_c[k]; + } + else { + v->macro_tile_block_size_bytes_c = 256.0 * 1024; + v->macro_tile_block_height_c = 32.0 * v->read256_block_height_c[k]; + } + v->macro_tile_block_width_c = v->macro_tile_block_size_bytes_c /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->macro_tile_block_height_c; + if (v->macro_tile_block_size_bytes_c <= 65536.0) { + v->data_pte_req_height_c = v->macro_tile_block_height_c; + } + else { + v->data_pte_req_height_c = 16.0 * v->read256_block_height_c[k]; + } + v->data_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / v->data_pte_req_height_c * 8; + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->data_pte_req_width_c / (v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0), 2.0), 1.0))) - 1.0) / v->data_pte_req_width_c, 1.0) + 1); + } + else if (v->source_scan[k] == dcn_bw_hor) { + v->dpte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_width[k] / v->no_of_dpp[i][j][k] / 2.0 - 1.0) / v->data_pte_req_width_c, 1.0) + 1); + } + else { + v->dpte_bytes_per_row_c = 
64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->data_pte_req_height_c, 1.0) + 1); + } + } + else { + v->dpte_bytes_per_row_c = 0.0; + } + } + else { + v->dpte_bytes_per_row_c = 0.0; + v->meta_pte_bytes_per_frame_c = 0.0; + v->meta_row_bytes_c = 0.0; + } + v->dpte_bytes_per_row[k] = v->dpte_bytes_per_row_y + v->dpte_bytes_per_row_c; + v->meta_pte_bytes_per_frame[k] = v->meta_pte_bytes_per_frame_y + v->meta_pte_bytes_per_frame_c; + v->meta_row_bytes[k] = v->meta_row_bytes_y + v->meta_row_bytes_c; + v->v_init_y = (v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0; + v->prefill_y[k] =dcn_bw_floor2(v->v_init_y, 1.0); + v->max_num_sw_y[k] =dcn_bw_ceil2((v->prefill_y[k] - 1.0) / v->swath_height_yper_state[i][j][k], 1.0) + 1; + if (v->prefill_y[k] > 1.0) { + v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] - 2.0), v->swath_height_yper_state[i][j][k]); + } + else { + v->max_partial_sw_y =dcn_bw_mod((v->prefill_y[k] + v->swath_height_yper_state[i][j][k] - 2.0), v->swath_height_yper_state[i][j][k]); + } + v->max_partial_sw_y =dcn_bw_max2(1.0, v->max_partial_sw_y); + v->prefetch_lines_y[k] = v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k] + v->max_partial_sw_y; + if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { + v->v_init_c = (v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0; + v->prefill_c[k] =dcn_bw_floor2(v->v_init_c, 1.0); + v->max_num_sw_c[k] =dcn_bw_ceil2((v->prefill_c[k] - 1.0) / v->swath_height_cper_state[i][j][k], 1.0) + 1; + if (v->prefill_c[k] > 1.0) { + v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] - 2.0), v->swath_height_cper_state[i][j][k]); + } + else { + v->max_partial_sw_c =dcn_bw_mod((v->prefill_c[k] + v->swath_height_cper_state[i][j][k] - 2.0), v->swath_height_cper_state[i][j][k]); + } + v->max_partial_sw_c =dcn_bw_max2(1.0, v->max_partial_sw_c); + v->prefetch_lines_c[k] = v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k] + v->max_partial_sw_c; + } + else { + v->prefetch_lines_c[k] = 0.0; + } + v->dst_x_after_scaler = 90.0 * v->pixel_clock[k] / (v->required_dispclk[i][j] / (j + 1)) + 42.0 * v->pixel_clock[k] / v->required_dispclk[i][j]; + if (v->no_of_dpp[i][j][k] > 1.0) { + v->dst_x_after_scaler = v->dst_x_after_scaler + v->scaler_rec_out_width[k] / 2.0; + } + if (v->output_format[k] == dcn_bw_420) { + v->dst_y_after_scaler = 1.0; + } + else { + v->dst_y_after_scaler = 0.0; + } + v->time_calc = 24.0 / v->projected_dcfclk_deep_sleep; + v->v_update_offset[k][j] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0); + v->total_repeater_delay = v->max_inter_dcn_tile_repeaters * (2.0 / (v->required_dispclk[i][j] / (j + 1)) + 3.0 / v->required_dispclk[i][j]); + v->v_update_width[k][j] = (14.0 / v->projected_dcfclk_deep_sleep + 12.0 / (v->required_dispclk[i][j] / (j + 1)) + v->total_repeater_delay) * v->pixel_clock[k]; + v->v_ready_offset[k][j] = dcn_bw_max2(150.0 / (v->required_dispclk[i][j] / (j + 1)), v->total_repeater_delay + 20.0 / v->projected_dcfclk_deep_sleep + 10.0 / (v->required_dispclk[i][j] / (j + 1))) * v->pixel_clock[k]; + v->time_setup = (v->v_update_offset[k][j] + v->v_update_width[k][j] + v->v_ready_offset[k][j]) / v->pixel_clock[k]; + v->extra_latency = v->urgent_round_trip_and_out_of_order_latency_per_state[i] + (v->total_number_of_active_dpp[i][j] * v->pixel_chunk_size_in_kbyte + v->total_number_of_dcc_active_dpp[i][j] * v->meta_chunk_size) * 1024.0 / 
v->return_bw_per_state[i]; + if (v->pte_enable == dcn_bw_yes) { + v->extra_latency = v->extra_latency + v->total_number_of_active_dpp[i][j] * v->pte_chunk_size * 1024.0 / v->return_bw_per_state[i]; + } + if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) { + v->maximum_vstartup = v->vtotal[k] - v->vactive[k] - 1.0; + } + else { + v->maximum_vstartup = v->v_sync_plus_back_porch[k] - 1.0; + } + + do { + v->line_times_for_prefetch[k] = v->maximum_vstartup - v->urgent_latency / (v->htotal[k] / v->pixel_clock[k]) - (v->time_calc + v->time_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dst_y_after_scaler + v->dst_x_after_scaler / v->htotal[k]); + v->line_times_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->line_times_for_prefetch[k] + 0.125), 1.0) / 4; + v->prefetch_bw[k] = (v->meta_pte_bytes_per_frame[k] + 2.0 * v->meta_row_bytes[k] + 2.0 * v->dpte_bytes_per_row[k] + v->prefetch_lines_y[k] * v->swath_width_yper_state[i][j][k] *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] * v->swath_width_yper_state[i][j][k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0)) / (v->line_times_for_prefetch[k] * v->htotal[k] / v->pixel_clock[k]); + + if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_pte_without_immediate_flip = dcn_bw_max3( + v->meta_pte_bytes_frame[k] / v->prefetch_bw[k], + v->extra_latency, + v->htotal[k] / v->pixel_clock[k] / 4.0); + } else { + v->time_for_meta_pte_without_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; + } + + if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max3(( + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], + v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, + v->extra_latency); + } else { + v->time_for_meta_and_dpte_row_without_immediate_flip = dcn_bw_max2( + v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_without_immediate_flip, + v->extra_latency - v->time_for_meta_pte_with_immediate_flip); + } + + v->lines_for_meta_pte_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->lines_for_meta_and_dpte_row_without_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_without_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->maximum_vstartup = v->maximum_vstartup - 1; + + if (v->lines_for_meta_pte_without_immediate_flip[k] < 32.0 && v->lines_for_meta_and_dpte_row_without_immediate_flip[k] < 16.0) + break; + + } while(1); + } + v->bw_available_for_immediate_flip = v->return_bw_per_state[i]; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->bw_available_for_immediate_flip = v->bw_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth[k], v->prefetch_bw[k]); + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->total_immediate_flip_bytes[k] = 0.0; + if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->total_immediate_flip_bytes[k] = v->total_immediate_flip_bytes[k] + v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]; + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_pte_with_immediate_flip =dcn_bw_max5(v->meta_pte_bytes_per_frame[k] / v->prefetch_bw[k], 
v->meta_pte_bytes_per_frame[k] * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); + } + else { + v->time_for_meta_pte_with_immediate_flip = v->htotal[k] / v->pixel_clock[k] / 4.0; + } + if (v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes) { + v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max5((v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / v->prefetch_bw[k], (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) * v->total_immediate_flip_bytes[k] / (v->bw_available_for_immediate_flip * (v->meta_pte_bytes_per_frame[k] + v->meta_row_bytes[k] + v->dpte_bytes_per_row[k])), v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency, 2.0 * v->urgent_latency); + } + else { + v->time_for_meta_and_dpte_row_with_immediate_flip =dcn_bw_max2(v->htotal[k] / v->pixel_clock[k] - v->time_for_meta_pte_with_immediate_flip, v->extra_latency - v->time_for_meta_pte_with_immediate_flip); + } + v->lines_for_meta_pte_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_pte_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->lines_for_meta_and_dpte_row_with_immediate_flip[k] =dcn_bw_floor2(4.0 * (v->time_for_meta_and_dpte_row_with_immediate_flip / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->line_times_to_request_prefetch_pixel_data_with_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_with_immediate_flip[k] - v->lines_for_meta_and_dpte_row_with_immediate_flip[k]; + v->line_times_to_request_prefetch_pixel_data_without_immediate_flip = v->line_times_for_prefetch[k] - v->lines_for_meta_pte_without_immediate_flip[k] - v->lines_for_meta_and_dpte_row_without_immediate_flip[k]; + if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip > 0.0) { + v->v_ratio_pre_ywith_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip; + if ((v->swath_height_yper_state[i][j][k] > 4.0)) { + if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) { + v->v_ratio_pre_ywith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywith_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0; + } + } + v->v_ratio_pre_cwith_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip; + if ((v->swath_height_cper_state[i][j][k] > 4.0)) { + if (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) { + v->v_ratio_pre_cwith_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwith_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_with_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0; + } + } + v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / 
v->line_times_to_request_prefetch_pixel_data_with_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]); + } + else { + v->v_ratio_pre_ywith_immediate_flip[i][j][k] = 999999.0; + v->v_ratio_pre_cwith_immediate_flip[i][j][k] = 999999.0; + v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k] = 999999.0; + } + if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip > 0.0) { + v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip; + if ((v->swath_height_yper_state[i][j][k] > 4.0)) { + if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0 > 0.0) { + v->v_ratio_pre_ywithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_ywithout_immediate_flip[i][j][k], (v->max_num_sw_y[k] * v->swath_height_yper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_y[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0; + } + } + v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip; + if ((v->swath_height_cper_state[i][j][k] > 4.0)) { + if (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0 > 0.0) { + v->v_ratio_pre_cwithout_immediate_flip[i][j][k] =dcn_bw_max2(v->v_ratio_pre_cwithout_immediate_flip[i][j][k], (v->max_num_sw_c[k] * v->swath_height_cper_state[i][j][k]) / (v->line_times_to_request_prefetch_pixel_data_without_immediate_flip - (v->prefill_c[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0; + } + } + v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = v->no_of_dpp[i][j][k] * (v->prefetch_lines_y[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_dety[k], 1.0) + v->prefetch_lines_c[k] / v->line_times_to_request_prefetch_pixel_data_without_immediate_flip *dcn_bw_ceil2(v->byte_per_pixel_in_detc[k], 2.0) / 2.0) * v->swath_width_yper_state[i][j][k] / (v->htotal[k] / v->pixel_clock[k]); + } + else { + v->v_ratio_pre_ywithout_immediate_flip[i][j][k] = 999999.0; + v->v_ratio_pre_cwithout_immediate_flip[i][j][k] = 999999.0; + v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k] = 999999.0; + } + } + v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_with_immediate_flip[i][j][k]) +dcn_bw_max2(v->meta_pte_bytes_per_frame[k] / (v->lines_for_meta_pte_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_bytes[k] + v->dpte_bytes_per_row[k]) / (v->lines_for_meta_and_dpte_row_with_immediate_flip[k] * v->htotal[k] / v->pixel_clock[k])); + } + else { + v->maximum_read_bandwidth_with_prefetch_with_immediate_flip = v->maximum_read_bandwidth_with_prefetch_with_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]); + } + } + 
v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->maximum_read_bandwidth_with_prefetch_without_immediate_flip = v->maximum_read_bandwidth_with_prefetch_without_immediate_flip +dcn_bw_max2(v->read_bandwidth[k], v->required_prefetch_pixel_data_bw_without_immediate_flip[i][j][k]); + } + v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes; + if (v->maximum_read_bandwidth_with_prefetch_with_immediate_flip > v->return_bw_per_state[i]) { + v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_with_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_with_immediate_flip[k] >= 16.0) { + v->prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no; + } + } + v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes; + if (v->maximum_read_bandwidth_with_prefetch_without_immediate_flip > v->return_bw_per_state[i]) { + v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->line_times_for_prefetch[k] < 2.0 || v->lines_for_meta_pte_without_immediate_flip[k] >= 8.0 || v->lines_for_meta_and_dpte_row_without_immediate_flip[k] >= 16.0) { + v->prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no; + } + } + } + } + for (i = 0; i <= number_of_states_plus_one; i++) { + for (j = 0; j <= 1; j++) { + v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywith_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwith_immediate_flip[i][j][k] > 4.0)) || ((v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 || v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) && (v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)))) { + v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] = dcn_bw_no; + } + } + v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_yes; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->v_ratio_pre_ywithout_immediate_flip[i][j][k] > 4.0 || v->v_ratio_pre_cwithout_immediate_flip[i][j][k] > 4.0)) { + v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] = dcn_bw_no; + } + } + } + } + /*mode support, voltage state and soc configuration*/ + + for (i = number_of_states_plus_one; i >= 0; i--) { + for (j = 0; j <= 1; j++) { + if (v->scale_ratio_support == dcn_bw_yes && v->source_format_pixel_and_scan_support == dcn_bw_yes && v->viewport_size_support == dcn_bw_yes && v->bandwidth_support[i] == dcn_bw_yes && v->dio_support[i] == dcn_bw_yes && v->urgent_latency_support[i][j] == dcn_bw_yes && v->rob_support[i] == dcn_bw_yes && v->dispclk_dppclk_support[i][j] == dcn_bw_yes && v->total_available_pipes_support[i][j] == dcn_bw_yes && v->total_available_writeback_support == dcn_bw_yes && v->writeback_latency_support == dcn_bw_yes) { + if (v->prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes && v->v_ratio_in_prefetch_supported_with_immediate_flip[i][j] == dcn_bw_yes) { + v->mode_support_with_immediate_flip[i][j] = dcn_bw_yes; + } + else { + v->mode_support_with_immediate_flip[i][j] = dcn_bw_no; + } + if (v->prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes && 
v->v_ratio_in_prefetch_supported_without_immediate_flip[i][j] == dcn_bw_yes) { + v->mode_support_without_immediate_flip[i][j] = dcn_bw_yes; + } + else { + v->mode_support_without_immediate_flip[i][j] = dcn_bw_no; + } + } + else { + v->mode_support_with_immediate_flip[i][j] = dcn_bw_no; + v->mode_support_without_immediate_flip[i][j] = dcn_bw_no; + } + } + } + for (i = number_of_states_plus_one; i >= 0; i--) { + if ((i == number_of_states_plus_one || v->mode_support_with_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_with_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) { + v->voltage_level_with_immediate_flip = i; + } + } + for (i = number_of_states_plus_one; i >= 0; i--) { + if ((i == number_of_states_plus_one || v->mode_support_without_immediate_flip[i][1] == dcn_bw_yes || v->mode_support_without_immediate_flip[i][0] == dcn_bw_yes) && i >= v->voltage_override_level) { + v->voltage_level_without_immediate_flip = i; + } + } + if (v->voltage_level_with_immediate_flip == number_of_states_plus_one) { + v->immediate_flip_supported = dcn_bw_no; + v->voltage_level = v->voltage_level_without_immediate_flip; + } + else { + v->immediate_flip_supported = dcn_bw_yes; + v->voltage_level = v->voltage_level_with_immediate_flip; + } + v->dcfclk = v->dcfclk_per_state[v->voltage_level]; + v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level]; + for (j = 0; j <= 1; j++) { + v->required_dispclk_per_ratio[j] = v->required_dispclk[v->voltage_level][j]; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->dpp_per_plane_per_ratio[j][k] = v->no_of_dpp[v->voltage_level][j][k]; + } + v->dispclk_dppclk_support_per_ratio[j] = v->dispclk_dppclk_support[v->voltage_level][j]; + } + v->max_phyclk = v->phyclk_per_state[v->voltage_level]; +} +void display_pipe_configuration(struct dcn_bw_internal_vars *v) +{ + int j; + int k; + /*display pipe configuration*/ + + for (j = 0; j <= 1; j++) { + v->total_number_of_active_dpp_per_ratio[j] = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->total_number_of_active_dpp_per_ratio[j] = v->total_number_of_active_dpp_per_ratio[j] + v->dpp_per_plane_per_ratio[j][k]; + } + } + if ((v->dispclk_dppclk_support_per_ratio[0] == dcn_bw_yes && v->dispclk_dppclk_support_per_ratio[1] == dcn_bw_no) || (v->dispclk_dppclk_support_per_ratio[0] == v->dispclk_dppclk_support_per_ratio[1] && (v->total_number_of_active_dpp_per_ratio[0] < v->total_number_of_active_dpp_per_ratio[1] || (((v->total_number_of_active_dpp_per_ratio[0] == v->total_number_of_active_dpp_per_ratio[1]) && v->required_dispclk_per_ratio[0] <= 0.5 * v->required_dispclk_per_ratio[1]))))) { + v->dispclk_dppclk_ratio = 1; + v->final_error_message = v->error_message[0]; + } + else { + v->dispclk_dppclk_ratio = 2; + v->final_error_message = v->error_message[1]; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->dpp_per_plane[k] = v->dpp_per_plane_per_ratio[v->dispclk_dppclk_ratio - 1][k]; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->byte_per_pix_dety = 8.0; + v->byte_per_pix_detc = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { + v->byte_per_pix_dety = 4.0; + v->byte_per_pix_detc = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->byte_per_pix_dety = 2.0; + v->byte_per_pix_detc = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->byte_per_pix_dety = 1.0; + v->byte_per_pix_detc = 2.0; + } 
+ else { + v->byte_per_pix_dety = 4.0f / 3.0f; + v->byte_per_pix_detc = 8.0f / 3.0f; + } + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->read256_bytes_block_height_y = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->read256_bytes_block_height_y = 4.0; + } + else { + v->read256_bytes_block_height_y = 8.0; + } + v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y; + v->read256_bytes_block_height_c = 0.0; + v->read256_bytes_block_width_c = 0.0; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->read256_bytes_block_height_y = 1.0; + v->read256_bytes_block_height_c = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->read256_bytes_block_height_y = 16.0; + v->read256_bytes_block_height_c = 8.0; + } + else { + v->read256_bytes_block_height_y = 8.0; + v->read256_bytes_block_height_c = 8.0; + } + v->read256_bytes_block_width_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->read256_bytes_block_height_y; + v->read256_bytes_block_width_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->read256_bytes_block_height_c; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->maximum_swath_height_y = v->read256_bytes_block_height_y; + v->maximum_swath_height_c = v->read256_bytes_block_height_c; + } + else { + v->maximum_swath_height_y = v->read256_bytes_block_width_y; + v->maximum_swath_height_c = v->read256_bytes_block_width_c; + } + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear || (v->source_pixel_format[k] == dcn_bw_rgb_sub_64 && (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_var_s || v->source_surface_mode[k] == dcn_bw_sw_var_s_x) && v->source_scan[k] == dcn_bw_hor)) { + v->minimum_swath_height_y = v->maximum_swath_height_y; + } + else { + v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0; + } + v->minimum_swath_height_c = v->maximum_swath_height_c; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->minimum_swath_height_y = v->maximum_swath_height_y; + v->minimum_swath_height_c = v->maximum_swath_height_c; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8 && v->source_scan[k] == dcn_bw_hor) { + v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0; + if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { + v->minimum_swath_height_c = v->maximum_swath_height_c; + } + else { + v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0; + } + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10 && v->source_scan[k] == dcn_bw_hor) { + v->minimum_swath_height_c = v->maximum_swath_height_c / 2.0; + if (v->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes) { + v->minimum_swath_height_y = v->maximum_swath_height_y; + } + else { + v->minimum_swath_height_y = v->maximum_swath_height_y / 2.0; + } + } + else { + v->minimum_swath_height_y = v->maximum_swath_height_y; + v->minimum_swath_height_c = 
v->maximum_swath_height_c; + } + } + if (v->source_scan[k] == dcn_bw_hor) { + v->swath_width = v->viewport_width[k] / v->dpp_per_plane[k]; + } + else { + v->swath_width = v->viewport_height[k] / v->dpp_per_plane[k]; + } + v->swath_width_granularity_y = 256.0 /dcn_bw_ceil2(v->byte_per_pix_dety, 1.0) / v->maximum_swath_height_y; + v->rounded_up_max_swath_size_bytes_y = (dcn_bw_ceil2(v->swath_width - 1.0, v->swath_width_granularity_y) + v->swath_width_granularity_y) * v->byte_per_pix_dety * v->maximum_swath_height_y; + if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { + v->rounded_up_max_swath_size_bytes_y =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_y, 256.0) + 256; + } + if (v->maximum_swath_height_c > 0.0) { + v->swath_width_granularity_c = 256.0 /dcn_bw_ceil2(v->byte_per_pix_detc, 2.0) / v->maximum_swath_height_c; + } + v->rounded_up_max_swath_size_bytes_c = (dcn_bw_ceil2(v->swath_width / 2.0 - 1.0, v->swath_width_granularity_c) + v->swath_width_granularity_c) * v->byte_per_pix_detc * v->maximum_swath_height_c; + if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_10) { + v->rounded_up_max_swath_size_bytes_c =dcn_bw_ceil2(v->rounded_up_max_swath_size_bytes_c, 256.0) + 256; + } + if (v->rounded_up_max_swath_size_bytes_y + v->rounded_up_max_swath_size_bytes_c <= v->det_buffer_size_in_kbyte * 1024.0 / 2.0) { + v->swath_height_y[k] = v->maximum_swath_height_y; + v->swath_height_c[k] = v->maximum_swath_height_c; + } + else { + v->swath_height_y[k] = v->minimum_swath_height_y; + v->swath_height_c[k] = v->minimum_swath_height_c; + } + if (v->swath_height_c[k] == 0.0) { + v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0; + v->det_buffer_size_c[k] = 0.0; + } + else if (v->swath_height_y[k] <= v->swath_height_c[k]) { + v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0; + v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 2.0; + } + else { + v->det_buffer_size_y[k] = v->det_buffer_size_in_kbyte * 1024.0 * 2.0 / 3.0; + v->det_buffer_size_c[k] = v->det_buffer_size_in_kbyte * 1024.0 / 3.0; + } + } +} +void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(struct dcn_bw_internal_vars *v) +{ + int k; + /*dispclk and dppclk calculation*/ + + v->dispclk_with_ramping = 0.0; + v->dispclk_without_ramping = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->h_ratio[k] > 1.0) { + v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] /dcn_bw_ceil2(v->htaps[k] / 6.0, 1.0)); + } + else { + v->pscl_throughput[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); + } + v->dppclk_using_single_dpp_luma = v->pixel_clock[k] *dcn_bw_max3(v->vtaps[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k]), v->h_ratio[k] * v->v_ratio[k] / v->pscl_throughput[k], 1.0); + if ((v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->pscl_throughput_chroma[k] = 0.0; + v->dppclk_using_single_dpp = v->dppclk_using_single_dpp_luma; + } + else { + if (v->h_ratio[k] > 1.0) { + v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput * v->h_ratio[k] / 2.0 /dcn_bw_ceil2(v->hta_pschroma[k] / 6.0, 1.0)); + } + else { + v->pscl_throughput_chroma[k] =dcn_bw_min2(v->max_dchub_topscl_throughput, v->max_pscl_tolb_throughput); + } + v->dppclk_using_single_dpp_chroma = v->pixel_clock[k] *dcn_bw_max3(v->vta_pschroma[k] / 6.0 *dcn_bw_min2(1.0, v->h_ratio[k] / 2.0), 
v->h_ratio[k] * v->v_ratio[k] / 4.0 / v->pscl_throughput_chroma[k], 1.0); + v->dppclk_using_single_dpp =dcn_bw_max2(v->dppclk_using_single_dpp_luma, v->dppclk_using_single_dpp_chroma); + } + if (v->odm_capable == dcn_bw_yes) { + v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0)); + v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k] / v->dpp_per_plane[k]) * (1.0 + v->downspreading / 100.0)); + } + else { + v->dispclk_with_ramping =dcn_bw_max2(v->dispclk_with_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0) * (1.0 + v->dispclk_ramping_margin / 100.0)); + v->dispclk_without_ramping =dcn_bw_max2(v->dispclk_without_ramping,dcn_bw_max2(v->dppclk_using_single_dpp / v->dpp_per_plane[k] * v->dispclk_dppclk_ratio, v->pixel_clock[k]) * (1.0 + v->downspreading / 100.0)); + } + } + if (v->dispclk_without_ramping > v->max_dispclk[number_of_states]) { + v->dispclk = v->dispclk_without_ramping; + } + else if (v->dispclk_with_ramping > v->max_dispclk[number_of_states]) { + v->dispclk = v->max_dispclk[number_of_states]; + } + else { + v->dispclk = v->dispclk_with_ramping; + } + v->dppclk = v->dispclk / v->dispclk_dppclk_ratio; + /*urgent watermark*/ + + v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0 * v->percent_of_ideal_drambw_received_after_urg_latency / 100.0); + v->dcc_enabled_any_plane = dcn_bw_no; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->dcc_enabled_any_plane = dcn_bw_yes; + } + } + v->return_bw = v->return_bandwidth_to_dcn; + if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) { + v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + v->urgent_latency))); + } + v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); + if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) { + v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); + } + v->return_bandwidth_to_dcn =dcn_bw_min2(v->return_bus_width * v->dcfclk, v->fabric_and_dram_bandwidth * 1000.0); + if (v->dcc_enabled_any_plane == dcn_bw_yes && v->return_bandwidth_to_dcn > v->dcfclk * v->return_bus_width / 4.0) { + v->return_bw =dcn_bw_min2(v->return_bw, v->return_bandwidth_to_dcn * 4.0 * (1.0 - v->urgent_latency / ((v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 / (v->return_bandwidth_to_dcn - v->dcfclk * v->return_bus_width / 4.0) + 
v->urgent_latency))); + } + v->critical_compression = 2.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0); + if (v->dcc_enabled_any_plane == dcn_bw_yes && v->critical_compression > 1.0 && v->critical_compression < 4.0) { + v->return_bw =dcn_bw_min2(v->return_bw, dcn_bw_pow(4.0 * v->return_bandwidth_to_dcn * (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0 * v->return_bus_width * v->dcfclk * v->urgent_latency / (v->return_bandwidth_to_dcn * v->urgent_latency + (v->rob_buffer_size_in_kbyte - v->pixel_chunk_size_in_kbyte) * 1024.0), 2)); + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_scan[k] == dcn_bw_hor) { + v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k]; + } + else { + v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k]; + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->byte_per_pixel_dety[k] = 8.0; + v->byte_per_pixel_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { + v->byte_per_pixel_dety[k] = 4.0; + v->byte_per_pixel_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->byte_per_pixel_dety[k] = 2.0; + v->byte_per_pixel_detc[k] = 0.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->byte_per_pixel_dety[k] = 1.0; + v->byte_per_pixel_detc[k] = 2.0; + } + else { + v->byte_per_pixel_dety[k] = 4.0f / 3.0f; + v->byte_per_pixel_detc[k] = 8.0f / 3.0f; + } + } + v->total_data_read_bandwidth = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k]; + v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0; + v->total_data_read_bandwidth = v->total_data_read_bandwidth + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k]; + } + v->total_active_dpp = 0.0; + v->total_dcc_active_dpp = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->total_active_dpp = v->total_active_dpp + v->dpp_per_plane[k]; + if (v->dcc_enable[k] == dcn_bw_yes) { + v->total_dcc_active_dpp = v->total_dcc_active_dpp + v->dpp_per_plane[k]; + } + } + v->urgent_round_trip_and_out_of_order_latency = (v->round_trip_ping_latency_cycles + 32.0) / v->dcfclk + v->urgent_out_of_order_return_per_channel * v->number_of_channels / v->return_bw; + v->last_pixel_of_line_extra_watermark = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->v_ratio[k] <= 1.0) { + v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k]; + } + else { + v->display_pipe_line_delivery_time_luma[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk; + } + v->data_fabric_line_delivery_time_luma = v->swath_width_y[k] * v->swath_height_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->return_bw * v->read_bandwidth_plane_luma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth); + v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_luma - v->display_pipe_line_delivery_time_luma[k]); + if 
(v->byte_per_pixel_detc[k] == 0.0) { + v->display_pipe_line_delivery_time_chroma[k] = 0.0; + } + else { + if (v->v_ratio[k] / 2.0 <= 1.0) { + v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] / (v->h_ratio[k] / 2.0) / v->pixel_clock[k]; + } + else { + v->display_pipe_line_delivery_time_chroma[k] = v->swath_width_y[k] / 2.0 / v->pscl_throughput_chroma[k] / v->dppclk; + } + v->data_fabric_line_delivery_time_chroma = v->swath_width_y[k] / 2.0 * v->swath_height_c[k] *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->return_bw * v->read_bandwidth_plane_chroma[k] / v->dpp_per_plane[k] / v->total_data_read_bandwidth); + v->last_pixel_of_line_extra_watermark =dcn_bw_max2(v->last_pixel_of_line_extra_watermark, v->data_fabric_line_delivery_time_chroma - v->display_pipe_line_delivery_time_chroma[k]); + } + } + v->urgent_extra_latency = v->urgent_round_trip_and_out_of_order_latency + (v->total_active_dpp * v->pixel_chunk_size_in_kbyte + v->total_dcc_active_dpp * v->meta_chunk_size) * 1024.0 / v->return_bw; + if (v->pte_enable == dcn_bw_yes) { + v->urgent_extra_latency = v->urgent_extra_latency + v->total_active_dpp * v->pte_chunk_size * 1024.0 / v->return_bw; + } + v->urgent_watermark = v->urgent_latency + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency; + v->ptemeta_urgent_watermark = v->urgent_watermark + 2.0 * v->urgent_latency; + /*nb p-state/dram clock change watermark*/ + + v->dram_clock_change_watermark = v->dram_clock_change_latency + v->urgent_watermark; + v->total_active_writeback = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback) { + v->total_active_writeback = v->total_active_writeback + 1.0; + } + } + if (v->total_active_writeback <= 1.0) { + v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency; + } + else { + v->writeback_dram_clock_change_watermark = v->dram_clock_change_latency + v->write_back_latency + v->writeback_chunk_size * 1024.0 / 32.0 / v->socclk; + } + /*stutter efficiency*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->lines_in_dety[k] = v->det_buffer_size_y[k] / v->byte_per_pixel_dety[k] / v->swath_width_y[k]; + v->lines_in_dety_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_dety[k], v->swath_height_y[k]); + v->full_det_buffering_time_y[k] = v->lines_in_dety_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k]; + if (v->byte_per_pixel_detc[k] > 0.0) { + v->lines_in_detc[k] = v->det_buffer_size_c[k] / v->byte_per_pixel_detc[k] / (v->swath_width_y[k] / 2.0); + v->lines_in_detc_rounded_down_to_swath[k] =dcn_bw_floor2(v->lines_in_detc[k], v->swath_height_c[k]); + v->full_det_buffering_time_c[k] = v->lines_in_detc_rounded_down_to_swath[k] * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0); + } + else { + v->lines_in_detc[k] = 0.0; + v->lines_in_detc_rounded_down_to_swath[k] = 0.0; + v->full_det_buffering_time_c[k] = 999999.0; + } + } + v->min_full_det_buffering_time = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->full_det_buffering_time_y[k] < v->min_full_det_buffering_time) { + v->min_full_det_buffering_time = v->full_det_buffering_time_y[k]; + v->frame_time_for_min_full_det_buffering_time = v->vtotal[k] * v->htotal[k] / v->pixel_clock[k]; + } + if (v->full_det_buffering_time_c[k] < v->min_full_det_buffering_time) { + v->min_full_det_buffering_time = v->full_det_buffering_time_c[k]; + v->frame_time_for_min_full_det_buffering_time = 
v->vtotal[k] * v->htotal[k] / v->pixel_clock[k]; + } + } + v->average_read_bandwidth_gbyte_per_second = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / v->dcc_rate[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / v->dcc_rate[k] / 1000.0; + } + else { + v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 + v->read_bandwidth_plane_chroma[k] / 1000.0; + } + if (v->dcc_enable[k] == dcn_bw_yes) { + v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 256.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 256.0; + } + if (v->pte_enable == dcn_bw_yes) { + v->average_read_bandwidth_gbyte_per_second = v->average_read_bandwidth_gbyte_per_second + v->read_bandwidth_plane_luma[k] / 1000.0 / 512.0 + v->read_bandwidth_plane_chroma[k] / 1000.0 / 512.0; + } + } + v->part_of_burst_that_fits_in_rob =dcn_bw_min2(v->min_full_det_buffering_time * v->total_data_read_bandwidth, v->rob_buffer_size_in_kbyte * 1024.0 * v->total_data_read_bandwidth / (v->average_read_bandwidth_gbyte_per_second * 1000.0)); + v->stutter_burst_time = v->part_of_burst_that_fits_in_rob * (v->average_read_bandwidth_gbyte_per_second * 1000.0) / v->total_data_read_bandwidth / v->return_bw + (v->min_full_det_buffering_time * v->total_data_read_bandwidth - v->part_of_burst_that_fits_in_rob) / (v->dcfclk * 64.0); + if (v->total_active_writeback == 0.0) { + v->stutter_efficiency_not_including_vblank = (1.0 - (v->sr_exit_time + v->stutter_burst_time) / v->min_full_det_buffering_time) * 100.0; + } + else { + v->stutter_efficiency_not_including_vblank = 0.0; + } + v->smallest_vblank = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) { + v->v_blank_time = (v->vtotal[k] - v->vactive[k]) * v->htotal[k] / v->pixel_clock[k]; + } + else { + v->v_blank_time = 0.0; + } + v->smallest_vblank =dcn_bw_min2(v->smallest_vblank, v->v_blank_time); + } + v->stutter_efficiency = (v->stutter_efficiency_not_including_vblank / 100.0 * (v->frame_time_for_min_full_det_buffering_time - v->smallest_vblank) + v->smallest_vblank) / v->frame_time_for_min_full_det_buffering_time * 100.0; + /*dcfclk deep sleep*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->byte_per_pixel_detc[k] > 0.0) { + v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 32.0 / v->display_pipe_line_delivery_time_luma[k], 1.1 * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 32.0 / v->display_pipe_line_delivery_time_chroma[k]); + } + else { + v->dcfclk_deep_sleep_per_plane[k] = 1.1 * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 64.0 / v->display_pipe_line_delivery_time_luma[k]; + } + v->dcfclk_deep_sleep_per_plane[k] =dcn_bw_max2(v->dcfclk_deep_sleep_per_plane[k], v->pixel_clock[k] / 16.0); + } + v->dcf_clk_deep_sleep = 8.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->dcf_clk_deep_sleep =dcn_bw_max2(v->dcf_clk_deep_sleep, v->dcfclk_deep_sleep_per_plane[k]); + } + /*stutter watermark*/ + + v->stutter_exit_watermark = v->sr_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency + 10.0 / 
v->dcf_clk_deep_sleep; + v->stutter_enter_plus_exit_watermark = v->sr_enter_plus_exit_time + v->last_pixel_of_line_extra_watermark + v->urgent_extra_latency; + /*urgent latency supported*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->effective_det_plus_lb_lines_luma =dcn_bw_floor2(v->lines_in_dety[k] +dcn_bw_min2(v->lines_in_dety[k] * v->dppclk * v->byte_per_pixel_dety[k] * v->pscl_throughput[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_luma), v->swath_height_y[k]); + v->urgent_latency_support_us_luma = v->effective_det_plus_lb_lines_luma * (v->htotal[k] / v->pixel_clock[k]) / v->v_ratio[k] - v->effective_det_plus_lb_lines_luma * v->swath_width_y[k] * v->byte_per_pixel_dety[k] / (v->return_bw / v->dpp_per_plane[k]); + if (v->byte_per_pixel_detc[k] > 0.0) { + v->effective_det_plus_lb_lines_chroma =dcn_bw_floor2(v->lines_in_detc[k] +dcn_bw_min2(v->lines_in_detc[k] * v->dppclk * v->byte_per_pixel_detc[k] * v->pscl_throughput_chroma[k] / (v->return_bw / v->dpp_per_plane[k]), v->effective_lb_latency_hiding_source_lines_chroma), v->swath_height_c[k]); + v->urgent_latency_support_us_chroma = v->effective_det_plus_lb_lines_chroma * (v->htotal[k] / v->pixel_clock[k]) / (v->v_ratio[k] / 2.0) - v->effective_det_plus_lb_lines_chroma * (v->swath_width_y[k] / 2.0) * v->byte_per_pixel_detc[k] / (v->return_bw / v->dpp_per_plane[k]); + v->urgent_latency_support_us[k] =dcn_bw_min2(v->urgent_latency_support_us_luma, v->urgent_latency_support_us_chroma); + } + else { + v->urgent_latency_support_us[k] = v->urgent_latency_support_us_luma; + } + } + v->min_urgent_latency_support_us = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->min_urgent_latency_support_us =dcn_bw_min2(v->min_urgent_latency_support_us, v->urgent_latency_support_us[k]); + } + /*non-urgent latency tolerance*/ + + v->non_urgent_latency_tolerance = v->min_urgent_latency_support_us - v->urgent_watermark; + /*prefetch*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if ((v->source_pixel_format[k] == dcn_bw_rgb_sub_64 || v->source_pixel_format[k] == dcn_bw_rgb_sub_32 || v->source_pixel_format[k] == dcn_bw_rgb_sub_16)) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->block_height256_bytes_y = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->block_height256_bytes_y = 4.0; + } + else { + v->block_height256_bytes_y = 8.0; + } + v->block_height256_bytes_c = 0.0; + } + else { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->block_height256_bytes_y = 1.0; + v->block_height256_bytes_c = 1.0; + } + else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->block_height256_bytes_y = 16.0; + v->block_height256_bytes_c = 8.0; + } + else { + v->block_height256_bytes_y = 8.0; + v->block_height256_bytes_c = 8.0; + } + } + if (v->dcc_enable[k] == dcn_bw_yes) { + v->meta_request_width_y = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (8.0 * v->block_height256_bytes_y); + v->meta_surf_width_y =dcn_bw_ceil2(v->swath_width_y[k] - 1.0, v->meta_request_width_y) + v->meta_request_width_y; + v->meta_surf_height_y =dcn_bw_ceil2(v->viewport_height[k] - 1.0, 8.0 * v->block_height256_bytes_y) + 8.0 * v->block_height256_bytes_y; + if (v->pte_enable == dcn_bw_yes) { + v->meta_pte_bytes_frame_y = (dcn_bw_ceil2((v->meta_surf_width_y * v->meta_surf_height_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; + } + else { + v->meta_pte_bytes_frame_y = 0.0; + } + if 
(v->source_scan[k] == dcn_bw_hor) { + v->meta_row_byte_y = v->meta_surf_width_y * 8.0 * v->block_height256_bytes_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0; + } + else { + v->meta_row_byte_y = v->meta_surf_height_y * v->meta_request_width_y *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / 256.0; + } + } + else { + v->meta_pte_bytes_frame_y = 0.0; + v->meta_row_byte_y = 0.0; + } + if (v->pte_enable == dcn_bw_yes) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->macro_tile_size_byte_y = 256.0; + v->macro_tile_height_y = 1.0; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { + v->macro_tile_size_byte_y = 4096.0; + v->macro_tile_height_y = 4.0 * v->block_height256_bytes_y; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { + v->macro_tile_size_byte_y = 64.0 * 1024; + v->macro_tile_height_y = 16.0 * v->block_height256_bytes_y; + } + else { + v->macro_tile_size_byte_y = 256.0 * 1024; + v->macro_tile_height_y = 32.0 * v->block_height256_bytes_y; + } + if (v->macro_tile_size_byte_y <= 65536.0) { + v->pixel_pte_req_height_y = v->macro_tile_height_y; + } + else { + v->pixel_pte_req_height_y = 16.0 * v->block_height256_bytes_y; + } + v->pixel_pte_req_width_y = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / v->pixel_pte_req_height_y * 8; + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] *dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_y / v->swath_width_y[k], 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1); + } + else if (v->source_scan[k] == dcn_bw_hor) { + v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] - 1.0) / v->pixel_pte_req_width_y, 1.0) + 1); + } + else { + v->pixel_pte_bytes_per_row_y = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] - 1.0) / v->pixel_pte_req_height_y, 1.0) + 1); + } + } + else { + v->pixel_pte_bytes_per_row_y = 0.0; + } + if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { + if (v->dcc_enable[k] == dcn_bw_yes) { + v->meta_request_width_c = 64.0 * 256.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (8.0 * v->block_height256_bytes_c); + v->meta_surf_width_c =dcn_bw_ceil2(v->swath_width_y[k] / 2.0 - 1.0, v->meta_request_width_c) + v->meta_request_width_c; + v->meta_surf_height_c =dcn_bw_ceil2(v->viewport_height[k] / 2.0 - 1.0, 8.0 * v->block_height256_bytes_c) + 8.0 * v->block_height256_bytes_c; + if (v->pte_enable == dcn_bw_yes) { + v->meta_pte_bytes_frame_c = (dcn_bw_ceil2((v->meta_surf_width_c * v->meta_surf_height_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0 - 4096.0) / 8.0 / 4096.0, 1.0) + 1) * 64.0; + } + else { + v->meta_pte_bytes_frame_c = 0.0; + } + if (v->source_scan[k] == dcn_bw_hor) { + v->meta_row_byte_c = v->meta_surf_width_c * 8.0 * v->block_height256_bytes_c *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0; + } + else { + v->meta_row_byte_c = v->meta_surf_height_c * v->meta_request_width_c 
*dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 256.0; + } + } + else { + v->meta_pte_bytes_frame_c = 0.0; + v->meta_row_byte_c = 0.0; + } + if (v->pte_enable == dcn_bw_yes) { + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->macro_tile_size_bytes_c = 256.0; + v->macro_tile_height_c = 1.0; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_4_kb_s || v->source_surface_mode[k] == dcn_bw_sw_4_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d || v->source_surface_mode[k] == dcn_bw_sw_4_kb_d_x) { + v->macro_tile_size_bytes_c = 4096.0; + v->macro_tile_height_c = 4.0 * v->block_height256_bytes_c; + } + else if (v->source_surface_mode[k] == dcn_bw_sw_64_kb_s || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_s_x || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_t || v->source_surface_mode[k] == dcn_bw_sw_64_kb_d_x) { + v->macro_tile_size_bytes_c = 64.0 * 1024; + v->macro_tile_height_c = 16.0 * v->block_height256_bytes_c; + } + else { + v->macro_tile_size_bytes_c = 256.0 * 1024; + v->macro_tile_height_c = 32.0 * v->block_height256_bytes_c; + } + if (v->macro_tile_size_bytes_c <= 65536.0) { + v->pixel_pte_req_height_c = v->macro_tile_height_c; + } + else { + v->pixel_pte_req_height_c = 16.0 * v->block_height256_bytes_c; + } + v->pixel_pte_req_width_c = 4096.0 /dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / v->pixel_pte_req_height_c * 8; + if (v->source_surface_mode[k] == dcn_bw_sw_linear) { + v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 * dcn_bw_min2(128.0, dcn_bw_pow(2.0,dcn_bw_floor2(dcn_bw_log(v->pte_buffer_size_in_requests * v->pixel_pte_req_width_c / (v->swath_width_y[k] / 2.0), 2.0), 1.0))) - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1); + } + else if (v->source_scan[k] == dcn_bw_hor) { + v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->swath_width_y[k] / 2.0 - 1.0) / v->pixel_pte_req_width_c, 1.0) + 1); + } + else { + v->pixel_pte_bytes_per_row_c = 64.0 * (dcn_bw_ceil2((v->viewport_height[k] / 2.0 - 1.0) / v->pixel_pte_req_height_c, 1.0) + 1); + } + } + else { + v->pixel_pte_bytes_per_row_c = 0.0; + } + } + else { + v->pixel_pte_bytes_per_row_c = 0.0; + v->meta_pte_bytes_frame_c = 0.0; + v->meta_row_byte_c = 0.0; + } + v->pixel_pte_bytes_per_row[k] = v->pixel_pte_bytes_per_row_y + v->pixel_pte_bytes_per_row_c; + v->meta_pte_bytes_frame[k] = v->meta_pte_bytes_frame_y + v->meta_pte_bytes_frame_c; + v->meta_row_byte[k] = v->meta_row_byte_y + v->meta_row_byte_c; + v->v_init_pre_fill_y[k] =dcn_bw_floor2((v->v_ratio[k] + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k]) / 2.0, 1.0); + v->max_num_swath_y[k] =dcn_bw_ceil2((v->v_init_pre_fill_y[k] - 1.0) / v->swath_height_y[k], 1.0) + 1; + if (v->v_init_pre_fill_y[k] > 1.0) { + v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] - 2.0), v->swath_height_y[k]); + } + else { + v->max_partial_swath_y =dcn_bw_mod((v->v_init_pre_fill_y[k] + v->swath_height_y[k] - 2.0), v->swath_height_y[k]); + } + v->max_partial_swath_y =dcn_bw_max2(1.0, v->max_partial_swath_y); + v->prefetch_source_lines_y[k] = v->max_num_swath_y[k] * v->swath_height_y[k] + v->max_partial_swath_y; + if ((v->source_pixel_format[k] != dcn_bw_rgb_sub_64 && v->source_pixel_format[k] != dcn_bw_rgb_sub_32 && v->source_pixel_format[k] != dcn_bw_rgb_sub_16)) { + v->v_init_pre_fill_c[k] =dcn_bw_floor2((v->v_ratio[k] / 2.0 + v->vtaps[k] + 1.0 + v->interlace_output[k] * 0.5 * v->v_ratio[k] / 2.0) / 2.0, 1.0); + 
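For reference, the luma/chroma PTE sizing above reads as: a 64-byte PTE request covers 8 PTEs worth of 4 KiB pages, so a request spans 4096 / bytes-per-pixel / request-height * 8 pixels of width, and one row of the swath then costs 64 bytes per request needed to span the swath width. Below is a minimal standalone sketch (not part of the patch) with hypothetical inputs — 32 bpp, 64KB_S swizzle, a 1920-pixel swath — and a local ceil2() standing in for dcn_bw_ceil2():

/* Standalone sketch: rough luma PTE-row sizing mirroring the
 * pixel_pte_req_width_y / pixel_pte_bytes_per_row_y math above.
 * All input values are hypothetical.
 */
#include <stdio.h>

static float ceil2(float arg, float significance)
{
	float flr = (float)((int)(arg / significance)) * significance;

	return flr + 0.00001f >= arg ? arg : flr + significance;
}

int main(void)
{
	float byte_per_pixel = 4.0f;		/* 32 bpp packed RGB */
	float block_height_256b = 8.0f;		/* 256B block height for 32 bpp tiled */
	float pte_req_height = 16.0f * block_height_256b;	/* 64KB_S macro tile, <= 64 KiB */
	float swath_width = 1920.0f;

	/* one 64B PTE request returns 8 PTEs, each mapping a 4 KiB page */
	float pte_req_width = 4096.0f / ceil2(byte_per_pixel, 1.0f) / pte_req_height * 8.0f;

	/* horizontal scan: 64 bytes per request needed to cover the swath width */
	float pte_bytes_per_row = 64.0f * (ceil2((swath_width - 1.0f) / pte_req_width, 1.0f) + 1.0f);

	printf("req width %.0f px, bytes per PTE row %.0f\n",
	       pte_req_width, pte_bytes_per_row);
	return 0;	/* prints: req width 64 px, bytes per PTE row 1984 */
}

With these inputs the sketch reports a 64-pixel PTE request width and 1984 bytes of PTEs per swath row, which is the kind of value that ends up in pixel_pte_bytes_per_row[k] and, further down, in the prefetch bandwidth terms.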
v->max_num_swath_c[k] =dcn_bw_ceil2((v->v_init_pre_fill_c[k] - 1.0) / v->swath_height_c[k], 1.0) + 1; + if (v->v_init_pre_fill_c[k] > 1.0) { + v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] - 2.0), v->swath_height_c[k]); + } + else { + v->max_partial_swath_c =dcn_bw_mod((v->v_init_pre_fill_c[k] + v->swath_height_c[k] - 2.0), v->swath_height_c[k]); + } + v->max_partial_swath_c =dcn_bw_max2(1.0, v->max_partial_swath_c); + } + else { + v->max_num_swath_c[k] = 0.0; + v->max_partial_swath_c = 0.0; + } + v->prefetch_source_lines_c[k] = v->max_num_swath_c[k] * v->swath_height_c[k] + v->max_partial_swath_c; + } + v->t_calc = 24.0 / v->dcf_clk_deep_sleep; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes) { + v->max_vstartup_lines[k] = v->vtotal[k] - v->vactive[k] - 1.0; + } + else { + v->max_vstartup_lines[k] = v->v_sync_plus_back_porch[k] - 1.0; + } + } + v->next_prefetch_mode = 0.0; + do { + v->v_startup_lines = 13.0; + do { + v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_yes; + v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_no; + v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_no; + v->v_ratio_prefetch_more_than4 = dcn_bw_no; + v->destination_line_times_for_prefetch_less_than2 = dcn_bw_no; + v->prefetch_mode = v->next_prefetch_mode; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->dstx_after_scaler = 90.0 * v->pixel_clock[k] / v->dppclk + 42.0 * v->pixel_clock[k] / v->dispclk; + if (v->dpp_per_plane[k] > 1.0) { + v->dstx_after_scaler = v->dstx_after_scaler + v->scaler_rec_out_width[k] / 2.0; + } + if (v->output_format[k] == dcn_bw_420) { + v->dsty_after_scaler = 1.0; + } + else { + v->dsty_after_scaler = 0.0; + } + v->v_update_offset_pix[k] = dcn_bw_ceil2(v->htotal[k] / 4.0, 1.0); + v->total_repeater_delay_time = v->max_inter_dcn_tile_repeaters * (2.0 / v->dppclk + 3.0 / v->dispclk); + v->v_update_width_pix[k] = (14.0 / v->dcf_clk_deep_sleep + 12.0 / v->dppclk + v->total_repeater_delay_time) * v->pixel_clock[k]; + v->v_ready_offset_pix[k] = dcn_bw_max2(150.0 / v->dppclk, v->total_repeater_delay_time + 20.0 / v->dcf_clk_deep_sleep + 10.0 / v->dppclk) * v->pixel_clock[k]; + v->t_setup = (v->v_update_offset_pix[k] + v->v_update_width_pix[k] + v->v_ready_offset_pix[k]) / v->pixel_clock[k]; + v->v_startup[k] =dcn_bw_min2(v->v_startup_lines, v->max_vstartup_lines[k]); + if (v->prefetch_mode == 0.0) { + v->t_wait =dcn_bw_max3(v->dram_clock_change_latency + v->urgent_latency, v->sr_enter_plus_exit_time, v->urgent_latency); + } + else if (v->prefetch_mode == 1.0) { + v->t_wait =dcn_bw_max2(v->sr_enter_plus_exit_time, v->urgent_latency); + } + else { + v->t_wait = v->urgent_latency; + } + v->destination_lines_for_prefetch[k] =dcn_bw_floor2(4.0 * (v->v_startup[k] - v->t_wait / (v->htotal[k] / v->pixel_clock[k]) - (v->t_calc + v->t_setup) / (v->htotal[k] / v->pixel_clock[k]) - (v->dsty_after_scaler + v->dstx_after_scaler / v->htotal[k]) + 0.125), 1.0) / 4; + if (v->destination_lines_for_prefetch[k] > 0.0) { + v->prefetch_bandwidth[k] = (v->meta_pte_bytes_frame[k] + 2.0 * v->meta_row_byte[k] + 2.0 * v->pixel_pte_bytes_per_row[k] + v->prefetch_source_lines_y[k] * v->swath_width_y[k] *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] * v->swath_width_y[k] / 2.0 *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0)) / (v->destination_lines_for_prefetch[k] * 
v->htotal[k] / v->pixel_clock[k]); + } + else { + v->prefetch_bandwidth[k] = 999999.0; + } + } + v->bandwidth_available_for_immediate_flip = v->return_bw; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->bandwidth_available_for_immediate_flip = v->bandwidth_available_for_immediate_flip -dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->prefetch_bandwidth[k]); + } + v->tot_immediate_flip_bytes = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->tot_immediate_flip_bytes = v->tot_immediate_flip_bytes + v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]; + } + } + v->max_rd_bandwidth = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->pte_enable == dcn_bw_yes && v->dcc_enable[k] == dcn_bw_yes) { + if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->time_for_fetching_meta_pte =dcn_bw_max5(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->meta_pte_bytes_frame[k] * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, v->urgent_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); + } + else { + v->time_for_fetching_meta_pte =dcn_bw_max3(v->meta_pte_bytes_frame[k] / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] / 4.0); + } + } + else { + v->time_for_fetching_meta_pte = v->htotal[k] / v->pixel_clock[k] / 4.0; + } + v->destination_lines_to_request_vm_inv_blank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_meta_pte / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + if ((v->pte_enable == dcn_bw_yes || v->dcc_enable[k] == dcn_bw_yes)) { + if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->time_for_fetching_row_in_vblank =dcn_bw_max5((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) * v->tot_immediate_flip_bytes / (v->bandwidth_available_for_immediate_flip * (v->meta_pte_bytes_frame[k] + v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k])), v->urgent_extra_latency, 2.0 * v->urgent_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte); + } + else { + v->time_for_fetching_row_in_vblank =dcn_bw_max3((v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / v->prefetch_bandwidth[k], v->urgent_extra_latency, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte); + } + } + else { + v->time_for_fetching_row_in_vblank =dcn_bw_max2(v->urgent_extra_latency - v->time_for_fetching_meta_pte, v->htotal[k] / v->pixel_clock[k] - v->time_for_fetching_meta_pte); + } + v->destination_lines_to_request_row_in_vblank[k] =dcn_bw_floor2(4.0 * (v->time_for_fetching_row_in_vblank / (v->htotal[k] / v->pixel_clock[k]) + 0.125), 1.0) / 4; + v->lines_to_request_prefetch_pixel_data = v->destination_lines_for_prefetch[k] - v->destination_lines_to_request_vm_inv_blank[k] - v->destination_lines_to_request_row_in_vblank[k]; + if (v->lines_to_request_prefetch_pixel_data > 0.0) { + v->v_ratio_prefetch_y[k] = v->prefetch_source_lines_y[k] / 
v->lines_to_request_prefetch_pixel_data; + if ((v->swath_height_y[k] > 4.0)) { + if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_y[k] - 3.0) / 2.0) { + v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], v->max_num_swath_y[k] * v->swath_height_y[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_y[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_prefetch_y[k] = 999999.0; + } + } + } + else { + v->v_ratio_prefetch_y[k] = 999999.0; + } + v->v_ratio_prefetch_y[k] =dcn_bw_max2(v->v_ratio_prefetch_y[k], 1.0); + if (v->lines_to_request_prefetch_pixel_data > 0.0) { + v->v_ratio_prefetch_c[k] = v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data; + if ((v->swath_height_c[k] > 4.0)) { + if (v->lines_to_request_prefetch_pixel_data > (v->v_init_pre_fill_c[k] - 3.0) / 2.0) { + v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], v->max_num_swath_c[k] * v->swath_height_c[k] / (v->lines_to_request_prefetch_pixel_data - (v->v_init_pre_fill_c[k] - 3.0) / 2.0)); + } + else { + v->v_ratio_prefetch_c[k] = 999999.0; + } + } + } + else { + v->v_ratio_prefetch_c[k] = 999999.0; + } + v->v_ratio_prefetch_c[k] =dcn_bw_max2(v->v_ratio_prefetch_c[k], 1.0); + if (v->lines_to_request_prefetch_pixel_data > 0.0) { + v->required_prefetch_pix_data_bw = v->dpp_per_plane[k] * (v->prefetch_source_lines_y[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) + v->prefetch_source_lines_c[k] / v->lines_to_request_prefetch_pixel_data *dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / 2.0) * v->swath_width_y[k] / (v->htotal[k] / v->pixel_clock[k]); + } + else { + v->required_prefetch_pix_data_bw = 999999.0; + } + v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k], v->required_prefetch_pix_data_bw); + if (v->immediate_flip_supported == dcn_bw_yes && (v->source_pixel_format[k] != dcn_bw_yuv420_sub_8 && v->source_pixel_format[k] != dcn_bw_yuv420_sub_10)) { + v->max_rd_bandwidth = v->max_rd_bandwidth +dcn_bw_max2(v->meta_pte_bytes_frame[k] / (v->destination_lines_to_request_vm_inv_blank[k] * v->htotal[k] / v->pixel_clock[k]), (v->meta_row_byte[k] + v->pixel_pte_bytes_per_row[k]) / (v->destination_lines_to_request_row_in_vblank[k] * v->htotal[k] / v->pixel_clock[k])); + } + if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) { + v->v_ratio_prefetch_more_than4 = dcn_bw_yes; + } + if (v->destination_lines_for_prefetch[k] < 2.0) { + v->destination_line_times_for_prefetch_less_than2 = dcn_bw_yes; + } + if (v->max_vstartup_lines[k] > v->v_startup_lines) { + if (v->required_prefetch_pix_data_bw > (v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k])) { + v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw = dcn_bw_no; + } + if (v->v_ratio_prefetch_y[k] > 4.0 || v->v_ratio_prefetch_c[k] > 4.0) { + v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 = dcn_bw_yes; + } + if (v->destination_lines_for_prefetch[k] < 2.0) { + v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 = dcn_bw_yes; + } + } + } + if (v->max_rd_bandwidth <= v->return_bw && v->v_ratio_prefetch_more_than4 == dcn_bw_no && v->destination_line_times_for_prefetch_less_than2 == dcn_bw_no) { + v->prefetch_mode_supported = dcn_bw_yes; + } + else { + v->prefetch_mode_supported = dcn_bw_no; + } + v->v_startup_lines = v->v_startup_lines + 1.0; + } while (!(v->prefetch_mode_supported == dcn_bw_yes || 
(v->planes_with_room_to_increase_vstartup_prefetch_bw_less_than_active_bw == dcn_bw_yes && v->planes_with_room_to_increase_vstartup_vratio_prefetch_more_than4 == dcn_bw_no && v->planes_with_room_to_increase_vstartup_destination_line_times_for_prefetch_less_than2 == dcn_bw_no))); + v->next_prefetch_mode = v->next_prefetch_mode + 1.0; + } while (!(v->prefetch_mode_supported == dcn_bw_yes || v->prefetch_mode == 2.0)); + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->v_ratio_prefetch_y[k] <= 1.0) { + v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k]; + } + else { + v->display_pipe_line_delivery_time_luma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk; + } + if (v->byte_per_pixel_detc[k] == 0.0) { + v->display_pipe_line_delivery_time_chroma_prefetch[k] = 0.0; + } + else { + if (v->v_ratio_prefetch_c[k] <= 1.0) { + v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] * v->dpp_per_plane[k] / v->h_ratio[k] / v->pixel_clock[k]; + } + else { + v->display_pipe_line_delivery_time_chroma_prefetch[k] = v->swath_width_y[k] / v->pscl_throughput[k] / v->dppclk; + } + } + } + /*min ttuv_blank*/ + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->prefetch_mode == 0.0) { + v->allow_dram_clock_change_during_vblank[k] = dcn_bw_yes; + v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes; + v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max3(v->dram_clock_change_watermark, v->stutter_enter_plus_exit_watermark, v->urgent_watermark); + } + else if (v->prefetch_mode == 1.0) { + v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no; + v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_yes; + v->min_ttuv_blank[k] = v->t_calc +dcn_bw_max2(v->stutter_enter_plus_exit_watermark, v->urgent_watermark); + } + else { + v->allow_dram_clock_change_during_vblank[k] = dcn_bw_no; + v->allow_dram_self_refresh_during_vblank[k] = dcn_bw_no; + v->min_ttuv_blank[k] = v->t_calc + v->urgent_watermark; + } + } + /*nb p-state/dram clock change support*/ + + v->active_dp_ps = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->active_dp_ps = v->active_dp_ps + v->dpp_per_plane[k]; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->lb_latency_hiding_source_lines_y =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] /dcn_bw_max2(v->h_ratio[k], 1.0)), 1.0)) - (v->vtaps[k] - 1.0); + v->lb_latency_hiding_source_lines_c =dcn_bw_min2(v->max_line_buffer_lines,dcn_bw_floor2(v->line_buffer_size / v->lb_bit_per_pixel[k] / (v->swath_width_y[k] / 2.0 /dcn_bw_max2(v->h_ratio[k] / 2.0, 1.0)), 1.0)) - (v->vta_pschroma[k] - 1.0); + v->effective_lb_latency_hiding_y = v->lb_latency_hiding_source_lines_y / v->v_ratio[k] * (v->htotal[k] / v->pixel_clock[k]); + v->effective_lb_latency_hiding_c = v->lb_latency_hiding_source_lines_c / (v->v_ratio[k] / 2.0) * (v->htotal[k] / v->pixel_clock[k]); + if (v->swath_width_y[k] > 2.0 * v->dpp_output_buffer_pixels) { + v->dpp_output_buffer_lines_y = v->dpp_output_buffer_pixels / v->swath_width_y[k]; + } + else if (v->swath_width_y[k] > v->dpp_output_buffer_pixels) { + v->dpp_output_buffer_lines_y = 0.5; + } + else { + v->dpp_output_buffer_lines_y = 1.0; + } + if (v->swath_width_y[k] / 2.0 > 2.0 * v->dpp_output_buffer_pixels) { + v->dpp_output_buffer_lines_c = v->dpp_output_buffer_pixels / (v->swath_width_y[k] / 2.0); + } + else if (v->swath_width_y[k] / 2.0 > 
v->dpp_output_buffer_pixels) { + v->dpp_output_buffer_lines_c = 0.5; + } + else { + v->dpp_output_buffer_lines_c = 1.0; + } + v->dppopp_buffering_y = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_y + v->opp_output_buffer_lines); + v->max_det_buffering_time_y = v->full_det_buffering_time_y[k] + (v->lines_in_dety[k] - v->lines_in_dety_rounded_down_to_swath[k]) / v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]); + v->active_dram_clock_change_latency_margin_y = v->dppopp_buffering_y + v->effective_lb_latency_hiding_y + v->max_det_buffering_time_y - v->dram_clock_change_watermark; + if (v->active_dp_ps > 1.0) { + v->active_dram_clock_change_latency_margin_y = v->active_dram_clock_change_latency_margin_y - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_y[k] * (v->htotal[k] / v->pixel_clock[k]); + } + if (v->byte_per_pixel_detc[k] > 0.0) { + v->dppopp_buffering_c = (v->htotal[k] / v->pixel_clock[k]) * (v->dpp_output_buffer_lines_c + v->opp_output_buffer_lines); + v->max_det_buffering_time_c = v->full_det_buffering_time_c[k] + (v->lines_in_detc[k] - v->lines_in_detc_rounded_down_to_swath[k]) / v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]); + v->active_dram_clock_change_latency_margin_c = v->dppopp_buffering_c + v->effective_lb_latency_hiding_c + v->max_det_buffering_time_c - v->dram_clock_change_watermark; + if (v->active_dp_ps > 1.0) { + v->active_dram_clock_change_latency_margin_c = v->active_dram_clock_change_latency_margin_c - (1.0 - 1.0 / (v->active_dp_ps - 1.0)) * v->swath_height_c[k] * (v->htotal[k] / v->pixel_clock[k]); + } + v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin_y, v->active_dram_clock_change_latency_margin_c); + } + else { + v->active_dram_clock_change_latency_margin[k] = v->active_dram_clock_change_latency_margin_y; + } + if (v->output_format[k] == dcn_bw_444) { + v->writeback_dram_clock_change_latency_margin = (v->writeback_luma_buffer_size + v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0) - v->writeback_dram_clock_change_watermark; + } + else { + v->writeback_dram_clock_change_latency_margin =dcn_bw_min2(v->writeback_luma_buffer_size, 2.0 * v->writeback_chroma_buffer_size) * 1024.0 / (v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k])) - v->writeback_dram_clock_change_watermark; + } + if (v->output[k] == dcn_bw_writeback) { + v->active_dram_clock_change_latency_margin[k] =dcn_bw_min2(v->active_dram_clock_change_latency_margin[k], v->writeback_dram_clock_change_latency_margin); + } + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->allow_dram_clock_change_during_vblank[k] == dcn_bw_yes) { + v->v_blank_dram_clock_change_latency_margin[k] = (v->vtotal[k] - v->scaler_recout_height[k]) * (v->htotal[k] / v->pixel_clock[k]) -dcn_bw_max2(v->dram_clock_change_watermark, v->writeback_dram_clock_change_watermark); + } + else { + v->v_blank_dram_clock_change_latency_margin[k] = 0.0; + } + } + v->min_active_dram_clock_change_margin = 999999.0; + v->v_blank_of_min_active_dram_clock_change_margin = 999999.0; + v->second_min_active_dram_clock_change_margin = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->active_dram_clock_change_latency_margin[k] < v->min_active_dram_clock_change_margin) { + v->second_min_active_dram_clock_change_margin = v->min_active_dram_clock_change_margin; + v->min_active_dram_clock_change_margin = 
v->active_dram_clock_change_latency_margin[k]; + v->v_blank_of_min_active_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k]; + } + else if (v->active_dram_clock_change_latency_margin[k] < v->second_min_active_dram_clock_change_margin) { + v->second_min_active_dram_clock_change_margin = v->active_dram_clock_change_latency_margin[k]; + } + } + v->min_vblank_dram_clock_change_margin = 999999.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->min_vblank_dram_clock_change_margin > v->v_blank_dram_clock_change_latency_margin[k]) { + v->min_vblank_dram_clock_change_margin = v->v_blank_dram_clock_change_latency_margin[k]; + } + } + if (v->synchronized_vblank == dcn_bw_yes || v->number_of_active_planes == 1) { + v->dram_clock_change_margin =dcn_bw_max2(v->min_active_dram_clock_change_margin, v->min_vblank_dram_clock_change_margin); + } + else if (v->v_blank_of_min_active_dram_clock_change_margin > v->min_active_dram_clock_change_margin) { + v->dram_clock_change_margin =dcn_bw_min2(v->second_min_active_dram_clock_change_margin, v->v_blank_of_min_active_dram_clock_change_margin); + } + else { + v->dram_clock_change_margin = v->min_active_dram_clock_change_margin; + } + if (v->min_active_dram_clock_change_margin > 0.0) { + v->dram_clock_change_support = dcn_bw_supported_in_v_active; + } + else if (v->dram_clock_change_margin > 0.0) { + v->dram_clock_change_support = dcn_bw_supported_in_v_blank; + } + else { + v->dram_clock_change_support = dcn_bw_not_supported; + } + /*maximum bandwidth used*/ + + v->wr_bandwidth = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->output[k] == dcn_bw_writeback && v->output_format[k] == dcn_bw_444) { + v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 4.0; + } + else if (v->output[k] == dcn_bw_writeback) { + v->wr_bandwidth = v->wr_bandwidth + v->scaler_rec_out_width[k] / (v->htotal[k] / v->pixel_clock[k]) * 1.5; + } + } + v->max_used_bw = v->max_rd_bandwidth + v->wr_bandwidth; +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h new file mode 100644 index 000000000000..ce35de79a6c7 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_auto.h @@ -0,0 +1,38 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: AMD + * + */ + +#ifndef _DCN_CALC_AUTO_H_ +#define _DCN_CALC_AUTO_H_ + +#include "dc.h" +#include "dcn_calcs.h" + +void scaler_settings_calculation(struct dcn_bw_internal_vars *v); +void mode_support_and_system_configuration(struct dcn_bw_internal_vars *v); +void display_pipe_configuration(struct dcn_bw_internal_vars *v); +void dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation( + struct dcn_bw_internal_vars *v); + +#endif /* _DCN_CALC_AUTO_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c new file mode 100644 index 000000000000..07d18e78de49 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calc_math.c @@ -0,0 +1,147 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dcn_calc_math.h" + +#define isNaN(number) ((number) != (number)) + +/* + * NOTE: + * This file is gcc-parseable HW gospel, coming straight from HW engineers. + * + * It doesn't adhere to Linux kernel style and sometimes will do things in odd + * ways. Unless there is something clearly wrong with it the code should + * remain as-is as it provides us with a guarantee from HW that it is correct. + */ + +float dcn_bw_mod(const float arg1, const float arg2) +{ + if (isNaN(arg1)) + return arg2; + if (isNaN(arg2)) + return arg1; + return arg1 - arg1 * ((int) (arg1 / arg2)); +} + +float dcn_bw_min2(const float arg1, const float arg2) +{ + if (isNaN(arg1)) + return arg2; + if (isNaN(arg2)) + return arg1; + return arg1 < arg2 ? arg1 : arg2; +} + +unsigned int dcn_bw_max(const unsigned int arg1, const unsigned int arg2) +{ + return arg1 > arg2 ? arg1 : arg2; +} +float dcn_bw_max2(const float arg1, const float arg2) +{ + if (isNaN(arg1)) + return arg2; + if (isNaN(arg2)) + return arg1; + return arg1 > arg2 ? arg1 : arg2; +} + +float dcn_bw_floor2(const float arg, const float significance) +{ + if (significance == 0) + return 0; + return ((int) (arg / significance)) * significance; +} +float dcn_bw_floor(const float arg) +{ + return ((int) (arg)); +} + +float dcn_bw_ceil(const float arg) +{ + float flr = dcn_bw_floor2(arg, 1); + + return flr + 0.00001 >= arg ? arg : flr + 1; +} + +float dcn_bw_ceil2(const float arg, const float significance) +{ + float flr = dcn_bw_floor2(arg, significance); + if (significance == 0) + return 0; + return flr + 0.00001 >= arg ? 
arg : flr + significance; +} + +float dcn_bw_max3(float v1, float v2, float v3) +{ + return v3 > dcn_bw_max2(v1, v2) ? v3 : dcn_bw_max2(v1, v2); +} + +float dcn_bw_max5(float v1, float v2, float v3, float v4, float v5) +{ + return dcn_bw_max3(v1, v2, v3) > dcn_bw_max2(v4, v5) ? dcn_bw_max3(v1, v2, v3) : dcn_bw_max2(v4, v5); +} + +float dcn_bw_pow(float a, float exp) +{ + float temp; + /*ASSERT(exp == (int)exp);*/ + if ((int)exp == 0) + return 1; + temp = dcn_bw_pow(a, (int)(exp / 2)); + if (((int)exp % 2) == 0) { + return temp * temp; + } else { + if ((int)exp > 0) + return a * temp * temp; + else + return (temp * temp) / a; + } +} + +double dcn_bw_fabs(double a) +{ + if (a > 0) + return (a); + else + return (-a); +} + + +float dcn_bw_log(float a, float b) +{ + int * const exp_ptr = (int *)(&a); + int x = *exp_ptr; + const int log_2 = ((x >> 23) & 255) - 128; + x &= ~(255 << 23); + x += 127 << 23; + *exp_ptr = x; + + a = ((-1.0f / 3) * a + 2) * a - 2.0f / 3; + + if (b > 2.00001 || b < 1.99999) + return (a + log_2) / dcn_bw_log(b, 2); + else + return (a + log_2); +} diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c new file mode 100644 index 000000000000..ec19678a0702 --- /dev/null +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -0,0 +1,1808 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * Copyright 2019 Raptor Engineering, LLC + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: AMD + * + */ + +#include "dm_services.h" +#include "dc.h" +#include "dcn_calcs.h" +#include "dcn_calc_auto.h" +#include "dal_asic_id.h" +#include "resource.h" +#include "dcn10/dcn10_resource.h" +#include "dcn10/dcn10_hubbub.h" +#include "dml/dml1_display_rq_dlg_calc.h" + +#include "dcn_calc_math.h" + +#define DC_LOGGER \ + dc->ctx->logger + +#define WM_SET_COUNT 4 +#define WM_A 0 +#define WM_B 1 +#define WM_C 2 +#define WM_D 3 + +/* + * NOTE: + * This file is gcc-parseable HW gospel, coming straight from HW engineers. + * + * It doesn't adhere to Linux kernel style and sometimes will do things in odd + * ways. Unless there is something clearly wrong with it the code should + * remain as-is as it provides us with a guarantee from HW that it is correct. + */ + +/* Defaults from spreadsheet rev#247. 
+ * RV2 delta: dram_clock_change_latency, max_num_dpp + */ +const struct dcn_soc_bounding_box dcn10_soc_defaults = { + /* latencies */ + .sr_exit_time = 17, /*us*/ + .sr_enter_plus_exit_time = 19, /*us*/ + .urgent_latency = 4, /*us*/ + .dram_clock_change_latency = 17, /*us*/ + .write_back_latency = 12, /*us*/ + .percent_of_ideal_drambw_received_after_urg_latency = 80, /*%*/ + + /* below default clocks derived from STA target base on + * slow-slow corner + 10% margin with voltages aligned to FCLK. + * + * Use these value if fused value doesn't make sense as earlier + * part don't have correct value fused */ + /* default DCF CLK DPM on RV*/ + .dcfclkv_max0p9 = 655, /* MHz, = 3600/5.5 */ + .dcfclkv_nom0p8 = 626, /* MHz, = 3600/5.75 */ + .dcfclkv_mid0p72 = 600, /* MHz, = 3600/6, bypass */ + .dcfclkv_min0p65 = 300, /* MHz, = 3600/12, bypass */ + + /* default DISP CLK voltage state on RV */ + .max_dispclk_vmax0p9 = 1108, /* MHz, = 3600/3.25 */ + .max_dispclk_vnom0p8 = 1029, /* MHz, = 3600/3.5 */ + .max_dispclk_vmid0p72 = 960, /* MHz, = 3600/3.75 */ + .max_dispclk_vmin0p65 = 626, /* MHz, = 3600/5.75 */ + + /* default DPP CLK voltage state on RV */ + .max_dppclk_vmax0p9 = 720, /* MHz, = 3600/5 */ + .max_dppclk_vnom0p8 = 686, /* MHz, = 3600/5.25 */ + .max_dppclk_vmid0p72 = 626, /* MHz, = 3600/5.75 */ + .max_dppclk_vmin0p65 = 400, /* MHz, = 3600/9 */ + + /* default PHY CLK voltage state on RV */ + .phyclkv_max0p9 = 900, /*MHz*/ + .phyclkv_nom0p8 = 847, /*MHz*/ + .phyclkv_mid0p72 = 800, /*MHz*/ + .phyclkv_min0p65 = 600, /*MHz*/ + + /* BW depend on FCLK, MCLK, # of channels */ + /* dual channel BW */ + .fabric_and_dram_bandwidth_vmax0p9 = 38.4f, /*GB/s*/ + .fabric_and_dram_bandwidth_vnom0p8 = 34.133f, /*GB/s*/ + .fabric_and_dram_bandwidth_vmid0p72 = 29.866f, /*GB/s*/ + .fabric_and_dram_bandwidth_vmin0p65 = 12.8f, /*GB/s*/ + /* single channel BW + .fabric_and_dram_bandwidth_vmax0p9 = 19.2f, + .fabric_and_dram_bandwidth_vnom0p8 = 17.066f, + .fabric_and_dram_bandwidth_vmid0p72 = 14.933f, + .fabric_and_dram_bandwidth_vmin0p65 = 12.8f, + */ + + .number_of_channels = 2, + + .socclk = 208, /*MHz*/ + .downspreading = 0.5f, /*%*/ + .round_trip_ping_latency_cycles = 128, /*DCFCLK Cycles*/ + .urgent_out_of_order_return_per_channel = 256, /*bytes*/ + .vmm_page_size = 4096, /*bytes*/ + .return_bus_width = 64, /*bytes*/ + .max_request_size = 256, /*bytes*/ + + /* Depends on user class (client vs embedded, workstation, etc) */ + .percent_disp_bw_limit = 0.3f /*%*/ +}; + +const struct dcn_ip_params dcn10_ip_defaults = { + .rob_buffer_size_in_kbyte = 64, + .det_buffer_size_in_kbyte = 164, + .dpp_output_buffer_pixels = 2560, + .opp_output_buffer_lines = 1, + .pixel_chunk_size_in_kbyte = 8, + .pte_enable = dcn_bw_yes, + .pte_chunk_size = 2, /*kbytes*/ + .meta_chunk_size = 2, /*kbytes*/ + .writeback_chunk_size = 2, /*kbytes*/ + .odm_capability = dcn_bw_no, + .dsc_capability = dcn_bw_no, + .line_buffer_size = 589824, /*bit*/ + .max_line_buffer_lines = 12, + .is_line_buffer_bpp_fixed = dcn_bw_no, + .line_buffer_fixed_bpp = dcn_bw_na, + .writeback_luma_buffer_size = 12, /*kbytes*/ + .writeback_chroma_buffer_size = 8, /*kbytes*/ + .max_num_dpp = 4, + .max_num_writeback = 2, + .max_dchub_topscl_throughput = 4, /*pixels/dppclk*/ + .max_pscl_tolb_throughput = 2, /*pixels/dppclk*/ + .max_lb_tovscl_throughput = 4, /*pixels/dppclk*/ + .max_vscl_tohscl_throughput = 4, /*pixels/dppclk*/ + .max_hscl_ratio = 4, + .max_vscl_ratio = 4, + .max_hscl_taps = 8, + .max_vscl_taps = 8, + .pte_buffer_size_in_requests = 42, + 
.dispclk_ramping_margin = 1, /*%*/ + .under_scan_factor = 1.11f, + .max_inter_dcn_tile_repeaters = 8, + .can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = dcn_bw_no, + .bug_forcing_luma_and_chroma_request_to_same_size_fixed = dcn_bw_no, + .dcfclk_cstate_latency = 10 /*TODO clone of something else? sr_enter_plus_exit_time?*/ +}; + +static enum dcn_bw_defs tl_sw_mode_to_bw_defs(enum swizzle_mode_values sw_mode) +{ + switch (sw_mode) { + case DC_SW_LINEAR: + return dcn_bw_sw_linear; + case DC_SW_4KB_S: + return dcn_bw_sw_4_kb_s; + case DC_SW_4KB_D: + return dcn_bw_sw_4_kb_d; + case DC_SW_64KB_S: + return dcn_bw_sw_64_kb_s; + case DC_SW_64KB_D: + return dcn_bw_sw_64_kb_d; + case DC_SW_VAR_S: + return dcn_bw_sw_var_s; + case DC_SW_VAR_D: + return dcn_bw_sw_var_d; + case DC_SW_64KB_S_T: + return dcn_bw_sw_64_kb_s_t; + case DC_SW_64KB_D_T: + return dcn_bw_sw_64_kb_d_t; + case DC_SW_4KB_S_X: + return dcn_bw_sw_4_kb_s_x; + case DC_SW_4KB_D_X: + return dcn_bw_sw_4_kb_d_x; + case DC_SW_64KB_S_X: + return dcn_bw_sw_64_kb_s_x; + case DC_SW_64KB_D_X: + return dcn_bw_sw_64_kb_d_x; + case DC_SW_VAR_S_X: + return dcn_bw_sw_var_s_x; + case DC_SW_VAR_D_X: + return dcn_bw_sw_var_d_x; + case DC_SW_256B_S: + case DC_SW_256_D: + case DC_SW_256_R: + case DC_SW_4KB_R: + case DC_SW_64KB_R: + case DC_SW_VAR_R: + case DC_SW_4KB_R_X: + case DC_SW_64KB_R_X: + case DC_SW_VAR_R_X: + default: + BREAK_TO_DEBUGGER(); /*not in formula*/ + return dcn_bw_sw_4_kb_s; + } +} + +static int tl_lb_bpp_to_int(enum lb_pixel_depth depth) +{ + switch (depth) { + case LB_PIXEL_DEPTH_18BPP: + return 18; + case LB_PIXEL_DEPTH_24BPP: + return 24; + case LB_PIXEL_DEPTH_30BPP: + return 30; + case LB_PIXEL_DEPTH_36BPP: + return 36; + default: + return 30; + } +} + +static enum dcn_bw_defs tl_pixel_format_to_bw_defs(enum surface_pixel_format format) +{ + switch (format) { + case SURFACE_PIXEL_FORMAT_GRPH_ARGB1555: + case SURFACE_PIXEL_FORMAT_GRPH_RGB565: + return dcn_bw_rgb_sub_16; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB8888: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR8888: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR2101010_XR_BIAS: + return dcn_bw_rgb_sub_32; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + return dcn_bw_rgb_sub_64; + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + return dcn_bw_yuv420_sub_8; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + return dcn_bw_yuv420_sub_10; + default: + return dcn_bw_rgb_sub_32; + } +} + +enum source_macro_tile_size swizzle_mode_to_macro_tile_size(enum swizzle_mode_values sw_mode) +{ + switch (sw_mode) { + /* for 4/8/16 high tiles */ + case DC_SW_LINEAR: + return dm_4k_tile; + case DC_SW_4KB_S: + case DC_SW_4KB_S_X: + return dm_4k_tile; + case DC_SW_64KB_S: + case DC_SW_64KB_S_X: + case DC_SW_64KB_S_T: + return dm_64k_tile; + case DC_SW_VAR_S: + case DC_SW_VAR_S_X: + return dm_256k_tile; + + /* For 64bpp 2 high tiles */ + case DC_SW_4KB_D: + case DC_SW_4KB_D_X: + return dm_4k_tile; + case DC_SW_64KB_D: + case DC_SW_64KB_D_X: + case DC_SW_64KB_D_T: + return dm_64k_tile; + case DC_SW_VAR_D: + case DC_SW_VAR_D_X: + return dm_256k_tile; + + case DC_SW_4KB_R: + case DC_SW_4KB_R_X: + return dm_4k_tile; + case DC_SW_64KB_R: + case DC_SW_64KB_R_X: + return dm_64k_tile; + case 
DC_SW_VAR_R: + case DC_SW_VAR_R_X: + return dm_256k_tile; + + /* Unsupported swizzle modes for dcn */ + case DC_SW_256B_S: + default: + ASSERT(0); /* Not supported */ + return 0; + } +} + +static void pipe_ctx_to_e2e_pipe_params ( + const struct pipe_ctx *pipe, + struct _vcs_dpi_display_pipe_params_st *input) +{ + input->src.is_hsplit = false; + + /* stereo can never be split */ + if (pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_SIDE_BY_SIDE || + pipe->plane_state->stereo_format == PLANE_STEREO_FORMAT_TOP_AND_BOTTOM) { + /* reset the split group if it was already considered split. */ + input->src.hsplit_grp = pipe->pipe_idx; + } else if (pipe->top_pipe != NULL && pipe->top_pipe->plane_state == pipe->plane_state) { + input->src.is_hsplit = true; + } else if (pipe->bottom_pipe != NULL && pipe->bottom_pipe->plane_state == pipe->plane_state) { + input->src.is_hsplit = true; + } + + if (pipe->plane_res.dpp->ctx->dc->debug.optimized_watermark) { + /* + * this method requires us to always re-calculate watermark when dcc change + * between flip. + */ + input->src.dcc = pipe->plane_state->dcc.enable ? 1 : 0; + } else { + /* + * allow us to disable dcc on the fly without re-calculating WM + * + * extra overhead for DCC is quite small. for 1080p WM without + * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us) + */ + unsigned int bpe; + + input->src.dcc = pipe->plane_res.dpp->ctx->dc->res_pool->hubbub->funcs-> + dcc_support_pixel_format(pipe->plane_state->format, &bpe) ? 1 : 0; + } + input->src.dcc_rate = 1; + input->src.meta_pitch = pipe->plane_state->dcc.meta_pitch; + input->src.source_scan = dm_horz; + input->src.sw_mode = pipe->plane_state->tiling_info.gfx9.swizzle; + + input->src.viewport_width = pipe->plane_res.scl_data.viewport.width; + input->src.viewport_height = pipe->plane_res.scl_data.viewport.height; + input->src.data_pitch = pipe->plane_res.scl_data.viewport.width; + input->src.data_pitch_c = pipe->plane_res.scl_data.viewport.width; + input->src.cur0_src_width = 128; /* TODO: Cursor calcs, not curently stored */ + input->src.cur0_bpp = 32; + + input->src.macro_tile_size = swizzle_mode_to_macro_tile_size(pipe->plane_state->tiling_info.gfx9.swizzle); + + switch (pipe->plane_state->rotation) { + case ROTATION_ANGLE_0: + case ROTATION_ANGLE_180: + input->src.source_scan = dm_horz; + break; + case ROTATION_ANGLE_90: + case ROTATION_ANGLE_270: + input->src.source_scan = dm_vert; + break; + default: + ASSERT(0); /* Not supported */ + break; + } + + /* TODO: Fix pixel format mappings */ + switch (pipe->plane_state->format) { + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_YCrCb: + input->src.source_format = dm_420_8; + input->src.viewport_width_c = input->src.viewport_width / 2; + input->src.viewport_height_c = input->src.viewport_height / 2; + break; + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCbCr: + case SURFACE_PIXEL_FORMAT_VIDEO_420_10bpc_YCrCb: + input->src.source_format = dm_420_10; + input->src.viewport_width_c = input->src.viewport_width / 2; + input->src.viewport_height_c = input->src.viewport_height / 2; + break; + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616: + case SURFACE_PIXEL_FORMAT_GRPH_ARGB16161616F: + case SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F: + input->src.source_format = dm_444_64; + input->src.viewport_width_c = input->src.viewport_width; + input->src.viewport_height_c = input->src.viewport_height; + break; + case SURFACE_PIXEL_FORMAT_GRPH_RGBE_ALPHA: + 
input->src.source_format = dm_rgbe_alpha; + input->src.viewport_width_c = input->src.viewport_width; + input->src.viewport_height_c = input->src.viewport_height; + break; + default: + input->src.source_format = dm_444_32; + input->src.viewport_width_c = input->src.viewport_width; + input->src.viewport_height_c = input->src.viewport_height; + break; + } + + input->scale_taps.htaps = pipe->plane_res.scl_data.taps.h_taps; + input->scale_ratio_depth.hscl_ratio = pipe->plane_res.scl_data.ratios.horz.value/4294967296.0; + input->scale_ratio_depth.vscl_ratio = pipe->plane_res.scl_data.ratios.vert.value/4294967296.0; + input->scale_ratio_depth.vinit = pipe->plane_res.scl_data.inits.v.value/4294967296.0; + if (input->scale_ratio_depth.vinit < 1.0) + input->scale_ratio_depth.vinit = 1; + input->scale_taps.vtaps = pipe->plane_res.scl_data.taps.v_taps; + input->scale_taps.vtaps_c = pipe->plane_res.scl_data.taps.v_taps_c; + input->scale_taps.htaps_c = pipe->plane_res.scl_data.taps.h_taps_c; + input->scale_ratio_depth.hscl_ratio_c = pipe->plane_res.scl_data.ratios.horz_c.value/4294967296.0; + input->scale_ratio_depth.vscl_ratio_c = pipe->plane_res.scl_data.ratios.vert_c.value/4294967296.0; + input->scale_ratio_depth.vinit_c = pipe->plane_res.scl_data.inits.v_c.value/4294967296.0; + if (input->scale_ratio_depth.vinit_c < 1.0) + input->scale_ratio_depth.vinit_c = 1; + switch (pipe->plane_res.scl_data.lb_params.depth) { + case LB_PIXEL_DEPTH_30BPP: + input->scale_ratio_depth.lb_depth = 30; break; + case LB_PIXEL_DEPTH_36BPP: + input->scale_ratio_depth.lb_depth = 36; break; + default: + input->scale_ratio_depth.lb_depth = 24; break; + } + + + input->dest.vactive = pipe->stream->timing.v_addressable + pipe->stream->timing.v_border_top + + pipe->stream->timing.v_border_bottom; + + input->dest.recout_width = pipe->plane_res.scl_data.recout.width; + input->dest.recout_height = pipe->plane_res.scl_data.recout.height; + + input->dest.full_recout_width = pipe->plane_res.scl_data.recout.width; + input->dest.full_recout_height = pipe->plane_res.scl_data.recout.height; + + input->dest.htotal = pipe->stream->timing.h_total; + input->dest.hblank_start = input->dest.htotal - pipe->stream->timing.h_front_porch; + input->dest.hblank_end = input->dest.hblank_start + - pipe->stream->timing.h_addressable + - pipe->stream->timing.h_border_left + - pipe->stream->timing.h_border_right; + + input->dest.vtotal = pipe->stream->timing.v_total; + input->dest.vblank_start = input->dest.vtotal - pipe->stream->timing.v_front_porch; + input->dest.vblank_end = input->dest.vblank_start + - pipe->stream->timing.v_addressable + - pipe->stream->timing.v_border_bottom + - pipe->stream->timing.v_border_top; + input->dest.pixel_rate_mhz = pipe->stream->timing.pix_clk_100hz/10000.0; + input->dest.vstartup_start = pipe->pipe_dlg_param.vstartup_start; + input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; + input->dest.vupdate_offset = pipe->pipe_dlg_param.vupdate_offset; + input->dest.vupdate_width = pipe->pipe_dlg_param.vupdate_width; + +} + +static void dcn_bw_calc_rq_dlg_ttu( + const struct dc *dc, + const struct dcn_bw_internal_vars *v, + struct pipe_ctx *pipe, + int in_idx) +{ + struct display_mode_lib *dml = (struct display_mode_lib *)(&dc->dml); + struct _vcs_dpi_display_dlg_regs_st *dlg_regs = &pipe->dlg_regs; + struct _vcs_dpi_display_ttu_regs_st *ttu_regs = &pipe->ttu_regs; + struct _vcs_dpi_display_rq_regs_st *rq_regs = &pipe->rq_regs; + struct _vcs_dpi_display_rq_params_st *rq_param = &pipe->dml_rq_param; + struct 
_vcs_dpi_display_dlg_sys_params_st *dlg_sys_param = &pipe->dml_dlg_sys_param; + struct _vcs_dpi_display_e2e_pipe_params_st *input = &pipe->dml_input; + float total_active_bw = 0; + float total_prefetch_bw = 0; + int total_flip_bytes = 0; + int i; + + memset(dlg_regs, 0, sizeof(*dlg_regs)); + memset(ttu_regs, 0, sizeof(*ttu_regs)); + memset(rq_regs, 0, sizeof(*rq_regs)); + memset(rq_param, 0, sizeof(*rq_param)); + memset(dlg_sys_param, 0, sizeof(*dlg_sys_param)); + memset(input, 0, sizeof(*input)); + + for (i = 0; i < number_of_planes; i++) { + total_active_bw += v->read_bandwidth[i]; + total_prefetch_bw += v->prefetch_bandwidth[i]; + total_flip_bytes += v->total_immediate_flip_bytes[i]; + } + dlg_sys_param->total_flip_bw = v->return_bw - dcn_bw_max2(total_active_bw, total_prefetch_bw); + if (dlg_sys_param->total_flip_bw < 0.0) + dlg_sys_param->total_flip_bw = 0; + + dlg_sys_param->t_mclk_wm_us = v->dram_clock_change_watermark; + dlg_sys_param->t_sr_wm_us = v->stutter_enter_plus_exit_watermark; + dlg_sys_param->t_urg_wm_us = v->urgent_watermark; + dlg_sys_param->t_extra_us = v->urgent_extra_latency; + dlg_sys_param->deepsleep_dcfclk_mhz = v->dcf_clk_deep_sleep; + dlg_sys_param->total_flip_bytes = total_flip_bytes; + + pipe_ctx_to_e2e_pipe_params(pipe, &input->pipe); + input->clks_cfg.dcfclk_mhz = v->dcfclk; + input->clks_cfg.dispclk_mhz = v->dispclk; + input->clks_cfg.dppclk_mhz = v->dppclk; + input->clks_cfg.refclk_mhz = dc->res_pool->ref_clocks.dchub_ref_clock_inKhz / 1000.0; + input->clks_cfg.socclk_mhz = v->socclk; + input->clks_cfg.voltage = v->voltage_level; +// dc->dml.logger = pool->base.logger; + input->dout.output_format = (v->output_format[in_idx] == dcn_bw_420) ? dm_420 : dm_444; + input->dout.output_type = (v->output[in_idx] == dcn_bw_hdmi) ? 
dm_hdmi : dm_dp; + //input[in_idx].dout.output_standard; + + /*todo: soc->sr_enter_plus_exit_time??*/ + dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep; + + dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src); + dml1_extract_rq_regs(dml, rq_regs, rq_param); + dml1_rq_dlg_get_dlg_params( + dml, + dlg_regs, + ttu_regs, + &rq_param->dlg, + dlg_sys_param, + input, + true, + true, + v->pte_enable == dcn_bw_yes, + pipe->plane_state->flip_immediate); +} + +static void split_stream_across_pipes( + struct resource_context *res_ctx, + const struct resource_pool *pool, + struct pipe_ctx *primary_pipe, + struct pipe_ctx *secondary_pipe) +{ + int pipe_idx = secondary_pipe->pipe_idx; + + if (!primary_pipe->plane_state) + return; + + *secondary_pipe = *primary_pipe; + + secondary_pipe->pipe_idx = pipe_idx; + secondary_pipe->plane_res.mi = pool->mis[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.hubp = pool->hubps[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.ipp = pool->ipps[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.xfm = pool->transforms[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.dpp = pool->dpps[secondary_pipe->pipe_idx]; + secondary_pipe->plane_res.mpcc_inst = pool->dpps[secondary_pipe->pipe_idx]->inst; + if (primary_pipe->bottom_pipe) { + ASSERT(primary_pipe->bottom_pipe != secondary_pipe); + secondary_pipe->bottom_pipe = primary_pipe->bottom_pipe; + secondary_pipe->bottom_pipe->top_pipe = secondary_pipe; + } + primary_pipe->bottom_pipe = secondary_pipe; + secondary_pipe->top_pipe = primary_pipe; + + resource_build_scaling_params(primary_pipe); + resource_build_scaling_params(secondary_pipe); +} + +#if 0 +static void calc_wm_sets_and_perf_params( + struct dc_state *context, + struct dcn_bw_internal_vars *v) +{ + /* Calculate set A last to keep internal var state consistent for required config */ + if (v->voltage_level < 2) { + v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vnom0p8; + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b.urgent_ns = v->urgent_watermark * 1000; + + v->dcfclk_per_state[1] = v->dcfclkv_nom0p8; + v->dcfclk_per_state[0] = v->dcfclkv_nom0p8; + v->dcfclk = v->dcfclkv_nom0p8; + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.c.urgent_ns = v->urgent_watermark * 1000; + } + + if 
(v->voltage_level < 3) { + v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_vmax0p9; + v->dcfclk_per_state[2] = v->dcfclkv_max0p9; + v->dcfclk_per_state[1] = v->dcfclkv_max0p9; + v->dcfclk_per_state[0] = v->dcfclkv_max0p9; + v->dcfclk = v->dcfclkv_max0p9; + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.d.urgent_ns = v->urgent_watermark * 1000; + } + + v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72; + v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65; + v->fabric_and_dram_bandwidth = v->fabric_and_dram_bandwidth_per_state[v->voltage_level]; + v->dcfclk_per_state[2] = v->dcfclkv_nom0p8; + v->dcfclk_per_state[1] = v->dcfclkv_mid0p72; + v->dcfclk_per_state[0] = v->dcfclkv_min0p65; + v->dcfclk = v->dcfclk_per_state[v->voltage_level]; + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; + if (v->voltage_level >= 2) { + context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; + } + if (v->voltage_level >= 3) + context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; +} +#endif + +static bool dcn_bw_apply_registry_override(struct dc *dc) +{ + bool updated = false; + + DC_FP_START(); + if ((int)(dc->dcn_soc->sr_exit_time * 1000) != dc->debug.sr_exit_time_ns + && dc->debug.sr_exit_time_ns) { + updated = true; + dc->dcn_soc->sr_exit_time = dc->debug.sr_exit_time_ns / 1000.0; + } + + if ((int)(dc->dcn_soc->sr_enter_plus_exit_time * 1000) + != dc->debug.sr_enter_plus_exit_time_ns + && dc->debug.sr_enter_plus_exit_time_ns) { + updated = true; + dc->dcn_soc->sr_enter_plus_exit_time = + dc->debug.sr_enter_plus_exit_time_ns / 1000.0; + } + + if ((int)(dc->dcn_soc->urgent_latency * 1000) != dc->debug.urgent_latency_ns + && dc->debug.urgent_latency_ns) { + updated = true; + dc->dcn_soc->urgent_latency = dc->debug.urgent_latency_ns / 1000.0; + } + + if ((int)(dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency * 1000) + != dc->debug.percent_of_ideal_drambw + && dc->debug.percent_of_ideal_drambw) { + updated = true; 
+ dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency = + dc->debug.percent_of_ideal_drambw; + } + + if ((int)(dc->dcn_soc->dram_clock_change_latency * 1000) + != dc->debug.dram_clock_change_latency_ns + && dc->debug.dram_clock_change_latency_ns) { + updated = true; + dc->dcn_soc->dram_clock_change_latency = + dc->debug.dram_clock_change_latency_ns / 1000.0; + } + DC_FP_END(); + + return updated; +} + +static void hack_disable_optional_pipe_split(struct dcn_bw_internal_vars *v) +{ + /* + * disable optional pipe split by lower dispclk bounding box + * at DPM0 + */ + v->max_dispclk[0] = v->max_dppclk_vmin0p65; +} + +static void hack_force_pipe_split(struct dcn_bw_internal_vars *v, + unsigned int pixel_rate_100hz) +{ + float pixel_rate_mhz = pixel_rate_100hz / 10000; + + /* + * force enabling pipe split by lower dpp clock for DPM0 to just + * below the specify pixel_rate, so bw calc would split pipe. + */ + if (pixel_rate_mhz < v->max_dppclk[0]) + v->max_dppclk[0] = pixel_rate_mhz; +} + +static void hack_bounding_box(struct dcn_bw_internal_vars *v, + struct dc_debug_options *dbg, + struct dc_state *context) +{ + int i; + + for (i = 0; i < MAX_PIPES; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /** + * Workaround for avoiding pipe-split in cases where we'd split + * planes that are too small, resulting in splits that aren't + * valid for the scaler. + */ + if (pipe->plane_state && + (pipe->plane_state->dst_rect.width <= 16 || + pipe->plane_state->dst_rect.height <= 16 || + pipe->plane_state->src_rect.width <= 16 || + pipe->plane_state->src_rect.height <= 16)) { + hack_disable_optional_pipe_split(v); + return; + } + } + + if (dbg->pipe_split_policy == MPC_SPLIT_AVOID) + hack_disable_optional_pipe_split(v); + + if (dbg->pipe_split_policy == MPC_SPLIT_AVOID_MULT_DISP && + context->stream_count >= 2) + hack_disable_optional_pipe_split(v); + + if (context->stream_count == 1 && + dbg->force_single_disp_pipe_split) + hack_force_pipe_split(v, context->streams[0]->timing.pix_clk_100hz); +} + +static unsigned int get_highest_allowed_voltage_level(uint32_t chip_family, + uint32_t hw_internal_rev, + uint32_t pci_revision_id) +{ + /* for low power RV2 variants, the highest voltage level we want is 0 */ + if ((chip_family == FAMILY_RV) && + ASICREV_IS_RAVEN2(hw_internal_rev)) + switch (pci_revision_id) { + case PRID_DALI_DE: + case PRID_DALI_DF: + case PRID_DALI_E3: + case PRID_DALI_E4: + case PRID_POLLOCK_94: + case PRID_POLLOCK_95: + case PRID_POLLOCK_E9: + case PRID_POLLOCK_EA: + case PRID_POLLOCK_EB: + return 0; + default: + break; + } + + /* we are ok with all levels */ + return 4; +} + +bool dcn10_validate_bandwidth( + struct dc *dc, + struct dc_state *context, + bool fast_validate) +{ + /* + * we want a breakdown of the various stages of validation, which the + * perf_trace macro doesn't support + */ + BW_VAL_TRACE_SETUP(); + + const struct resource_pool *pool = dc->res_pool; + struct dcn_bw_internal_vars *v = &context->dcn_bw_vars; + int i, input_idx, k; + int vesa_sync_start, asic_blank_end, asic_blank_start; + bool bw_limit_pass; + float bw_limit; + + PERFORMANCE_TRACE_START(); + + BW_VAL_TRACE_COUNT(); + + if (dcn_bw_apply_registry_override(dc)) + dcn_bw_sync_calcs_and_dml(dc); + + memset(v, 0, sizeof(*v)); + DC_FP_START(); + + v->sr_exit_time = dc->dcn_soc->sr_exit_time; + v->sr_enter_plus_exit_time = dc->dcn_soc->sr_enter_plus_exit_time; + v->urgent_latency = dc->dcn_soc->urgent_latency; + v->write_back_latency = dc->dcn_soc->write_back_latency; + 
v->percent_of_ideal_drambw_received_after_urg_latency = + dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency; + + v->dcfclkv_min0p65 = dc->dcn_soc->dcfclkv_min0p65; + v->dcfclkv_mid0p72 = dc->dcn_soc->dcfclkv_mid0p72; + v->dcfclkv_nom0p8 = dc->dcn_soc->dcfclkv_nom0p8; + v->dcfclkv_max0p9 = dc->dcn_soc->dcfclkv_max0p9; + + v->max_dispclk_vmin0p65 = dc->dcn_soc->max_dispclk_vmin0p65; + v->max_dispclk_vmid0p72 = dc->dcn_soc->max_dispclk_vmid0p72; + v->max_dispclk_vnom0p8 = dc->dcn_soc->max_dispclk_vnom0p8; + v->max_dispclk_vmax0p9 = dc->dcn_soc->max_dispclk_vmax0p9; + + v->max_dppclk_vmin0p65 = dc->dcn_soc->max_dppclk_vmin0p65; + v->max_dppclk_vmid0p72 = dc->dcn_soc->max_dppclk_vmid0p72; + v->max_dppclk_vnom0p8 = dc->dcn_soc->max_dppclk_vnom0p8; + v->max_dppclk_vmax0p9 = dc->dcn_soc->max_dppclk_vmax0p9; + + v->socclk = dc->dcn_soc->socclk; + + v->fabric_and_dram_bandwidth_vmin0p65 = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65; + v->fabric_and_dram_bandwidth_vmid0p72 = dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72; + v->fabric_and_dram_bandwidth_vnom0p8 = dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth_vmax0p9 = dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9; + + v->phyclkv_min0p65 = dc->dcn_soc->phyclkv_min0p65; + v->phyclkv_mid0p72 = dc->dcn_soc->phyclkv_mid0p72; + v->phyclkv_nom0p8 = dc->dcn_soc->phyclkv_nom0p8; + v->phyclkv_max0p9 = dc->dcn_soc->phyclkv_max0p9; + + v->downspreading = dc->dcn_soc->downspreading; + v->round_trip_ping_latency_cycles = dc->dcn_soc->round_trip_ping_latency_cycles; + v->urgent_out_of_order_return_per_channel = dc->dcn_soc->urgent_out_of_order_return_per_channel; + v->number_of_channels = dc->dcn_soc->number_of_channels; + v->vmm_page_size = dc->dcn_soc->vmm_page_size; + v->dram_clock_change_latency = dc->dcn_soc->dram_clock_change_latency; + v->return_bus_width = dc->dcn_soc->return_bus_width; + + v->rob_buffer_size_in_kbyte = dc->dcn_ip->rob_buffer_size_in_kbyte; + v->det_buffer_size_in_kbyte = dc->dcn_ip->det_buffer_size_in_kbyte; + v->dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels; + v->opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines; + v->pixel_chunk_size_in_kbyte = dc->dcn_ip->pixel_chunk_size_in_kbyte; + v->pte_enable = dc->dcn_ip->pte_enable; + v->pte_chunk_size = dc->dcn_ip->pte_chunk_size; + v->meta_chunk_size = dc->dcn_ip->meta_chunk_size; + v->writeback_chunk_size = dc->dcn_ip->writeback_chunk_size; + v->odm_capability = dc->dcn_ip->odm_capability; + v->dsc_capability = dc->dcn_ip->dsc_capability; + v->line_buffer_size = dc->dcn_ip->line_buffer_size; + v->is_line_buffer_bpp_fixed = dc->dcn_ip->is_line_buffer_bpp_fixed; + v->line_buffer_fixed_bpp = dc->dcn_ip->line_buffer_fixed_bpp; + v->max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines; + v->writeback_luma_buffer_size = dc->dcn_ip->writeback_luma_buffer_size; + v->writeback_chroma_buffer_size = dc->dcn_ip->writeback_chroma_buffer_size; + v->max_num_dpp = dc->dcn_ip->max_num_dpp; + v->max_num_writeback = dc->dcn_ip->max_num_writeback; + v->max_dchub_topscl_throughput = dc->dcn_ip->max_dchub_topscl_throughput; + v->max_pscl_tolb_throughput = dc->dcn_ip->max_pscl_tolb_throughput; + v->max_lb_tovscl_throughput = dc->dcn_ip->max_lb_tovscl_throughput; + v->max_vscl_tohscl_throughput = dc->dcn_ip->max_vscl_tohscl_throughput; + v->max_hscl_ratio = dc->dcn_ip->max_hscl_ratio; + v->max_vscl_ratio = dc->dcn_ip->max_vscl_ratio; + v->max_hscl_taps = dc->dcn_ip->max_hscl_taps; + v->max_vscl_taps = dc->dcn_ip->max_vscl_taps; + 
v->under_scan_factor = dc->dcn_ip->under_scan_factor; + v->pte_buffer_size_in_requests = dc->dcn_ip->pte_buffer_size_in_requests; + v->dispclk_ramping_margin = dc->dcn_ip->dispclk_ramping_margin; + v->max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters; + v->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = + dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one; + v->bug_forcing_luma_and_chroma_request_to_same_size_fixed = + dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed; + + v->voltage[5] = dcn_bw_no_support; + v->voltage[4] = dcn_bw_v_max0p9; + v->voltage[3] = dcn_bw_v_max0p9; + v->voltage[2] = dcn_bw_v_nom0p8; + v->voltage[1] = dcn_bw_v_mid0p72; + v->voltage[0] = dcn_bw_v_min0p65; + v->fabric_and_dram_bandwidth_per_state[5] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[4] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[3] = v->fabric_and_dram_bandwidth_vmax0p9; + v->fabric_and_dram_bandwidth_per_state[2] = v->fabric_and_dram_bandwidth_vnom0p8; + v->fabric_and_dram_bandwidth_per_state[1] = v->fabric_and_dram_bandwidth_vmid0p72; + v->fabric_and_dram_bandwidth_per_state[0] = v->fabric_and_dram_bandwidth_vmin0p65; + v->dcfclk_per_state[5] = v->dcfclkv_max0p9; + v->dcfclk_per_state[4] = v->dcfclkv_max0p9; + v->dcfclk_per_state[3] = v->dcfclkv_max0p9; + v->dcfclk_per_state[2] = v->dcfclkv_nom0p8; + v->dcfclk_per_state[1] = v->dcfclkv_mid0p72; + v->dcfclk_per_state[0] = v->dcfclkv_min0p65; + v->max_dispclk[5] = v->max_dispclk_vmax0p9; + v->max_dispclk[4] = v->max_dispclk_vmax0p9; + v->max_dispclk[3] = v->max_dispclk_vmax0p9; + v->max_dispclk[2] = v->max_dispclk_vnom0p8; + v->max_dispclk[1] = v->max_dispclk_vmid0p72; + v->max_dispclk[0] = v->max_dispclk_vmin0p65; + v->max_dppclk[5] = v->max_dppclk_vmax0p9; + v->max_dppclk[4] = v->max_dppclk_vmax0p9; + v->max_dppclk[3] = v->max_dppclk_vmax0p9; + v->max_dppclk[2] = v->max_dppclk_vnom0p8; + v->max_dppclk[1] = v->max_dppclk_vmid0p72; + v->max_dppclk[0] = v->max_dppclk_vmin0p65; + v->phyclk_per_state[5] = v->phyclkv_max0p9; + v->phyclk_per_state[4] = v->phyclkv_max0p9; + v->phyclk_per_state[3] = v->phyclkv_max0p9; + v->phyclk_per_state[2] = v->phyclkv_nom0p8; + v->phyclk_per_state[1] = v->phyclkv_mid0p72; + v->phyclk_per_state[0] = v->phyclkv_min0p65; + v->synchronized_vblank = dcn_bw_no; + v->ta_pscalculation = dcn_bw_override; + v->allow_different_hratio_vratio = dcn_bw_yes; + + for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + if (!pipe->stream) + continue; + /* skip all but first of split pipes */ + if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) + continue; + + v->underscan_output[input_idx] = false; /* taken care of in recout already*/ + v->interlace_output[input_idx] = false; + + v->htotal[input_idx] = pipe->stream->timing.h_total; + v->vtotal[input_idx] = pipe->stream->timing.v_total; + v->vactive[input_idx] = pipe->stream->timing.v_addressable + + pipe->stream->timing.v_border_top + pipe->stream->timing.v_border_bottom; + v->v_sync_plus_back_porch[input_idx] = pipe->stream->timing.v_total + - v->vactive[input_idx] + - pipe->stream->timing.v_front_porch; + v->pixel_clock[input_idx] = pipe->stream->timing.pix_clk_100hz/10000.0; + if (pipe->stream->timing.timing_3d_format == TIMING_3D_FORMAT_HW_FRAME_PACKING) + v->pixel_clock[input_idx] *= 2; + if (!pipe->plane_state) { + v->dcc_enable[input_idx] = dcn_bw_yes; 
+ v->source_pixel_format[input_idx] = dcn_bw_rgb_sub_32; + v->source_surface_mode[input_idx] = dcn_bw_sw_4_kb_s; + v->lb_bit_per_pixel[input_idx] = 30; + v->viewport_width[input_idx] = pipe->stream->timing.h_addressable; + v->viewport_height[input_idx] = pipe->stream->timing.v_addressable; + /* + * for cases where we have no plane, we want to validate up to 1080p + * source size because here we are only interested in if the output + * timing is supported or not. if we cannot support native resolution + * of the high res display, we still want to support lower res up scale + * to native + */ + if (v->viewport_width[input_idx] > 1920) + v->viewport_width[input_idx] = 1920; + if (v->viewport_height[input_idx] > 1080) + v->viewport_height[input_idx] = 1080; + v->scaler_rec_out_width[input_idx] = v->viewport_width[input_idx]; + v->scaler_recout_height[input_idx] = v->viewport_height[input_idx]; + v->override_hta_ps[input_idx] = 1; + v->override_vta_ps[input_idx] = 1; + v->override_hta_pschroma[input_idx] = 1; + v->override_vta_pschroma[input_idx] = 1; + v->source_scan[input_idx] = dcn_bw_hor; + + } else { + v->viewport_height[input_idx] = pipe->plane_res.scl_data.viewport.height; + v->viewport_width[input_idx] = pipe->plane_res.scl_data.viewport.width; + v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width; + v->scaler_recout_height[input_idx] = pipe->plane_res.scl_data.recout.height; + if (pipe->bottom_pipe && pipe->bottom_pipe->plane_state == pipe->plane_state) { + if (pipe->plane_state->rotation % 2 == 0) { + int viewport_end = pipe->plane_res.scl_data.viewport.width + + pipe->plane_res.scl_data.viewport.x; + int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.width + + pipe->bottom_pipe->plane_res.scl_data.viewport.x; + + if (viewport_end > viewport_b_end) + v->viewport_width[input_idx] = viewport_end + - pipe->bottom_pipe->plane_res.scl_data.viewport.x; + else + v->viewport_width[input_idx] = viewport_b_end + - pipe->plane_res.scl_data.viewport.x; + } else { + int viewport_end = pipe->plane_res.scl_data.viewport.height + + pipe->plane_res.scl_data.viewport.y; + int viewport_b_end = pipe->bottom_pipe->plane_res.scl_data.viewport.height + + pipe->bottom_pipe->plane_res.scl_data.viewport.y; + + if (viewport_end > viewport_b_end) + v->viewport_height[input_idx] = viewport_end + - pipe->bottom_pipe->plane_res.scl_data.viewport.y; + else + v->viewport_height[input_idx] = viewport_b_end + - pipe->plane_res.scl_data.viewport.y; + } + v->scaler_rec_out_width[input_idx] = pipe->plane_res.scl_data.recout.width + + pipe->bottom_pipe->plane_res.scl_data.recout.width; + } + + if (pipe->plane_state->rotation % 2 == 0) { + ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value + || v->scaler_rec_out_width[input_idx] == v->viewport_width[input_idx]); + ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value + || v->scaler_recout_height[input_idx] == v->viewport_height[input_idx]); + } else { + ASSERT(pipe->plane_res.scl_data.ratios.horz.value != dc_fixpt_one.value + || v->scaler_recout_height[input_idx] == v->viewport_width[input_idx]); + ASSERT(pipe->plane_res.scl_data.ratios.vert.value != dc_fixpt_one.value + || v->scaler_rec_out_width[input_idx] == v->viewport_height[input_idx]); + } + + if (dc->debug.optimized_watermark) { + /* + * this method requires us to always re-calculate watermark when dcc change + * between flip. + */ + v->dcc_enable[input_idx] = pipe->plane_state->dcc.enable ? 
dcn_bw_yes : dcn_bw_no; + } else { + /* + * allow us to disable dcc on the fly without re-calculating WM + * + * extra overhead for DCC is quite small. for 1080p WM without + * DCC is only 0.417us lower (urgent goes from 6.979us to 6.562us) + */ + unsigned int bpe; + + v->dcc_enable[input_idx] = dc->res_pool->hubbub->funcs->dcc_support_pixel_format( + pipe->plane_state->format, &bpe) ? dcn_bw_yes : dcn_bw_no; + } + + v->source_pixel_format[input_idx] = tl_pixel_format_to_bw_defs( + pipe->plane_state->format); + v->source_surface_mode[input_idx] = tl_sw_mode_to_bw_defs( + pipe->plane_state->tiling_info.gfx9.swizzle); + v->lb_bit_per_pixel[input_idx] = tl_lb_bpp_to_int(pipe->plane_res.scl_data.lb_params.depth); + v->override_hta_ps[input_idx] = pipe->plane_res.scl_data.taps.h_taps; + v->override_vta_ps[input_idx] = pipe->plane_res.scl_data.taps.v_taps; + v->override_hta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.h_taps_c; + v->override_vta_pschroma[input_idx] = pipe->plane_res.scl_data.taps.v_taps_c; + /* + * Spreadsheet doesn't handle taps_c is one properly, + * need to force Chroma to always be scaled to pass + * bandwidth validation. + */ + if (v->override_hta_pschroma[input_idx] == 1) + v->override_hta_pschroma[input_idx] = 2; + if (v->override_vta_pschroma[input_idx] == 1) + v->override_vta_pschroma[input_idx] = 2; + v->source_scan[input_idx] = (pipe->plane_state->rotation % 2) ? dcn_bw_vert : dcn_bw_hor; + } + if (v->is_line_buffer_bpp_fixed == dcn_bw_yes) + v->lb_bit_per_pixel[input_idx] = v->line_buffer_fixed_bpp; + v->dcc_rate[input_idx] = 1; /*TODO: Worst case? does this change?*/ + v->output_format[input_idx] = pipe->stream->timing.pixel_encoding == + PIXEL_ENCODING_YCBCR420 ? dcn_bw_420 : dcn_bw_444; + v->output[input_idx] = pipe->stream->signal == + SIGNAL_TYPE_HDMI_TYPE_A ? 
dcn_bw_hdmi : dcn_bw_dp; + v->output_deep_color[input_idx] = dcn_bw_encoder_8bpc; + if (v->output[input_idx] == dcn_bw_hdmi) { + switch (pipe->stream->timing.display_color_depth) { + case COLOR_DEPTH_101010: + v->output_deep_color[input_idx] = dcn_bw_encoder_10bpc; + break; + case COLOR_DEPTH_121212: + v->output_deep_color[input_idx] = dcn_bw_encoder_12bpc; + break; + case COLOR_DEPTH_161616: + v->output_deep_color[input_idx] = dcn_bw_encoder_16bpc; + break; + default: + break; + } + } + + input_idx++; + } + v->number_of_active_planes = input_idx; + + scaler_settings_calculation(v); + + hack_bounding_box(v, &dc->debug, context); + + mode_support_and_system_configuration(v); + + /* Unhack dppclk: dont bother with trying to pipe split if we cannot maintain dpm0 */ + if (v->voltage_level != 0 + && context->stream_count == 1 + && dc->debug.force_single_disp_pipe_split) { + v->max_dppclk[0] = v->max_dppclk_vmin0p65; + mode_support_and_system_configuration(v); + } + + if (v->voltage_level == 0 && + (dc->debug.sr_exit_time_dpm0_ns + || dc->debug.sr_enter_plus_exit_time_dpm0_ns)) { + + if (dc->debug.sr_enter_plus_exit_time_dpm0_ns) + v->sr_enter_plus_exit_time = + dc->debug.sr_enter_plus_exit_time_dpm0_ns / 1000.0f; + if (dc->debug.sr_exit_time_dpm0_ns) + v->sr_exit_time = dc->debug.sr_exit_time_dpm0_ns / 1000.0f; + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = v->sr_enter_plus_exit_time; + context->bw_ctx.dml.soc.sr_exit_time_us = v->sr_exit_time; + mode_support_and_system_configuration(v); + } + + display_pipe_configuration(v); + + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_scan[k] == dcn_bw_hor) + v->swath_width_y[k] = v->viewport_width[k] / v->dpp_per_plane[k]; + else + v->swath_width_y[k] = v->viewport_height[k] / v->dpp_per_plane[k]; + } + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + if (v->source_pixel_format[k] == dcn_bw_rgb_sub_64) { + v->byte_per_pixel_dety[k] = 8.0; + v->byte_per_pixel_detc[k] = 0.0; + } else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_32) { + v->byte_per_pixel_dety[k] = 4.0; + v->byte_per_pixel_detc[k] = 0.0; + } else if (v->source_pixel_format[k] == dcn_bw_rgb_sub_16) { + v->byte_per_pixel_dety[k] = 2.0; + v->byte_per_pixel_detc[k] = 0.0; + } else if (v->source_pixel_format[k] == dcn_bw_yuv420_sub_8) { + v->byte_per_pixel_dety[k] = 1.0; + v->byte_per_pixel_detc[k] = 2.0; + } else { + v->byte_per_pixel_dety[k] = 4.0f / 3.0f; + v->byte_per_pixel_detc[k] = 8.0f / 3.0f; + } + } + + v->total_data_read_bandwidth = 0.0; + for (k = 0; k <= v->number_of_active_planes - 1; k++) { + v->read_bandwidth_plane_luma[k] = v->swath_width_y[k] * v->dpp_per_plane[k] * + dcn_bw_ceil2(v->byte_per_pixel_dety[k], 1.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k]; + v->read_bandwidth_plane_chroma[k] = v->swath_width_y[k] / 2.0 * v->dpp_per_plane[k] * + dcn_bw_ceil2(v->byte_per_pixel_detc[k], 2.0) / (v->htotal[k] / v->pixel_clock[k]) * v->v_ratio[k] / 2.0; + v->total_data_read_bandwidth = v->total_data_read_bandwidth + + v->read_bandwidth_plane_luma[k] + v->read_bandwidth_plane_chroma[k]; + } + + BW_VAL_TRACE_END_VOLTAGE_LEVEL(); + + if (v->voltage_level != number_of_states_plus_one && !fast_validate) { + float bw_consumed = v->total_bandwidth_consumed_gbyte_per_second; + + if (bw_consumed < v->fabric_and_dram_bandwidth_vmin0p65) + bw_consumed = v->fabric_and_dram_bandwidth_vmin0p65; + else if (bw_consumed < v->fabric_and_dram_bandwidth_vmid0p72) + bw_consumed = v->fabric_and_dram_bandwidth_vmid0p72; + else if (bw_consumed < 
v->fabric_and_dram_bandwidth_vnom0p8) + bw_consumed = v->fabric_and_dram_bandwidth_vnom0p8; + else + bw_consumed = v->fabric_and_dram_bandwidth_vmax0p9; + + if (bw_consumed < v->fabric_and_dram_bandwidth) + if (dc->debug.voltage_align_fclk) + bw_consumed = v->fabric_and_dram_bandwidth; + + display_pipe_configuration(v); + /*calc_wm_sets_and_perf_params(context, v);*/ + /* Only 1 set is used by dcn since no noticeable + * performance improvement was measured and due to hw bug DEGVIDCN10-254 + */ + dispclkdppclkdcfclk_deep_sleep_prefetch_parameters_watermarks_and_performance_calculation(v); + + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_exit_ns = + v->stutter_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.cstate_enter_plus_exit_ns = + v->stutter_enter_plus_exit_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.cstate_pstate.pstate_change_ns = + v->dram_clock_change_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.pte_meta_urgent_ns = v->ptemeta_urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.a.urgent_ns = v->urgent_watermark * 1000; + context->bw_ctx.bw.dcn.watermarks.b = context->bw_ctx.bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.c = context->bw_ctx.bw.dcn.watermarks.a; + context->bw_ctx.bw.dcn.watermarks.d = context->bw_ctx.bw.dcn.watermarks.a; + + context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / + (ddr4_dram_factor_single_Channel * v->number_of_channels)); + if (bw_consumed == v->fabric_and_dram_bandwidth_vmin0p65) + context->bw_ctx.bw.dcn.clk.fclk_khz = (int)(bw_consumed * 1000000 / 32); + + context->bw_ctx.bw.dcn.clk.dcfclk_deep_sleep_khz = (int)(v->dcf_clk_deep_sleep * 1000); + context->bw_ctx.bw.dcn.clk.dcfclk_khz = (int)(v->dcfclk * 1000); + + context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(v->dispclk * 1000); + if (dc->debug.max_disp_clk == true) + context->bw_ctx.bw.dcn.clk.dispclk_khz = (int)(dc->dcn_soc->max_dispclk_vmax0p9 * 1000); + + if (context->bw_ctx.bw.dcn.clk.dispclk_khz < + dc->debug.min_disp_clk_khz) { + context->bw_ctx.bw.dcn.clk.dispclk_khz = + dc->debug.min_disp_clk_khz; + } + + context->bw_ctx.bw.dcn.clk.dppclk_khz = context->bw_ctx.bw.dcn.clk.dispclk_khz / + v->dispclk_dppclk_ratio; + context->bw_ctx.bw.dcn.clk.phyclk_khz = v->phyclk_per_state[v->voltage_level]; + switch (v->voltage_level) { + case 0: + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = + (int)(dc->dcn_soc->max_dppclk_vmin0p65 * 1000); + break; + case 1: + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = + (int)(dc->dcn_soc->max_dppclk_vmid0p72 * 1000); + break; + case 2: + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = + (int)(dc->dcn_soc->max_dppclk_vnom0p8 * 1000); + break; + default: + context->bw_ctx.bw.dcn.clk.max_supported_dppclk_khz = + (int)(dc->dcn_soc->max_dppclk_vmax0p9 * 1000); + break; + } + + BW_VAL_TRACE_END_WATERMARKS(); + + for (i = 0, input_idx = 0; i < pool->pipe_count; i++) { + struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i]; + + /* skip inactive pipe */ + if (!pipe->stream) + continue; + /* skip all but first of split pipes */ + if (pipe->top_pipe && pipe->top_pipe->plane_state == pipe->plane_state) + continue; + + pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx]; + pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx]; + pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx]; + pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx]; + + pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total; + 
pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total; + vesa_sync_start = pipe->stream->timing.v_addressable + + pipe->stream->timing.v_border_bottom + + pipe->stream->timing.v_front_porch; + + asic_blank_end = (pipe->stream->timing.v_total - + vesa_sync_start - + pipe->stream->timing.v_border_top) + * (pipe->stream->timing.flags.INTERLACE ? 1 : 0); + + asic_blank_start = asic_blank_end + + (pipe->stream->timing.v_border_top + + pipe->stream->timing.v_addressable + + pipe->stream->timing.v_border_bottom) + * (pipe->stream->timing.flags.INTERLACE ? 1 : 0); + + pipe->pipe_dlg_param.vblank_start = asic_blank_start; + pipe->pipe_dlg_param.vblank_end = asic_blank_end; + + if (pipe->plane_state) { + struct pipe_ctx *hsplit_pipe = pipe->bottom_pipe; + + pipe->plane_state->update_flags.bits.full_update = 1; + + if (v->dpp_per_plane[input_idx] == 2 || + ((pipe->stream->view_format == + VIEW_3D_FORMAT_SIDE_BY_SIDE || + pipe->stream->view_format == + VIEW_3D_FORMAT_TOP_AND_BOTTOM) && + (pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_TOP_AND_BOTTOM || + pipe->stream->timing.timing_3d_format == + TIMING_3D_FORMAT_SIDE_BY_SIDE))) { + if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { + /* update previously split pipe */ + hsplit_pipe->pipe_dlg_param.vupdate_width = v->v_update_width_pix[input_idx]; + hsplit_pipe->pipe_dlg_param.vupdate_offset = v->v_update_offset_pix[input_idx]; + hsplit_pipe->pipe_dlg_param.vready_offset = v->v_ready_offset_pix[input_idx]; + hsplit_pipe->pipe_dlg_param.vstartup_start = v->v_startup[input_idx]; + + hsplit_pipe->pipe_dlg_param.htotal = pipe->stream->timing.h_total; + hsplit_pipe->pipe_dlg_param.vtotal = pipe->stream->timing.v_total; + hsplit_pipe->pipe_dlg_param.vblank_start = pipe->pipe_dlg_param.vblank_start; + hsplit_pipe->pipe_dlg_param.vblank_end = pipe->pipe_dlg_param.vblank_end; + } else { + /* pipe not split previously needs split */ + hsplit_pipe = find_idle_secondary_pipe(&context->res_ctx, pool, pipe); + ASSERT(hsplit_pipe); + split_stream_across_pipes(&context->res_ctx, pool, pipe, hsplit_pipe); + } + + dcn_bw_calc_rq_dlg_ttu(dc, v, hsplit_pipe, input_idx); + } else if (hsplit_pipe && hsplit_pipe->plane_state == pipe->plane_state) { + /* merge previously split pipe */ + pipe->bottom_pipe = hsplit_pipe->bottom_pipe; + if (hsplit_pipe->bottom_pipe) + hsplit_pipe->bottom_pipe->top_pipe = pipe; + hsplit_pipe->plane_state = NULL; + hsplit_pipe->stream = NULL; + hsplit_pipe->top_pipe = NULL; + hsplit_pipe->bottom_pipe = NULL; + /* Clear plane_res and stream_res */ + memset(&hsplit_pipe->plane_res, 0, sizeof(hsplit_pipe->plane_res)); + memset(&hsplit_pipe->stream_res, 0, sizeof(hsplit_pipe->stream_res)); + resource_build_scaling_params(pipe); + } + /* for now important to do this after pipe split for building e2e params */ + dcn_bw_calc_rq_dlg_ttu(dc, v, pipe, input_idx); + } + + input_idx++; + } + } else if (v->voltage_level == number_of_states_plus_one) { + BW_VAL_TRACE_SKIP(fail); + } else if (fast_validate) { + BW_VAL_TRACE_SKIP(fast); + } + + if (v->voltage_level == 0) { + context->bw_ctx.dml.soc.sr_enter_plus_exit_time_us = + dc->dcn_soc->sr_enter_plus_exit_time; + context->bw_ctx.dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time; + } + + /* + * BW limit is set to prevent display from impacting other system functions + */ + + bw_limit = dc->dcn_soc->percent_disp_bw_limit * v->fabric_and_dram_bandwidth_vmax0p9; + bw_limit_pass = (v->total_data_read_bandwidth / 1000.0) < bw_limit; + + DC_FP_END(); + + 
PERFORMANCE_TRACE_END(); + BW_VAL_TRACE_FINISH(); + + if (bw_limit_pass && v->voltage_level <= get_highest_allowed_voltage_level( + dc->ctx->asic_id.chip_family, + dc->ctx->asic_id.hw_internal_rev, + dc->ctx->asic_id.pci_revision_id)) + return true; + else + return false; +} + +static unsigned int dcn_find_normalized_clock_vdd_Level( + const struct dc *dc, + enum dm_pp_clock_type clocks_type, + int clocks_in_khz) +{ + int vdd_level = dcn_bw_v_min0p65; + + if (clocks_in_khz == 0)/*todo some clock not in the considerations*/ + return vdd_level; + + switch (clocks_type) { + case DM_PP_CLOCK_TYPE_DISPLAY_CLK: + if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmax0p9*1000) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vnom0p8*1000) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmid0p72*1000) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->max_dispclk_vmin0p65*1000) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + break; + case DM_PP_CLOCK_TYPE_DISPLAYPHYCLK: + if (clocks_in_khz > dc->dcn_soc->phyclkv_max0p9*1000) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->phyclkv_nom0p8*1000) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->phyclkv_mid0p72*1000) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->phyclkv_min0p65*1000) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + break; + + case DM_PP_CLOCK_TYPE_DPPCLK: + if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmax0p9*1000) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vnom0p8*1000) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmid0p72*1000) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->max_dppclk_vmin0p65*1000) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + break; + + case DM_PP_CLOCK_TYPE_MEMORY_CLK: + { + unsigned factor = (ddr4_dram_factor_single_Channel * dc->dcn_soc->number_of_channels); + + if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9*1000000/factor) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8*1000000/factor) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72*1000000/factor) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65*1000000/factor) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + } + break; + + case DM_PP_CLOCK_TYPE_DCFCLK: + if (clocks_in_khz > dc->dcn_soc->dcfclkv_max0p9*1000) { + vdd_level = dcn_bw_v_max0p91; + BREAK_TO_DEBUGGER(); + } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_nom0p8*1000) { + vdd_level = dcn_bw_v_max0p9; + } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_mid0p72*1000) { + vdd_level = dcn_bw_v_nom0p8; + } else if (clocks_in_khz > dc->dcn_soc->dcfclkv_min0p65*1000) { + vdd_level = dcn_bw_v_mid0p72; + } else + vdd_level = dcn_bw_v_min0p65; + break; + + default: + break; + } + return vdd_level; +} + +unsigned int dcn_find_dcfclk_suits_all( + const struct dc *dc, + struct dc_clocks *clocks) +{ + unsigned vdd_level, vdd_level_temp; + unsigned dcf_clk; + + /*find a common supported voltage level*/ + vdd_level = 
dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_DISPLAY_CLK, clocks->dispclk_khz); + vdd_level_temp = dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_DISPLAYPHYCLK, clocks->phyclk_khz); + + vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); + vdd_level_temp = dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_DPPCLK, clocks->dppclk_khz); + vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); + + vdd_level_temp = dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_MEMORY_CLK, clocks->fclk_khz); + vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); + vdd_level_temp = dcn_find_normalized_clock_vdd_Level( + dc, DM_PP_CLOCK_TYPE_DCFCLK, clocks->dcfclk_khz); + + /*find that level conresponding dcfclk*/ + vdd_level = dcn_bw_max(vdd_level, vdd_level_temp); + if (vdd_level == dcn_bw_v_max0p91) { + BREAK_TO_DEBUGGER(); + dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000; + } else if (vdd_level == dcn_bw_v_max0p9) + dcf_clk = dc->dcn_soc->dcfclkv_max0p9*1000; + else if (vdd_level == dcn_bw_v_nom0p8) + dcf_clk = dc->dcn_soc->dcfclkv_nom0p8*1000; + else if (vdd_level == dcn_bw_v_mid0p72) + dcf_clk = dc->dcn_soc->dcfclkv_mid0p72*1000; + else + dcf_clk = dc->dcn_soc->dcfclkv_min0p65*1000; + + DC_LOG_BANDWIDTH_CALCS("\tdcf_clk for voltage = %d\n", dcf_clk); + return dcf_clk; +} + +static bool verify_clock_values(struct dm_pp_clock_levels_with_voltage *clks) +{ + int i; + + if (clks->num_levels == 0) + return false; + + for (i = 0; i < clks->num_levels; i++) + /* Ensure that the result is sane */ + if (clks->data[i].clocks_in_khz == 0) + return false; + + return true; +} + +void dcn_bw_update_from_pplib(struct dc *dc) +{ + struct dc_context *ctx = dc->ctx; + struct dm_pp_clock_levels_with_voltage fclks = {0}, dcfclks = {0}; + bool res; + unsigned vmin0p65_idx, vmid0p72_idx, vnom0p8_idx, vmax0p9_idx; + + /* TODO: This is not the proper way to obtain fabric_and_dram_bandwidth, should be min(fclk, memclk) */ + res = dm_pp_get_clock_levels_by_type_with_voltage( + ctx, DM_PP_CLOCK_TYPE_FCLK, &fclks); + + DC_FP_START(); + + if (res) + res = verify_clock_values(&fclks); + + if (res) { + ASSERT(fclks.num_levels); + + vmin0p65_idx = 0; + vmid0p72_idx = fclks.num_levels - + (fclks.num_levels > 2 ? 3 : (fclks.num_levels > 1 ? 2 : 1)); + vnom0p8_idx = fclks.num_levels - (fclks.num_levels > 1 ? 
2 : 1); + vmax0p9_idx = fclks.num_levels - 1; + + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 = + 32 * (fclks.data[vmin0p65_idx].clocks_in_khz / 1000.0) / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmid0p72_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 = + dc->dcn_soc->number_of_channels * + (fclks.data[vnom0p8_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 = + dc->dcn_soc->number_of_channels * + (fclks.data[vmax0p9_idx].clocks_in_khz / 1000.0) + * ddr4_dram_factor_single_Channel / 1000.0; + } else + BREAK_TO_DEBUGGER(); + + DC_FP_END(); + + res = dm_pp_get_clock_levels_by_type_with_voltage( + ctx, DM_PP_CLOCK_TYPE_DCFCLK, &dcfclks); + + DC_FP_START(); + + if (res) + res = verify_clock_values(&dcfclks); + + if (res && dcfclks.num_levels >= 3) { + dc->dcn_soc->dcfclkv_min0p65 = dcfclks.data[0].clocks_in_khz / 1000.0; + dc->dcn_soc->dcfclkv_mid0p72 = dcfclks.data[dcfclks.num_levels - 3].clocks_in_khz / 1000.0; + dc->dcn_soc->dcfclkv_nom0p8 = dcfclks.data[dcfclks.num_levels - 2].clocks_in_khz / 1000.0; + dc->dcn_soc->dcfclkv_max0p9 = dcfclks.data[dcfclks.num_levels - 1].clocks_in_khz / 1000.0; + } else + BREAK_TO_DEBUGGER(); + + DC_FP_END(); +} + +void dcn_bw_notify_pplib_of_wm_ranges(struct dc *dc) +{ + struct pp_smu_funcs_rv *pp = NULL; + struct pp_smu_wm_range_sets ranges = {0}; + int min_fclk_khz, min_dcfclk_khz, socclk_khz; + const int overdrive = 5000000; /* 5 GHz to cover Overdrive */ + + if (dc->res_pool->pp_smu) + pp = &dc->res_pool->pp_smu->rv_funcs; + if (!pp || !pp->set_wm_ranges) + return; + + DC_FP_START(); + min_fclk_khz = dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000000 / 32; + min_dcfclk_khz = dc->dcn_soc->dcfclkv_min0p65 * 1000; + socclk_khz = dc->dcn_soc->socclk * 1000; + DC_FP_END(); + + /* Now notify PPLib/SMU about which Watermarks sets they should select + * depending on DPM state they are in. And update BW MGR GFX Engine and + * Memory clock member variables for Watermarks calculations for each + * Watermark Set. Only one watermark set for dcn1 due to hw bug DEGVIDCN10-254. 
+ */ + /* SOCCLK does not affect anytihng but writeback for DCN so for now we dont + * care what the value is, hence min to overdrive level + */ + ranges.num_reader_wm_sets = WM_SET_COUNT; + ranges.num_writer_wm_sets = WM_SET_COUNT; + ranges.reader_wm_sets[0].wm_inst = WM_A; + ranges.reader_wm_sets[0].min_drain_clk_mhz = min_dcfclk_khz / 1000; + ranges.reader_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; + ranges.reader_wm_sets[0].min_fill_clk_mhz = min_fclk_khz / 1000; + ranges.reader_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; + ranges.writer_wm_sets[0].wm_inst = WM_A; + ranges.writer_wm_sets[0].min_fill_clk_mhz = socclk_khz / 1000; + ranges.writer_wm_sets[0].max_fill_clk_mhz = overdrive / 1000; + ranges.writer_wm_sets[0].min_drain_clk_mhz = min_fclk_khz / 1000; + ranges.writer_wm_sets[0].max_drain_clk_mhz = overdrive / 1000; + + if (dc->debug.pplib_wm_report_mode == WM_REPORT_OVERRIDE) { + ranges.reader_wm_sets[0].wm_inst = WM_A; + ranges.reader_wm_sets[0].min_drain_clk_mhz = 300; + ranges.reader_wm_sets[0].max_drain_clk_mhz = 5000; + ranges.reader_wm_sets[0].min_fill_clk_mhz = 800; + ranges.reader_wm_sets[0].max_fill_clk_mhz = 5000; + ranges.writer_wm_sets[0].wm_inst = WM_A; + ranges.writer_wm_sets[0].min_fill_clk_mhz = 200; + ranges.writer_wm_sets[0].max_fill_clk_mhz = 5000; + ranges.writer_wm_sets[0].min_drain_clk_mhz = 800; + ranges.writer_wm_sets[0].max_drain_clk_mhz = 5000; + } + + ranges.reader_wm_sets[1] = ranges.writer_wm_sets[0]; + ranges.reader_wm_sets[1].wm_inst = WM_B; + + ranges.reader_wm_sets[2] = ranges.writer_wm_sets[0]; + ranges.reader_wm_sets[2].wm_inst = WM_C; + + ranges.reader_wm_sets[3] = ranges.writer_wm_sets[0]; + ranges.reader_wm_sets[3].wm_inst = WM_D; + + /* Notify PP Lib/SMU which Watermarks to use for which clock ranges */ + pp->set_wm_ranges(&pp->pp_smu, &ranges); +} + +void dcn_bw_sync_calcs_and_dml(struct dc *dc) +{ + DC_FP_START(); + DC_LOG_BANDWIDTH_CALCS("sr_exit_time: %f ns\n" + "sr_enter_plus_exit_time: %f ns\n" + "urgent_latency: %f ns\n" + "write_back_latency: %f ns\n" + "percent_of_ideal_drambw_received_after_urg_latency: %f %%\n" + "max_request_size: %d bytes\n" + "dcfclkv_max0p9: %f kHz\n" + "dcfclkv_nom0p8: %f kHz\n" + "dcfclkv_mid0p72: %f kHz\n" + "dcfclkv_min0p65: %f kHz\n" + "max_dispclk_vmax0p9: %f kHz\n" + "max_dispclk_vnom0p8: %f kHz\n" + "max_dispclk_vmid0p72: %f kHz\n" + "max_dispclk_vmin0p65: %f kHz\n" + "max_dppclk_vmax0p9: %f kHz\n" + "max_dppclk_vnom0p8: %f kHz\n" + "max_dppclk_vmid0p72: %f kHz\n" + "max_dppclk_vmin0p65: %f kHz\n" + "socclk: %f kHz\n" + "fabric_and_dram_bandwidth_vmax0p9: %f MB/s\n" + "fabric_and_dram_bandwidth_vnom0p8: %f MB/s\n" + "fabric_and_dram_bandwidth_vmid0p72: %f MB/s\n" + "fabric_and_dram_bandwidth_vmin0p65: %f MB/s\n" + "phyclkv_max0p9: %f kHz\n" + "phyclkv_nom0p8: %f kHz\n" + "phyclkv_mid0p72: %f kHz\n" + "phyclkv_min0p65: %f kHz\n" + "downspreading: %f %%\n" + "round_trip_ping_latency_cycles: %d DCFCLK Cycles\n" + "urgent_out_of_order_return_per_channel: %d Bytes\n" + "number_of_channels: %d\n" + "vmm_page_size: %d Bytes\n" + "dram_clock_change_latency: %f ns\n" + "return_bus_width: %d Bytes\n", + dc->dcn_soc->sr_exit_time * 1000, + dc->dcn_soc->sr_enter_plus_exit_time * 1000, + dc->dcn_soc->urgent_latency * 1000, + dc->dcn_soc->write_back_latency * 1000, + dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency, + dc->dcn_soc->max_request_size, + dc->dcn_soc->dcfclkv_max0p9 * 1000, + dc->dcn_soc->dcfclkv_nom0p8 * 1000, + dc->dcn_soc->dcfclkv_mid0p72 * 1000, + dc->dcn_soc->dcfclkv_min0p65 
* 1000, + dc->dcn_soc->max_dispclk_vmax0p9 * 1000, + dc->dcn_soc->max_dispclk_vnom0p8 * 1000, + dc->dcn_soc->max_dispclk_vmid0p72 * 1000, + dc->dcn_soc->max_dispclk_vmin0p65 * 1000, + dc->dcn_soc->max_dppclk_vmax0p9 * 1000, + dc->dcn_soc->max_dppclk_vnom0p8 * 1000, + dc->dcn_soc->max_dppclk_vmid0p72 * 1000, + dc->dcn_soc->max_dppclk_vmin0p65 * 1000, + dc->dcn_soc->socclk * 1000, + dc->dcn_soc->fabric_and_dram_bandwidth_vmax0p9 * 1000, + dc->dcn_soc->fabric_and_dram_bandwidth_vnom0p8 * 1000, + dc->dcn_soc->fabric_and_dram_bandwidth_vmid0p72 * 1000, + dc->dcn_soc->fabric_and_dram_bandwidth_vmin0p65 * 1000, + dc->dcn_soc->phyclkv_max0p9 * 1000, + dc->dcn_soc->phyclkv_nom0p8 * 1000, + dc->dcn_soc->phyclkv_mid0p72 * 1000, + dc->dcn_soc->phyclkv_min0p65 * 1000, + dc->dcn_soc->downspreading * 100, + dc->dcn_soc->round_trip_ping_latency_cycles, + dc->dcn_soc->urgent_out_of_order_return_per_channel, + dc->dcn_soc->number_of_channels, + dc->dcn_soc->vmm_page_size, + dc->dcn_soc->dram_clock_change_latency * 1000, + dc->dcn_soc->return_bus_width); + DC_LOG_BANDWIDTH_CALCS("rob_buffer_size_in_kbyte: %f\n" + "det_buffer_size_in_kbyte: %f\n" + "dpp_output_buffer_pixels: %f\n" + "opp_output_buffer_lines: %f\n" + "pixel_chunk_size_in_kbyte: %f\n" + "pte_enable: %d\n" + "pte_chunk_size: %d kbytes\n" + "meta_chunk_size: %d kbytes\n" + "writeback_chunk_size: %d kbytes\n" + "odm_capability: %d\n" + "dsc_capability: %d\n" + "line_buffer_size: %d bits\n" + "max_line_buffer_lines: %d\n" + "is_line_buffer_bpp_fixed: %d\n" + "line_buffer_fixed_bpp: %d\n" + "writeback_luma_buffer_size: %d kbytes\n" + "writeback_chroma_buffer_size: %d kbytes\n" + "max_num_dpp: %d\n" + "max_num_writeback: %d\n" + "max_dchub_topscl_throughput: %d pixels/dppclk\n" + "max_pscl_tolb_throughput: %d pixels/dppclk\n" + "max_lb_tovscl_throughput: %d pixels/dppclk\n" + "max_vscl_tohscl_throughput: %d pixels/dppclk\n" + "max_hscl_ratio: %f\n" + "max_vscl_ratio: %f\n" + "max_hscl_taps: %d\n" + "max_vscl_taps: %d\n" + "pte_buffer_size_in_requests: %d\n" + "dispclk_ramping_margin: %f %%\n" + "under_scan_factor: %f %%\n" + "max_inter_dcn_tile_repeaters: %d\n" + "can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one: %d\n" + "bug_forcing_luma_and_chroma_request_to_same_size_fixed: %d\n" + "dcfclk_cstate_latency: %d\n", + dc->dcn_ip->rob_buffer_size_in_kbyte, + dc->dcn_ip->det_buffer_size_in_kbyte, + dc->dcn_ip->dpp_output_buffer_pixels, + dc->dcn_ip->opp_output_buffer_lines, + dc->dcn_ip->pixel_chunk_size_in_kbyte, + dc->dcn_ip->pte_enable, + dc->dcn_ip->pte_chunk_size, + dc->dcn_ip->meta_chunk_size, + dc->dcn_ip->writeback_chunk_size, + dc->dcn_ip->odm_capability, + dc->dcn_ip->dsc_capability, + dc->dcn_ip->line_buffer_size, + dc->dcn_ip->max_line_buffer_lines, + dc->dcn_ip->is_line_buffer_bpp_fixed, + dc->dcn_ip->line_buffer_fixed_bpp, + dc->dcn_ip->writeback_luma_buffer_size, + dc->dcn_ip->writeback_chroma_buffer_size, + dc->dcn_ip->max_num_dpp, + dc->dcn_ip->max_num_writeback, + dc->dcn_ip->max_dchub_topscl_throughput, + dc->dcn_ip->max_pscl_tolb_throughput, + dc->dcn_ip->max_lb_tovscl_throughput, + dc->dcn_ip->max_vscl_tohscl_throughput, + dc->dcn_ip->max_hscl_ratio, + dc->dcn_ip->max_vscl_ratio, + dc->dcn_ip->max_hscl_taps, + dc->dcn_ip->max_vscl_taps, + dc->dcn_ip->pte_buffer_size_in_requests, + dc->dcn_ip->dispclk_ramping_margin, + dc->dcn_ip->under_scan_factor * 100, + dc->dcn_ip->max_inter_dcn_tile_repeaters, + dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one, + 
dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed, + dc->dcn_ip->dcfclk_cstate_latency); + + dc->dml.soc.sr_exit_time_us = dc->dcn_soc->sr_exit_time; + dc->dml.soc.sr_enter_plus_exit_time_us = dc->dcn_soc->sr_enter_plus_exit_time; + dc->dml.soc.urgent_latency_us = dc->dcn_soc->urgent_latency; + dc->dml.soc.writeback_latency_us = dc->dcn_soc->write_back_latency; + dc->dml.soc.ideal_dram_bw_after_urgent_percent = + dc->dcn_soc->percent_of_ideal_drambw_received_after_urg_latency; + dc->dml.soc.max_request_size_bytes = dc->dcn_soc->max_request_size; + dc->dml.soc.downspread_percent = dc->dcn_soc->downspreading; + dc->dml.soc.round_trip_ping_latency_dcfclk_cycles = + dc->dcn_soc->round_trip_ping_latency_cycles; + dc->dml.soc.urgent_out_of_order_return_per_channel_bytes = + dc->dcn_soc->urgent_out_of_order_return_per_channel; + dc->dml.soc.num_chans = dc->dcn_soc->number_of_channels; + dc->dml.soc.vmm_page_size_bytes = dc->dcn_soc->vmm_page_size; + dc->dml.soc.dram_clock_change_latency_us = dc->dcn_soc->dram_clock_change_latency; + dc->dml.soc.return_bus_width_bytes = dc->dcn_soc->return_bus_width; + + dc->dml.ip.rob_buffer_size_kbytes = dc->dcn_ip->rob_buffer_size_in_kbyte; + dc->dml.ip.det_buffer_size_kbytes = dc->dcn_ip->det_buffer_size_in_kbyte; + dc->dml.ip.dpp_output_buffer_pixels = dc->dcn_ip->dpp_output_buffer_pixels; + dc->dml.ip.opp_output_buffer_lines = dc->dcn_ip->opp_output_buffer_lines; + dc->dml.ip.pixel_chunk_size_kbytes = dc->dcn_ip->pixel_chunk_size_in_kbyte; + dc->dml.ip.pte_enable = dc->dcn_ip->pte_enable == dcn_bw_yes; + dc->dml.ip.pte_chunk_size_kbytes = dc->dcn_ip->pte_chunk_size; + dc->dml.ip.meta_chunk_size_kbytes = dc->dcn_ip->meta_chunk_size; + dc->dml.ip.writeback_chunk_size_kbytes = dc->dcn_ip->writeback_chunk_size; + dc->dml.ip.line_buffer_size_bits = dc->dcn_ip->line_buffer_size; + dc->dml.ip.max_line_buffer_lines = dc->dcn_ip->max_line_buffer_lines; + dc->dml.ip.IsLineBufferBppFixed = dc->dcn_ip->is_line_buffer_bpp_fixed == dcn_bw_yes; + dc->dml.ip.LineBufferFixedBpp = dc->dcn_ip->line_buffer_fixed_bpp; + dc->dml.ip.writeback_luma_buffer_size_kbytes = dc->dcn_ip->writeback_luma_buffer_size; + dc->dml.ip.writeback_chroma_buffer_size_kbytes = dc->dcn_ip->writeback_chroma_buffer_size; + dc->dml.ip.max_num_dpp = dc->dcn_ip->max_num_dpp; + dc->dml.ip.max_num_wb = dc->dcn_ip->max_num_writeback; + dc->dml.ip.max_dchub_pscl_bw_pix_per_clk = dc->dcn_ip->max_dchub_topscl_throughput; + dc->dml.ip.max_pscl_lb_bw_pix_per_clk = dc->dcn_ip->max_pscl_tolb_throughput; + dc->dml.ip.max_lb_vscl_bw_pix_per_clk = dc->dcn_ip->max_lb_tovscl_throughput; + dc->dml.ip.max_vscl_hscl_bw_pix_per_clk = dc->dcn_ip->max_vscl_tohscl_throughput; + dc->dml.ip.max_hscl_ratio = dc->dcn_ip->max_hscl_ratio; + dc->dml.ip.max_vscl_ratio = dc->dcn_ip->max_vscl_ratio; + dc->dml.ip.max_hscl_taps = dc->dcn_ip->max_hscl_taps; + dc->dml.ip.max_vscl_taps = dc->dcn_ip->max_vscl_taps; + /*pte_buffer_size_in_requests missing in dml*/ + dc->dml.ip.dispclk_ramp_margin_percent = dc->dcn_ip->dispclk_ramping_margin; + dc->dml.ip.underscan_factor = dc->dcn_ip->under_scan_factor; + dc->dml.ip.max_inter_dcn_tile_repeaters = dc->dcn_ip->max_inter_dcn_tile_repeaters; + dc->dml.ip.can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one = + dc->dcn_ip->can_vstartup_lines_exceed_vsync_plus_back_porch_lines_minus_one == dcn_bw_yes; + dc->dml.ip.bug_forcing_LC_req_same_size_fixed = + dc->dcn_ip->bug_forcing_luma_and_chroma_request_to_same_size_fixed == dcn_bw_yes; + dc->dml.ip.dcfclk_cstate_latency = 
dc->dcn_ip->dcfclk_cstate_latency; + DC_FP_END(); +} -- cgit From 4bd8dd0d61f961e6c13cc118d4ebbdba57f8561f Mon Sep 17 00:00:00 2001 From: Yongzhi Liu Date: Tue, 18 Jan 2022 15:11:51 -0500 Subject: drm/amdgpu: Add missing pm_runtime_put_autosuspend pm_runtime_get_sync() increments the runtime PM usage counter even when it returns an error code, thus a matching decrement is needed on the error handling path to keep the counter balanced. Signed-off-by: Yongzhi Liu Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index 9aea1cc5b27e..4b950de9bf66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1120,8 +1120,10 @@ static ssize_t amdgpu_debugfs_gfxoff_read(struct file *f, char __user *buf, return -EINVAL; r = pm_runtime_get_sync(adev_to_drm(adev)->dev); - if (r < 0) + if (r < 0) { + pm_runtime_put_autosuspend(adev_to_drm(adev)->dev); return r; + } while (size) { uint32_t value; -- cgit From dfd6879b98999867ac860a51348b00b5c0c2cafd Mon Sep 17 00:00:00 2001 From: Qiang Ma Date: Mon, 17 Jan 2022 15:47:31 +0800 Subject: drm/radeon: fix UVD suspend error I met a bug recently and the kernel log: [ 330.171875] radeon 0000:03:00.0: couldn't schedule ib [ 330.175781] [drm:radeon_uvd_suspend [radeon]] *ERROR* Error destroying UVD (-22)! In radeon drivers, using UVD suspend is as follows: if (rdev->has_uvd) { uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); } In radeon_ib_schedule function, we check the 'ring->ready' state, but in uvd_v1_0_fini funciton, we've cleared the ready state. So, just modify the suspend code flow to fix error. 
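With the calls reordered, the ring stays ready until radeon_uvd_suspend() has submitted its destroy IB; only then does uvd_v1_0_fini() clear ring->ready. A minimal sketch of the intended ordering (the same two calls as in the hunks below, nothing more):

	if (rdev->has_uvd) {
		radeon_uvd_suspend(rdev);	/* may schedule a destroy IB, so ring->ready must still be set */
		uvd_v1_0_fini(rdev);		/* clears ring->ready, so it has to run last */
	}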
Reviewed-by: Leo Liu Signed-off-by: Qiang Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/cik.c | 2 +- drivers/gpu/drm/radeon/evergreen.c | 2 +- drivers/gpu/drm/radeon/ni.c | 2 +- drivers/gpu/drm/radeon/r600.c | 2 +- drivers/gpu/drm/radeon/rv770.c | 2 +- drivers/gpu/drm/radeon/si.c | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/radeon/cik.c b/drivers/gpu/drm/radeon/cik.c index 81b4de7be9f2..5819737c21c6 100644 --- a/drivers/gpu/drm/radeon/cik.c +++ b/drivers/gpu/drm/radeon/cik.c @@ -8517,8 +8517,8 @@ int cik_suspend(struct radeon_device *rdev) cik_cp_enable(rdev, false); cik_sdma_enable(rdev, false); if (rdev->has_uvd) { - uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); + uvd_v1_0_fini(rdev); } if (rdev->has_vce) radeon_vce_suspend(rdev); diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index eeb590d2dec2..455f8036aa54 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -5156,8 +5156,8 @@ int evergreen_suspend(struct radeon_device *rdev) radeon_pm_suspend(rdev); radeon_audio_fini(rdev); if (rdev->has_uvd) { - uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); + uvd_v1_0_fini(rdev); } r700_cp_stop(rdev); r600_dma_stop(rdev); diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c index 4a364ca7a1be..927e5f42e97d 100644 --- a/drivers/gpu/drm/radeon/ni.c +++ b/drivers/gpu/drm/radeon/ni.c @@ -2323,8 +2323,8 @@ int cayman_suspend(struct radeon_device *rdev) cayman_cp_enable(rdev, false); cayman_dma_stop(rdev); if (rdev->has_uvd) { - uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); + uvd_v1_0_fini(rdev); } evergreen_irq_suspend(rdev); radeon_wb_disable(rdev); diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c index ca3fcae2adb5..dd78fc499402 100644 --- a/drivers/gpu/drm/radeon/r600.c +++ b/drivers/gpu/drm/radeon/r600.c @@ -3232,8 +3232,8 @@ int r600_suspend(struct radeon_device *rdev) radeon_audio_fini(rdev); r600_cp_stop(rdev); if (rdev->has_uvd) { - uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); + uvd_v1_0_fini(rdev); } r600_irq_suspend(rdev); radeon_wb_disable(rdev); diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c index e592e57be1bb..38796af4fadd 100644 --- a/drivers/gpu/drm/radeon/rv770.c +++ b/drivers/gpu/drm/radeon/rv770.c @@ -1894,8 +1894,8 @@ int rv770_suspend(struct radeon_device *rdev) radeon_pm_suspend(rdev); radeon_audio_fini(rdev); if (rdev->has_uvd) { - uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); + uvd_v1_0_fini(rdev); } r700_cp_stop(rdev); r600_dma_stop(rdev); diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c index 013e44ed0f39..8d5e4b25609d 100644 --- a/drivers/gpu/drm/radeon/si.c +++ b/drivers/gpu/drm/radeon/si.c @@ -6800,8 +6800,8 @@ int si_suspend(struct radeon_device *rdev) si_cp_enable(rdev, false); cayman_dma_stop(rdev); if (rdev->has_uvd) { - uvd_v1_0_fini(rdev); radeon_uvd_suspend(rdev); + uvd_v1_0_fini(rdev); } if (rdev->has_vce) radeon_vce_suspend(rdev); -- cgit From a5e7ffa11974d90d36f818ee34fc170722ec3098 Mon Sep 17 00:00:00 2001 From: Minghao Chi Date: Tue, 18 Jan 2022 07:57:02 +0000 Subject: amdgpu/amdgpu_psp: remove unneeded ret variable Return value from amdgpu_bo_create_kernel() directly instead of taking this in another redundant variable. 
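The resulting helper is just a thin wrapper that propagates the allocator's return code; a sketch of the pattern (same call and arguments as in the hunk below):

	static int psp_ta_init_shared_buf(struct psp_context *psp,
					  struct ta_mem_context *mem_ctx)
	{
		/* hand the amdgpu_bo_create_kernel() return code straight back */
		return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size,
					       PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM,
					       &mem_ctx->shared_bo,
					       &mem_ctx->shared_mc_addr,
					       &mem_ctx->shared_buf);
	}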
Reported-by: Zeal Robot Signed-off-by: Minghao Chi Signed-off-by: CGEL ZTE Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c index 5c9b67ab168f..f2806959736a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c @@ -941,19 +941,15 @@ static void psp_prep_ta_load_cmd_buf(struct psp_gfx_cmd_resp *cmd, static int psp_ta_init_shared_buf(struct psp_context *psp, struct ta_mem_context *mem_ctx) { - int ret; - /* * Allocate 16k memory aligned to 4k from Frame Buffer (local * physical) for ta to host memory */ - ret = amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, + return amdgpu_bo_create_kernel(psp->adev, mem_ctx->shared_mem_size, PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM, &mem_ctx->shared_bo, &mem_ctx->shared_mc_addr, &mem_ctx->shared_buf); - - return ret; } static void psp_ta_free_shared_buf(struct ta_mem_context *mem_ctx) -- cgit From d39bc5c5e10a648c7de9558592816474f45a374d Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 19 Jan 2022 13:05:28 +0200 Subject: drm/i915/dpll: make intel_shared_dpll_funcs internal to intel_dpll_mgr.c MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move struct intel_shared_dpll_funcs to intel_dpll_mgr.c, as no other place needs to have access to it. We also don't need to have kernel-doc documentation for file internal structures, so drop them while at it. Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220119110528.2377899-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dpll_mgr.c | 35 ++++++++++++++++++++ drivers/gpu/drm/i915/display/intel_dpll_mgr.h | 46 +-------------------------- 2 files changed, 36 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 3f7357123a6d..6723c3de5a80 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -50,6 +50,41 @@ * commit phase. */ +/* platform specific hooks for managing DPLLs */ +struct intel_shared_dpll_funcs { + /* + * Hook for enabling the pll, called from intel_enable_shared_dpll() if + * the pll is not already enabled. + */ + void (*enable)(struct drm_i915_private *i915, + struct intel_shared_dpll *pll); + + /* + * Hook for disabling the pll, called from intel_disable_shared_dpll() + * only when it is safe to disable the pll, i.e., there are no more + * tracked users for it. + */ + void (*disable)(struct drm_i915_private *i915, + struct intel_shared_dpll *pll); + + /* + * Hook for reading the values currently programmed to the DPLL + * registers. This is used for initial hw state readout and state + * verification after a mode set. + */ + bool (*get_hw_state)(struct drm_i915_private *i915, + struct intel_shared_dpll *pll, + struct intel_dpll_hw_state *hw_state); + + /* + * Hook for calculating the pll's output frequency based on its passed + * in state. 
+ */ + int (*get_freq)(struct drm_i915_private *i915, + const struct intel_shared_dpll *pll, + const struct intel_dpll_hw_state *pll_state); +}; + struct intel_dpll_mgr { const struct dpll_info *dpll_info; diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h index ef2889753807..91fe181462b2 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.h +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.h @@ -44,6 +44,7 @@ struct intel_crtc; struct intel_crtc_state; struct intel_encoder; struct intel_shared_dpll; +struct intel_shared_dpll_funcs; /** * enum intel_dpll_id - possible DPLL ids @@ -251,51 +252,6 @@ struct intel_shared_dpll_state { struct intel_dpll_hw_state hw_state; }; -/** - * struct intel_shared_dpll_funcs - platform specific hooks for managing DPLLs - */ -struct intel_shared_dpll_funcs { - /** - * @enable: - * - * Hook for enabling the pll, called from intel_enable_shared_dpll() - * if the pll is not already enabled. - */ - void (*enable)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll); - - /** - * @disable: - * - * Hook for disabling the pll, called from intel_disable_shared_dpll() - * only when it is safe to disable the pll, i.e., there are no more - * tracked users for it. - */ - void (*disable)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll); - - /** - * @get_hw_state: - * - * Hook for reading the values currently programmed to the DPLL - * registers. This is used for initial hw state readout and state - * verification after a mode set. - */ - bool (*get_hw_state)(struct drm_i915_private *dev_priv, - struct intel_shared_dpll *pll, - struct intel_dpll_hw_state *hw_state); - - /** - * @get_freq: - * - * Hook for calculating the pll's output frequency based on its - * passed in state. - */ - int (*get_freq)(struct drm_i915_private *i915, - const struct intel_shared_dpll *pll, - const struct intel_dpll_hw_state *pll_state); -}; - /** * struct dpll_info - display PLL platform specific info */ -- cgit From 8172375ea95ab8b7f7ea0dda617ad87c439a14ee Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 8 Dec 2021 17:00:50 +0200 Subject: drm/i915: Remove zombie async flip vt-d w/a MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This async flip vt-d w/a was moved to a different place in commit 7d396cacaea6 ("drm/i195: Make the async flip VT-d workaround dynamic") but the drm-intel-fixes cherry-pick commit b2d73debfdc1 ("drm/i915: Extend the async flip VT-d w/a to skl/bxt") resurrected the original code as well. So now we have this w/a in two places. Remove the resurrected zombie code. Not done as a revert to hopefully prevent any kind of automagic stable backport. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211208150050.17230-1-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_pm.c | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 897d66fec5d6..d6a46811acd1 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -79,8 +79,6 @@ struct intel_wm_config { static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) { - enum pipe pipe; - if (HAS_LLC(dev_priv)) { /* * WaCompressedResourceDisplayNewHashMode:skl,kbl @@ -94,16 +92,6 @@ static void gen9_init_clock_gating(struct drm_i915_private *dev_priv) SKL_DE_COMPRESSED_HASH_MODE); } - for_each_pipe(dev_priv, pipe) { - /* - * "Plane N strech max must be programmed to 11b (x1) - * when Async flips are enabled on that plane." - */ - if (!IS_GEMINILAKE(dev_priv) && intel_vtd_active()) - intel_uncore_rmw(&dev_priv->uncore, CHICKEN_PIPESL_1(pipe), - SKL_PLANE1_STRETCH_MAX_MASK, SKL_PLANE1_STRETCH_MAX_X1); - } - /* See Bspec note for PSR2_CTL bit 31, Wa#828:skl,bxt,kbl,cfl */ intel_uncore_write(&dev_priv->uncore, CHICKEN_PAR1_1, intel_uncore_read(&dev_priv->uncore, CHICKEN_PAR1_1) | SKL_EDP_PSR_FIX_RDWRAP); -- cgit From 4c907bcd9dcd233da6707059d777ab389dcbd964 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 19 Jan 2022 15:31:01 +0300 Subject: ASoC: max9759: fix underflow in speaker_gain_control_put() Check for negative values of "priv->gain" to prevent an out of bounds access. The concern is that these might come from the user via: -> snd_ctl_elem_write_user() -> snd_ctl_elem_write() -> kctl->put() Fixes: fa8d915172b8 ("ASoC: max9759: Add Amplifier Driver") Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/20220119123101.GA9509@kili Signed-off-by: Mark Brown --- sound/soc/codecs/max9759.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/sound/soc/codecs/max9759.c b/sound/soc/codecs/max9759.c index d75fd61b9032..bc57d7687f16 100644 --- a/sound/soc/codecs/max9759.c +++ b/sound/soc/codecs/max9759.c @@ -64,7 +64,8 @@ static int speaker_gain_control_put(struct snd_kcontrol *kcontrol, struct snd_soc_component *c = snd_soc_kcontrol_component(kcontrol); struct max9759 *priv = snd_soc_component_get_drvdata(c); - if (ucontrol->value.integer.value[0] > 3) + if (ucontrol->value.integer.value[0] < 0 || + ucontrol->value.integer.value[0] > 3) return -EINVAL; priv->gain = ucontrol->value.integer.value[0]; -- cgit From 579b2c8f72d974f27d85bbd53846f34675ee3b01 Mon Sep 17 00:00:00 2001 From: Julian Braha Date: Mon, 17 Jan 2022 00:03:24 -0500 Subject: ASoC: mediatek: fix unmet dependency on GPIOLIB for SND_SOC_DMIC When SND_SOC_MT8195_MT6359_RT1011_RT5682 is selected, and GPIOLIB is not selected, Kbuild gives the following warning: WARNING: unmet direct dependencies detected for SND_SOC_DMIC Depends on [n]: SOUND [=y] && !UML && SND [=y] && SND_SOC [=y] && GPIOLIB [=n] Selected by [y]: - SND_SOC_MT8195_MT6359_RT1011_RT5682 [=y] && SOUND [=y] && !UML && SND [=y] && SND_SOC [=y] && I2C [=y] && SND_SOC_MT8195 [=y] && MTK_PMIC_WRAP [=y] This is because SND_SOC_MT8195_MT6359_RT1011_RT5682 selects SND_SOC_DMIC without selecting or depending on GPIOLIB, depsite SND_SOC_DMIC depending on GPIOLIB. This unmet dependency bug was detected by Kismet, a static analysis tool for Kconfig. Please advise if this is not the appropriate solution. 
Signed-off-by: Julian Braha Reviewed-by: Tzung-Bi Shih Link: https://lore.kernel.org/r/20220117050324.68371-1-julianbraha@gmail.com Signed-off-by: Mark Brown --- sound/soc/mediatek/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig index 9306b7ca2644..0d154350f180 100644 --- a/sound/soc/mediatek/Kconfig +++ b/sound/soc/mediatek/Kconfig @@ -216,7 +216,7 @@ config SND_SOC_MT8195_MT6359_RT1019_RT5682 config SND_SOC_MT8195_MT6359_RT1011_RT5682 tristate "ASoC Audio driver for MT8195 with MT6359 RT1011 RT5682 codec" - depends on I2C + depends on I2C && GPIOLIB depends on SND_SOC_MT8195 && MTK_PMIC_WRAP select SND_SOC_MT6359 select SND_SOC_RT1011 -- cgit From b4c18c18ebf7cf1e602af88c12ef9cb0d6e5ce51 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 14 Jan 2022 19:36:03 -0800 Subject: regulator: MAX20086: add gpio/consumer.h max20086-regulator.c needs for an enum, some macros, and a function prototype. (seen on ARCH=m68k) Adding this header file fixes multiple build errors: ../drivers/regulator/max20086-regulator.c: In function 'max20086_i2c_probe': ../drivers/regulator/max20086-regulator.c:217:26: error: storage size of 'flags' isn't known 217 | enum gpiod_flags flags; ../drivers/regulator/max20086-regulator.c:261:27: error: 'GPIOD_OUT_HIGH' undeclared (first use in this function); did you mean 'GPIOF_INIT_HIGH'? 261 | flags = boot_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW; | ^~~~~~~~~~~~~~ ../drivers/regulator/max20086-regulator.c:261:44: error: 'GPIOD_OUT_LOW' undeclared (first use in this function); did you mean 'GPIOF_INIT_LOW'? 261 | flags = boot_on ? GPIOD_OUT_HIGH : GPIOD_OUT_LOW; ../drivers/regulator/max20086-regulator.c:262:27: error: implicit declaration of function 'devm_gpiod_get'; did you mean 'devm_gpio_free'? [-Werror=implicit-function-declaration] 262 | chip->ena_gpiod = devm_gpiod_get(chip->dev, "enable", flags); ../drivers/regulator/max20086-regulator.c:217:26: warning: unused variable 'flags' [-Wunused-variable] 217 | enum gpiod_flags flags; Fixes: bfff546aae50 ("regulator: Add MAX20086-MAX20089 driver") Signed-off-by: Randy Dunlap Reported-by: kernel test robot Cc: Watson Chow Cc: Mark Brown Cc: Laurent Pinchart Reviewed-by: Laurent Pinchart Link: https://lore.kernel.org/r/20220115033603.24473-1-rdunlap@infradead.org Signed-off-by: Mark Brown --- drivers/regulator/max20086-regulator.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/regulator/max20086-regulator.c b/drivers/regulator/max20086-regulator.c index 63aa6ec3254a..b8bf76c170fe 100644 --- a/drivers/regulator/max20086-regulator.c +++ b/drivers/regulator/max20086-regulator.c @@ -7,6 +7,7 @@ #include #include +#include #include #include #include -- cgit From e4d63473d3110afd170e6e0e48494d3789d26136 Mon Sep 17 00:00:00 2001 From: Patrice Chotard Date: Mon, 17 Jan 2022 13:17:44 +0100 Subject: spi: stm32-qspi: Update spi registering Some device driver need to communicate to qspi device during the remove process, qspi controller must be functional when spi_unregister_master() is called. To ensure this, replace devm_spi_register_master() by spi_register_master() and spi_unregister_master() is called directly in .remove callback before stopping the qspi controller. 
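In condensed form, the resulting ->remove() ordering is the following (an abridged sketch of the change shown below, with PM and DMA teardown omitted; the qspi fields are the driver's own):

static int qspi_remove_sketch(struct platform_device *pdev)
{
	struct stm32_qspi *qspi = platform_get_drvdata(pdev);

	pm_runtime_get_sync(qspi->dev);
	/* Unregister while the controller can still run transfers,
	 * e.g. a spi-nor soft reset issued from a child ->remove(). */
	spi_unregister_master(qspi->ctrl);
	/* Only now is it safe to quiesce the hardware. */
	writel_relaxed(0, qspi->io_base + QSPI_CR);
	clk_disable_unprepare(qspi->clk);

	return 0;
}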
This issue was put in evidence using kernel v5.11 and later with a spi-nor which supports the software reset feature introduced by commit d73ee7534cc5 ("mtd: spi-nor: core: perform a Soft Reset on shutdown") Fixes: c530cd1d9d5e ("spi: spi-mem: add stm32 qspi controller") Signed-off-by: Patrice Chotard Cc: # 5.8.x Reviewed-by: Lukas Wunner Link: https://lore.kernel.org/r/20220117121744.29729-1-patrice.chotard@foss.st.com Signed-off-by: Mark Brown --- drivers/spi/spi-stm32-qspi.c | 47 ++++++++++++++++---------------------------- 1 file changed, 17 insertions(+), 30 deletions(-) diff --git a/drivers/spi/spi-stm32-qspi.c b/drivers/spi/spi-stm32-qspi.c index 514337c86d2c..ffdc55f87e82 100644 --- a/drivers/spi/spi-stm32-qspi.c +++ b/drivers/spi/spi-stm32-qspi.c @@ -688,7 +688,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) struct resource *res; int ret, irq; - ctrl = spi_alloc_master(dev, sizeof(*qspi)); + ctrl = devm_spi_alloc_master(dev, sizeof(*qspi)); if (!ctrl) return -ENOMEM; @@ -697,58 +697,46 @@ static int stm32_qspi_probe(struct platform_device *pdev) res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi"); qspi->io_base = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->io_base)) { - ret = PTR_ERR(qspi->io_base); - goto err_master_put; - } + if (IS_ERR(qspi->io_base)) + return PTR_ERR(qspi->io_base); qspi->phys_base = res->start; res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "qspi_mm"); qspi->mm_base = devm_ioremap_resource(dev, res); - if (IS_ERR(qspi->mm_base)) { - ret = PTR_ERR(qspi->mm_base); - goto err_master_put; - } + if (IS_ERR(qspi->mm_base)) + return PTR_ERR(qspi->mm_base); qspi->mm_size = resource_size(res); - if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) { - ret = -EINVAL; - goto err_master_put; - } + if (qspi->mm_size > STM32_QSPI_MAX_MMAP_SZ) + return -EINVAL; irq = platform_get_irq(pdev, 0); - if (irq < 0) { - ret = irq; - goto err_master_put; - } + if (irq < 0) + return irq; ret = devm_request_irq(dev, irq, stm32_qspi_irq, 0, dev_name(dev), qspi); if (ret) { dev_err(dev, "failed to request irq\n"); - goto err_master_put; + return ret; } init_completion(&qspi->data_completion); init_completion(&qspi->match_completion); qspi->clk = devm_clk_get(dev, NULL); - if (IS_ERR(qspi->clk)) { - ret = PTR_ERR(qspi->clk); - goto err_master_put; - } + if (IS_ERR(qspi->clk)) + return PTR_ERR(qspi->clk); qspi->clk_rate = clk_get_rate(qspi->clk); - if (!qspi->clk_rate) { - ret = -EINVAL; - goto err_master_put; - } + if (!qspi->clk_rate) + return -EINVAL; ret = clk_prepare_enable(qspi->clk); if (ret) { dev_err(dev, "can not enable the clock\n"); - goto err_master_put; + return ret; } rstc = devm_reset_control_get_exclusive(dev, NULL); @@ -784,7 +772,7 @@ static int stm32_qspi_probe(struct platform_device *pdev) pm_runtime_enable(dev); pm_runtime_get_noresume(dev); - ret = devm_spi_register_master(dev, ctrl); + ret = spi_register_master(ctrl); if (ret) goto err_pm_runtime_free; @@ -806,8 +794,6 @@ err_dma_free: stm32_qspi_dma_free(qspi); err_clk_disable: clk_disable_unprepare(qspi->clk); -err_master_put: - spi_master_put(qspi->ctrl); return ret; } @@ -817,6 +803,7 @@ static int stm32_qspi_remove(struct platform_device *pdev) struct stm32_qspi *qspi = platform_get_drvdata(pdev); pm_runtime_get_sync(qspi->dev); + spi_unregister_master(qspi->ctrl); /* disable qspi */ writel_relaxed(0, qspi->io_base + QSPI_CR); stm32_qspi_dma_free(qspi); -- cgit From 3cefddb72f80dc8d49ce605628ceb6525cfd64da Mon Sep 17 00:00:00 2001 From: Alain Volmat Date: Wed, 19 Jan 2022 
10:32:44 +0100 Subject: spi: stm32: remove inexistant variables in struct stm32_spi_cfg comment Variables 'can_dma' and 'has_startbit' are described within the struct stm32_spi_cfg comment but have never existed in this structure so remove them. Signed-off-by: Alain Volmat Link: https://lore.kernel.org/r/20220119093245.624878-2-alain.volmat@foss.st.com Signed-off-by: Mark Brown --- drivers/spi/spi-stm32.c | 2 -- 1 file changed, 2 deletions(-) diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index 9bd3fd1652f7..b5ef2470cefe 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -221,7 +221,6 @@ struct stm32_spi; * time between frames (if driver has this functionality) * @set_number_of_data: optional routine to configure registers to desired * number of data (if driver has this functionality) - * @can_dma: routine to determine if the transfer is eligible for DMA use * @transfer_one_dma_start: routine to start transfer a single spi_transfer * using DMA * @dma_rx_cb: routine to call after DMA RX channel operation is complete @@ -232,7 +231,6 @@ struct stm32_spi; * @baud_rate_div_min: minimum baud rate divisor * @baud_rate_div_max: maximum baud rate divisor * @has_fifo: boolean to know if fifo is used for driver - * @has_startbit: boolean to know if start bit is used to start transfer */ struct stm32_spi_cfg { const struct stm32_spi_regspec *regs; -- cgit From 9df15d842a0f77f2b8ee29386f6d714e4220df57 Mon Sep 17 00:00:00 2001 From: Alain Volmat Date: Wed, 19 Jan 2022 10:32:45 +0100 Subject: spi: stm32: make SPI_MASTER_MUST_TX flags only specific to STM32F4 Commit 61367d0b8f5e ("spi: stm32: Add 'SPI_SIMPLEX_RX', 'SPI_3WIRE_RX' support for stm32f4") allowed to properly communicate with the st-gyro-spi even when there is no tx_buf provided by setting the flag SPI_MASTER_MUST_TX and thus forcing a dummy TX buffer to work in Full Duplex. This behavior should kept only for the STM32F4 and not for other compatible since the STM32H7 do support SIMPLEX_RX and SIMPLEX_TX. Add the flags variable within the struct stm32_spi_cfg so that flags used at master registration time are compatible specific. 
Fixes: 61367d0b8f5e ("spi: stm32: Add 'SPI_SIMPLEX_RX', 'SPI_3WIRE_RX' support for stm32f4") Signed-off-by: Alain Volmat Link: https://lore.kernel.org/r/20220119093245.624878-3-alain.volmat@foss.st.com Signed-off-by: Mark Brown --- drivers/spi/spi-stm32.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/spi/spi-stm32.c b/drivers/spi/spi-stm32.c index b5ef2470cefe..7fc24505a72c 100644 --- a/drivers/spi/spi-stm32.c +++ b/drivers/spi/spi-stm32.c @@ -231,6 +231,7 @@ struct stm32_spi; * @baud_rate_div_min: minimum baud rate divisor * @baud_rate_div_max: maximum baud rate divisor * @has_fifo: boolean to know if fifo is used for driver + * @flags: compatible specific SPI controller flags used at registration time */ struct stm32_spi_cfg { const struct stm32_spi_regspec *regs; @@ -251,6 +252,7 @@ struct stm32_spi_cfg { unsigned int baud_rate_div_min; unsigned int baud_rate_div_max; bool has_fifo; + u16 flags; }; /** @@ -1720,6 +1722,7 @@ static const struct stm32_spi_cfg stm32f4_spi_cfg = { .baud_rate_div_min = STM32F4_SPI_BR_DIV_MIN, .baud_rate_div_max = STM32F4_SPI_BR_DIV_MAX, .has_fifo = false, + .flags = SPI_MASTER_MUST_TX, }; static const struct stm32_spi_cfg stm32h7_spi_cfg = { @@ -1852,7 +1855,7 @@ static int stm32_spi_probe(struct platform_device *pdev) master->prepare_message = stm32_spi_prepare_msg; master->transfer_one = stm32_spi_transfer_one; master->unprepare_message = stm32_spi_unprepare_msg; - master->flags = SPI_MASTER_MUST_TX; + master->flags = spi->cfg->flags; spi->dma_tx = dma_request_chan(spi->dev, "tx"); if (IS_ERR(spi->dma_tx)) { -- cgit From b90b6e41379789ed595236113779e0793a63bf18 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 Dec 2021 17:53:58 +0200 Subject: drm/i915/bios: Introduce has_ddi_port_info() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pull the "do we want to use i915->vbt.ports[]?" check into a central place. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211217155403.31477-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_bios.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 262406c00e53..e9b6b6e613a6 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2053,6 +2053,11 @@ static void parse_ddi_port(struct drm_i915_private *i915, i915->vbt.ports[port] = devdata; } +static bool has_ddi_port_info(struct drm_i915_private *i915) +{ + return HAS_DDI(i915); +} + static void parse_ddi_ports(struct drm_i915_private *i915) { struct intel_bios_encoder_data *devdata; @@ -2653,7 +2658,7 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, }; - if (HAS_DDI(i915)) + if (has_ddi_port_info(i915)) return i915->vbt.ports[port]; /* FIXME maybe deal with port A as well? 
*/ @@ -2693,7 +2698,7 @@ bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) [PORT_F] = DVO_PORT_DPF, }; - if (HAS_DDI(i915)) { + if (has_ddi_port_info(i915)) { const struct intel_bios_encoder_data *devdata; devdata = intel_bios_encoder_data_lookup(i915, port); @@ -2748,7 +2753,7 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915, }; const struct intel_bios_encoder_data *devdata; - if (HAS_DDI(i915)) { + if (has_ddi_port_info(i915)) { const struct intel_bios_encoder_data *devdata; devdata = intel_bios_encoder_data_lookup(i915, port); -- cgit From eb9fcf63857556d5eacd67f5c96078e643a8d15a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 Dec 2021 17:53:59 +0200 Subject: drm/i915/bios: Use i915->vbt.ports[] on CHV MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit CHV is currently straddling the divide by using parse_ddi_ports() stuff for aux_ch/ddc_pin but going through all old codepaths for the rest (intel_bios_is_port_present(), intel_bios_is_port_edp(), intel_bios_is_port_dp_dual_mode()). Let's switch over full and use i915->vbt.ports[] for the rest of the stuff. dvo_port_to_port() doesn't know about DSI so we won't get into any kind of "is port B HDMI or DSI or both?" conundrum, which could otherwise happen on VLV/CHV due to DSI ports living in a separate world from the other digital ports. Including Jani's detailed analysis here for posterity: "We stop checking for port A for CHV in intel_bios_is_port_present(), but it's a warn and I don't recall any bug reports, so probably fine. We could add a check in parse_ddi_port(), but meh. Ditto for intel_bios_is_port_dp_dual_mode(), except it doesn't have a warn. The eDP check in intel_bios_is_port_edp() becomes slightly more relaxed. Both the old and new check require these to be set: - DEVICE_TYPE_DISPLAYPORT_OUTPUT - DEVICE_TYPE_INTERNAL_CONNECTOR. The old code also required these to be unset: - DEVICE_TYPE_MIPI_OUTPUT - DEVICE_TYPE_COMPOSITE_OUTPUT - DEVICE_TYPE_DUAL_CHANNEL - DEVICE_TYPE_LVDS_SIGNALING - DEVICE_TYPE_TMDS_DVI_SIGNALING - DEVICE_TYPE_VIDEO_SIGNALING - DEVICE_TYPE_ANALOG_OUTPUT It's possible we've added these just as a sanity check for broken VBTs more than anything. I guess I'd see if actual problems arise. Bottom line, I think the functional changes matter only for VBTs with bogus data." I agree that it should work assuming the VBT isn't totally insane. Modern windows drivers also don't seem to check any of those additional device type bits, which may or may not matter for older devices (no idea what some old driver versions are checking). 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211217155403.31477-3-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_bios.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index e9b6b6e613a6..3c4165d19edd 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2055,14 +2055,14 @@ static void parse_ddi_port(struct drm_i915_private *i915, static bool has_ddi_port_info(struct drm_i915_private *i915) { - return HAS_DDI(i915); + return HAS_DDI(i915) || IS_CHERRYVIEW(i915); } static void parse_ddi_ports(struct drm_i915_private *i915) { struct intel_bios_encoder_data *devdata; - if (!HAS_DDI(i915) && !IS_CHERRYVIEW(i915)) + if (!has_ddi_port_info(i915)) return; if (i915->vbt.version < 155) -- cgit From 594c504d33343657ad3b24ff8e4ef032cd4de25e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 Dec 2021 17:54:00 +0200 Subject: drm/i915/bios: Use i915->vbt.ports[] for all g4x+ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Extend the vbt.ports[] stuff for all g4x+ platforms. We do need to drop the version check as some elk/ctg machines may have VBTs older than that. The oldest I know is an elk with version 142. But the child device stuff has had the correct size since at least version 125 (observed on my sdg), so from that angle this should be totally safe. This does couple of things: - Start using the aux_ch/ddc_pin from VBT instead of just the hardcoded defaults. Hopefully there are no VBTs with entirely bogus information here. - Start using i915->vbt.ports[] for intel_bios_is_port_dp_dual_mode(). Should be fine as the logic doesn't actually change. - Start using i915->vbt.ports[] for intel_bios_is_port_edp(). The old codepath only looks at the DP DVO ports, the new codepath looks at both DP and HDMI DVO ports. In principle that should not matter. We also stop looking at some of the other device type bits (eg. LVDS,MIPI,ANALOG,etc.). Hopefully no VBT is broken enough that it sets up totally conflicting device type bits (eg. LVDS+eDP at the same time). We also lose the "g4x->no eDP ever" hardcoding (shouldn't be hard to re-introduce that into eg. sanitize_device_type() if needed). Lightly smoke tested on a set of machines (one of ctg,ilk,snb,ivb each) with both DP and HDMI (DP++). Everything still worked as it should. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211217155403.31477-4-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_bios.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 3c4165d19edd..5e562ae14df9 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2055,7 +2055,7 @@ static void parse_ddi_port(struct drm_i915_private *i915, static bool has_ddi_port_info(struct drm_i915_private *i915) { - return HAS_DDI(i915) || IS_CHERRYVIEW(i915); + return DISPLAY_VER(i915) >= 5 || IS_G4X(i915); } static void parse_ddi_ports(struct drm_i915_private *i915) @@ -2065,9 +2065,6 @@ static void parse_ddi_ports(struct drm_i915_private *i915) if (!has_ddi_port_info(i915)) return; - if (i915->vbt.version < 155) - return; - list_for_each_entry(devdata, &i915->vbt.display_devices, node) parse_ddi_port(i915, devdata); } -- cgit From a868a1e57e3afca98509345d5a8f747a4d745cb1 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 Dec 2021 17:54:01 +0200 Subject: drm/i915/bios: Throw out the !has_ddi_port_info() codepaths MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Now that we parse the DDI port info from the VBT on all g4x+ platforms we can throw out all the old codepaths in intel_bios_is_port_present(), intel_bios_is_port_edp() and intel_bios_is_port_dp_dual_mode(). None of these should be called on pre-g4x platforms. For good measure throw in a WARN into intel_bios_is_port_present() should someone get the urge to call it on older platforms. The other two functions are specific to HDMI and DP so should not need any protection as those encoder types don't even exist on older platforms. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211217155403.31477-5-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_bios.c | 99 +++------------------------ drivers/gpu/drm/i915/display/intel_vbt_defs.h | 15 ---- 2 files changed, 9 insertions(+), 105 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 5e562ae14df9..8372809e0e1f 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2643,37 +2643,10 @@ bool intel_bios_is_lvds_present(struct drm_i915_private *i915, u8 *i2c_pin) */ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; - static const struct { - u16 dp, hdmi; - } port_mapping[] = { - [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, - [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, - [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, - [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, - [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, - }; - - if (has_ddi_port_info(i915)) - return i915->vbt.ports[port]; - - /* FIXME maybe deal with port A as well? 
*/ - if (drm_WARN_ON(&i915->drm, - port == PORT_A) || port >= ARRAY_SIZE(port_mapping)) - return false; - - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { - child = &devdata->child; - - if ((child->dvo_port == port_mapping[port].dp || - child->dvo_port == port_mapping[port].hdmi) && - (child->device_type & (DEVICE_TYPE_TMDS_DVI_SIGNALING | - DEVICE_TYPE_DISPLAYPORT_OUTPUT))) - return true; - } + if (WARN_ON(!has_ddi_port_info(i915))) + return true; - return false; + return i915->vbt.ports[port]; } /** @@ -2685,34 +2658,10 @@ bool intel_bios_is_port_present(struct drm_i915_private *i915, enum port port) */ bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) { - const struct intel_bios_encoder_data *devdata; - const struct child_device_config *child; - static const short port_mapping[] = { - [PORT_B] = DVO_PORT_DPB, - [PORT_C] = DVO_PORT_DPC, - [PORT_D] = DVO_PORT_DPD, - [PORT_E] = DVO_PORT_DPE, - [PORT_F] = DVO_PORT_DPF, - }; - - if (has_ddi_port_info(i915)) { - const struct intel_bios_encoder_data *devdata; - - devdata = intel_bios_encoder_data_lookup(i915, port); - - return devdata && intel_bios_encoder_supports_edp(devdata); - } + const struct intel_bios_encoder_data *devdata = + intel_bios_encoder_data_lookup(i915, port); - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { - child = &devdata->child; - - if (child->dvo_port == port_mapping[port] && - (child->device_type & DEVICE_TYPE_eDP_BITS) == - (DEVICE_TYPE_eDP & DEVICE_TYPE_eDP_BITS)) - return true; - } - - return false; + return devdata && intel_bios_encoder_supports_edp(devdata); } static bool child_dev_is_dp_dual_mode(const struct child_device_config *child) @@ -2735,40 +2684,10 @@ static bool child_dev_is_dp_dual_mode(const struct child_device_config *child) bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915, enum port port) { - static const struct { - u16 dp, hdmi; - } port_mapping[] = { - /* - * Buggy VBTs may declare DP ports as having - * HDMI type dvo_port :( So let's check both. - */ - [PORT_B] = { DVO_PORT_DPB, DVO_PORT_HDMIB, }, - [PORT_C] = { DVO_PORT_DPC, DVO_PORT_HDMIC, }, - [PORT_D] = { DVO_PORT_DPD, DVO_PORT_HDMID, }, - [PORT_E] = { DVO_PORT_DPE, DVO_PORT_HDMIE, }, - [PORT_F] = { DVO_PORT_DPF, DVO_PORT_HDMIF, }, - }; - const struct intel_bios_encoder_data *devdata; + const struct intel_bios_encoder_data *devdata = + intel_bios_encoder_data_lookup(i915, port); - if (has_ddi_port_info(i915)) { - const struct intel_bios_encoder_data *devdata; - - devdata = intel_bios_encoder_data_lookup(i915, port); - - return devdata && child_dev_is_dp_dual_mode(&devdata->child); - } - - if (port == PORT_A || port >= ARRAY_SIZE(port_mapping)) - return false; - - list_for_each_entry(devdata, &i915->vbt.display_devices, node) { - if ((devdata->child.dvo_port == port_mapping[port].dp || - devdata->child.dvo_port == port_mapping[port].hdmi) && - child_dev_is_dp_dual_mode(&devdata->child)) - return true; - } - - return false; + return devdata && child_dev_is_dp_dual_mode(&devdata->child); } /** diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index f043d85ba64d..c23582769f34 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -226,21 +226,6 @@ struct bdb_general_features { #define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1) #define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0) -/* - * Bits we care about when checking for DEVICE_TYPE_eDP. 
Depending on the - * system, the other bits may or may not be set for eDP outputs. - */ -#define DEVICE_TYPE_eDP_BITS \ - (DEVICE_TYPE_INTERNAL_CONNECTOR | \ - DEVICE_TYPE_MIPI_OUTPUT | \ - DEVICE_TYPE_COMPOSITE_OUTPUT | \ - DEVICE_TYPE_DUAL_CHANNEL | \ - DEVICE_TYPE_LVDS_SIGNALING | \ - DEVICE_TYPE_TMDS_DVI_SIGNALING | \ - DEVICE_TYPE_VIDEO_SIGNALING | \ - DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ - DEVICE_TYPE_ANALOG_OUTPUT) - #define DEVICE_TYPE_DP_DUAL_MODE_BITS \ (DEVICE_TYPE_INTERNAL_CONNECTOR | \ DEVICE_TYPE_MIPI_OUTPUT | \ -- cgit From 044cbc7a74c136f12a80c855cadd1b085084aef1 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 17 Dec 2021 17:54:02 +0200 Subject: drm/i915/bios: Nuke DEVICE_TYPE_DP_DUAL_MODE_BITS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the DEVICE_TYPE_DP_DUAL_MODE_BITS stuff with just a DP+HDMI check. The rest of the bits shouldn't really matter anyway. The slight change in behaviour here is that now we do look at the DEVICE_TYPE_NOT_HDMI_OUTPUT bit (via intel_bios_encoder_supports_hdmi()) when we previously ignored it. The one platform we know that has problems with that bit is VLV. But IIRC the problem was always that buggy VBTs basically never set that bit. So that should be OK since all it would do is make all DVI ports look like HDMI ports instead. Also can't imagine there are many VLV machines with actual DVI ports in existence. We still keep the rest of the dvo_port/aux_ch checks as we can't trust that DP+HDMI device type equals DP++ due to buggy VBTs. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211217155403.31477-6-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_bios.c | 10 ++++++---- drivers/gpu/drm/i915/display/intel_vbt_defs.h | 11 ----------- 2 files changed, 6 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 8372809e0e1f..60386298d799 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -2664,10 +2664,12 @@ bool intel_bios_is_port_edp(struct drm_i915_private *i915, enum port port) return devdata && intel_bios_encoder_supports_edp(devdata); } -static bool child_dev_is_dp_dual_mode(const struct child_device_config *child) +static bool intel_bios_encoder_supports_dp_dual_mode(const struct intel_bios_encoder_data *devdata) { - if ((child->device_type & DEVICE_TYPE_DP_DUAL_MODE_BITS) != - (DEVICE_TYPE_DP_DUAL_MODE & DEVICE_TYPE_DP_DUAL_MODE_BITS)) + const struct child_device_config *child = &devdata->child; + + if (!intel_bios_encoder_supports_dp(devdata) || + !intel_bios_encoder_supports_hdmi(devdata)) return false; if (dvo_port_type(child->dvo_port) == DVO_PORT_DPA) @@ -2687,7 +2689,7 @@ bool intel_bios_is_port_dp_dual_mode(struct drm_i915_private *i915, const struct intel_bios_encoder_data *devdata = intel_bios_encoder_data_lookup(i915, port); - return devdata && child_dev_is_dp_dual_mode(&devdata->child); + return devdata && intel_bios_encoder_supports_dp_dual_mode(devdata); } /** diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index c23582769f34..a39d6cfea87a 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -226,17 +226,6 @@ struct bdb_general_features { #define DEVICE_TYPE_DIGITAL_OUTPUT (1 << 1) #define DEVICE_TYPE_ANALOG_OUTPUT (1 << 0) -#define 
DEVICE_TYPE_DP_DUAL_MODE_BITS \ - (DEVICE_TYPE_INTERNAL_CONNECTOR | \ - DEVICE_TYPE_MIPI_OUTPUT | \ - DEVICE_TYPE_COMPOSITE_OUTPUT | \ - DEVICE_TYPE_LVDS_SIGNALING | \ - DEVICE_TYPE_TMDS_DVI_SIGNALING | \ - DEVICE_TYPE_VIDEO_SIGNALING | \ - DEVICE_TYPE_DISPLAYPORT_OUTPUT | \ - DEVICE_TYPE_DIGITAL_OUTPUT | \ - DEVICE_TYPE_ANALOG_OUTPUT) - #define DEVICE_CFG_NONE 0x00 #define DEVICE_CFG_12BIT_DVOB 0x01 #define DEVICE_CFG_12BIT_DVOC 0x02 -- cgit From c26962803d044a7668e9ea4d5313117ac5b878c8 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 22 Dec 2021 18:17:38 +0200 Subject: drm/i915/hdmi: Ignore DP++ TMDS clock limit for native HDMI ports MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Lots of machines these days seem to have a crappy type1 DP dual mode adaptor chip slapped onto the motherboard. Based on the DP dual mode spec we currently limit those to 165MHz max TMDS clock. Windows OTOH ignores DP dual mode adaptors when the VBT indicates that the port is not actually DP++, so we can perhaps assume that the vendors did intend that the 165MHz clock limit doesn't apply here. Though it would be much nicer if they actually declared an explicit limit through VBT, but that doesn't seem to be happening either. So in order to match Windows behaviour let's ignore the DP dual mode adaptor's TMDS clock limit for ports that don't look like DP++ in VBT. Unfortunately many older VBTs misdelcare their DP++ ports as just HDMI (eg. ILK Dell Latitude E5410) or DP (eg. SNB Lenovo ThinkPad X220). So we can't really do this universally without risking black screens. I suppose a sensible cutoff is HSW+ since that's when 4k became a thing and one might assume that the machines have been tested to work with higher TMDS clock rates. v2: s/IS_BROADWELL/IS_HASWELL/ Acked-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211222161738.12478-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_hdmi.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 3b5b9e7b05b7..3156dc3591d8 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2359,6 +2359,14 @@ intel_hdmi_dp_dual_mode_detect(struct drm_connector *connector, bool has_edid) "DP dual mode adaptor (%s) detected (max TMDS clock: %d kHz)\n", drm_dp_get_dual_mode_type_name(type), hdmi->dp_dual_mode.max_tmds_clock); + + /* Older VBTs are often buggy and can't be trusted :( Play it safe. */ + if ((DISPLAY_VER(dev_priv) >= 8 || IS_HASWELL(dev_priv)) && + !intel_bios_is_port_dp_dual_mode(dev_priv, port)) { + drm_dbg_kms(&dev_priv->drm, + "Ignoring DP dual mode adaptor max TMDS clock for native HDMI port\n"); + hdmi->dp_dual_mode.max_tmds_clock = 0; + } } static bool -- cgit From 479e3b02b73a2de2b19df2950863149c59d57bfe Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Mon, 17 Jan 2022 12:44:51 +0800 Subject: drm/amdgpu: add vram check function for GMC This patch will add vram check function for GMC block. It will write pattern data to the vram and then read back from the vram, so that to verify the work status of vram. This patch will cover gmc v6/7/8/9/10. 
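The check is simple enough to model in plain C: fill the allocation with a known pattern, then compare a few bytes at the start, the middle and the end rather than scanning the whole buffer. The sketch below is a userspace model only; the real code in the diff allocates a 1 MiB BO in VRAM through amdgpu_bo_create_kernel() and uses 0x86 as the pattern.

#include <stdlib.h>
#include <string.h>

static int vram_check_model(size_t size)
{
	unsigned char pattern[10], *buf;
	int bad;

	buf = malloc(size);		/* stands in for the VRAM BO */
	if (!buf)
		return -1;

	memset(buf, 0x86, size);
	memset(pattern, 0x86, sizeof(pattern));

	/* sample three spots instead of checking every byte */
	bad = memcmp(buf, pattern, sizeof(pattern)) ||
	      memcmp(buf + size / 2, pattern, sizeof(pattern)) ||
	      memcmp(buf + size - sizeof(pattern), pattern, sizeof(pattern));

	free(buf);
	return bad ? -1 : 0;
}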
Signed-off-by: Xiaojian Du Reviewed-by: Huang Rui Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 46 +++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 6 +++++ drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 5 +++- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 5 +++- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 5 +++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++-- 7 files changed, 74 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index a0a9abd23a7b..cbe30ccf6162 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -822,3 +822,49 @@ void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev) break; } } + +int amdgpu_gmc_vram_checking(struct amdgpu_device *adev) +{ + struct amdgpu_bo *vram_bo; + uint64_t vram_gpu; + void *vram_ptr; + + int ret, size = 0x100000; + uint8_t cptr[10]; + + ret = amdgpu_bo_create_kernel(adev, size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, + &vram_bo, + &vram_gpu, + &vram_ptr); + if (ret) + return ret; + + memset(vram_ptr, 0x86, size); + memset(cptr, 0x86, 10); + + /** + * Check the start, the mid, and the end of the memory if the content of + * each byte is the pattern "0x86". If yes, we suppose the vram bo is + * workable. + * + * Note: If check the each byte of whole 1M bo, it will cost too many + * seconds, so here, we just pick up three parts for emulation. + */ + ret = memcmp(vram_ptr, cptr, 10); + if (ret) + return ret; + + ret = memcmp(vram_ptr + (size / 2), cptr, 10); + if (ret) + return ret; + + ret = memcmp(vram_ptr + size - 10, cptr, 10); + if (ret) + return ret; + + amdgpu_bo_free_kernel(&vram_bo, &vram_gpu, + &vram_ptr); + + return 0; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index 0001631cfedb..a5e8e0e08970 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -336,4 +336,5 @@ void amdgpu_gmc_init_pdb0(struct amdgpu_device *adev); uint64_t amdgpu_gmc_vram_mc2pa(struct amdgpu_device *adev, uint64_t mc_addr); uint64_t amdgpu_gmc_vram_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); uint64_t amdgpu_gmc_vram_cpu_pa(struct amdgpu_device *adev, struct amdgpu_bo *bo); +int amdgpu_gmc_vram_checking(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 4f8d356f8432..bb9a11bc644b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -1057,6 +1057,12 @@ static int gmc_v10_0_hw_init(void *handle) if (r) return r; + if (amdgpu_emu_mode == 1) { + r = amdgpu_gmc_vram_checking(adev); + if (r) + return r; + } + if (adev->umc.funcs && adev->umc.funcs->init_registers) adev->umc.funcs->init_registers(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index cd6c38e083d0..84f0debe8264 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -922,7 +922,10 @@ static int gmc_v6_0_hw_init(void *handle) if (r) return r; - return r; + if (amdgpu_emu_mode == 1) + return amdgpu_gmc_vram_checking(adev); + else + return r; } static int gmc_v6_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index ab8adbff9e2d..8800a18b0cf6 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -1111,7 +1111,10 @@ static int gmc_v7_0_hw_init(void *handle) if (r) return r; - return r; + if (amdgpu_emu_mode == 1) + return amdgpu_gmc_vram_checking(adev); + else + return r; } static int gmc_v7_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 054733838292..1c10fa5d0db7 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -1242,7 +1242,10 @@ static int gmc_v8_0_hw_init(void *handle) if (r) return r; - return r; + if (amdgpu_emu_mode == 1) + return amdgpu_gmc_vram_checking(adev); + else + return r; } static int gmc_v8_0_hw_fini(void *handle) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index c76ffd1a70cd..6866e0311b49 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1815,7 +1815,7 @@ static int gmc_v9_0_hw_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; bool value; - int i; + int i, r; /* The sequence of these two function calls matters.*/ gmc_v9_0_init_golden_registers(adev); @@ -1850,7 +1850,14 @@ static int gmc_v9_0_hw_init(void *handle) if (adev->umc.funcs && adev->umc.funcs->init_registers) adev->umc.funcs->init_registers(adev); - return gmc_v9_0_gart_enable(adev); + r = gmc_v9_0_gart_enable(adev); + if (r) + return r; + + if (amdgpu_emu_mode == 1) + return amdgpu_gmc_vram_checking(adev); + else + return r; } /** -- cgit From 86700a402694db56030a74481d09f35520332736 Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Tue, 18 Jan 2022 17:18:13 +0800 Subject: drm/amdgpu: modify a pair of functions for the pcie port wreg/rreg This patch will modify a pair of functions for pcie port wreg/rreg. AMD GPU have had an independent NBIO block from SOC15 arch. If the dirver wants to read/write the address space of the pcie devices, it has to go through the NBIO block. This patch will move the pcie port wreg/rreg functions to "amdgpu_device.c", so that to reuse the functions on the future GPU ASICs. 
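The helpers implement the usual index/data register pair: the target offset is first written to an index register exposed by the NBIO block, then the data register reads or writes that location. The sketch below is a toy model of that mechanism, not the driver code, which additionally serialises accesses with pcie_idx_lock and goes through RREG32()/WREG32().

#include <stdint.h>

struct port_model {
	uint32_t regs[256];	/* stand-in for the PCIe port register file */
	uint32_t index;		/* the "index" register */
};

static uint32_t port_rreg(struct port_model *p, uint32_t reg)
{
	p->index = reg * 4;		/* select the target offset */
	return p->regs[p->index / 4];	/* read it back through "data" */
}

static void port_wreg(struct port_model *p, uint32_t reg, uint32_t v)
{
	p->index = reg * 4;		/* select the target offset */
	p->regs[p->index / 4] = v;	/* write it through "data" */
}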
Signed-off-by: Xiaojian Du Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 33 +++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/nv.c | 34 ++---------------------------- 3 files changed, 39 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8ddddf12e1ef..8658312764bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1315,6 +1315,10 @@ void amdgpu_device_invalidate_hdp(struct amdgpu_device *adev, struct amdgpu_ring *ring); void amdgpu_device_halt(struct amdgpu_device *adev); +u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev, + u32 reg); +void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, + u32 reg, u32 v); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index f0c07523b04d..fcde99c69c47 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -5728,3 +5728,36 @@ void amdgpu_device_halt(struct amdgpu_device *adev) pci_disable_device(pdev); pci_wait_for_pending_transaction(pdev); } + +u32 amdgpu_device_pcie_port_rreg(struct amdgpu_device *adev, + u32 reg) +{ + unsigned long flags, address, data; + u32 r; + + address = adev->nbio.funcs->get_pcie_port_index_offset(adev); + data = adev->nbio.funcs->get_pcie_port_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(address, reg * 4); + (void)RREG32(address); + r = RREG32(data); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); + return r; +} + +void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev, + u32 reg, u32 v) +{ + unsigned long flags, address, data; + + address = adev->nbio.funcs->get_pcie_port_index_offset(adev); + data = adev->nbio.funcs->get_pcie_port_data_offset(adev); + + spin_lock_irqsave(&adev->pcie_idx_lock, flags); + WREG32(address, reg * 4); + (void)RREG32(address); + WREG32(data, v); + (void)RREG32(data); + spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); +} diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 2ec1ffb36b1f..8f0c92cbdc4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -258,21 +258,6 @@ static u64 nv_pcie_rreg64(struct amdgpu_device *adev, u32 reg) return amdgpu_device_indirect_rreg64(adev, address, data, reg); } -static u32 nv_pcie_port_rreg(struct amdgpu_device *adev, u32 reg) -{ - unsigned long flags, address, data; - u32 r; - address = adev->nbio.funcs->get_pcie_port_index_offset(adev); - data = adev->nbio.funcs->get_pcie_port_data_offset(adev); - - spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(address, reg * 4); - (void)RREG32(address); - r = RREG32(data); - spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); - return r; -} - static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) { unsigned long address, data; @@ -283,21 +268,6 @@ static void nv_pcie_wreg64(struct amdgpu_device *adev, u32 reg, u64 v) amdgpu_device_indirect_wreg64(adev, address, data, reg, v); } -static void nv_pcie_port_wreg(struct amdgpu_device *adev, u32 reg, u32 v) -{ - unsigned long flags, address, data; - - address = adev->nbio.funcs->get_pcie_port_index_offset(adev); - data = adev->nbio.funcs->get_pcie_port_data_offset(adev); - - spin_lock_irqsave(&adev->pcie_idx_lock, flags); - WREG32(address, reg * 4); - 
(void)RREG32(address); - WREG32(data, v); - (void)RREG32(data); - spin_unlock_irqrestore(&adev->pcie_idx_lock, flags); -} - static u32 nv_didt_rreg(struct amdgpu_device *adev, u32 reg) { unsigned long flags, address, data; @@ -742,8 +712,8 @@ static int nv_common_early_init(void *handle) adev->pcie_wreg = &nv_pcie_wreg; adev->pcie_rreg64 = &nv_pcie_rreg64; adev->pcie_wreg64 = &nv_pcie_wreg64; - adev->pciep_rreg = &nv_pcie_port_rreg; - adev->pciep_wreg = &nv_pcie_port_wreg; + adev->pciep_rreg = amdgpu_device_pcie_port_rreg; + adev->pciep_wreg = amdgpu_device_pcie_port_wreg; /* TODO: will add them during VCN v2 implementation */ adev->uvd_ctx_rreg = NULL; -- cgit From 8eb53bb2aa8afa170ba40f9460f2de4d4d138764 Mon Sep 17 00:00:00 2001 From: yipechai Date: Mon, 17 Jan 2022 11:23:59 +0800 Subject: drm/amdgpu: Remove repeated calls Remove repeated calls. Signed-off-by: yipechai Reviewed-by: Hawking Zhang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 7a1d2bac698e..4992bc554c0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -1704,9 +1704,7 @@ static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, struct ras_query_if *info) { - struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, - info->head.block, - info->head.sub_block_index); + struct amdgpu_ras_block_object *block_obj; /* * Only two block need to query read/write * RspStatus at current state -- cgit From 33cd016e600ac3417aff7c85f59b9a4b70a947e9 Mon Sep 17 00:00:00 2001 From: mziya Date: Wed, 19 Jan 2022 13:01:11 +0800 Subject: drm/amdgpu: remove unused variable Remove set but unused variable. warning: variable 'umc_reg_offset' set but not used Signed-off-by: mziya Reported-by: kernel test robot Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c index 291b37f6db4e..05f79eea307c 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c @@ -94,16 +94,12 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, uint32_t umc_inst = 0; uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; uint32_t channel_index = 0; /* TODO: driver needs to toggle DF Cstate to ensure * safe access of UMC registers. 
Will add the protection */ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_v8_7_reg_offset(adev, - umc_inst, - ch_inst); channel_index = get_umc_v8_7_channel_index(adev, umc_inst, ch_inst); -- cgit From 5904e4135f3b3e6cc7bed46bda71118d55a56681 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Wed, 19 Jan 2022 16:09:40 +0800 Subject: drm/amdgpu: remove unused variable warning Signed-off-by: Stanley.Yang Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 23 ----------------------- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 6 ------ 2 files changed, 29 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index 6953426f0bed..526de1ca9b8d 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -61,22 +61,9 @@ static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device uint32_t channel_index, unsigned long *error_count) { - uint32_t ecc_err_cnt; uint64_t mc_umc_status; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); - /* - * select the lower chip and check the error count - * skip add error count, calc error counter only from mca_umc_status - */ - ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_lo_chip; - - /* - * select the higher chip and check the err counter - * skip add error count, calc error counter only from mca_umc_status - */ - ecc_err_cnt = ras->umc_ecc.ecc[channel_index].ce_count_hi_chip; - /* check for SRAM correctable error MCUMC_STATUS is a 64 bit register */ mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; @@ -110,15 +97,11 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, uint32_t umc_inst = 0; uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; uint32_t channel_index = 0; /*TODO: driver needs to toggle DF Cstate to ensure * safe access of UMC registers. Will add the protection */ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_v6_7_reg_offset(adev, - umc_inst, - ch_inst); channel_index = get_umc_v6_7_channel_index(adev, umc_inst, ch_inst); @@ -133,7 +116,6 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, - uint32_t umc_reg_offset, uint32_t ch_inst, uint32_t umc_inst) { @@ -192,18 +174,13 @@ static void umc_v6_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev uint32_t umc_inst = 0; uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; /*TODO: driver needs to toggle DF Cstate to ensure * safe access of UMC resgisters. 
Will add the protection * when firmware interface is ready */ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_v6_7_reg_offset(adev, - umc_inst, - ch_inst); umc_v6_7_ecc_info_query_error_address(adev, err_data, - umc_reg_offset, ch_inst, umc_inst); } diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c index 05f79eea307c..cd57f39df7d1 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c @@ -114,7 +114,6 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev, struct ras_err_data *err_data, - uint32_t umc_reg_offset, uint32_t ch_inst, uint32_t umc_inst) { @@ -173,19 +172,14 @@ static void umc_v8_7_ecc_info_query_ras_error_address(struct amdgpu_device *adev uint32_t umc_inst = 0; uint32_t ch_inst = 0; - uint32_t umc_reg_offset = 0; /* TODO: driver needs to toggle DF Cstate to ensure * safe access of UMC resgisters. Will add the protection * when firmware interface is ready */ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - umc_reg_offset = get_umc_v8_7_reg_offset(adev, - umc_inst, - ch_inst); umc_v8_7_ecc_info_query_error_address(adev, err_data, - umc_reg_offset, ch_inst, umc_inst); } -- cgit From 1b08dfb889b2c584b444538c9500af24ba0a6dc7 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 18 Jan 2022 12:53:11 +0100 Subject: drm/amdgpu: remove gart.ready flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit That's just a leftover from old radeon days and was preventing CS and GART bindings before the hardware was initialized. But nowdays that is perfectly valid. The only thing we need to warn about are GART binding before the table is even allocated. Signed-off-by: Christian König Reviewed-by: Guchun Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 35 ++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h | 15 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 9 +--- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 77 +++++++++-------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 4 +- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 11 +---- drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 7 +-- drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 8 +-- drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 8 +-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 10 +--- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 5 +- 11 files changed, 52 insertions(+), 137 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 645950a653a0..53cc844346f0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -150,7 +150,7 @@ void amdgpu_gart_table_vram_free(struct amdgpu_device *adev) * replaces them with the dummy page (all asics). * Returns 0 for success, -EINVAL for failure. 
*/ -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, +void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, int pages) { unsigned t; @@ -161,13 +161,11 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, uint64_t flags = 0; int idx; - if (!adev->gart.ready) { - WARN(1, "trying to unbind memory from uninitialized GART !\n"); - return -EINVAL; - } + if (WARN_ON(!adev->gart.ptr)) + return; if (!drm_dev_enter(adev_to_drm(adev), &idx)) - return 0; + return; t = offset / AMDGPU_GPU_PAGE_SIZE; p = t / AMDGPU_GPU_PAGES_IN_CPU_PAGE; @@ -188,7 +186,6 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, amdgpu_gmc_flush_gpu_tlb(adev, 0, i, 0); drm_dev_exit(idx); - return 0; } /** @@ -204,7 +201,7 @@ int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, * Map the dma_addresses into GART entries (all asics). * Returns 0 for success, -EINVAL for failure. */ -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, +void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags, void *dst) { @@ -212,13 +209,8 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, unsigned i, j, t; int idx; - if (!adev->gart.ready) { - WARN(1, "trying to bind memory to uninitialized GART !\n"); - return -EINVAL; - } - if (!drm_dev_enter(adev_to_drm(adev), &idx)) - return 0; + return; t = offset / AMDGPU_GPU_PAGE_SIZE; @@ -230,7 +222,6 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, } } drm_dev_exit(idx); - return 0; } /** @@ -246,20 +237,14 @@ int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, * (all asics). * Returns 0 for success, -EINVAL for failure. */ -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, +void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags) { - if (!adev->gart.ready) { - WARN(1, "trying to bind memory to uninitialized GART !\n"); - return -EINVAL; - } - - if (!adev->gart.ptr) - return 0; + if (WARN_ON(!adev->gart.ptr)) + return; - return amdgpu_gart_map(adev, offset, pages, dma_addr, flags, - adev->gart.ptr); + amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h index 78895413cf9f..8fea3e04e411 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.h @@ -46,7 +46,6 @@ struct amdgpu_gart { unsigned num_gpu_pages; unsigned num_cpu_pages; unsigned table_size; - bool ready; /* Asic default pte flags */ uint64_t gart_pte_flags; @@ -58,12 +57,12 @@ int amdgpu_gart_table_vram_pin(struct amdgpu_device *adev); void amdgpu_gart_table_vram_unpin(struct amdgpu_device *adev); int amdgpu_gart_init(struct amdgpu_device *adev); void amdgpu_gart_dummy_page_fini(struct amdgpu_device *adev); -int amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, - int pages); -int amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, - int pages, dma_addr_t *dma_addr, uint64_t flags, - void *dst); -int amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, - int pages, dma_addr_t *dma_addr, uint64_t flags); +void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, + int pages); +void amdgpu_gart_map(struct amdgpu_device *adev, uint64_t offset, + int pages, dma_addr_t *dma_addr, uint64_t flags, + void *dst); +void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t 
offset, + int pages, dma_addr_t *dma_addr, uint64_t flags); void amdgpu_gart_invalidate_tlb(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 72022df264f6..c5263908caec 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -220,26 +220,21 @@ uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr) * * Re-init the gart for each known BO in the GTT. */ -int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr) +void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr) { struct amdgpu_gtt_node *node; struct drm_mm_node *mm_node; struct amdgpu_device *adev; - int r = 0; adev = container_of(mgr, typeof(*adev), mman.gtt_mgr); spin_lock(&mgr->lock); drm_mm_for_each_node(mm_node, &mgr->mm) { node = container_of(mm_node, typeof(*node), base.mm_nodes[0]); - r = amdgpu_ttm_recover_gart(node->tbo); - if (r) - break; + amdgpu_ttm_recover_gart(node->tbo); } spin_unlock(&mgr->lock); amdgpu_gart_invalidate_tlb(adev); - - return r; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 1e012b45f663..f0cd52b157f8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -242,10 +242,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, dma_addr_t *dma_addr; dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT]; - r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, - cpu_addr); - if (r) - goto error_free; + amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr); } else { dma_addr_t dma_address; @@ -253,11 +250,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, dma_address += adev->vm_manager.vram_base_offset; for (i = 0; i < num_pages; ++i) { - r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, - &dma_address, flags, cpu_addr); - if (r) - goto error_free; - + amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address, + flags, cpu_addr); dma_address += PAGE_SIZE; } } @@ -822,14 +816,13 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, #endif } -static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, - struct ttm_buffer_object *tbo, - uint64_t flags) +static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, + struct ttm_buffer_object *tbo, + uint64_t flags) { struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo); struct ttm_tt *ttm = tbo->ttm; struct amdgpu_ttm_tt *gtt = (void *)ttm; - int r; if (amdgpu_bo_encrypted(abo)) flags |= AMDGPU_PTE_TMZ; @@ -837,10 +830,8 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { uint64_t page_idx = 1; - r = amdgpu_gart_bind(adev, gtt->offset, page_idx, - gtt->ttm.dma_address, flags); - if (r) - goto gart_bind_fail; + amdgpu_gart_bind(adev, gtt->offset, page_idx, + gtt->ttm.dma_address, flags); /* The memory type of the first page defaults to UC. 
Now * modify the memory type to NC from the second page of @@ -849,21 +840,13 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); - r = amdgpu_gart_bind(adev, - gtt->offset + (page_idx << PAGE_SHIFT), - ttm->num_pages - page_idx, - &(gtt->ttm.dma_address[page_idx]), flags); + amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT), + ttm->num_pages - page_idx, + &(gtt->ttm.dma_address[page_idx]), flags); } else { - r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - gtt->ttm.dma_address, flags); + amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, + gtt->ttm.dma_address, flags); } - -gart_bind_fail: - if (r) - DRM_ERROR("failed to bind %u pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); - - return r; } /* @@ -879,7 +862,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void*)ttm; uint64_t flags; - int r = 0; + int r; if (!bo_mem) return -EINVAL; @@ -926,14 +909,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, /* bind pages into GART page tables */ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; - r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - gtt->ttm.dma_address, flags); - - if (r) - DRM_ERROR("failed to bind %u pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); + amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, + gtt->ttm.dma_address, flags); gtt->bound = true; - return r; + return 0; } /* @@ -983,12 +962,7 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) /* Bind pages */ gtt->offset = (u64)tmp->start << PAGE_SHIFT; - r = amdgpu_ttm_gart_bind(adev, bo, flags); - if (unlikely(r)) { - ttm_resource_free(bo, &tmp); - return r; - } - + amdgpu_ttm_gart_bind(adev, bo, flags); amdgpu_gart_invalidate_tlb(adev); ttm_resource_free(bo, &bo->resource); ttm_bo_assign_mem(bo, tmp); @@ -1002,19 +976,16 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to * rebind GTT pages during a GPU reset. 
*/ -int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) +void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); uint64_t flags; - int r; if (!tbo->ttm) - return 0; + return; flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource); - r = amdgpu_ttm_gart_bind(adev, tbo, flags); - - return r; + amdgpu_ttm_gart_bind(adev, tbo, flags); } /* @@ -1028,7 +999,6 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); struct amdgpu_ttm_tt *gtt = (void *)ttm; - int r; /* if the pages have userptr pinning then clear that first */ if (gtt->userptr) { @@ -1048,10 +1018,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, return; /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ - r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); - if (r) - DRM_ERROR("failed to unbind %u pages at 0x%08llX\n", - gtt->ttm.num_pages, gtt->offset); + amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); gtt->bound = false; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index f06fd19b4895..0efc31e3a457 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -119,7 +119,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev); bool amdgpu_gtt_mgr_has_gart_addr(struct ttm_resource *mem); uint64_t amdgpu_gtt_mgr_usage(struct amdgpu_gtt_mgr *mgr); -int amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr); +void amdgpu_gtt_mgr_recover(struct amdgpu_gtt_mgr *mgr); uint64_t amdgpu_preempt_mgr_usage(struct ttm_resource_manager *man); @@ -162,7 +162,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, struct dma_fence **fence); int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); -int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); +void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index bb9a11bc644b..5e88655cdfa5 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -1000,14 +1000,7 @@ static int gmc_v10_0_gart_enable(struct amdgpu_device *adev) return -EINVAL; } - if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) - goto skip_pin_bo; - - r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); - if (r) - return r; - -skip_pin_bo: + amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); r = adev->gfxhub.funcs->gart_enable(adev); if (r) return r; @@ -1033,8 +1026,6 @@ skip_pin_bo: (unsigned)(adev->gmc.gart_size >> 20), (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); - adev->gart.ready = true; - return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c index 84f0debe8264..ec291d28edff 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c @@ -469,16 +469,14 @@ static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) { uint64_t table_addr; - int r, i; u32 field; + int i; if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } - r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); - if (r) - return r; + amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); @@ 
-558,7 +556,6 @@ static int gmc_v6_0_gart_enable(struct amdgpu_device *adev) dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), (unsigned long long)table_addr); - adev->gart.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c index 8800a18b0cf6..344d819b4c1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c @@ -613,17 +613,14 @@ static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) { uint64_t table_addr; - int r, i; u32 tmp, field; + int i; if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } - r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); - if (r) - return r; - + amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); /* Setup TLB control */ @@ -712,7 +709,6 @@ static int gmc_v7_0_gart_enable(struct amdgpu_device *adev) DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), (unsigned long long)table_addr); - adev->gart.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c index 1c10fa5d0db7..ca9841d5669f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c @@ -837,17 +837,14 @@ static void gmc_v8_0_set_prt(struct amdgpu_device *adev, bool enable) static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) { uint64_t table_addr; - int r, i; u32 tmp, field; + int i; if (adev->gart.bo == NULL) { dev_err(adev->dev, "No VRAM object for PCIE GART.\n"); return -EINVAL; } - r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); - if (r) - return r; - + amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); table_addr = amdgpu_bo_gpu_offset(adev->gart.bo); /* Setup TLB control */ @@ -953,7 +950,6 @@ static int gmc_v8_0_gart_enable(struct amdgpu_device *adev) DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n", (unsigned)(adev->gmc.gart_size >> 20), (unsigned long long)table_addr); - adev->gart.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 6866e0311b49..de32dbca9ab8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1783,14 +1783,7 @@ static int gmc_v9_0_gart_enable(struct amdgpu_device *adev) return -EINVAL; } - if (amdgpu_sriov_vf(adev) && amdgpu_in_reset(adev)) - goto skip_pin_bo; - - r = amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); - if (r) - return r; - -skip_pin_bo: + amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr); r = adev->gfxhub.funcs->gart_enable(adev); if (r) return r; @@ -1807,7 +1800,6 @@ skip_pin_bo: DRM_INFO("PTB located at 0x%016llX\n", (unsigned long long)amdgpu_bo_gpu_offset(adev->gart.bo)); - adev->gart.ready = true; return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index ed5385137f48..d986f9ee0e1f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -86,10 +86,7 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, cpu_addr = &job->ibs[0].ptr[num_dw]; - r = amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr); - if (r) - goto error_free; - + amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr); r = amdgpu_job_submit(job, &adev->mman.entity, 
AMDGPU_FENCE_OWNER_UNDEFINED, &fence); if (r) -- cgit From 590e86fe3462da81f1cbc4fc8d4cbf8b16b4f968 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 18 Jan 2022 10:48:49 -0500 Subject: drm/amdgpu: fix broken debug sdma vram access function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Debug VRAM access through SDMA has several broken parts resulting in silent MMIO fallback. BO kernel creation takes the location of the cpu addr pointer, not the pointer itself for address kmap. drm_dev_enter return true on success so change access check. The source BO is reserved but not pinned so find the address using the cursor offset relative to its memory domain start. Signed-off-by: Jonathan Kim Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 15 ++++++++------- 1 file changed, 8 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f0cd52b157f8..74d8b721bd7d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1406,6 +1406,7 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, { struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); + struct amdgpu_res_cursor src_mm; struct amdgpu_job *job; struct dma_fence *fence; uint64_t src_addr, dst_addr; @@ -1418,9 +1419,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, if (!adev->mman.sdma_access_ptr) return -EACCES; - r = drm_dev_enter(adev_to_drm(adev), &idx); - if (r) - return r; + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return -ENODEV; if (write) memcpy(adev->mman.sdma_access_ptr, buf, len); @@ -1430,7 +1430,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, if (r) goto out; - src_addr = amdgpu_bo_gpu_offset(abo); + amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm); + src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start; dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo); if (write) swap(src_addr, dst_addr); @@ -1828,7 +1829,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, AMDGPU_GEM_DOMAIN_GTT, &adev->mman.sdma_access_bo, NULL, - adev->mman.sdma_access_ptr)) + &adev->mman.sdma_access_ptr)) DRM_WARN("Debug VRAM access will use slowpath MM access\n"); return 0; @@ -1852,6 +1853,8 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) if (adev->mman.stolen_reserved_size) amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, + &adev->mman.sdma_access_ptr); amdgpu_ttm_fw_reserve_vram_fini(adev); if (drm_dev_enter(adev_to_drm(adev), &idx)) { @@ -1871,8 +1874,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); ttm_device_fini(&adev->mman.bdev); adev->mman.initialized = false; - amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, - &adev->mman.sdma_access_ptr); DRM_INFO("amdgpu: ttm finalized\n"); } -- cgit From f61c40c0757a79bcf744314df606c2bc8ae6a729 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Tue, 18 Jan 2022 10:57:54 -0500 Subject: drm/amdkfd: enable heavy-weight TLB flush on Arcturus SDMA FW fixes the hang issue for adding heavy-weight TLB flush on Arcturus, so we can enable it. 
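In practice the change boils down to a small gate on the GC IP version and the SDMA firmware level. A condensed sketch of the helper the patch adds (restating the hunk below for clarity — GC 9.4.1 is Arcturus, GC 9.4.2 is Aldebaran, and the >= 18 check matches the SDMA firmware that carries the fix):

static bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev)
{
	/* Aldebaran has always been able to do the heavy-weight flush */
	if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2))
		return true;

	/* Arcturus needs the fixed SDMA firmware (version 18 or newer) */
	return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) &&
	       dev->adev->sdma.instance[0].fw_version >= 18;
}

kfd_ioctl_unmap_memory_from_gpu() then keys the heavy-weight flush on this helper instead of hard-coding Aldebaran, and the map path flushes either when page tables were actually freed or on devices where the helper returns false.
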
Signed-off-by: Eric Huang Acked-by: Alex Deucher Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 6 ------ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 10 ++++++++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index f9bab963a948..5df387c4d7fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1813,12 +1813,6 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( true); ret = unreserve_bo_and_vms(&ctx, false, false); - /* Only apply no TLB flush on Aldebaran to - * workaround regressions on other Asics. - */ - if (table_freed && (adev->asic_type != CHIP_ALDEBARAN)) - *table_freed = true; - goto out; out_unreserve: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 4bfc0c8ab764..337953af7c2f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1416,6 +1416,12 @@ err_unlock: return ret; } +static bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) { + return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || + (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && + dev->adev->sdma.instance[0].fw_version >= 18); +} + static int kfd_ioctl_map_memory_to_gpu(struct file *filep, struct kfd_process *p, void *data) { @@ -1503,7 +1509,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep, } /* Flush TLBs after waiting for the page table updates to complete */ - if (table_freed) { + if (table_freed || !kfd_flush_tlb_after_unmap(dev)) { for (i = 0; i < args->n_devices; i++) { peer = kfd_device_by_id(devices_arr[i]); if (WARN_ON_ONCE(!peer)) @@ -1603,7 +1609,7 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep, } mutex_unlock(&p->mutex); - if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2)) { + if (kfd_flush_tlb_after_unmap(dev)) { err = amdgpu_amdkfd_gpuvm_sync_memory(dev->adev, (struct kgd_mem *) mem, true); if (err) { -- cgit From f548f4291e89e6144d3c5b8a9ada66c7dbaa1639 Mon Sep 17 00:00:00 2001 From: Zhan Liu Date: Wed, 19 Jan 2022 16:55:16 -0500 Subject: drm/amd/display: Correct MPC split policy for DCN301 [Why] DCN301 has seamless boot enabled. With MPC split enabled at the same time, system will hang. [How] Revert MPC split policy back to "MPC_SPLIT_AVOID". Since we have ODM combine enabled on DCN301, pipe split is not necessary here. 
Signed-off-by: Zhan Liu Reviewed-by: Charlene Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index c1c6e602b06c..b4001233867c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -686,7 +686,7 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_clock_gate = true, .disable_pplib_clock_request = true, .disable_pplib_wm_range = true, - .pipe_split_policy = MPC_SPLIT_DYNAMIC, + .pipe_split_policy = MPC_SPLIT_AVOID, .force_single_disp_pipe_split = false, .disable_dcc = DCC_ENABLE, .vsr_support = true, -- cgit From 6d1d72fb4fcf85ff0a96de1c16c46162b3baf9a6 Mon Sep 17 00:00:00 2001 From: Zhan Liu Date: Wed, 19 Jan 2022 17:07:53 -0500 Subject: drm/amd/display: change FIFO reset condition to embedded display only [Why] FIFO reset is only necessary for fast boot sequence, where otg is disabled and dig fe is enabled when changing dispclk. Fast boot is only enabled on embedded displays. [How] Change FIFO reset condition to "embedded display only". Signed-off-by: Zhan Liu Reviewed-by: Charlene Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index f1593186e964..f3ff141b706a 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -1608,7 +1608,7 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.tg->inst); - if (dc_is_dp_signal(pipe_ctx->stream->signal) && + if (dc_is_embedded_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc->funcs->reset_fifo) pipe_ctx->stream_res.stream_enc->funcs->reset_fifo( pipe_ctx->stream_res.stream_enc); -- cgit From 27535f1d94318f34fd6d41fd01bfa4a970e73bd9 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 1 Dec 2021 17:25:47 +0200 Subject: drm/i915: Clean up vlv/chv sprite plane registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_BIT() & co. to polish the vlv/chv sprite plane registers. 
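The REG_BIT()/REG_GENMASK()/REG_FIELD_PREP() helpers used throughout the conversion below describe each register field as an explicit mask plus a value shifted into that mask, instead of open-coded shifts. As a rough illustration only — the expansions in the comments are the conventional GENMASK/FIELD_PREP semantics, not the literal i915 definitions, which additionally add compile-time range checks:

/*
 * Illustration:
 *
 *   REG_BIT(31)                            ~ (1u << 31)
 *   REG_GENMASK(31, 16)                    ~ 0xffff0000
 *   REG_FIELD_PREP(REG_GENMASK(31, 16), y) ~ ((y) << 16)
 *
 * so the sprite position write goes from the open-coded form ...
 */
intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), (crtc_y << 16) | crtc_x);

/* ... to one that names both fields and their widths: */
intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id),
		  SP_POS_Y(crtc_y) | SP_POS_X(crtc_x));
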
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211201152552.7821-10-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/intel_sprite.c | 9 +-- drivers/gpu/drm/i915/i915_reg.h | 103 ++++++++++++++++++---------- 2 files changed, 70 insertions(+), 42 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 9c231567bd91..7ffca5669ab9 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -313,7 +313,7 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 sprctl = 0; if (crtc_state->gamma_enable) - sprctl |= SP_GAMMA_ENABLE; + sprctl |= SP_PIPE_GAMMA_ENABLE; return sprctl; } @@ -436,9 +436,9 @@ vlv_sprite_update_noarm(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPSTRIDE(pipe, plane_id), plane_state->view.color_plane[0].mapping_stride); intel_de_write_fw(dev_priv, SPPOS(pipe, plane_id), - (crtc_y << 16) | crtc_x); + SP_POS_Y(crtc_y) | SP_POS_X(crtc_x)); intel_de_write_fw(dev_priv, SPSIZE(pipe, plane_id), - ((crtc_h - 1) << 16) | (crtc_w - 1)); + SP_HEIGHT(crtc_h - 1) | SP_WIDTH(crtc_w - 1)); spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); } @@ -479,7 +479,8 @@ vlv_sprite_update_arm(struct intel_plane *plane, intel_de_write_fw(dev_priv, SPCONSTALPHA(pipe, plane_id), 0); intel_de_write_fw(dev_priv, SPLINOFF(pipe, plane_id), linear_offset); - intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), (y << 16) | x); + intel_de_write_fw(dev_priv, SPTILEOFF(pipe, plane_id), + SP_OFFSET_Y(y) | SP_OFFSET_X(x)); /* * The control register self-arms if the plane was previously diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 5e57652b7807..552d4803dd90 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -6149,48 +6149,67 @@ enum { #define SPRSURFLIVE(pipe) _MMIO_PIPE(pipe, _SPRA_SURFLIVE, _SPRB_SURFLIVE) #define _SPACNTR (VLV_DISPLAY_BASE + 0x72180) -#define SP_ENABLE (1 << 31) -#define SP_GAMMA_ENABLE (1 << 30) -#define SP_PIXFORMAT_MASK (0xf << 26) -#define SP_FORMAT_YUV422 (0x0 << 26) -#define SP_FORMAT_8BPP (0x2 << 26) -#define SP_FORMAT_BGR565 (0x5 << 26) -#define SP_FORMAT_BGRX8888 (0x6 << 26) -#define SP_FORMAT_BGRA8888 (0x7 << 26) -#define SP_FORMAT_RGBX1010102 (0x8 << 26) -#define SP_FORMAT_RGBA1010102 (0x9 << 26) -#define SP_FORMAT_BGRX1010102 (0xa << 26) /* CHV pipe B */ -#define SP_FORMAT_BGRA1010102 (0xb << 26) /* CHV pipe B */ -#define SP_FORMAT_RGBX8888 (0xe << 26) -#define SP_FORMAT_RGBA8888 (0xf << 26) -#define SP_ALPHA_PREMULTIPLY (1 << 23) /* CHV pipe B */ -#define SP_SOURCE_KEY (1 << 22) -#define SP_YUV_FORMAT_BT709 (1 << 18) -#define SP_YUV_ORDER_MASK (3 << 16) -#define SP_YUV_ORDER_YUYV (0 << 16) -#define SP_YUV_ORDER_UYVY (1 << 16) -#define SP_YUV_ORDER_YVYU (2 << 16) -#define SP_YUV_ORDER_VYUY (3 << 16) -#define SP_ROTATE_180 (1 << 15) -#define SP_TILED (1 << 10) -#define SP_MIRROR (1 << 8) /* CHV pipe B */ +#define SP_ENABLE REG_BIT(31) +#define SP_PIPE_GAMMA_ENABLE REG_BIT(30) +#define SP_FORMAT_MASK REG_GENMASK(29, 26) +#define SP_FORMAT_YUV422 REG_FIELD_PREP(SP_FORMAT_MASK, 0) +#define SP_FORMAT_8BPP REG_FIELD_PREP(SP_FORMAT_MASK, 2) +#define SP_FORMAT_BGR565 REG_FIELD_PREP(SP_FORMAT_MASK, 5) +#define SP_FORMAT_BGRX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 6) +#define SP_FORMAT_BGRA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 7) +#define SP_FORMAT_RGBX1010102 
REG_FIELD_PREP(SP_FORMAT_MASK, 8) +#define SP_FORMAT_RGBA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 9) +#define SP_FORMAT_BGRX1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 10) /* CHV pipe B */ +#define SP_FORMAT_BGRA1010102 REG_FIELD_PREP(SP_FORMAT_MASK, 11) /* CHV pipe B */ +#define SP_FORMAT_RGBX8888 REG_FIELD_PREP(SP_FORMAT_MASK, 14) +#define SP_FORMAT_RGBA8888 REG_FIELD_PREP(SP_FORMAT_MASK, 15) +#define SP_ALPHA_PREMULTIPLY REG_BIT(23) /* CHV pipe B */ +#define SP_SOURCE_KEY REG_BIT(22) +#define SP_YUV_FORMAT_BT709 REG_BIT(18) +#define SP_YUV_ORDER_MASK REG_GENMASK(17, 16) +#define SP_YUV_ORDER_YUYV REG_FIELD_PREP(SP_YUV_ORDER_MASK, 0) +#define SP_YUV_ORDER_UYVY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 1) +#define SP_YUV_ORDER_YVYU REG_FIELD_PREP(SP_YUV_ORDER_MASK, 2) +#define SP_YUV_ORDER_VYUY REG_FIELD_PREP(SP_YUV_ORDER_MASK, 3) +#define SP_ROTATE_180 REG_BIT(15) +#define SP_TILED REG_BIT(10) +#define SP_MIRROR REG_BIT(8) /* CHV pipe B */ #define _SPALINOFF (VLV_DISPLAY_BASE + 0x72184) #define _SPASTRIDE (VLV_DISPLAY_BASE + 0x72188) #define _SPAPOS (VLV_DISPLAY_BASE + 0x7218c) +#define SP_POS_Y_MASK REG_GENMASK(31, 16) +#define SP_POS_Y(y) REG_FIELD_PREP(SP_POS_Y_MASK, (y)) +#define SP_POS_X_MASK REG_GENMASK(15, 0) +#define SP_POS_X(x) REG_FIELD_PREP(SP_POS_X_MASK, (x)) #define _SPASIZE (VLV_DISPLAY_BASE + 0x72190) +#define SP_HEIGHT_MASK REG_GENMASK(31, 16) +#define SP_HEIGHT(h) REG_FIELD_PREP(SP_HEIGHT_MASK, (h)) +#define SP_WIDTH_MASK REG_GENMASK(15, 0) +#define SP_WIDTH(w) REG_FIELD_PREP(SP_WIDTH_MASK, (w)) #define _SPAKEYMINVAL (VLV_DISPLAY_BASE + 0x72194) #define _SPAKEYMSK (VLV_DISPLAY_BASE + 0x72198) #define _SPASURF (VLV_DISPLAY_BASE + 0x7219c) +#define SP_ADDR_MASK REG_GENMASK(31, 12) #define _SPAKEYMAXVAL (VLV_DISPLAY_BASE + 0x721a0) #define _SPATILEOFF (VLV_DISPLAY_BASE + 0x721a4) +#define SP_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define SP_OFFSET_Y(y) REG_FIELD_PREP(SP_OFFSET_Y_MASK, (y)) +#define SP_OFFSET_X_MASK REG_GENMASK(15, 0) +#define SP_OFFSET_X(x) REG_FIELD_PREP(SP_OFFSET_X_MASK, (x)) #define _SPACONSTALPHA (VLV_DISPLAY_BASE + 0x721a8) -#define SP_CONST_ALPHA_ENABLE (1 << 31) +#define SP_CONST_ALPHA_ENABLE REG_BIT(31) +#define SP_CONST_ALPHA_MASK REG_GENMASK(7, 0) +#define SP_CONST_ALPHA(alpha) REG_FIELD_PREP(SP_CONST_ALPHA_MASK, (alpha)) #define _SPACLRC0 (VLV_DISPLAY_BASE + 0x721d0) -#define SP_CONTRAST(x) ((x) << 18) /* u3.6 */ -#define SP_BRIGHTNESS(x) ((x) & 0xff) /* s8 */ +#define SP_CONTRAST_MASK REG_GENMASK(26, 18) +#define SP_CONTRAST(x) REG_FIELD_PREP(SP_CONTRAST_MASK, (x)) /* u3.6 */ +#define SP_BRIGHTNESS_MASK REG_GENMASK(7, 0) +#define SP_BRIGHTNESS(x) REG_FIELD_PREP(SP_BRIGHTNESS_MASK, (x)) /* s8 */ #define _SPACLRC1 (VLV_DISPLAY_BASE + 0x721d4) -#define SP_SH_SIN(x) (((x) & 0x7ff) << 16) /* s4.7 */ -#define SP_SH_COS(x) (x) /* u3.7 */ +#define SP_SH_SIN_MASK REG_GENMASK(26, 16) +#define SP_SH_SIN(x) REG_FIELD_PREP(SP_SH_SIN_MASK, (x)) /* s4.7 */ +#define SP_SH_COS_MASK REG_GENMASK(9, 0) +#define SP_SH_COS(x) REG_FIELD_PREP(SP_SH_COS_MASK, (x)) /* u3.7 */ #define _SPAGAMC (VLV_DISPLAY_BASE + 0x721e0) #define _SPBCNTR (VLV_DISPLAY_BASE + 0x72280) @@ -6241,28 +6260,36 @@ enum { #define SPCSCYGOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d900) #define SPCSCCBOFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d904) #define SPCSCCROFF(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d908) -#define SPCSC_OOFF(x) (((x) & 0x7ff) << 16) /* s11 */ -#define SPCSC_IOFF(x) (((x) & 0x7ff) << 0) /* s11 */ +#define SPCSC_OOFF_MASK REG_GENMASK(26, 16) +#define SPCSC_OOFF(x) REG_FIELD_PREP(SPCSC_OOFF_MASK, 
(x) & 0x7ff) /* s11 */ +#define SPCSC_IOFF_MASK REG_GENMASK(10, 0) +#define SPCSC_IOFF(x) REG_FIELD_PREP(SPCSC_IOFF_MASK, (x) & 0x7ff) /* s11 */ #define SPCSCC01(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d90c) #define SPCSCC23(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d910) #define SPCSCC45(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d914) #define SPCSCC67(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d918) #define SPCSCC8(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d91c) -#define SPCSC_C1(x) (((x) & 0x7fff) << 16) /* s3.12 */ -#define SPCSC_C0(x) (((x) & 0x7fff) << 0) /* s3.12 */ +#define SPCSC_C1_MASK REG_GENMASK(30, 16) +#define SPCSC_C1(x) REG_FIELD_PREP(SPCSC_C1_MASK, (x) & 0x7fff) /* s3.12 */ +#define SPCSC_C0_MASK REG_GENMASK(14, 0) +#define SPCSC_C0(x) REG_FIELD_PREP(SPCSC_C0_MASK, (x) & 0x7fff) /* s3.12 */ #define SPCSCYGICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d920) #define SPCSCCBICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d924) #define SPCSCCRICLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d928) -#define SPCSC_IMAX(x) (((x) & 0x7ff) << 16) /* s11 */ -#define SPCSC_IMIN(x) (((x) & 0x7ff) << 0) /* s11 */ +#define SPCSC_IMAX_MASK REG_GENMASK(26, 16) +#define SPCSC_IMAX(x) REG_FIELD_PREP(SPCSC_IMAX_MASK, (x) & 0x7ff) /* s11 */ +#define SPCSC_IMIN_MASK REG_GENMASK(10, 0) +#define SPCSC_IMIN(x) REG_FIELD_PREP(SPCSC_IMIN_MASK, (x) & 0x7ff) /* s11 */ #define SPCSCYGOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d92c) #define SPCSCCBOCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d930) #define SPCSCCROCLAMP(plane_id) _MMIO_CHV_SPCSC(plane_id, 0x6d934) -#define SPCSC_OMAX(x) ((x) << 16) /* u10 */ -#define SPCSC_OMIN(x) ((x) << 0) /* u10 */ +#define SPCSC_OMAX_MASK REG_GENMASK(25, 16) +#define SPCSC_OMAX(x) REG_FIELD_PREP(SPCSC_OMAX_MASK, (x)) /* u10 */ +#define SPCSC_OMIN_MASK REG_GENMASK(9, 0) +#define SPCSC_OMIN(x) REG_FIELD_PREP(SPCSC_OMIN_MASK, (x)) /* u10 */ /* Skylake plane registers */ -- cgit From 5de6a3de999d0cfeea94f1d3932b78892f3d69e8 Mon Sep 17 00:00:00 2001 From: Madhumitha Tolakanahalli Pradeep Date: Thu, 16 Dec 2021 19:41:41 -0800 Subject: drm/i915/dmc: Eliminate remnant GEN references Replace GEN with DISPLAY_VER, in line with the naming convention followed in the i915 driver code. 
Signed-off-by: Madhumitha Tolakanahalli Pradeep Reviewed-by: Caz Yokoyama Signed-off-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20211217034141.198033-1-madhumitha.tolakanahalli.pradeep@intel.com --- drivers/gpu/drm/i915/display/intel_dmc.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index a69b28d65a9b..7616a3906b9e 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -43,9 +43,9 @@ __stringify(major) "_" \ __stringify(minor) ".bin" -#define GEN12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE +#define DISPLAY_VER13_DMC_MAX_FW_SIZE 0x20000 -#define GEN13_DMC_MAX_FW_SIZE 0x20000 +#define DISPLAY_VER12_DMC_MAX_FW_SIZE ICL_DMC_MAX_FW_SIZE #define ADLP_DMC_PATH DMC_PATH(adlp, 2, 14) #define ADLP_DMC_VERSION_REQUIRED DMC_VERSION(2, 14) @@ -684,23 +684,23 @@ void intel_dmc_ucode_init(struct drm_i915_private *dev_priv) if (IS_ALDERLAKE_P(dev_priv)) { dmc->fw_path = ADLP_DMC_PATH; dmc->required_version = ADLP_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN13_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER13_DMC_MAX_FW_SIZE; } else if (IS_ALDERLAKE_S(dev_priv)) { dmc->fw_path = ADLS_DMC_PATH; dmc->required_version = ADLS_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (IS_DG1(dev_priv)) { dmc->fw_path = DG1_DMC_PATH; dmc->required_version = DG1_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (IS_ROCKETLAKE(dev_priv)) { dmc->fw_path = RKL_DMC_PATH; dmc->required_version = RKL_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (DISPLAY_VER(dev_priv) >= 12) { dmc->fw_path = TGL_DMC_PATH; dmc->required_version = TGL_DMC_VERSION_REQUIRED; - dmc->max_fw_size = GEN12_DMC_MAX_FW_SIZE; + dmc->max_fw_size = DISPLAY_VER12_DMC_MAX_FW_SIZE; } else if (DISPLAY_VER(dev_priv) == 11) { dmc->fw_path = ICL_DMC_PATH; dmc->required_version = ICL_DMC_VERSION_REQUIRED; -- cgit From 5298d4bfe80f6ae6ae2777bcd1357b0022d98573 Mon Sep 17 00:00:00 2001 From: Christoph Hellwig Date: Tue, 18 Jan 2022 07:56:14 +0100 Subject: unicode: clean up the Kconfig symbol confusion Turn the CONFIG_UNICODE symbol into a tristate that generates some always built in code and remove the confusing CONFIG_UNICODE_UTF8_DATA symbol. Note that a lot of the IS_ENABLED() checks could be turned from cpp statements into normal ifs, but this change is intended to be fairly mechanic, so that should be cleaned up later. 
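The reason the conversion has to touch every user: once the symbol is tristate, building the tables as a module defines CONFIG_UNICODE_MODULE rather than CONFIG_UNICODE in the preprocessor, so a plain #ifdef would silently compile the casefolding paths out in the =m case, while IS_ENABLED() covers both. A minimal illustration (this is the standard <linux/kconfig.h> behaviour, shown here only to motivate the mechanical change):

#include <linux/kconfig.h>

/*
 * CONFIG_UNICODE=y : CONFIG_UNICODE defined        -> both checks below are true
 * CONFIG_UNICODE=m : CONFIG_UNICODE_MODULE defined -> only IS_ENABLED() is true
 * CONFIG_UNICODE=n : neither defined               -> both checks are false
 */
#ifdef CONFIG_UNICODE
	/* built-in only: misses the new =m case */
#endif

#if IS_ENABLED(CONFIG_UNICODE)
	/* built-in or module: the form this patch converts everything to */
#endif
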
Fixes: 2b3d04787012 ("unicode: Add utf8-data module") Reported-by: Linus Torvalds Reviewed-by: Eric Biggers Signed-off-by: Christoph Hellwig Signed-off-by: Gabriel Krisman Bertazi --- fs/Makefile | 2 +- fs/ext4/ext4.h | 14 +++++++------- fs/ext4/hash.c | 2 +- fs/ext4/namei.c | 12 ++++++------ fs/ext4/super.c | 10 +++++----- fs/ext4/sysfs.c | 8 ++++---- fs/f2fs/dir.c | 10 +++++----- fs/f2fs/f2fs.h | 2 +- fs/f2fs/hash.c | 2 +- fs/f2fs/namei.c | 4 ++-- fs/f2fs/recovery.c | 4 ++-- fs/f2fs/super.c | 10 +++++----- fs/f2fs/sysfs.c | 10 +++++----- fs/libfs.c | 10 +++++----- fs/unicode/Kconfig | 18 +++++------------- fs/unicode/Makefile | 6 ++++-- include/linux/fs.h | 2 +- 17 files changed, 60 insertions(+), 66 deletions(-) diff --git a/fs/Makefile b/fs/Makefile index 84c5e4cdfee5..c71ee0127866 100644 --- a/fs/Makefile +++ b/fs/Makefile @@ -94,7 +94,7 @@ obj-$(CONFIG_EXPORTFS) += exportfs/ obj-$(CONFIG_NFSD) += nfsd/ obj-$(CONFIG_LOCKD) += lockd/ obj-$(CONFIG_NLS) += nls/ -obj-$(CONFIG_UNICODE) += unicode/ +obj-y += unicode/ obj-$(CONFIG_SYSV_FS) += sysv/ obj-$(CONFIG_SMBFS_COMMON) += smbfs_common/ obj-$(CONFIG_CIFS) += cifs/ diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h index 71a3cdceaa03..242e74cfb060 100644 --- a/fs/ext4/ext4.h +++ b/fs/ext4/ext4.h @@ -2485,7 +2485,7 @@ struct ext4_filename { #ifdef CONFIG_FS_ENCRYPTION struct fscrypt_str crypto_buf; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) struct fscrypt_str cf_name; #endif }; @@ -2721,7 +2721,7 @@ extern unsigned ext4_free_clusters_after_init(struct super_block *sb, struct ext4_group_desc *gdp); ext4_fsblk_t ext4_inode_to_goal_block(struct inode *); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) extern int ext4_fname_setup_ci_filename(struct inode *dir, const struct qstr *iname, struct ext4_filename *fname); @@ -2754,7 +2754,7 @@ static inline int ext4_fname_setup_filename(struct inode *dir, ext4_fname_from_fscrypt_name(fname, &name); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) err = ext4_fname_setup_ci_filename(dir, iname, fname); #endif return err; @@ -2773,7 +2773,7 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir, ext4_fname_from_fscrypt_name(fname, &name); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) err = ext4_fname_setup_ci_filename(dir, &dentry->d_name, fname); #endif return err; @@ -2790,7 +2790,7 @@ static inline void ext4_fname_free_filename(struct ext4_filename *fname) fname->usr_fname = NULL; fname->disk_name.name = NULL; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) kfree(fname->cf_name.name); fname->cf_name.name = NULL; #endif @@ -2806,7 +2806,7 @@ static inline int ext4_fname_setup_filename(struct inode *dir, fname->disk_name.name = (unsigned char *) iname->name; fname->disk_name.len = iname->len; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) err = ext4_fname_setup_ci_filename(dir, iname, fname); #endif @@ -2822,7 +2822,7 @@ static inline int ext4_fname_prepare_lookup(struct inode *dir, static inline void ext4_fname_free_filename(struct ext4_filename *fname) { -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) kfree(fname->cf_name.name); fname->cf_name.name = NULL; #endif diff --git a/fs/ext4/hash.c b/fs/ext4/hash.c index f34f4176c1e7..147b5241dd94 100644 --- a/fs/ext4/hash.c +++ b/fs/ext4/hash.c @@ -290,7 +290,7 @@ static int __ext4fs_dirhash(const struct inode *dir, const char *name, int len, int ext4fs_dirhash(const struct inode *dir, const char *name, int len, struct dx_hash_info *hinfo) { -#ifdef CONFIG_UNICODE +#if 
IS_ENABLED(CONFIG_UNICODE) const struct unicode_map *um = dir->i_sb->s_encoding; int r, dlen; unsigned char *buff; diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 52c9bd154122..269d2d051ede 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -1317,7 +1317,7 @@ static void dx_insert_block(struct dx_frame *frame, u32 hash, ext4_lblk_t block) dx_set_count(entries, count + 1); } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* * Test whether a case-insensitive directory entry matches the filename * being searched for. If quick is set, assume the name being looked up @@ -1428,7 +1428,7 @@ static bool ext4_match(struct inode *parent, f.crypto_buf = fname->crypto_buf; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (parent->i_sb->s_encoding && IS_CASEFOLDED(parent) && (!IS_ENCRYPTED(parent) || fscrypt_has_encryption_key(parent))) { if (fname->cf_name.name) { @@ -1800,7 +1800,7 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, unsi } } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (!inode && IS_CASEFOLDED(dir)) { /* Eventually we want to call d_add_ci(dentry, NULL) * for negative dentries in the encoding case as @@ -2308,7 +2308,7 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry, if (fscrypt_is_nokey_name(dentry)) return -ENOKEY; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (sb_has_strict_encoding(sb) && IS_CASEFOLDED(dir) && sb->s_encoding && utf8_validate(sb->s_encoding, &dentry->d_name)) return -EINVAL; @@ -3126,7 +3126,7 @@ static int ext4_rmdir(struct inode *dir, struct dentry *dentry) ext4_fc_track_unlink(handle, dentry); retval = ext4_mark_inode_dirty(handle, dir); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. Eventually we'll want avoid * invalidating the dentries here, alongside with returning the @@ -3231,7 +3231,7 @@ static int ext4_unlink(struct inode *dir, struct dentry *dentry) retval = __ext4_unlink(handle, dir, &dentry->d_name, d_inode(dentry)); if (!retval) ext4_fc_track_unlink(handle, dentry); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. 
Eventually we'll want avoid * invalidating the dentries here, alongside with returning the diff --git a/fs/ext4/super.c b/fs/ext4/super.c index db9fe4843529..52be1ca38eef 100644 --- a/fs/ext4/super.c +++ b/fs/ext4/super.c @@ -1302,7 +1302,7 @@ static void ext4_put_super(struct super_block *sb) kfree(sbi->s_blockgroup_lock); fs_put_dax(sbi->s_daxdev); fscrypt_free_dummy_policy(&sbi->s_dummy_enc_policy); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); #endif kfree(sbi); @@ -1962,7 +1962,7 @@ static const struct mount_opts { {Opt_err, 0, 0} }; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) static const struct ext4_sb_encodings { __u16 magic; char *name; @@ -3609,7 +3609,7 @@ int ext4_feature_set_ok(struct super_block *sb, int readonly) return 0; } -#ifndef CONFIG_UNICODE +#if !IS_ENABLED(CONFIG_UNICODE) if (ext4_has_feature_casefold(sb)) { ext4_msg(sb, KERN_ERR, "Filesystem with casefold feature cannot be " @@ -4613,7 +4613,7 @@ static int __ext4_fill_super(struct fs_context *fc, struct super_block *sb) if (err < 0) goto failed_mount; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (ext4_has_feature_casefold(sb) && !sb->s_encoding) { const struct ext4_sb_encodings *encoding_info; struct unicode_map *encoding; @@ -5517,7 +5517,7 @@ failed_mount: if (sbi->s_chksum_driver) crypto_free_shash(sbi->s_chksum_driver); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); #endif diff --git a/fs/ext4/sysfs.c b/fs/ext4/sysfs.c index f61e65ae27d8..d233c24ea342 100644 --- a/fs/ext4/sysfs.c +++ b/fs/ext4/sysfs.c @@ -309,7 +309,7 @@ EXT4_ATTR_FEATURE(meta_bg_resize); EXT4_ATTR_FEATURE(encryption); EXT4_ATTR_FEATURE(test_dummy_encryption_v2); #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) EXT4_ATTR_FEATURE(casefold); #endif #ifdef CONFIG_FS_VERITY @@ -317,7 +317,7 @@ EXT4_ATTR_FEATURE(verity); #endif EXT4_ATTR_FEATURE(metadata_csum_seed); EXT4_ATTR_FEATURE(fast_commit); -#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) +#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) EXT4_ATTR_FEATURE(encrypted_casefold); #endif @@ -329,7 +329,7 @@ static struct attribute *ext4_feat_attrs[] = { ATTR_LIST(encryption), ATTR_LIST(test_dummy_encryption_v2), #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) ATTR_LIST(casefold), #endif #ifdef CONFIG_FS_VERITY @@ -337,7 +337,7 @@ static struct attribute *ext4_feat_attrs[] = { #endif ATTR_LIST(metadata_csum_seed), ATTR_LIST(fast_commit), -#if defined(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) +#if IS_ENABLED(CONFIG_UNICODE) && defined(CONFIG_FS_ENCRYPTION) ATTR_LIST(encrypted_casefold), #endif NULL, diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c index 1820e9c106f7..166f08623362 100644 --- a/fs/f2fs/dir.c +++ b/fs/f2fs/dir.c @@ -16,7 +16,7 @@ #include "xattr.h" #include -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) extern struct kmem_cache *f2fs_cf_name_slab; #endif @@ -79,7 +79,7 @@ unsigned char f2fs_get_de_type(struct f2fs_dir_entry *de) int f2fs_init_casefolded_name(const struct inode *dir, struct f2fs_filename *fname) { -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) struct super_block *sb = dir->i_sb; if (IS_CASEFOLDED(dir)) { @@ -174,7 +174,7 @@ void f2fs_free_filename(struct f2fs_filename *fname) kfree(fname->crypto_buf.name); fname->crypto_buf.name = NULL; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (fname->cf_name.name) { kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name); fname->cf_name.name = 
NULL; @@ -208,7 +208,7 @@ static struct f2fs_dir_entry *find_in_block(struct inode *dir, return f2fs_find_target_dentry(&d, fname, max_slots); } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* * Test whether a case-insensitive directory entry matches the filename * being searched for. @@ -266,7 +266,7 @@ static inline int f2fs_match_name(const struct inode *dir, { struct fscrypt_name f; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (fname->cf_name.name) { struct qstr cf = FSTR_TO_QSTR(&fname->cf_name); diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h index d0d603187171..4da88928ffb5 100644 --- a/fs/f2fs/f2fs.h +++ b/fs/f2fs/f2fs.h @@ -487,7 +487,7 @@ struct f2fs_filename { */ struct fscrypt_str crypto_buf; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* * For casefolded directories: the casefolded name, but it's left NULL * if the original name is not valid Unicode, if the directory is both diff --git a/fs/f2fs/hash.c b/fs/f2fs/hash.c index e3beac546c63..3cb1e7a24740 100644 --- a/fs/f2fs/hash.c +++ b/fs/f2fs/hash.c @@ -105,7 +105,7 @@ void f2fs_hash_filename(const struct inode *dir, struct f2fs_filename *fname) return; } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (IS_CASEFOLDED(dir)) { /* * If the casefolded name is provided, hash it instead of the diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c index a728a0af9ce0..5f213f05556d 100644 --- a/fs/f2fs/namei.c +++ b/fs/f2fs/namei.c @@ -561,7 +561,7 @@ static struct dentry *f2fs_lookup(struct inode *dir, struct dentry *dentry, goto out_iput; } out_splice: -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (!inode && IS_CASEFOLDED(dir)) { /* Eventually we want to call d_add_ci(dentry, NULL) * for negative dentries in the encoding case as @@ -622,7 +622,7 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry) goto fail; } f2fs_delete_entry(de, page, dir, inode); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* VFS negative dentries are incompatible with Encoding and * Case-insensitiveness. Eventually we'll want avoid * invalidating the dentries here, alongside with returning the diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c index d1664a0567ef..2fbbc820c00a 100644 --- a/fs/f2fs/recovery.c +++ b/fs/f2fs/recovery.c @@ -46,7 +46,7 @@ static struct kmem_cache *fsync_entry_slab; -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) extern struct kmem_cache *f2fs_cf_name_slab; #endif @@ -149,7 +149,7 @@ static int init_recovered_filename(const struct inode *dir, if (err) return err; f2fs_hash_filename(dir, fname); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* Case-sensitive match is fine for recovery */ kmem_cache_free(f2fs_cf_name_slab, fname->cf_name.name); fname->cf_name.name = NULL; diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c index 15f12ece0ac6..b870c6459fa1 100644 --- a/fs/f2fs/super.c +++ b/fs/f2fs/super.c @@ -256,7 +256,7 @@ void f2fs_printk(struct f2fs_sb_info *sbi, const char *fmt, ...) 
va_end(args); } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) static const struct f2fs_sb_encodings { __u16 magic; char *name; @@ -1218,7 +1218,7 @@ default_check: return -EINVAL; } #endif -#ifndef CONFIG_UNICODE +#if !IS_ENABLED(CONFIG_UNICODE) if (f2fs_sb_has_casefold(sbi)) { f2fs_err(sbi, "Filesystem with casefold feature cannot be mounted without CONFIG_UNICODE"); @@ -1578,7 +1578,7 @@ static void f2fs_put_super(struct super_block *sb) f2fs_destroy_iostat(sbi); for (i = 0; i < NR_PAGE_TYPE; i++) kvfree(sbi->write_io[i]); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); #endif kfree(sbi); @@ -3861,7 +3861,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi) static int f2fs_setup_casefold(struct f2fs_sb_info *sbi) { -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (f2fs_sb_has_casefold(sbi) && !sbi->sb->s_encoding) { const struct f2fs_sb_encodings *encoding_info; struct unicode_map *encoding; @@ -4412,7 +4412,7 @@ free_bio_info: for (i = 0; i < NR_PAGE_TYPE; i++) kvfree(sbi->write_io[i]); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) utf8_unload(sb->s_encoding); sb->s_encoding = NULL; #endif diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c index 8408f77764e8..fa3d9cb2d69b 100644 --- a/fs/f2fs/sysfs.c +++ b/fs/f2fs/sysfs.c @@ -192,7 +192,7 @@ static ssize_t unusable_show(struct f2fs_attr *a, static ssize_t encoding_show(struct f2fs_attr *a, struct f2fs_sb_info *sbi, char *buf) { -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) struct super_block *sb = sbi->sb; if (f2fs_sb_has_casefold(sbi)) @@ -756,7 +756,7 @@ F2FS_GENERAL_RO_ATTR(avg_vblocks); #ifdef CONFIG_FS_ENCRYPTION F2FS_FEATURE_RO_ATTR(encryption); F2FS_FEATURE_RO_ATTR(test_dummy_encryption_v2); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) F2FS_FEATURE_RO_ATTR(encrypted_casefold); #endif #endif /* CONFIG_FS_ENCRYPTION */ @@ -775,7 +775,7 @@ F2FS_FEATURE_RO_ATTR(lost_found); F2FS_FEATURE_RO_ATTR(verity); #endif F2FS_FEATURE_RO_ATTR(sb_checksum); -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) F2FS_FEATURE_RO_ATTR(casefold); #endif F2FS_FEATURE_RO_ATTR(readonly); @@ -886,7 +886,7 @@ static struct attribute *f2fs_feat_attrs[] = { #ifdef CONFIG_FS_ENCRYPTION ATTR_LIST(encryption), ATTR_LIST(test_dummy_encryption_v2), -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) ATTR_LIST(encrypted_casefold), #endif #endif /* CONFIG_FS_ENCRYPTION */ @@ -905,7 +905,7 @@ static struct attribute *f2fs_feat_attrs[] = { ATTR_LIST(verity), #endif ATTR_LIST(sb_checksum), -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) ATTR_LIST(casefold), #endif ATTR_LIST(readonly), diff --git a/fs/libfs.c b/fs/libfs.c index ba7438ab9371..974125270a42 100644 --- a/fs/libfs.c +++ b/fs/libfs.c @@ -1379,7 +1379,7 @@ bool is_empty_dir_inode(struct inode *inode) (inode->i_op == &empty_dir_inode_operations); } -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) /* * Determine if the name of a dentry should be casefolded. 
* @@ -1473,7 +1473,7 @@ static const struct dentry_operations generic_encrypted_dentry_ops = { }; #endif -#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE) +#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE) static const struct dentry_operations generic_encrypted_ci_dentry_ops = { .d_hash = generic_ci_d_hash, .d_compare = generic_ci_d_compare, @@ -1508,10 +1508,10 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry) #ifdef CONFIG_FS_ENCRYPTION bool needs_encrypt_ops = dentry->d_flags & DCACHE_NOKEY_NAME; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) bool needs_ci_ops = dentry->d_sb->s_encoding; #endif -#if defined(CONFIG_FS_ENCRYPTION) && defined(CONFIG_UNICODE) +#if defined(CONFIG_FS_ENCRYPTION) && IS_ENABLED(CONFIG_UNICODE) if (needs_encrypt_ops && needs_ci_ops) { d_set_d_op(dentry, &generic_encrypted_ci_dentry_ops); return; @@ -1523,7 +1523,7 @@ void generic_set_encrypted_ci_d_ops(struct dentry *dentry) return; } #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) if (needs_ci_ops) { d_set_d_op(dentry, &generic_ci_dentry_ops); return; diff --git a/fs/unicode/Kconfig b/fs/unicode/Kconfig index 610d7bc05d6e..da786a687fdc 100644 --- a/fs/unicode/Kconfig +++ b/fs/unicode/Kconfig @@ -3,21 +3,13 @@ # UTF-8 normalization # config UNICODE - bool "UTF-8 normalization and casefolding support" + tristate "UTF-8 normalization and casefolding support" help Say Y here to enable UTF-8 NFD normalization and NFD+CF casefolding - support. - -config UNICODE_UTF8_DATA - tristate "UTF-8 normalization and casefolding tables" - depends on UNICODE - default UNICODE - help - This contains a large table of case foldings, which can be loaded as - a separate module if you say M here. To be on the safe side stick - to the default of Y. Saying N here makes no sense, if you do not want - utf8 casefolding support, disable CONFIG_UNICODE instead. + support. If you say M here the large table of case foldings will + be a separate loadable module that gets requested only when a file + system actually use it. config UNICODE_NORMALIZATION_SELFTEST tristate "Test UTF-8 normalization support" - depends on UNICODE_UTF8_DATA + depends on UNICODE diff --git a/fs/unicode/Makefile b/fs/unicode/Makefile index 2f9d9188852b..0cc87423de82 100644 --- a/fs/unicode/Makefile +++ b/fs/unicode/Makefile @@ -1,8 +1,10 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_UNICODE) += unicode.o +ifneq ($(CONFIG_UNICODE),) +obj-y += unicode.o +endif +obj-$(CONFIG_UNICODE) += utf8data.o obj-$(CONFIG_UNICODE_NORMALIZATION_SELFTEST) += utf8-selftest.o -obj-$(CONFIG_UNICODE_UTF8_DATA) += utf8data.o unicode-y := utf8-norm.o utf8-core.o diff --git a/include/linux/fs.h b/include/linux/fs.h index c8510da6cc6d..fdac22d16c2b 100644 --- a/include/linux/fs.h +++ b/include/linux/fs.h @@ -1490,7 +1490,7 @@ struct super_block { #ifdef CONFIG_FS_VERITY const struct fsverity_operations *s_vop; #endif -#ifdef CONFIG_UNICODE +#if IS_ENABLED(CONFIG_UNICODE) struct unicode_map *s_encoding; __u16 s_encoding_flags; #endif -- cgit From 198bca93403d04f43c07c5c87c7b75a54f4bcb54 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Jan 2022 13:33:46 +0200 Subject: drm/i915: split out i915_reg_read_ioctl() to i915_ioctl.[ch] Add new files i915_ioctl.[ch] to hold small ioctls that are out of place everywhere else, and not big enough to warrant a file of their own. For starters, it's just for i915_reg_read_ioctl() that's a bit high level for a low level implementation that intel_uncore.[ch] is. 
Suggested-by: Tvrtko Ursulin Signed-off-by: Jani Nikula Reviewed-by: Tvrtko Ursulin Link: https://patchwork.freedesktop.org/patch/msgid/20220120113346.3214745-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/Makefile | 3 +- drivers/gpu/drm/i915/i915_driver.c | 1 + drivers/gpu/drm/i915/i915_drv.h | 3 -- drivers/gpu/drm/i915/i915_ioctl.c | 94 +++++++++++++++++++++++++++++++++++++ drivers/gpu/drm/i915/i915_ioctl.h | 14 ++++++ drivers/gpu/drm/i915/intel_uncore.c | 70 --------------------------- 6 files changed, 111 insertions(+), 74 deletions(-) create mode 100644 drivers/gpu/drm/i915/i915_ioctl.c create mode 100644 drivers/gpu/drm/i915/i915_ioctl.h diff --git a/drivers/gpu/drm/i915/Makefile b/drivers/gpu/drm/i915/Makefile index 213c5f9fae32..0db42a60c89f 100644 --- a/drivers/gpu/drm/i915/Makefile +++ b/drivers/gpu/drm/i915/Makefile @@ -32,8 +32,9 @@ subdir-ccflags-y += -I$(srctree)/$(src) # core driver code i915-y += i915_driver.o \ i915_config.o \ - i915_irq.o \ i915_getparam.o \ + i915_ioctl.o \ + i915_irq.o \ i915_mitigations.o \ i915_module.o \ i915_params.o \ diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index 6a7aac069b18..9898002d8260 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -76,6 +76,7 @@ #include "i915_drv.h" #include "i915_getparam.h" #include "i915_ioc32.h" +#include "i915_ioctl.h" #include "i915_irq.h" #include "i915_memcpy.h" #include "i915_perf.h" diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 290dfd40c7b3..cffba01eed20 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1716,9 +1716,6 @@ mkwrite_device_info(struct drm_i915_private *dev_priv) return (struct intel_device_info *)INTEL_INFO(dev_priv); } -int i915_reg_read_ioctl(struct drm_device *dev, void *data, - struct drm_file *file); - static inline int intel_hws_csb_write_index(struct drm_i915_private *i915) { if (GRAPHICS_VER(i915) >= 11) diff --git a/drivers/gpu/drm/i915/i915_ioctl.c b/drivers/gpu/drm/i915/i915_ioctl.c new file mode 100644 index 000000000000..06a10ccea80b --- /dev/null +++ b/drivers/gpu/drm/i915/i915_ioctl.c @@ -0,0 +1,94 @@ +// SPDX-License-Identifier: MIT +/* + * Copyright © 2022 Intel Corporation + */ + +#include "gt/intel_engine_regs.h" + +#include "i915_drv.h" +#include "i915_gem.h" +#include "i915_ioctl.h" +#include "i915_reg.h" +#include "intel_runtime_pm.h" +#include "intel_uncore.h" + +/* + * This file is for small ioctl functions that are out of place everywhere else, + * and not big enough to warrant a file of their own. + * + * This is not the dumping ground for random ioctls. 
+ */ + +struct reg_whitelist { + i915_reg_t offset_ldw; + i915_reg_t offset_udw; + u8 min_graphics_ver; + u8 max_graphics_ver; + u8 size; +}; + +static const struct reg_whitelist reg_read_whitelist[] = { + { + .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), + .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), + .min_graphics_ver = 4, + .max_graphics_ver = 12, + .size = 8 + } +}; + +int i915_reg_read_ioctl(struct drm_device *dev, + void *data, struct drm_file *unused) +{ + struct drm_i915_private *i915 = to_i915(dev); + struct intel_uncore *uncore = &i915->uncore; + struct drm_i915_reg_read *reg = data; + struct reg_whitelist const *entry; + intel_wakeref_t wakeref; + unsigned int flags; + int remain; + int ret = 0; + + entry = reg_read_whitelist; + remain = ARRAY_SIZE(reg_read_whitelist); + while (remain) { + u32 entry_offset = i915_mmio_reg_offset(entry->offset_ldw); + + GEM_BUG_ON(!is_power_of_2(entry->size)); + GEM_BUG_ON(entry->size > 8); + GEM_BUG_ON(entry_offset & (entry->size - 1)); + + if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) && + entry_offset == (reg->offset & -entry->size)) + break; + entry++; + remain--; + } + + if (!remain) + return -EINVAL; + + flags = reg->offset & (entry->size - 1); + + with_intel_runtime_pm(&i915->runtime_pm, wakeref) { + if (entry->size == 8 && flags == I915_REG_READ_8B_WA) + reg->val = intel_uncore_read64_2x32(uncore, + entry->offset_ldw, + entry->offset_udw); + else if (entry->size == 8 && flags == 0) + reg->val = intel_uncore_read64(uncore, + entry->offset_ldw); + else if (entry->size == 4 && flags == 0) + reg->val = intel_uncore_read(uncore, entry->offset_ldw); + else if (entry->size == 2 && flags == 0) + reg->val = intel_uncore_read16(uncore, + entry->offset_ldw); + else if (entry->size == 1 && flags == 0) + reg->val = intel_uncore_read8(uncore, + entry->offset_ldw); + else + ret = -EINVAL; + } + + return ret; +} diff --git a/drivers/gpu/drm/i915/i915_ioctl.h b/drivers/gpu/drm/i915/i915_ioctl.h new file mode 100644 index 000000000000..f16ae87b8b8a --- /dev/null +++ b/drivers/gpu/drm/i915/i915_ioctl.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: MIT */ +/* + * Copyright © 2022 Intel Corporation + */ + +#ifndef __I915_IOCTL_H__ +#define __I915_IOCTL_H__ + +struct drm_device; +struct drm_file; + +int i915_reg_read_ioctl(struct drm_device *dev, void *data, struct drm_file *file); + +#endif /* __I915_IOCTL_H__ */ diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index fefaf63dfb88..703061e8be51 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -2265,76 +2265,6 @@ void intel_uncore_fini_mmio(struct intel_uncore *uncore) uncore_mmio_cleanup(uncore); } -static const struct reg_whitelist { - i915_reg_t offset_ldw; - i915_reg_t offset_udw; - u8 min_graphics_ver; - u8 max_graphics_ver; - u8 size; -} reg_read_whitelist[] = { { - .offset_ldw = RING_TIMESTAMP(RENDER_RING_BASE), - .offset_udw = RING_TIMESTAMP_UDW(RENDER_RING_BASE), - .min_graphics_ver = 4, - .max_graphics_ver = 12, - .size = 8 -} }; - -int i915_reg_read_ioctl(struct drm_device *dev, - void *data, struct drm_file *file) -{ - struct drm_i915_private *i915 = to_i915(dev); - struct intel_uncore *uncore = &i915->uncore; - struct drm_i915_reg_read *reg = data; - struct reg_whitelist const *entry; - intel_wakeref_t wakeref; - unsigned int flags; - int remain; - int ret = 0; - - entry = reg_read_whitelist; - remain = ARRAY_SIZE(reg_read_whitelist); - while (remain) { - u32 entry_offset = 
i915_mmio_reg_offset(entry->offset_ldw); - - GEM_BUG_ON(!is_power_of_2(entry->size)); - GEM_BUG_ON(entry->size > 8); - GEM_BUG_ON(entry_offset & (entry->size - 1)); - - if (IS_GRAPHICS_VER(i915, entry->min_graphics_ver, entry->max_graphics_ver) && - entry_offset == (reg->offset & -entry->size)) - break; - entry++; - remain--; - } - - if (!remain) - return -EINVAL; - - flags = reg->offset & (entry->size - 1); - - with_intel_runtime_pm(&i915->runtime_pm, wakeref) { - if (entry->size == 8 && flags == I915_REG_READ_8B_WA) - reg->val = intel_uncore_read64_2x32(uncore, - entry->offset_ldw, - entry->offset_udw); - else if (entry->size == 8 && flags == 0) - reg->val = intel_uncore_read64(uncore, - entry->offset_ldw); - else if (entry->size == 4 && flags == 0) - reg->val = intel_uncore_read(uncore, entry->offset_ldw); - else if (entry->size == 2 && flags == 0) - reg->val = intel_uncore_read16(uncore, - entry->offset_ldw); - else if (entry->size == 1 && flags == 0) - reg->val = intel_uncore_read8(uncore, - entry->offset_ldw); - else - ret = -EINVAL; - } - - return ret; -} - /** * __intel_wait_for_register_fw - wait until register matches expected state * @uncore: the struct intel_uncore -- cgit From 26950f2968e873301c8c536ba0615ba04c17a0de Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 12 Jan 2022 13:03:13 +0200 Subject: drm/i915/mst: fix intel_dp_mst_hpd_irq() indentation Remove extra indentation. Signed-off-by: Jani Nikula Reviewed-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20220112110319.1172110-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 942a755a0c48..e789ecbc69f3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3618,12 +3618,12 @@ update_status: static void intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) { - drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled); + drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled); - if (esi[1] & DP_CP_IRQ) { - intel_hdcp_handle_cp_irq(intel_dp->attached_connector); - *handled = true; - } + if (esi[1] & DP_CP_IRQ) { + intel_hdcp_handle_cp_irq(intel_dp->attached_connector); + *handled = true; + } } /** -- cgit From 603801d0f2f418941d2524ffc43fa6d8c95873b3 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 12 Jan 2022 13:03:14 +0200 Subject: drm/i915/mst: abstract intel_dp_ack_sink_irq_esi() Smaller functions make the thing easier to read. Debug log failures to ack. Note: Looks like we have the retry loop simply because of hysterical raisins, dating back to the original DP MST enabling. Keep it, though I have no idea why we have it. 
Signed-off-by: Jani Nikula Reviewed-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20220112110319.1172110-2-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index e789ecbc69f3..a301220ce2ad 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -2821,6 +2821,19 @@ intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) DP_DPRX_ESI_LEN; } +static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4]) +{ + int retry; + + for (retry = 0; retry < 3; retry++) { + if (drm_dp_dpcd_write(&intel_dp->aux, DP_SINK_COUNT_ESI + 1, + &esi[1], 3) == 3) + return true; + } + + return false; +} + bool intel_dp_needs_vsc_sdp(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) @@ -3661,7 +3674,6 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) */ u8 esi[DP_DPRX_ESI_LEN+2] = {}; bool handled; - int retry; if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { drm_dbg_kms(&i915->drm, @@ -3686,15 +3698,8 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) if (!handled) break; - for (retry = 0; retry < 3; retry++) { - int wret; - - wret = drm_dp_dpcd_write(&intel_dp->aux, - DP_SINK_COUNT_ESI+1, - &esi[1], 3); - if (wret == 3) - break; - } + if (!intel_dp_ack_sink_irq_esi(intel_dp, esi)) + drm_dbg_kms(&i915->drm, "Failed to ack ESI\n"); } return link_ok; -- cgit From 34ed3e83475eab0c8fe6bbb126165a3ff2f2ff90 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 12 Jan 2022 13:03:15 +0200 Subject: drm/i915/mst: debug log 4 bytes of ESI right after reading For whatever reason, the ESI link service irq vector was missing from the debug output. Add the missing byte, clean up the debug message, and do the logging right after reading the data. Signed-off-by: Jani Nikula Reviewed-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20220112110319.1172110-3-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index a301220ce2ad..6de39056e2f8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3683,6 +3683,8 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) break; } + drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi); + /* check link status - esi[10] = 0x200c */ if (intel_dp->active_mst_links > 0 && link_ok && !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { @@ -3691,8 +3693,6 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) link_ok = false; } - drm_dbg_kms(&i915->drm, "got esi %3ph\n", esi); - intel_dp_mst_hpd_irq(intel_dp, esi, &handled); if (!handled) -- cgit From 1358139bdefdb07bb402efb3164c1c51db99e8a5 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 12 Jan 2022 13:03:16 +0200 Subject: drm/i915/mst: abstract handling of link status in DP MST We'll want to expand on this, so abstract it to a separate function first. Improve debug logging while at it. 
Signed-off-by: Jani Nikula Reviewed-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20220112110319.1172110-4-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 23 ++++++++++++++++++----- 1 file changed, 18 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 6de39056e2f8..5a7976768b06 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3639,6 +3639,21 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) } } +static bool intel_dp_mst_link_status(struct intel_dp *intel_dp, u8 *esi) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + + if (!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { + drm_dbg_kms(&i915->drm, + "[ENCODER:%d:%s] channel EQ not ok, retraining\n", + encoder->base.base.id, encoder->base.name); + return false; + } + + return true; +} + /** * intel_dp_check_mst_status - service any pending MST interrupts, check link status * @intel_dp: Intel DP struct @@ -3686,11 +3701,9 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi); /* check link status - esi[10] = 0x200c */ - if (intel_dp->active_mst_links > 0 && link_ok && - !drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { - drm_dbg_kms(&i915->drm, - "channel EQ not ok, retraining\n"); - link_ok = false; + if (intel_dp->active_mst_links > 0 && link_ok) { + if (!intel_dp_mst_link_status(intel_dp, esi)) + link_ok = false; } intel_dp_mst_hpd_irq(intel_dp, esi, &handled); -- cgit From 1d50942dc9304db488d1b3978274b851e890a33b Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 12 Jan 2022 13:03:17 +0200 Subject: drm/i915/mst: read link status only when requested by sink in ESI The link service irq vector in DPCD 0x2005 contains the link status changed bit to indicate the status should be checked. Only read and check the link status when requested by the sink. This also reduces the confusion around the buffer size for the combined ESI and link status. Alas, we still need to take into account that all link status helpers expect a buffer of DP_LINK_STATUS_SIZE (6) while the link status in ESI only has 4 bytes. 
Signed-off-by: Jani Nikula Reviewed-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20220112110319.1172110-5-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 41 +++++++++++++++------------------ 1 file changed, 18 insertions(+), 23 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 5a7976768b06..d8a0ba3a14b7 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -73,8 +73,6 @@ #include "intel_vdsc.h" #include "intel_vrr.h" -#define DP_DPRX_ESI_LEN 14 - /* DP DSC throughput values used for slice count calculations KPixels/s */ #define DP_DSC_PEAK_PIXEL_RATE 2720000 #define DP_DSC_MAX_ENC_THROUGHPUT_0 340000 @@ -2814,11 +2812,9 @@ intel_dp_configure_mst(struct intel_dp *intel_dp) } static bool -intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *sink_irq_vector) +intel_dp_get_sink_irq_esi(struct intel_dp *intel_dp, u8 *esi) { - return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, - sink_irq_vector, DP_DPRX_ESI_LEN) == - DP_DPRX_ESI_LEN; + return drm_dp_dpcd_read(&intel_dp->aux, DP_SINK_COUNT_ESI, esi, 4) == 4; } static bool intel_dp_ack_sink_irq_esi(struct intel_dp *intel_dp, u8 esi[4]) @@ -3639,12 +3635,22 @@ intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) } } -static bool intel_dp_mst_link_status(struct intel_dp *intel_dp, u8 *esi) +static bool intel_dp_mst_link_status(struct intel_dp *intel_dp) { struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; struct drm_i915_private *i915 = to_i915(encoder->base.dev); + u8 link_status[DP_LINK_STATUS_SIZE] = {}; + const size_t esi_link_status_size = DP_LINK_STATUS_SIZE - 2; + + if (drm_dp_dpcd_read(&intel_dp->aux, DP_LANE0_1_STATUS_ESI, link_status, + esi_link_status_size) != esi_link_status_size) { + drm_err(&i915->drm, + "[ENCODER:%d:%s] Failed to read link status\n", + encoder->base.base.id, encoder->base.name); + return false; + } - if (!drm_dp_channel_eq_ok(&esi[10], intel_dp->lane_count)) { + if (!drm_dp_channel_eq_ok(link_status, intel_dp->lane_count)) { drm_dbg_kms(&i915->drm, "[ENCODER:%d:%s] channel EQ not ok, retraining\n", encoder->base.base.id, encoder->base.name); @@ -3676,18 +3682,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) drm_WARN_ON_ONCE(&i915->drm, intel_dp->active_mst_links < 0); for (;;) { - /* - * The +2 is because DP_DPRX_ESI_LEN is 14, but we then - * pass in "esi+10" to drm_dp_channel_eq_ok(), which - * takes a 6-byte array. So we actually need 16 bytes - * here. - * - * Somebody who knows what the limits actually are - * should check this, but for now this is at least - * harmless and avoids a valid compiler warning about - * using more of the array than we have allocated. 
- */ - u8 esi[DP_DPRX_ESI_LEN+2] = {}; + u8 esi[4] = {}; bool handled; if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { @@ -3700,9 +3695,9 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) drm_dbg_kms(&i915->drm, "DPRX ESI: %4ph\n", esi); - /* check link status - esi[10] = 0x200c */ - if (intel_dp->active_mst_links > 0 && link_ok) { - if (!intel_dp_mst_link_status(intel_dp, esi)) + if (intel_dp->active_mst_links > 0 && link_ok && + esi[3] & LINK_STATUS_CHANGED) { + if (!intel_dp_mst_link_status(intel_dp)) link_ok = false; } -- cgit From b4a1c675d256bfa1d399490847d086b8b463b5d4 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 12 Jan 2022 13:03:18 +0200 Subject: drm/i915/mst: ack sink irq ESI for link status changes Only specific event status indicators caused the link status to be acked. Be sure to ack the link status change event. Arguably we should track which bits to actually clear in ESI instead of the wholesale approach. Signed-off-by: Jani Nikula Reviewed-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20220112110319.1172110-6-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index d8a0ba3a14b7..95e9f7220ab8 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3699,6 +3699,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) esi[3] & LINK_STATUS_CHANGED) { if (!intel_dp_mst_link_status(intel_dp)) link_ok = false; + handled = true; } intel_dp_mst_hpd_irq(intel_dp, esi, &handled); -- cgit From 784a2ec00904999fccfca12eaf7c63ac3fde5f48 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Thu, 20 Jan 2022 13:01:02 +0200 Subject: drm/i915/mst: only ack the ESI we actually handled Seems odd that we clear all event status indicators if we've only handled some. Only clear the ones we've handled. 
v2: ack DOWN_REP and UP_REQ only if they were set in esi (Ville) Signed-off-by: Jani Nikula Reviewed-by: Uma Shankar Link: https://patchwork.freedesktop.org/patch/msgid/20220120110102.3116218-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 95e9f7220ab8..f4feeaf5ce4a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -3625,13 +3625,17 @@ update_status: } static void -intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, bool *handled) +intel_dp_mst_hpd_irq(struct intel_dp *intel_dp, u8 *esi, u8 *ack) { - drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, handled); + bool handled = false; + + drm_dp_mst_hpd_irq(&intel_dp->mst_mgr, esi, &handled); + if (handled) + ack[1] |= esi[1] & (DP_DOWN_REP_MSG_RDY | DP_UP_REQ_MSG_RDY); if (esi[1] & DP_CP_IRQ) { intel_hdcp_handle_cp_irq(intel_dp->attached_connector); - *handled = true; + ack[1] |= DP_CP_IRQ; } } @@ -3683,7 +3687,7 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) for (;;) { u8 esi[4] = {}; - bool handled; + u8 ack[4] = {}; if (!intel_dp_get_sink_irq_esi(intel_dp, esi)) { drm_dbg_kms(&i915->drm, @@ -3699,15 +3703,15 @@ intel_dp_check_mst_status(struct intel_dp *intel_dp) esi[3] & LINK_STATUS_CHANGED) { if (!intel_dp_mst_link_status(intel_dp)) link_ok = false; - handled = true; + ack[3] |= LINK_STATUS_CHANGED; } - intel_dp_mst_hpd_irq(intel_dp, esi, &handled); + intel_dp_mst_hpd_irq(intel_dp, esi, ack); - if (!handled) + if (!memchr_inv(ack, 0, sizeof(ack))) break; - if (!intel_dp_ack_sink_irq_esi(intel_dp, esi)) + if (!intel_dp_ack_sink_irq_esi(intel_dp, ack)) drm_dbg_kms(&i915->drm, "Failed to ack ESI\n"); } -- cgit From 248be352bbae1a0f14d0d3511a5b0bb9665097f5 Mon Sep 17 00:00:00 2001 From: Ajit Kumar Pandey Date: Thu, 20 Jan 2022 19:06:01 +0530 Subject: ASoC: amd: acp-mach: Fix Left and Right rt1019 amp devices We're setting the wrong card codec conf for rt1019 amp devices in our machine driver. Due to this, left and right amp channel data are reversed in our machines, as the wrong device prefix results in a wrong value for the "Mono LR Select" rt1019 mixer control. Reverse the dev ids in the codec conf with the Left and Right name_prefix to fix the issue. Signed-off-by: Ajit Kumar Pandey Link: https://lore.kernel.org/r/20220120133605.476138-1-AjitKumar.Pandey@amd.com Signed-off-by: Mark Brown --- sound/soc/amd/acp/acp-mach-common.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/sound/soc/amd/acp/acp-mach-common.c b/sound/soc/amd/acp/acp-mach-common.c index c9caade5cb74..cd05ee2802c9 100644 --- a/sound/soc/amd/acp/acp-mach-common.c +++ b/sound/soc/amd/acp/acp-mach-common.c @@ -303,11 +303,11 @@ static const struct snd_soc_dapm_route rt1019_map_lr[] = { static struct snd_soc_codec_conf rt1019_conf[] = { { - .dlc = COMP_CODEC_CONF("i2c-10EC1019:00"), + .dlc = COMP_CODEC_CONF("i2c-10EC1019:01"), .name_prefix = "Left", }, { - .dlc = COMP_CODEC_CONF("i2c-10EC1019:01"), + .dlc = COMP_CODEC_CONF("i2c-10EC1019:00"), .name_prefix = "Right", }, }; -- cgit From 37ff945f804c2d40d030713fc8692e793a81eff8 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Thu, 20 Jan 2022 00:17:44 +0800 Subject: drm/amdgpu: fix convert bad page retirement Pmfw reads ecc info registers and stores the values in eccinfo_table in the following order: umc0 ch_inst 0, 1, 2 ... 7 umc1 ch_inst 0, 1, 2 ... 7 ... umc3 ch_inst 0, 1, 2 ... 
7 Driver should convert eccinfo_table_idx to channel_index according to channel_idx_tbl. Signed-off-by: Stanley.Yang Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 26 ++++++++++++++------------ 1 file changed, 14 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index 526de1ca9b8d..f5a1ba7db75a 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -58,29 +58,33 @@ static inline uint32_t get_umc_v6_7_channel_index(struct amdgpu_device *adev, } static void umc_v6_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev, - uint32_t channel_index, + uint32_t umc_inst, uint32_t ch_inst, unsigned long *error_count) { uint64_t mc_umc_status; + uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst; /* check for SRAM correctable error MCUMC_STATUS is a 64 bit register */ - mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) *error_count += 1; } static void umc_v6_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev, - uint32_t channel_index, + uint32_t umc_inst, uint32_t ch_inst, unsigned long *error_count) { uint64_t mc_umc_status; + uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst; /* check the MCUMC_STATUS */ - mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; if ((REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || @@ -97,19 +101,15 @@ static void umc_v6_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, uint32_t umc_inst = 0; uint32_t ch_inst = 0; - uint32_t channel_index = 0; /*TODO: driver needs to toggle DF Cstate to ensure * safe access of UMC registers. 
Will add the protection */ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - channel_index = get_umc_v6_7_channel_index(adev, - umc_inst, - ch_inst); umc_v6_7_ecc_info_query_correctable_error_count(adev, - channel_index, + umc_inst, ch_inst, &(err_data->ce_count)); umc_v6_7_ecc_info_querry_uncorrectable_error_count(adev, - channel_index, + umc_inst, ch_inst, &(err_data->ue_count)); } } @@ -122,12 +122,14 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, uint64_t mc_umc_status, err_addr, retired_page; struct eeprom_table_record *err_rec; uint32_t channel_index; + uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst; channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; - mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; if (mc_umc_status == 0) return; @@ -142,7 +144,7 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { - err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr; + err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr; err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); /* translate umc channel address to soc pa, 3 parts are included */ -- cgit From a357dca964e0c77c479075dd65ef86199078d82f Mon Sep 17 00:00:00 2001 From: Xiaojian Du Date: Thu, 20 Jan 2022 15:48:55 +0800 Subject: drm/amdgpu: fix the page fault caused by uninitialized variables This patch will fix the page fault caused by uninitialized variables. Error Log: ...... [ 130.246323] [drm] GART: num cpu pages 131072, num gpu pages 131072 [ 131.963112] [drm] PCIE GART of 512M enabled (table at 0x0000008000000000). [ 131.963130] BUG: unable to handle page fault for address: 000000000002db80 [ 131.963181] #PF: supervisor write access in kernel mode [ 131.963210] #PF: error_code(0x0002) - not-present page [ 131.963233] PGD 0 P4D 0 [ 131.963253] Oops: 0002 [#1] SMP NOPTI [ 131.963273] CPU: 3 PID: 1411 Comm: modprobe Not tainted 5.13.0+ #1 [ 131.963338] RIP: 0010:osq_lock+0x4d/0x120 [ 131.963381] Code: 10 00 00 00 00 48 c7 02 00 00 00 00 89 42 14 87 07 85 c0 0f 84 d0 00 00 00 83 e8 01 48 98 48 03 0c c5 00 d9 ea 9c 48 89 4a 08 <48> 89 11 44 8b 42 10 45 85 c0 0f 85 af 00 00 00 55 48 89 fe 65 4c [ 131.963460] RSP: 0018:ffffa40481717768 EFLAGS: 00010202 [ 131.963483] RAX: fffffffffffffffe RBX: ffffa40481717920 RCX: 000000000002db80 [ 131.963520] RDX: ffff9256fecedb80 RSI: ffff9256cbed2e80 RDI: ffffa40481717ac4 [ 131.963547] RBP: ffffa40481717808 R08: ffffa40481717920 R09: 00000000ffffffff [ 131.963582] R10: 0000000000000001 R11: 0000000000000001 R12: 0000000000000000 [ 131.963609] R13: ffffa40481717ac4 R14: ffffa40481717ab8 R15: ffff9256c9480000 [ 131.963646] FS: 00007f23d9b9c540(0000) GS:ffff9256fecc0000(0000) knlGS:0000000000000000 [ 131.963687] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [ 131.963721] CR2: 000000000002db80 CR3: 0000000008444000 CR4: 00000000000506e0 [ 131.963758] Call Trace: [ 131.963772] ? __ww_mutex_lock.isra.0+0x3a2/0x760 [ 131.963810] ? prb_read_valid+0x1c/0x20 [ 131.963830] ? 
console_unlock+0x2fe/0x4f0 [ 131.963849] __ww_mutex_lock_interruptible_slowpath+0x16/0x20 [ 131.963882] ww_mutex_lock_interruptible+0x83/0x90 [ 131.963908] amdgpu_bo_create_reserved+0xf0/0x1e0 [amdgpu] [ 131.964237] amdgpu_bo_create_kernel+0x17/0x80 [amdgpu] [ 131.964509] amdgpu_gmc_vram_checking+0x41/0xf0 [amdgpu] [ 131.964807] gmc_v10_0_hw_init+0x105/0x120 [amdgpu] [ 131.965108] amdgpu_device_init.cold+0x1aa4/0x1e3e [amdgpu] ...... Signed-off-by: Xiaojian Du Reviewed-by: Yang Wang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index cbe30ccf6162..e15f6b8a62ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -825,9 +825,9 @@ void amdgpu_gmc_get_reserved_allocation(struct amdgpu_device *adev) int amdgpu_gmc_vram_checking(struct amdgpu_device *adev) { - struct amdgpu_bo *vram_bo; - uint64_t vram_gpu; - void *vram_ptr; + struct amdgpu_bo *vram_bo = NULL; + uint64_t vram_gpu = 0; + void *vram_ptr = NULL; int ret, size = 0x100000; uint8_t cptr[10]; -- cgit From 42fed57046fc74586d7058bd51a1c10ac9c690cb Mon Sep 17 00:00:00 2001 From: Al Cooper Date: Wed, 1 Dec 2021 13:06:51 -0500 Subject: phy: usb: Leave some clocks running during suspend The PHY client driver does a phy_exit() call on suspend or rmmod and the PHY driver needs to know the difference because some clocks need to be kept running for suspend but can be shutdown on unbind/rmmod (or if there are no PHY clients at all). The fix is to use a PM notifier so the driver can tell if a PHY client is calling exit() because of a system suspend or a driver unbind/rmmod. Signed-off-by: Al Cooper Acked-by: Florian Fainelli Link: https://lore.kernel.org/r/20211201180653.35097-2-alcooperx@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/broadcom/phy-brcm-usb.c | 38 +++++++++++++++++++++++++++++++++++++ 1 file changed, 38 insertions(+) diff --git a/drivers/phy/broadcom/phy-brcm-usb.c b/drivers/phy/broadcom/phy-brcm-usb.c index 116fb23aebd9..0f1deb6e0eab 100644 --- a/drivers/phy/broadcom/phy-brcm-usb.c +++ b/drivers/phy/broadcom/phy-brcm-usb.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "phy-brcm-usb-init.h" @@ -70,12 +71,35 @@ struct brcm_usb_phy_data { int init_count; int wake_irq; struct brcm_usb_phy phys[BRCM_USB_PHY_ID_MAX]; + struct notifier_block pm_notifier; + bool pm_active; }; static s8 *node_reg_names[BRCM_REGS_MAX] = { "crtl", "xhci_ec", "xhci_gbl", "usb_phy", "usb_mdio", "bdc_ec" }; +static int brcm_pm_notifier(struct notifier_block *notifier, + unsigned long pm_event, + void *unused) +{ + struct brcm_usb_phy_data *priv = + container_of(notifier, struct brcm_usb_phy_data, pm_notifier); + + switch (pm_event) { + case PM_HIBERNATION_PREPARE: + case PM_SUSPEND_PREPARE: + priv->pm_active = true; + break; + case PM_POST_RESTORE: + case PM_POST_HIBERNATION: + case PM_POST_SUSPEND: + priv->pm_active = false; + break; + } + return NOTIFY_DONE; +} + static irqreturn_t brcm_usb_phy_wake_isr(int irq, void *dev_id) { struct phy *gphy = dev_id; @@ -91,6 +115,9 @@ static int brcm_usb_phy_init(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + /* * Use a lock to make sure a second caller waits until * the base phy is inited before using it. 
@@ -120,6 +147,9 @@ static int brcm_usb_phy_exit(struct phy *gphy) struct brcm_usb_phy_data *priv = container_of(phy, struct brcm_usb_phy_data, phys[phy->id]); + if (priv->pm_active) + return 0; + dev_dbg(&gphy->dev, "EXIT\n"); if (phy->id == BRCM_USB_PHY_2_0) brcm_usb_uninit_eohci(&priv->ini); @@ -488,6 +518,9 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) if (err) return err; + priv->pm_notifier.notifier_call = brcm_pm_notifier; + register_pm_notifier(&priv->pm_notifier); + mutex_init(&priv->mutex); /* make sure invert settings are correct */ @@ -528,7 +561,10 @@ static int brcm_usb_phy_probe(struct platform_device *pdev) static int brcm_usb_phy_remove(struct platform_device *pdev) { + struct brcm_usb_phy_data *priv = dev_get_drvdata(&pdev->dev); + sysfs_remove_group(&pdev->dev.kobj, &brcm_usb_phy_group); + unregister_pm_notifier(&priv->pm_notifier); return 0; } @@ -539,6 +575,7 @@ static int brcm_usb_phy_suspend(struct device *dev) struct brcm_usb_phy_data *priv = dev_get_drvdata(dev); if (priv->init_count) { + dev_dbg(dev, "SUSPEND\n"); priv->ini.wake_enabled = device_may_wakeup(dev); if (priv->phys[BRCM_USB_PHY_3_0].inited) brcm_usb_uninit_xhci(&priv->ini); @@ -578,6 +615,7 @@ static int brcm_usb_phy_resume(struct device *dev) * Uninitialize anything that wasn't previously initialized. */ if (priv->init_count) { + dev_dbg(dev, "RESUME\n"); if (priv->wake_irq >= 0) disable_irq_wake(priv->wake_irq); brcm_usb_init_common(&priv->ini); -- cgit From 5070ce86246a8a4ebacd0c15b121e6b6325bc167 Mon Sep 17 00:00:00 2001 From: Al Cooper Date: Wed, 1 Dec 2021 13:06:53 -0500 Subject: phy: broadcom: Kconfig: Fix PHY_BRCM_USB config option MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The previous commit 4b402fa8e0b7 ("phy: phy-brcm-usb: support PHY on the BCM4908") added a second "default" line for ARCH_BCM_4908 above the original "default" line for ARCH_BRCMSTB. When two "default" lines are used, only the first one is used, so this change stopped the PHY_BRCM_USB option from being enabled for ARCH_BRCMSTB. The fix is to use one "default" line with "||". Fixes: 4b402fa8e0b7 ("phy: phy-brcm-usb: support PHY on the BCM4908") Signed-off-by: Al Cooper Acked-by: Rafał Miłecki Acked-by: Florian Fainelli Link: https://lore.kernel.org/r/20211201180653.35097-4-alcooperx@gmail.com Signed-off-by: Vinod Koul --- drivers/phy/broadcom/Kconfig | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/phy/broadcom/Kconfig b/drivers/phy/broadcom/Kconfig index f81e23742079..849c4204f550 100644 --- a/drivers/phy/broadcom/Kconfig +++ b/drivers/phy/broadcom/Kconfig @@ -97,8 +97,7 @@ config PHY_BRCM_USB depends on OF select GENERIC_PHY select SOC_BRCMSTB if ARCH_BRCMSTB - default ARCH_BCM4908 - default ARCH_BRCMSTB + default ARCH_BCM4908 || ARCH_BRCMSTB help Enable this to support the Broadcom STB USB PHY. This driver is required by the USB XHCI, EHCI and OHCI -- cgit From 94bfe2bdfc5059a0870447ccf2c8048f3d016898 Mon Sep 17 00:00:00 2001 From: Alim Akhtar Date: Wed, 5 Jan 2022 22:13:41 +0530 Subject: MAINTAINERS: add reviewer entry for Samsung/Exynos platform Add myself as a reviewer for the Samsung/Exynos platform to help in the review of current and upcoming SoC patches. 
Signed-off-by: Alim Akhtar Acked-by: Sylwester Nawrocki Link: https://lore.kernel.org/r/20220105164341.27479-1-alim.akhtar@samsung.com Signed-off-by: Krzysztof Kozlowski --- MAINTAINERS | 3 +++ 1 file changed, 3 insertions(+) diff --git a/MAINTAINERS b/MAINTAINERS index ea3e6c914384..118bd4649fb2 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -2569,6 +2569,7 @@ N: rockchip ARM/SAMSUNG S3C, S5P AND EXYNOS ARM ARCHITECTURES M: Krzysztof Kozlowski +R: Alim Akhtar L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org S: Maintained @@ -15280,6 +15281,7 @@ PIN CONTROLLER - SAMSUNG M: Tomasz Figa M: Krzysztof Kozlowski M: Sylwester Nawrocki +R: Alim Akhtar L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-samsung-soc@vger.kernel.org S: Maintained @@ -17070,6 +17072,7 @@ SAMSUNG SOC CLOCK DRIVERS M: Sylwester Nawrocki M: Tomasz Figa M: Chanwoo Choi +R: Alim Akhtar L: linux-samsung-soc@vger.kernel.org S: Supported T: git git://git.kernel.org/pub/scm/linux/kernel/git/snawrocki/clk.git -- cgit From 442b0c08db7e35980bed6af091877f4dda72ffca Mon Sep 17 00:00:00 2001 From: Sam Protsenko Date: Fri, 14 Jan 2022 16:46:06 +0200 Subject: soc: samsung: Fix typo in CONFIG_EXYNOS_USI description The proper name is Exynos Auto V9, not V0. It was the typo slipped in unnoticed, fix it. Fixes: b603377e408f ("soc: samsung: Add USI driver") Signed-off-by: Sam Protsenko Reviewed-by: Chanho Park Link: https://lore.kernel.org/r/20220114144606.24358-1-semen.protsenko@linaro.org Signed-off-by: Krzysztof Kozlowski --- drivers/soc/samsung/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/soc/samsung/Kconfig b/drivers/soc/samsung/Kconfig index a9f8b224322e..02e319508cc6 100644 --- a/drivers/soc/samsung/Kconfig +++ b/drivers/soc/samsung/Kconfig @@ -31,7 +31,7 @@ config EXYNOS_USI help Enable support for USI block. USI (Universal Serial Interface) is an IP-core found in modern Samsung Exynos SoCs, like Exynos850 and - ExynosAutoV0. USI block can be configured to provide one of the + ExynosAutoV9. USI block can be configured to provide one of the following serial protocols: UART, SPI or High Speed I2C. This driver allows one to configure USI for desired protocol, which -- cgit From 30cc53897470d45219fb0a5eafd0cc8b0032cd1e Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 11 Jan 2022 18:29:18 +0100 Subject: pinctrl: thunderbay: comment process of building functions a bit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This should make code a bit easier to follow. While at it use some "for" loops to simplify array iteration loops. Ref: 5d0674999cc5 ("pinctrl: keembay: comment process of building functions a bit") Signed-off-by: Rafał Miłecki Link: https://lore.kernel.org/r/20220111172919.6567-1-zajec5@gmail.com Signed-off-by: Linus Walleij --- drivers/pinctrl/pinctrl-thunderbay.c | 19 ++++++++++--------- 1 file changed, 10 insertions(+), 9 deletions(-) diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c index b5b47f4dd774..4756a23ca572 100644 --- a/drivers/pinctrl/pinctrl-thunderbay.c +++ b/drivers/pinctrl/pinctrl-thunderbay.c @@ -839,27 +839,30 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc) void *ptr; int pin; - /* Total number of functions is unknown at this point. Allocate first. */ + /* + * Allocate maximum possible number of functions. 
Assume every pin + * being part of 8 (hw maximum) globally unique muxes. + */ tpc->nfuncs = 0; thunderbay_funcs = kcalloc(tpc->soc->npins * 8, sizeof(*thunderbay_funcs), GFP_KERNEL); if (!thunderbay_funcs) return -ENOMEM; - /* Find total number of functions and each's properties */ + /* Setup 1 function for each unique mux */ for (pin = 0; pin < tpc->soc->npins; pin++) { const struct pinctrl_pin_desc *pin_info = thunderbay_pins + pin; - struct thunderbay_mux_desc *pin_mux = pin_info->drv_data; + struct thunderbay_mux_desc *pin_mux; - while (pin_mux->name) { - struct function_desc *func = thunderbay_funcs; + for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) { + struct function_desc *func; - while (func->name) { + /* Check if we already have function for this mux */ + for (func = thunderbay_funcs; func->name; func++) { if (!strcmp(pin_mux->name, func->name)) { func->num_group_names++; break; } - func++; } if (!func->name) { @@ -868,8 +871,6 @@ static int thunderbay_build_functions(struct thunderbay_pinctrl *tpc) func->data = (int *)&pin_mux->mode; tpc->nfuncs++; } - - pin_mux++; } } -- cgit From 25d2e41cf59bd6ccd23adc2965a157053bc3ed5c Mon Sep 17 00:00:00 2001 From: Rafał Miłecki Date: Tue, 11 Jan 2022 18:29:19 +0100 Subject: pinctrl: thunderbay: rework loops looking for groups names MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the outer loop iterate over functions as that's the real subject. This simplifies code (and reduces amount of lines of code) as allocating memory for names doesn't require extra checks anymore. While at it use local "group_names" variable. It fixes: drivers/pinctrl/pinctrl-thunderbay.c: In function 'thunderbay_add_functions': drivers/pinctrl/pinctrl-thunderbay.c:815:8: warning: assignment discards 'const' qualifier from pointer target type [-Wdiscarded-qualifiers] 815 | grp = func->group_names; | ^ Ref: c26c4bfc1040 ("pinctrl: keembay: rework loops looking for groups names") Reported-by: Nathan Chancellor Signed-off-by: Rafał Miłecki Link: https://lore.kernel.org/r/20220111172919.6567-2-zajec5@gmail.com Signed-off-by: Linus Walleij --- drivers/pinctrl/pinctrl-thunderbay.c | 71 +++++++++++++----------------------- 1 file changed, 25 insertions(+), 46 deletions(-) diff --git a/drivers/pinctrl/pinctrl-thunderbay.c b/drivers/pinctrl/pinctrl-thunderbay.c index 4756a23ca572..79d44bca039e 100644 --- a/drivers/pinctrl/pinctrl-thunderbay.c +++ b/drivers/pinctrl/pinctrl-thunderbay.c @@ -773,63 +773,42 @@ static int thunderbay_build_groups(struct thunderbay_pinctrl *tpc) static int thunderbay_add_functions(struct thunderbay_pinctrl *tpc, struct function_desc *funcs) { - struct function_desc *function = funcs; int i; /* Assign the groups for each function */ - for (i = 0; i < tpc->soc->npins; i++) { - const struct pinctrl_pin_desc *pin_info = thunderbay_pins + i; - struct thunderbay_mux_desc *pin_mux = pin_info->drv_data; - - while (pin_mux->name) { - const char **grp; - int j, grp_num, match = 0; - size_t grp_size; - struct function_desc *func; - - for (j = 0; j < tpc->nfuncs; j++) { - if (!strcmp(pin_mux->name, function[j].name)) { - match = 1; - break; - } - } - - if (!match) - return -EINVAL; - - func = function + j; - grp_num = func->num_group_names; - grp_size = sizeof(*func->group_names); - - if (!func->group_names) { - func->group_names = devm_kcalloc(tpc->dev, - grp_num, - grp_size, - GFP_KERNEL); - if (!func->group_names) { - kfree(func); - return -ENOMEM; - } + for (i = 0; i < tpc->nfuncs; i++) { + struct 
function_desc *func = &funcs[i]; + const char **group_names; + unsigned int grp_idx = 0; + int j; + + group_names = devm_kcalloc(tpc->dev, func->num_group_names, + sizeof(*group_names), GFP_KERNEL); + if (!group_names) + return -ENOMEM; + + for (j = 0; j < tpc->soc->npins; j++) { + const struct pinctrl_pin_desc *pin_info = &thunderbay_pins[j]; + struct thunderbay_mux_desc *pin_mux; + + for (pin_mux = pin_info->drv_data; pin_mux->name; pin_mux++) { + if (!strcmp(pin_mux->name, func->name)) + group_names[grp_idx++] = pin_info->name; } - - grp = func->group_names; - while (*grp) - grp++; - - *grp = pin_info->name; - pin_mux++; } + + func->group_names = group_names; } /* Add all functions */ for (i = 0; i < tpc->nfuncs; i++) { pinmux_generic_add_function(tpc->pctrl, - function[i].name, - function[i].group_names, - function[i].num_group_names, - function[i].data); + funcs[i].name, + funcs[i].group_names, + funcs[i].num_group_names, + funcs[i].data); } - kfree(function); + kfree(funcs); return 0; } -- cgit From aa28514592d52043f4837a6457d6310452135ae1 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 4 Jan 2022 17:42:38 +0100 Subject: pinctrl: cherryview: Trigger hwirq0 for interrupt-lines without a mapping Commit bdfbef2d29dc ("pinctrl: cherryview: Don't use selection 0 to mark an interrupt line as unused") made the code properly differentiate between unset vs (hwirq) 0 entries in the GPIO-controller interrupt-line to GPIO pinnumber/hwirq mapping. This is causing some boards to not boot. This commit restores the old behavior of triggering hwirq 0 when receiving an interrupt on an interrupt-line for which there is no mapping. Fixes: bdfbef2d29dc ("pinctrl: cherryview: Don't use selection 0 to mark an interrupt line as unused") Reported-and-tested-by: Jarkko Nikula Signed-off-by: Hans de Goede Acked-by: Andy Shevchenko Acked-by: Mika Westerberg Link: https://lore.kernel.org/r/20220104164238.253142-1-hdegoede@redhat.com Signed-off-by: Linus Walleij --- drivers/pinctrl/intel/pinctrl-cherryview.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index abffda1fd51e..1d5818269076 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -1471,8 +1471,9 @@ static void chv_gpio_irq_handler(struct irq_desc *desc) offset = cctx->intr_lines[intr_line]; if (offset == CHV_INVALID_HWIRQ) { - dev_err(dev, "interrupt on unused interrupt line %u\n", intr_line); - continue; + dev_warn_once(dev, "interrupt on unmapped interrupt line %u\n", intr_line); + /* Some boards expect hwirq 0 to trigger in this case */ + offset = 0; } generic_handle_domain_irq(gc->irq.domain, offset); -- cgit From 1fd6bb5b47a65eacb063b37e6fa6df2b8fa92959 Mon Sep 17 00:00:00 2001 From: Andre Przywara Date: Wed, 5 Jan 2022 17:29:52 +0000 Subject: pinctrl: sunxi: Fix H616 I2S3 pin data Two bugs have sneaked in the H616 pinctrl data: - PH9 uses the mux value of 0x3 twice (one should be 0x5 instead) - PH8 and PH9 use the "i2s3" function name twice in each pin For the double pin name we use the same trick we pulled for i2s0: append the pin function to the group name to designate the special function. 
Fixes: 25adc29407fb ("pinctrl: sunxi: Add support for the Allwinner H616 pin controller") Reported-by: SASANO Takayoshi Signed-off-by: Andre Przywara Reviewed-by: Jernej Skrabec Reviewed-by: Samuel Holland Link: https://lore.kernel.org/r/20220105172952.23347-1-andre.przywara@arm.com Signed-off-by: Linus Walleij --- drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c index ce1917e230f4..152b71226a80 100644 --- a/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c +++ b/drivers/pinctrl/sunxi/pinctrl-sun50i-h616.c @@ -363,16 +363,16 @@ static const struct sunxi_desc_pin h616_pins[] = { SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), SUNXI_FUNCTION(0x2, "uart2"), /* CTS */ - SUNXI_FUNCTION(0x3, "i2s3"), /* DO0 */ + SUNXI_FUNCTION(0x3, "i2s3_dout0"), /* DO0 */ SUNXI_FUNCTION(0x4, "spi1"), /* MISO */ - SUNXI_FUNCTION(0x5, "i2s3"), /* DI1 */ + SUNXI_FUNCTION(0x5, "i2s3_din1"), /* DI1 */ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 8)), /* PH_EINT8 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 9), SUNXI_FUNCTION(0x0, "gpio_in"), SUNXI_FUNCTION(0x1, "gpio_out"), - SUNXI_FUNCTION(0x3, "i2s3"), /* DI0 */ + SUNXI_FUNCTION(0x3, "i2s3_din0"), /* DI0 */ SUNXI_FUNCTION(0x4, "spi1"), /* CS1 */ - SUNXI_FUNCTION(0x3, "i2s3"), /* DO1 */ + SUNXI_FUNCTION(0x5, "i2s3_dout1"), /* DO1 */ SUNXI_FUNCTION_IRQ_BANK(0x6, 6, 9)), /* PH_EINT9 */ SUNXI_PIN(SUNXI_PINCTRL_PIN(H, 10), SUNXI_FUNCTION(0x0, "gpio_in"), -- cgit From 9ca8581e79e51c57e60b3b8e3b89d816448f49fe Mon Sep 17 00:00:00 2001 From: Namjae Jeon Date: Sat, 22 Jan 2022 10:47:22 +0900 Subject: ksmbd: fix SMB 3.11 posix extension mount failure The cifs client sets the DataLength of the create_posix context to 4, which means only the Mode variable of the create_posix context is available. So ksmbd's buffer validation should check only the size of Mode and leave out the size of the Reserved variable. Fixes: 8f77150c15f8 ("ksmbd: add buffer validation for SMB2_CREATE_CONTEXT") Cc: stable@vger.kernel.org # v5.15+ Reported-by: Steve French Tested-by: Steve French Signed-off-by: Namjae Jeon Signed-off-by: Steve French --- fs/ksmbd/smb2pdu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c index 1866c81c5c99..3926ca18dca4 100644 --- a/fs/ksmbd/smb2pdu.c +++ b/fs/ksmbd/smb2pdu.c @@ -2688,7 +2688,7 @@ int smb2_open(struct ksmbd_work *work) (struct create_posix *)context; if (le16_to_cpu(context->DataOffset) + le32_to_cpu(context->DataLength) < - sizeof(struct create_posix)) { + sizeof(struct create_posix) - 4) { rc = -EINVAL; goto err_out1; } -- cgit From 6d1e6bcb31663ee83aaea1f171f3dbfe95dd4a69 Mon Sep 17 00:00:00 2001 From: Kishon Vijay Abraham I Date: Mon, 17 Jan 2022 16:31:08 +0530 Subject: phy: ti: Fix missing sentinel for clk_div_table _get_table_maxdiv() tries to access the "clk_div_table" array defined in phy-j721e-wiz.c out of bounds. Add a sentinel entry to prevent the following global-out-of-bounds error reported when KASAN is enabled. 
[ 9.552392] BUG: KASAN: global-out-of-bounds in _get_maxdiv+0xc0/0x148 [ 9.558948] Read of size 4 at addr ffff8000095b25a4 by task kworker/u4:1/38 [ 9.565926] [ 9.567441] CPU: 1 PID: 38 Comm: kworker/u4:1 Not tainted 5.16.0-116492-gdaadb3bd0e8d-dirty #360 [ 9.576242] Hardware name: Texas Instruments J721e EVM (DT) [ 9.581832] Workqueue: events_unbound deferred_probe_work_func [ 9.587708] Call trace: [ 9.590174] dump_backtrace+0x20c/0x218 [ 9.594038] show_stack+0x18/0x68 [ 9.597375] dump_stack_lvl+0x9c/0xd8 [ 9.601062] print_address_description.constprop.0+0x78/0x334 [ 9.606830] kasan_report+0x1f0/0x260 [ 9.610517] __asan_load4+0x9c/0xd8 [ 9.614030] _get_maxdiv+0xc0/0x148 [ 9.617540] divider_determine_rate+0x88/0x488 [ 9.622005] divider_round_rate_parent+0xc8/0x124 [ 9.626729] wiz_clk_div_round_rate+0x54/0x68 [ 9.631113] clk_core_determine_round_nolock+0x124/0x158 [ 9.636448] clk_core_round_rate_nolock+0x68/0x138 [ 9.641260] clk_core_set_rate_nolock+0x268/0x3a8 [ 9.645987] clk_set_rate+0x50/0xa8 [ 9.649499] cdns_sierra_phy_init+0x88/0x248 [ 9.653794] phy_init+0x98/0x108 [ 9.657046] cdns_pcie_enable_phy+0xa0/0x170 [ 9.661340] cdns_pcie_init_phy+0x250/0x2b0 [ 9.665546] j721e_pcie_probe+0x4b8/0x798 [ 9.669579] platform_probe+0x8c/0x108 [ 9.673350] really_probe+0x114/0x630 [ 9.677037] __driver_probe_device+0x18c/0x220 [ 9.681505] driver_probe_device+0xac/0x150 [ 9.685712] __device_attach_driver+0xec/0x170 [ 9.690178] bus_for_each_drv+0xf0/0x158 [ 9.694124] __device_attach+0x184/0x210 [ 9.698070] device_initial_probe+0x14/0x20 [ 9.702277] bus_probe_device+0xec/0x100 [ 9.706223] deferred_probe_work_func+0x124/0x180 [ 9.710951] process_one_work+0x4b0/0xbc0 [ 9.714983] worker_thread+0x74/0x5d0 [ 9.718668] kthread+0x214/0x230 [ 9.721919] ret_from_fork+0x10/0x20 [ 9.725520] [ 9.727032] The buggy address belongs to the variable: [ 9.732183] clk_div_table+0x24/0x440 Fixes: 091876cc355d ("phy: ti: j721e-wiz: Add support for WIZ module present in TI J721E SoC") Cc: stable@vger.kernel.org # v5.10+ Signed-off-by: Kishon Vijay Abraham I Link: https://lore.kernel.org/r/20220117110108.4117-1-kishon@ti.com Signed-off-by: Vinod Koul --- drivers/phy/ti/phy-j721e-wiz.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index b3384c31637a..da546c35d1d5 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -233,6 +233,7 @@ static const struct clk_div_table clk_div_table[] = { { .val = 1, .div = 2, }, { .val = 2, .div = 4, }, { .val = 3, .div = 8, }, + { /* sentinel */ }, }; static const struct wiz_clk_div_sel clk_div_sel[] = { -- cgit From 29afbd769ca338fa14cbfbbc824f7dc457ed7f2e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Sat, 15 Jan 2022 14:51:46 +0300 Subject: phy: cadence: Sierra: fix error handling bugs in probe() There are two bugs in the error handling: 1: If devm_of_phy_provider_register() fails then there was no cleanup. 2: The error handling called of_node_put(child) improperly leading to a use after free. We are only holding the reference inside the loop so the last two gotos after the loop lead to a use after free bug. Fix this by cleaning up the partial allocations (or partial iterations) in the loop before doing the goto. 
Fixes: a43f72ae136a ("phy: cadence: Sierra: Change MAX_LANES of Sierra to 16") Fixes: 44d30d622821 ("phy: cadence: Add driver for Sierra PHY") Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/20220115115146.GC7552@kili Signed-off-by: Vinod Koul --- drivers/phy/cadence/phy-cadence-sierra.c | 35 +++++++++++++++++++------------- 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index da24acd26666..e265647e29a2 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -1338,7 +1338,7 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) struct device *dev = &pdev->dev; const struct cdns_sierra_data *data; unsigned int id_value; - int i, ret, node = 0; + int ret, node = 0; void __iomem *base; struct device_node *dn = dev->of_node, *child; @@ -1416,7 +1416,8 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) dev_err(dev, "failed to get reset %s\n", child->full_name); ret = PTR_ERR(sp->phys[node].lnk_rst); - goto put_child2; + of_node_put(child); + goto put_control; } if (!sp->autoconf) { @@ -1424,7 +1425,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (ret) { dev_err(dev, "missing property in node %s\n", child->name); - goto put_child; + of_node_put(child); + reset_control_put(sp->phys[node].lnk_rst); + goto put_control; } } @@ -1434,7 +1437,9 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (IS_ERR(gphy)) { ret = PTR_ERR(gphy); - goto put_child; + of_node_put(child); + reset_control_put(sp->phys[node].lnk_rst); + goto put_control; } sp->phys[node].phy = gphy; phy_set_drvdata(gphy, &sp->phys[node]); @@ -1446,26 +1451,28 @@ static int cdns_sierra_phy_probe(struct platform_device *pdev) if (sp->num_lanes > SIERRA_MAX_LANES) { ret = -EINVAL; dev_err(dev, "Invalid lane configuration\n"); - goto put_child2; + goto put_control; } /* If more than one subnode, configure the PHY as multilink */ if (!sp->autoconf && sp->nsubnodes > 1) { ret = cdns_sierra_phy_configure_multilink(sp); if (ret) - goto put_child2; + goto put_control; } pm_runtime_enable(dev); phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); - return PTR_ERR_OR_ZERO(phy_provider); - -put_child: - node++; -put_child2: - for (i = 0; i < node; i++) - reset_control_put(sp->phys[i].lnk_rst); - of_node_put(child); + if (IS_ERR(phy_provider)) { + ret = PTR_ERR(phy_provider); + goto put_control; + } + + return 0; + +put_control: + while (--node >= 0) + reset_control_put(sp->phys[node].lnk_rst); clk_disable: cdns_sierra_phy_disable_clocks(sp); reset_control_assert(sp->apb_rst); -- cgit From 428cb15d5b003102bc33d49f2ab31a6e4e785157 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 21 Jan 2022 13:30:31 +0200 Subject: drm/i915: Clean up pre-skl primary plane registers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_BIT() & co. for the pre-skl primary plane registers. Also give everything a consistent namespace. 
v2: s/DSP/DISP/ to avoid confusion (José) Use DISP_WIDTH rather than DISP_POS_X for DSPSIZE (José) Deal with gvt Cc: José Roberto de Souza Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220121113036.23240-2-ville.syrjala@linux.intel.com Reviewed-by: José Roberto de Souza --- drivers/gpu/drm/i915/display/i9xx_plane.c | 99 ++++++++++++------------ drivers/gpu/drm/i915/display/intel_display.c | 13 ++-- drivers/gpu/drm/i915/gvt/display.c | 4 +- drivers/gpu/drm/i915/gvt/fb_decoder.c | 18 ++--- drivers/gpu/drm/i915/i915_reg.h | 108 +++++++++++++++------------ drivers/gpu/drm/i915/intel_pm.c | 2 +- 6 files changed, 128 insertions(+), 116 deletions(-) diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index fc6f05146a9f..54f8776ca6b3 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -155,51 +155,51 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, unsigned int rotation = plane_state->hw.rotation; u32 dspcntr; - dspcntr = DISPLAY_PLANE_ENABLE; + dspcntr = DISP_ENABLE; if (IS_G4X(dev_priv) || IS_IRONLAKE(dev_priv) || IS_SANDYBRIDGE(dev_priv) || IS_IVYBRIDGE(dev_priv)) - dspcntr |= DISPPLANE_TRICKLE_FEED_DISABLE; + dspcntr |= DISP_TRICKLE_FEED_DISABLE; switch (fb->format->format) { case DRM_FORMAT_C8: - dspcntr |= DISPPLANE_8BPP; + dspcntr |= DISP_FORMAT_8BPP; break; case DRM_FORMAT_XRGB1555: - dspcntr |= DISPPLANE_BGRX555; + dspcntr |= DISP_FORMAT_BGRX555; break; case DRM_FORMAT_ARGB1555: - dspcntr |= DISPPLANE_BGRA555; + dspcntr |= DISP_FORMAT_BGRA555; break; case DRM_FORMAT_RGB565: - dspcntr |= DISPPLANE_BGRX565; + dspcntr |= DISP_FORMAT_BGRX565; break; case DRM_FORMAT_XRGB8888: - dspcntr |= DISPPLANE_BGRX888; + dspcntr |= DISP_FORMAT_BGRX888; break; case DRM_FORMAT_XBGR8888: - dspcntr |= DISPPLANE_RGBX888; + dspcntr |= DISP_FORMAT_RGBX888; break; case DRM_FORMAT_ARGB8888: - dspcntr |= DISPPLANE_BGRA888; + dspcntr |= DISP_FORMAT_BGRA888; break; case DRM_FORMAT_ABGR8888: - dspcntr |= DISPPLANE_RGBA888; + dspcntr |= DISP_FORMAT_RGBA888; break; case DRM_FORMAT_XRGB2101010: - dspcntr |= DISPPLANE_BGRX101010; + dspcntr |= DISP_FORMAT_BGRX101010; break; case DRM_FORMAT_XBGR2101010: - dspcntr |= DISPPLANE_RGBX101010; + dspcntr |= DISP_FORMAT_RGBX101010; break; case DRM_FORMAT_ARGB2101010: - dspcntr |= DISPPLANE_BGRA101010; + dspcntr |= DISP_FORMAT_BGRA101010; break; case DRM_FORMAT_ABGR2101010: - dspcntr |= DISPPLANE_RGBA101010; + dspcntr |= DISP_FORMAT_RGBA101010; break; case DRM_FORMAT_XBGR16161616F: - dspcntr |= DISPPLANE_RGBX161616; + dspcntr |= DISP_FORMAT_RGBX161616; break; default: MISSING_CASE(fb->format->format); @@ -208,13 +208,13 @@ static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, if (DISPLAY_VER(dev_priv) >= 4 && fb->modifier == I915_FORMAT_MOD_X_TILED) - dspcntr |= DISPPLANE_TILED; + dspcntr |= DISP_TILED; if (rotation & DRM_MODE_ROTATE_180) - dspcntr |= DISPPLANE_ROTATE_180; + dspcntr |= DISP_ROTATE_180; if (rotation & DRM_MODE_REFLECT_X) - dspcntr |= DISPPLANE_MIRROR; + dspcntr |= DISP_MIRROR; return dspcntr; } @@ -354,13 +354,13 @@ static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) u32 dspcntr = 0; if (crtc_state->gamma_enable) - dspcntr |= DISPPLANE_GAMMA_ENABLE; + dspcntr |= DISP_PIPE_GAMMA_ENABLE; if (crtc_state->csc_enable) - dspcntr |= DISPPLANE_PIPE_CSC_ENABLE; + dspcntr |= DISP_PIPE_CSC_ENABLE; if (DISPLAY_VER(dev_priv) < 5) - dspcntr |= DISPPLANE_SEL_PIPE(crtc->pipe); + dspcntr 
|= DISP_PIPE_SEL(crtc->pipe); return dspcntr; } @@ -437,9 +437,9 @@ static void i9xx_plane_update_noarm(struct intel_plane *plane, * program whatever is there. */ intel_de_write_fw(dev_priv, DSPPOS(i9xx_plane), - (crtc_y << 16) | crtc_x); + DISP_POS_Y(crtc_y) | DISP_POS_X(crtc_x)); intel_de_write_fw(dev_priv, DSPSIZE(i9xx_plane), - ((crtc_h - 1) << 16) | (crtc_w - 1)); + DISP_HEIGHT(crtc_h - 1) | DISP_WIDTH(crtc_w - 1)); } spin_unlock_irqrestore(&dev_priv->uncore.lock, irqflags); @@ -474,20 +474,20 @@ static void i9xx_plane_update_arm(struct intel_plane *plane, int crtc_h = drm_rect_height(&plane_state->uapi.dst); intel_de_write_fw(dev_priv, PRIMPOS(i9xx_plane), - (crtc_y << 16) | crtc_x); + PRIM_POS_Y(crtc_y) | PRIM_POS_X(crtc_x)); intel_de_write_fw(dev_priv, PRIMSIZE(i9xx_plane), - ((crtc_h - 1) << 16) | (crtc_w - 1)); + PRIM_HEIGHT(crtc_h - 1) | PRIM_WIDTH(crtc_w - 1)); intel_de_write_fw(dev_priv, PRIMCNSTALPHA(i9xx_plane), 0); } if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { intel_de_write_fw(dev_priv, DSPOFFSET(i9xx_plane), - (y << 16) | x); + DISP_OFFSET_Y(y) | DISP_OFFSET_X(x)); } else if (DISPLAY_VER(dev_priv) >= 4) { intel_de_write_fw(dev_priv, DSPLINOFF(i9xx_plane), linear_offset); intel_de_write_fw(dev_priv, DSPTILEOFF(i9xx_plane), - (y << 16) | x); + DISP_OFFSET_Y(y) | DISP_OFFSET_X(x)); } /* @@ -564,7 +564,7 @@ g4x_primary_async_flip(struct intel_plane *plane, unsigned long irqflags; if (async_flip) - dspcntr |= DISPPLANE_ASYNC_FLIP; + dspcntr |= DISP_ASYNC_FLIP; spin_lock_irqsave(&dev_priv->uncore.lock, irqflags); intel_de_write_fw(dev_priv, DSPCNTR(i9xx_plane), dspcntr); @@ -696,13 +696,12 @@ static bool i9xx_plane_get_hw_state(struct intel_plane *plane, val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); - ret = val & DISPLAY_PLANE_ENABLE; + ret = val & DISP_ENABLE; if (DISPLAY_VER(dev_priv) >= 5) *pipe = plane->pipe; else - *pipe = (val & DISPPLANE_SEL_PIPE_MASK) >> - DISPPLANE_SEL_PIPE_SHIFT; + *pipe = REG_FIELD_GET(DISP_PIPE_SEL_MASK, val); intel_display_power_put(dev_priv, power_domain, wakeref); @@ -958,32 +957,32 @@ fail: static int i9xx_format_to_fourcc(int format) { switch (format) { - case DISPPLANE_8BPP: + case DISP_FORMAT_8BPP: return DRM_FORMAT_C8; - case DISPPLANE_BGRA555: + case DISP_FORMAT_BGRA555: return DRM_FORMAT_ARGB1555; - case DISPPLANE_BGRX555: + case DISP_FORMAT_BGRX555: return DRM_FORMAT_XRGB1555; - case DISPPLANE_BGRX565: + case DISP_FORMAT_BGRX565: return DRM_FORMAT_RGB565; default: - case DISPPLANE_BGRX888: + case DISP_FORMAT_BGRX888: return DRM_FORMAT_XRGB8888; - case DISPPLANE_RGBX888: + case DISP_FORMAT_RGBX888: return DRM_FORMAT_XBGR8888; - case DISPPLANE_BGRA888: + case DISP_FORMAT_BGRA888: return DRM_FORMAT_ARGB8888; - case DISPPLANE_RGBA888: + case DISP_FORMAT_RGBA888: return DRM_FORMAT_ABGR8888; - case DISPPLANE_BGRX101010: + case DISP_FORMAT_BGRX101010: return DRM_FORMAT_XRGB2101010; - case DISPPLANE_RGBX101010: + case DISP_FORMAT_RGBX101010: return DRM_FORMAT_XBGR2101010; - case DISPPLANE_BGRA101010: + case DISP_FORMAT_BGRA101010: return DRM_FORMAT_ARGB2101010; - case DISPPLANE_RGBA101010: + case DISP_FORMAT_RGBA101010: return DRM_FORMAT_ABGR2101010; - case DISPPLANE_RGBX161616: + case DISP_FORMAT_RGBX161616: return DRM_FORMAT_XBGR16161616F; } } @@ -1021,26 +1020,26 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, val = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); if (DISPLAY_VER(dev_priv) >= 4) { - if (val & DISPPLANE_TILED) { + if (val & DISP_TILED) { plane_config->tiling = I915_TILING_X; fb->modifier = 
I915_FORMAT_MOD_X_TILED; } - if (val & DISPPLANE_ROTATE_180) + if (val & DISP_ROTATE_180) plane_config->rotation = DRM_MODE_ROTATE_180; } if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B && - val & DISPPLANE_MIRROR) + val & DISP_MIRROR) plane_config->rotation |= DRM_MODE_REFLECT_X; - pixel_format = val & DISPPLANE_PIXFORMAT_MASK; + pixel_format = val & DISP_FORMAT_MASK; fourcc = i9xx_format_to_fourcc(pixel_format); fb->format = drm_format_info(fourcc); if (IS_HASWELL(dev_priv) || IS_BROADWELL(dev_priv)) { offset = intel_de_read(dev_priv, DSPOFFSET(i9xx_plane)); - base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; + base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK; } else if (DISPLAY_VER(dev_priv) >= 4) { if (plane_config->tiling) offset = intel_de_read(dev_priv, @@ -1048,7 +1047,7 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, else offset = intel_de_read(dev_priv, DSPLINOFF(i9xx_plane)); - base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & 0xfffff000; + base = intel_de_read(dev_priv, DSPSURF(i9xx_plane)) & DISP_ADDR_MASK; } else { base = intel_de_read(dev_priv, DSPADDR(i9xx_plane)); } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 2ecf6d9ab84b..f8c7a2855139 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3543,11 +3543,11 @@ static void i9xx_get_pipe_color_config(struct intel_crtc_state *crtc_state) tmp = intel_de_read(dev_priv, DSPCNTR(i9xx_plane)); - if (tmp & DISPPLANE_GAMMA_ENABLE) + if (tmp & DISP_PIPE_GAMMA_ENABLE) crtc_state->gamma_enable = true; if (!HAS_GMCH(dev_priv) && - tmp & DISPPLANE_PIPE_CSC_ENABLE) + tmp & DISP_PIPE_CSC_ENABLE) crtc_state->csc_enable = true; } @@ -9995,14 +9995,11 @@ void i830_disable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) pipe_name(pipe)); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & - DISPLAY_PLANE_ENABLE); + intel_de_read(dev_priv, DSPCNTR(PLANE_A)) & DISP_ENABLE); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & - DISPLAY_PLANE_ENABLE); + intel_de_read(dev_priv, DSPCNTR(PLANE_B)) & DISP_ENABLE); drm_WARN_ON(&dev_priv->drm, - intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & - DISPLAY_PLANE_ENABLE); + intel_de_read(dev_priv, DSPCNTR(PLANE_C)) & DISP_ENABLE); drm_WARN_ON(&dev_priv->drm, intel_de_read(dev_priv, CURCNTR(PIPE_A)) & MCURSOR_MODE_MASK); drm_WARN_ON(&dev_priv->drm, diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 8ce5d2b2e330..4d66fb5fb29f 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -185,7 +185,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) for_each_pipe(dev_priv, pipe) { vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE); - vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; @@ -496,7 +496,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) /* Disable Primary/Sprite/Cursor plane */ for_each_pipe(dev_priv, pipe) { - vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE; + vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; 
vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE; diff --git a/drivers/gpu/drm/i915/gvt/fb_decoder.c b/drivers/gpu/drm/i915/gvt/fb_decoder.c index 40ace46bad46..f2a216347d77 100644 --- a/drivers/gpu/drm/i915/gvt/fb_decoder.c +++ b/drivers/gpu/drm/i915/gvt/fb_decoder.c @@ -83,22 +83,22 @@ static int bdw_format_to_drm(int format) int bdw_pixel_formats_index = 6; switch (format) { - case DISPPLANE_8BPP: + case DISP_FORMAT_8BPP: bdw_pixel_formats_index = 0; break; - case DISPPLANE_BGRX565: + case DISP_FORMAT_BGRX565: bdw_pixel_formats_index = 1; break; - case DISPPLANE_BGRX888: + case DISP_FORMAT_BGRX888: bdw_pixel_formats_index = 2; break; - case DISPPLANE_RGBX101010: + case DISP_FORMAT_RGBX101010: bdw_pixel_formats_index = 3; break; - case DISPPLANE_BGRX101010: + case DISP_FORMAT_BGRX101010: bdw_pixel_formats_index = 4; break; - case DISPPLANE_RGBX888: + case DISP_FORMAT_RGBX888: bdw_pixel_formats_index = 5; break; @@ -211,7 +211,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, return -ENODEV; val = vgpu_vreg_t(vgpu, DSPCNTR(pipe)); - plane->enabled = !!(val & DISPLAY_PLANE_ENABLE); + plane->enabled = !!(val & DISP_ENABLE); if (!plane->enabled) return -ENODEV; @@ -231,8 +231,8 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu, plane->bpp = skl_pixel_formats[fmt].bpp; plane->drm_format = skl_pixel_formats[fmt].drm_format; } else { - plane->tiled = val & DISPPLANE_TILED; - fmt = bdw_format_to_drm(val & DISPPLANE_PIXFORMAT_MASK); + plane->tiled = val & DISP_TILED; + fmt = bdw_format_to_drm(val & DISP_FORMAT_MASK); plane->bpp = bdw_pixel_formats[fmt].bpp; plane->drm_format = bdw_pixel_formats[fmt].drm_format; } diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 552d4803dd90..cf168c3e0471 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5826,49 +5826,54 @@ enum { /* Display A control */ #define _DSPAADDR_VLV 0x7017C /* vlv/chv */ #define _DSPACNTR 0x70180 -#define DISPLAY_PLANE_ENABLE (1 << 31) -#define DISPLAY_PLANE_DISABLE 0 -#define DISPPLANE_GAMMA_ENABLE (1 << 30) -#define DISPPLANE_GAMMA_DISABLE 0 -#define DISPPLANE_PIXFORMAT_MASK (0xf << 26) -#define DISPPLANE_YUV422 (0x0 << 26) -#define DISPPLANE_8BPP (0x2 << 26) -#define DISPPLANE_BGRA555 (0x3 << 26) -#define DISPPLANE_BGRX555 (0x4 << 26) -#define DISPPLANE_BGRX565 (0x5 << 26) -#define DISPPLANE_BGRX888 (0x6 << 26) -#define DISPPLANE_BGRA888 (0x7 << 26) -#define DISPPLANE_RGBX101010 (0x8 << 26) -#define DISPPLANE_RGBA101010 (0x9 << 26) -#define DISPPLANE_BGRX101010 (0xa << 26) -#define DISPPLANE_BGRA101010 (0xb << 26) -#define DISPPLANE_RGBX161616 (0xc << 26) -#define DISPPLANE_RGBX888 (0xe << 26) -#define DISPPLANE_RGBA888 (0xf << 26) -#define DISPPLANE_STEREO_ENABLE (1 << 25) -#define DISPPLANE_STEREO_DISABLE 0 -#define DISPPLANE_PIPE_CSC_ENABLE (1 << 24) /* ilk+ */ -#define DISPPLANE_SEL_PIPE_SHIFT 24 -#define DISPPLANE_SEL_PIPE_MASK (3 << DISPPLANE_SEL_PIPE_SHIFT) -#define DISPPLANE_SEL_PIPE(pipe) ((pipe) << DISPPLANE_SEL_PIPE_SHIFT) -#define DISPPLANE_SRC_KEY_ENABLE (1 << 22) -#define DISPPLANE_SRC_KEY_DISABLE 0 -#define DISPPLANE_LINE_DOUBLE (1 << 20) -#define DISPPLANE_NO_LINE_DOUBLE 0 -#define DISPPLANE_STEREO_POLARITY_FIRST 0 -#define DISPPLANE_STEREO_POLARITY_SECOND (1 << 18) -#define DISPPLANE_ALPHA_PREMULTIPLY (1 << 16) /* CHV pipe B */ -#define DISPPLANE_ROTATE_180 (1 << 15) -#define DISPPLANE_TRICKLE_FEED_DISABLE (1 << 14) /* Ironlake */ -#define DISPPLANE_TILED (1 << 10) -#define DISPPLANE_ASYNC_FLIP (1 << 9) /* 
g4x+ */ -#define DISPPLANE_MIRROR (1 << 8) /* CHV pipe B */ +#define DISP_ENABLE REG_BIT(31) +#define DISP_PIPE_GAMMA_ENABLE REG_BIT(30) +#define DISP_FORMAT_MASK REG_GENMASK(29, 26) +#define DISP_FORMAT_8BPP REG_FIELD_PREP(DISP_FORMAT_MASK, 2) +#define DISP_FORMAT_BGRA555 REG_FIELD_PREP(DISP_FORMAT_MASK, 3) +#define DISP_FORMAT_BGRX555 REG_FIELD_PREP(DISP_FORMAT_MASK, 4) +#define DISP_FORMAT_BGRX565 REG_FIELD_PREP(DISP_FORMAT_MASK, 5) +#define DISP_FORMAT_BGRX888 REG_FIELD_PREP(DISP_FORMAT_MASK, 6) +#define DISP_FORMAT_BGRA888 REG_FIELD_PREP(DISP_FORMAT_MASK, 7) +#define DISP_FORMAT_RGBX101010 REG_FIELD_PREP(DISP_FORMAT_MASK, 8) +#define DISP_FORMAT_RGBA101010 REG_FIELD_PREP(DISP_FORMAT_MASK, 9) +#define DISP_FORMAT_BGRX101010 REG_FIELD_PREP(DISP_FORMAT_MASK, 10) +#define DISP_FORMAT_BGRA101010 REG_FIELD_PREP(DISP_FORMAT_MASK, 11) +#define DISP_FORMAT_RGBX161616 REG_FIELD_PREP(DISP_FORMAT_MASK, 12) +#define DISP_FORMAT_RGBX888 REG_FIELD_PREP(DISP_FORMAT_MASK, 14) +#define DISP_FORMAT_RGBA888 REG_FIELD_PREP(DISP_FORMAT_MASK, 15) +#define DISP_STEREO_ENABLE REG_BIT(25) +#define DISP_PIPE_CSC_ENABLE REG_BIT(24) /* ilk+ */ +#define DISP_PIPE_SEL_MASK REG_GENMASK(25, 24) +#define DISP_PIPE_SEL(pipe) REG_FIELD_PREP(DISP_PIPE_SEL_MASK, (pipe)) +#define DISP_SRC_KEY_ENABLE REG_BIT(22) +#define DISP_LINE_DOUBLE REG_BIT(20) +#define DISP_STEREO_POLARITY_SECOND REG_BIT(18) +#define DISP_ALPHA_PREMULTIPLY REG_BIT(16) /* CHV pipe B */ +#define DISP_ROTATE_180 REG_BIT(15) +#define DISP_TRICKLE_FEED_DISABLE REG_BIT(14) /* g4x+ */ +#define DISP_TILED REG_BIT(10) +#define DISP_ASYNC_FLIP REG_BIT(9) /* g4x+ */ +#define DISP_MIRROR REG_BIT(8) /* CHV pipe B */ #define _DSPAADDR 0x70184 #define _DSPASTRIDE 0x70188 #define _DSPAPOS 0x7018C /* reserved */ +#define DISP_POS_Y_MASK REG_GENMASK(31, 0) +#define DISP_POS_Y(y) REG_FIELD_PREP(DISP_POS_Y_MASK, (y)) +#define DISP_POS_X_MASK REG_GENMASK(15, 0) +#define DISP_POS_X(x) REG_FIELD_PREP(DISP_POS_X_MASK, (x)) #define _DSPASIZE 0x70190 +#define DISP_HEIGHT_MASK REG_GENMASK(31, 0) +#define DISP_HEIGHT(h) REG_FIELD_PREP(DISP_HEIGHT_MASK, (h)) +#define DISP_WIDTH_MASK REG_GENMASK(15, 0) +#define DISP_WIDTH(w) REG_FIELD_PREP(DISP_WIDTH_MASK, (w)) #define _DSPASURF 0x7019C /* 965+ only */ +#define DISP_ADDR_MASK REG_GENMASK(31, 12) #define _DSPATILEOFF 0x701A4 /* 965+ only */ +#define DISP_OFFSET_Y_MASK REG_GENMASK(31, 16) +#define DISP_OFFSET_Y(y) REG_FIELD_PREP(DISP_OFFSET_Y_MASK, (y)) +#define DISP_OFFSET_X_MASK REG_GENMASK(15, 0) +#define DISP_OFFSET_X(x) REG_FIELD_PREP(DISP_OFFSET_X_MASK, (x)) #define _DSPAOFFSET 0x701A4 /* HSW */ #define _DSPASURFLIVE 0x701AC #define _DSPAGAMC 0x701E0 @@ -5888,15 +5893,28 @@ enum { /* CHV pipe B blender and primary plane */ #define _CHV_BLEND_A 0x60a00 -#define CHV_BLEND_LEGACY (0 << 30) -#define CHV_BLEND_ANDROID (1 << 30) -#define CHV_BLEND_MPO (2 << 30) -#define CHV_BLEND_MASK (3 << 30) +#define CHV_BLEND_MASK REG_GENMASK(31, 30) +#define CHV_BLEND_LEGACY REG_FIELD_PREP(CHV_BLEND_MASK, 0) +#define CHV_BLEND_ANDROID REG_FIELD_PREP(CHV_BLEND_MASK, 1) +#define CHV_BLEND_MPO REG_FIELD_PREP(CHV_BLEND_MASK, 2) #define _CHV_CANVAS_A 0x60a04 +#define CHV_CANVAS_RED_MASK REG_GENMASK(29, 20) +#define CHV_CANVAS_GREEN_MASK REG_GENMASK(19, 10) +#define CHV_CANVAS_BLUE_MASK REG_GENMASK(9, 0) #define _PRIMPOS_A 0x60a08 +#define PRIM_POS_Y_MASK REG_GENMASK(31, 16) +#define PRIM_POS_Y(y) REG_FIELD_PREP(PRIM_POS_Y_MASK, (y)) +#define PRIM_POS_X_MASK REG_GENMASK(15, 0) +#define PRIM_POS_X(x) REG_FIELD_PREP(PRIM_POS_X_MASK, (x)) #define 
_PRIMSIZE_A 0x60a0c +#define PRIM_HEIGHT_MASK REG_GENMASK(31, 16) +#define PRIM_HEIGHT(h) REG_FIELD_PREP(PRIM_HEIGHT_MASK, (h)) +#define PRIM_WIDTH_MASK REG_GENMASK(15, 0) +#define PRIM_WIDTH(w) REG_FIELD_PREP(PRIM_WIDTH_MASK, (w)) #define _PRIMCNSTALPHA_A 0x60a10 -#define PRIM_CONST_ALPHA_ENABLE (1 << 31) +#define PRIM_CONST_ALPHA_ENABLE REG_BIT(31) +#define PRIM_CONST_ALPHA_MASK REG_GENMASK(7, 0) +#define PRIM_CONST_ALPHA(alpha) REG_FIELD_PREP(PRIM_CONST_ALPHA_MASK, (alpha)) #define CHV_BLEND(pipe) _MMIO_TRANS2(pipe, _CHV_BLEND_A) #define CHV_CANVAS(pipe) _MMIO_TRANS2(pipe, _CHV_CANVAS_A) @@ -5937,10 +5955,8 @@ enum { /* Display B control */ #define _DSPBCNTR (DISPLAY_MMIO_BASE(dev_priv) + 0x71180) -#define DISPPLANE_ALPHA_TRANS_ENABLE (1 << 15) -#define DISPPLANE_ALPHA_TRANS_DISABLE 0 -#define DISPPLANE_SPRITE_ABOVE_DISPLAY 0 -#define DISPPLANE_SPRITE_ABOVE_OVERLAY (1) +#define DISP_ALPHA_TRANS_ENABLE REG_BIT(15) +#define DISP_SPRITE_ABOVE_OVERLAY REG_BIT(0) #define _DSPBADDR (DISPLAY_MMIO_BASE(dev_priv) + 0x71184) #define _DSPBSTRIDE (DISPLAY_MMIO_BASE(dev_priv) + 0x71188) #define _DSPBPOS (DISPLAY_MMIO_BASE(dev_priv) + 0x7118C) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index d6a46811acd1..488a1adc540f 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -7208,7 +7208,7 @@ static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) for_each_pipe(dev_priv, pipe) { intel_uncore_write(&dev_priv->uncore, DSPCNTR(pipe), intel_uncore_read(&dev_priv->uncore, DSPCNTR(pipe)) | - DISPPLANE_TRICKLE_FEED_DISABLE); + DISP_TRICKLE_FEED_DISABLE); intel_uncore_write(&dev_priv->uncore, DSPSURF(pipe), intel_uncore_read(&dev_priv->uncore, DSPSURF(pipe))); intel_uncore_posting_read(&dev_priv->uncore, DSPSURF(pipe)); -- cgit From b4d775775877453b44834a621eb410aed7891875 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 15 Oct 2021 16:39:07 +0300 Subject: drm/i915/hdmi: Clean up TMDS clock limit exceeding user mode handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we just use all the hdmi_deep_color_possible() stuff to compute whether deep color is possible, and leave the 8bpc case to do its own thing. That doesn't mesh super well with 4:2:0 handling because we might end up going for 8bpc RGB without considering that it's essentially illegal and we could instead go for a legal 4:2:0 config. So let's run through all the clock checks even for 8bpc first. If we've fully exhausted all options only then do we re-run the computation for 8bpc while ignoring the downstream TMDS clock limits. This will guarantee that if there's a config that respects all limits we will find it, and if there is not we still allow the user to override the mode manually. 
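For illustration, the strategy described above boils down to a two-pass computation; the sketch below uses simplified placeholder names rather than the exact driver entry points:

	/* Pass 1: honour the downstream TMDS clock limits. */
	ret = compute_output_format(encoder, crtc_state, conn_state,
				    true /* respect_downstream_limits */);
	if (ret) {
		/*
		 * Pass 2: the user may be deliberately forcing an out of
		 * spec mode; retry with the downstream limits ignored,
		 * which caps us at 8bpc.
		 */
		ret = compute_output_format(encoder, crtc_state, conn_state,
					    false);
	}
	if (ret)
		return ret;	/* no legal config at all - reject the mode */
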
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211015133921.4609-7-ville.syrjala@linux.intel.com Reviewed-by: Stanislav Lisovskiy --- drivers/gpu/drm/i915/display/intel_dp.c | 13 ++--- drivers/gpu/drm/i915/display/intel_hdmi.c | 92 ++++++++++++++++++------------- drivers/gpu/drm/i915/display/intel_hdmi.h | 4 +- 3 files changed, 62 insertions(+), 47 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index f4feeaf5ce4a..c94ad95442b3 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1166,14 +1166,13 @@ static bool intel_dp_hdmi_tmds_clock_valid(struct intel_dp *intel_dp, return true; } -static bool intel_dp_hdmi_deep_color_possible(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - int bpc) +static bool intel_dp_hdmi_bpc_possible(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + int bpc) { - return intel_hdmi_deep_color_possible(crtc_state, bpc, - intel_dp->has_hdmi_sink, - intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && + return intel_hdmi_bpc_possible(crtc_state, bpc, intel_dp->has_hdmi_sink, + intel_dp_hdmi_ycbcr420(intel_dp, crtc_state)) && intel_dp_hdmi_tmds_clock_valid(intel_dp, crtc_state, bpc); } @@ -1191,7 +1190,7 @@ static int intel_dp_max_bpp(struct intel_dp *intel_dp, if (intel_dp->dfp.min_tmds_clock) { for (; bpc >= 10; bpc -= 2) { - if (intel_dp_hdmi_deep_color_possible(intel_dp, crtc_state, bpc)) + if (intel_dp_hdmi_bpc_possible(intel_dp, crtc_state, bpc)) break; } } diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 3156dc3591d8..45cf0ab04009 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -2002,17 +2002,14 @@ intel_hdmi_mode_valid(struct drm_connector *connector, return intel_mode_valid_max_plane_size(dev_priv, mode, false); } -bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, - int bpc, bool has_hdmi_sink, bool ycbcr420_output) +bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, + int bpc, bool has_hdmi_sink, bool ycbcr420_output) { struct drm_atomic_state *state = crtc_state->uapi.state; struct drm_connector_state *connector_state; struct drm_connector *connector; int i; - if (crtc_state->pipe_bpp < bpc * 3) - return false; - for_each_new_connector_in_state(state, connector, connector_state, i) { if (connector_state->crtc != crtc_state->uapi.crtc) continue; @@ -2024,8 +2021,7 @@ bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, return true; } -static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, - int bpc) +static bool hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, int bpc) { struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); @@ -2039,7 +2035,7 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, * HDMI deep color affects the clocks, so it's only possible * when not cloning with other encoder types. 
*/ - if (crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI)) + if (bpc > 8 && crtc_state->output_types != BIT(INTEL_OUTPUT_HDMI)) return false; /* Display Wa_1405510057:icl,ehl */ @@ -2049,35 +2045,50 @@ static bool hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, adjusted_mode->crtc_hblank_start) % 8 == 2) return false; - return intel_hdmi_deep_color_possible(crtc_state, bpc, - crtc_state->has_hdmi_sink, - intel_hdmi_is_ycbcr420(crtc_state)); + return intel_hdmi_bpc_possible(crtc_state, bpc, crtc_state->has_hdmi_sink, + intel_hdmi_is_ycbcr420(crtc_state)); } static int intel_hdmi_compute_bpc(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, - int clock) + int clock, bool respect_downstream_limits) { struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); bool ycbcr420_output = intel_hdmi_is_ycbcr420(crtc_state); int bpc; - for (bpc = 12; bpc >= 10; bpc -= 2) { - if (hdmi_deep_color_possible(crtc_state, bpc) && - hdmi_port_clock_valid(intel_hdmi, - intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output), - true, crtc_state->has_hdmi_sink) == MODE_OK) + /* + * pipe_bpp could already be below 8bpc due to FDI + * bandwidth constraints. HDMI minimum is 8bpc however. + */ + bpc = max(crtc_state->pipe_bpp / 3, 8); + + /* + * We will never exceed downstream TMDS clock limits while + * attempting deep color. If the user insists on forcing an + * out of spec mode they will have to be satisfied with 8bpc. + */ + if (!respect_downstream_limits) + bpc = 8; + + for (; bpc >= 8; bpc -= 2) { + int tmds_clock = intel_hdmi_tmds_clock(clock, bpc, ycbcr420_output); + + if (hdmi_bpc_possible(crtc_state, bpc) && + hdmi_port_clock_valid(intel_hdmi, tmds_clock, + respect_downstream_limits, + crtc_state->has_hdmi_sink) == MODE_OK) return bpc; } - return 8; + return -EINVAL; } static int intel_hdmi_compute_clock(struct intel_encoder *encoder, - struct intel_crtc_state *crtc_state) + struct intel_crtc_state *crtc_state, + bool respect_downstream_limits) { struct drm_i915_private *i915 = to_i915(encoder->base.dev); - struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(encoder); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int bpc, clock = adjusted_mode->crtc_clock; @@ -2085,31 +2096,25 @@ static int intel_hdmi_compute_clock(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) clock *= 2; - bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock); + bpc = intel_hdmi_compute_bpc(encoder, crtc_state, clock, + respect_downstream_limits); + if (bpc < 0) + return bpc; - crtc_state->port_clock = intel_hdmi_tmds_clock(clock, bpc, - intel_hdmi_is_ycbcr420(crtc_state)); + crtc_state->port_clock = + intel_hdmi_tmds_clock(clock, bpc, intel_hdmi_is_ycbcr420(crtc_state)); /* * pipe_bpp could already be below 8bpc due to * FDI bandwidth constraints. We shouldn't bump it - * back up to 8bpc in that case. + * back up to the HDMI minimum 8bpc in that case. 
*/ - if (crtc_state->pipe_bpp > bpc * 3) - crtc_state->pipe_bpp = bpc * 3; + crtc_state->pipe_bpp = min(crtc_state->pipe_bpp, bpc * 3); drm_dbg_kms(&i915->drm, "picking %d bpc for HDMI output (pipe bpp: %d)\n", bpc, crtc_state->pipe_bpp); - if (hdmi_port_clock_valid(intel_hdmi, crtc_state->port_clock, - false, crtc_state->has_hdmi_sink) != MODE_OK) { - drm_dbg_kms(&i915->drm, - "unsupported HDMI clock (%d kHz), rejecting mode\n", - crtc_state->port_clock); - return -EINVAL; - } - return 0; } @@ -2170,7 +2175,8 @@ intel_hdmi_output_format(struct intel_connector *connector, static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) + const struct drm_connector_state *conn_state, + bool respect_downstream_limits) { struct intel_connector *connector = to_intel_connector(conn_state->connector); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; @@ -2187,7 +2193,7 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; } - ret = intel_hdmi_compute_clock(encoder, crtc_state); + ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits); if (ret) { if (intel_hdmi_is_ycbcr420(crtc_state) || !connector->base.ycbcr_420_allowed || @@ -2195,7 +2201,7 @@ static int intel_hdmi_compute_output_format(struct intel_encoder *encoder, return ret; crtc_state->output_format = intel_hdmi_output_format(connector, true); - ret = intel_hdmi_compute_clock(encoder, crtc_state); + ret = intel_hdmi_compute_clock(encoder, crtc_state, respect_downstream_limits); } return ret; @@ -2231,9 +2237,19 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, pipe_config->has_audio = intel_hdmi_has_audio(encoder, pipe_config, conn_state); - ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state); + /* + * Try to respect downstream TMDS clock limits first, if + * that fails assume the user might know something we don't. 
+ */ + ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, true); if (ret) + ret = intel_hdmi_compute_output_format(encoder, pipe_config, conn_state, false); + if (ret) { + drm_dbg_kms(&dev_priv->drm, + "unsupported HDMI clock (%d kHz), rejecting mode\n", + pipe_config->hw.adjusted_mode.crtc_clock); return ret; + } if (intel_hdmi_is_ycbcr420(pipe_config)) { ret = intel_panel_fitting(pipe_config, conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.h b/drivers/gpu/drm/i915/display/intel_hdmi.h index 2bf440eb400a..b577c38fa90c 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.h +++ b/drivers/gpu/drm/i915/display/intel_hdmi.h @@ -46,8 +46,8 @@ void intel_read_infoframe(struct intel_encoder *encoder, union hdmi_infoframe *frame); bool intel_hdmi_limited_color_range(const struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); -bool intel_hdmi_deep_color_possible(const struct intel_crtc_state *crtc_state, int bpc, - bool has_hdmi_sink, bool ycbcr420_output); +bool intel_hdmi_bpc_possible(const struct intel_crtc_state *crtc_state, + int bpc, bool has_hdmi_sink, bool ycbcr420_output); int intel_hdmi_dsc_get_bpp(int src_fractional_bpp, int slice_width, int num_slices, int output_format, bool hdmi_all_bpp, int hdmi_max_chunk_bytes); -- cgit From ddec7abd4d93760ad5b2c7c61bf123a7707664ca Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Tue, 11 Jan 2022 11:07:08 +0100 Subject: platform/x86: x86-android-tablets: Correct crystal_cove_charger module name The module was renamed to intel_crystal_cove_charger before it was merged, updated bq24190_modules to match. Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20220111100708.38585-1-hdegoede@redhat.com --- drivers/platform/x86/x86-android-tablets.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c index 3ba63ad91b28..8d6a3cad260b 100644 --- a/drivers/platform/x86/x86-android-tablets.c +++ b/drivers/platform/x86/x86-android-tablets.c @@ -187,8 +187,8 @@ static struct bq24190_platform_data bq24190_pdata = { }; static const char * const bq24190_modules[] __initconst = { - "crystal_cove_charger", /* For the bq24190 IRQ */ - "bq24190_charger", /* For the Vbus regulator for intel-int3496 */ + "intel_crystal_cove_charger", /* For the bq24190 IRQ */ + "bq24190_charger", /* For the Vbus regulator for intel-int3496 */ NULL }; -- cgit From 4ce2a32d40260374dfce5344960c419fde23ce87 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 10 Jan 2022 11:39:50 +0100 Subject: platform/x86: x86-android-tablets: Add support for disabling ACPI _AEI handlers Some of the broken DSDTs on these devices often also include broken / wrong _AEI (ACPI Event Interrupt) handlers, which can cause e.g. interrupt storms by listening to a floating GPIO pin. Add support for disabling these and disable them on the Asus ME176C and TF103C tablets. 
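As a usage note, any other board with misbehaving _AEI handlers only needs to name the offending GPIO controller in its board description. A hypothetical entry (the controller label and the i2c client array are examples, not taken from a real DSDT) would look like:

	static const struct x86_dev_info example_tablet_info __initconst = {
		.i2c_client_info	= example_i2c_clients,	/* hypothetical */
		.i2c_client_count	= ARRAY_SIZE(example_i2c_clients),
		/* Drop the broken _AEI handlers bound to this GPIO chip */
		.invalid_aei_gpiochip	= "INT33FC:00",
	};
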
Signed-off-by: Hans de Goede Reviewed-By: Lubomir Rintel Link: https://lore.kernel.org/r/20220110103952.48760-1-hdegoede@redhat.com --- drivers/platform/x86/x86-android-tablets.c | 23 +++++++++++++++++++++-- 1 file changed, 21 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c index 8d6a3cad260b..08c98e881d0e 100644 --- a/drivers/platform/x86/x86-android-tablets.c +++ b/drivers/platform/x86/x86-android-tablets.c @@ -26,6 +26,7 @@ #include /* For gpio_get_desc() which is EXPORT_SYMBOL_GPL() */ #include "../../gpio/gpiolib.h" +#include "../../gpio/gpiolib-acpi.h" /* * Helper code to get Linux IRQ numbers given a description of the IRQ source @@ -47,7 +48,7 @@ struct x86_acpi_irq_data { int polarity; /* ACPI_ACTIVE_HIGH / ACPI_ACTIVE_LOW / ACPI_ACTIVE_BOTH */ }; -static int x86_acpi_irq_helper_gpiochip_find(struct gpio_chip *gc, void *data) +static int gpiochip_find_match_label(struct gpio_chip *gc, void *data) { return gc->label && !strcmp(gc->label, data); } @@ -73,7 +74,7 @@ static int x86_acpi_irq_helper_get(const struct x86_acpi_irq_data *data) return irq; case X86_ACPI_IRQ_TYPE_GPIOINT: /* Like acpi_dev_gpio_irq_get(), but without parsing ACPI resources */ - chip = gpiochip_find(data->chip, x86_acpi_irq_helper_gpiochip_find); + chip = gpiochip_find(data->chip, gpiochip_find_match_label); if (!chip) { pr_err("error cannot find GPIO chip %s\n", data->chip); return -ENODEV; @@ -143,6 +144,7 @@ struct x86_serdev_info { }; struct x86_dev_info { + char *invalid_aei_gpiochip; const char * const *modules; struct gpiod_lookup_table **gpiod_lookup_tables; const struct x86_i2c_client_info *i2c_client_info; @@ -317,6 +319,7 @@ static const struct x86_dev_info asus_me176c_info __initconst = { .serdev_count = ARRAY_SIZE(asus_me176c_serdevs), .gpiod_lookup_tables = asus_me176c_gpios, .modules = bq24190_modules, + .invalid_aei_gpiochip = "INT33FC:02", }; /* Asus TF103C tablets have an Android factory img with everything hardcoded */ @@ -417,6 +420,7 @@ static const struct x86_dev_info asus_tf103c_info __initconst = { .pdev_count = ARRAY_SIZE(int3496_pdevs), .gpiod_lookup_tables = asus_tf103c_gpios, .modules = bq24190_modules, + .invalid_aei_gpiochip = "INT33FC:02", }; /* @@ -795,6 +799,7 @@ static __init int x86_android_tablet_init(void) { const struct x86_dev_info *dev_info; const struct dmi_system_id *id; + struct gpio_chip *chip; int i, ret = 0; id = dmi_first_match(x86_android_tablet_ids); @@ -803,6 +808,20 @@ static __init int x86_android_tablet_init(void) dev_info = id->driver_data; + /* + * The broken DSDTs on these devices often also include broken + * _AEI (ACPI Event Interrupt) handlers, disable these. + */ + if (dev_info->invalid_aei_gpiochip) { + chip = gpiochip_find(dev_info->invalid_aei_gpiochip, + gpiochip_find_match_label); + if (!chip) { + pr_err("error cannot find GPIO chip %s\n", dev_info->invalid_aei_gpiochip); + return -ENODEV; + } + acpi_gpiochip_free_interrupts(chip); + } + /* * Since this runs from module_init() it cannot use -EPROBE_DEFER, * instead pre-load any modules which are listed as requirements. -- cgit From 84c2dcdd475f3f5d1d30c87404cafba4dd4b75ec Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 10 Jan 2022 11:39:51 +0100 Subject: platform/x86: x86-android-tablets: Add an init() callback to struct x86_dev_info Add an init() callback to struct x86_dev_info, board descriptions can use this to do some custom setup before registering the i2c_clients, platform- devices and servdevs. 
Also add an exit() callback to also allow for cleanup of the custom setup. Signed-off-by: Hans de Goede Reviewed-By: Lubomir Rintel Link: https://lore.kernel.org/r/20220110103952.48760-2-hdegoede@redhat.com --- drivers/platform/x86/x86-android-tablets.c | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c index 08c98e881d0e..fe4dba5997fe 100644 --- a/drivers/platform/x86/x86-android-tablets.c +++ b/drivers/platform/x86/x86-android-tablets.c @@ -153,6 +153,8 @@ struct x86_dev_info { int i2c_client_count; int pdev_count; int serdev_count; + int (*init)(void); + void (*exit)(void); }; /* Generic / shared bq24190 settings */ @@ -674,6 +676,7 @@ static struct i2c_client **i2c_clients; static struct platform_device **pdevs; static struct serdev_device **serdevs; static struct gpiod_lookup_table **gpiod_lookup_tables; +static void (*exit_handler)(void); static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info, int idx) @@ -791,6 +794,9 @@ static void x86_android_tablet_cleanup(void) kfree(i2c_clients); + if (exit_handler) + exit_handler(); + for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++) gpiod_remove_lookup_table(gpiod_lookup_tables[i]); } @@ -833,6 +839,15 @@ static __init int x86_android_tablet_init(void) for (i = 0; gpiod_lookup_tables && gpiod_lookup_tables[i]; i++) gpiod_add_lookup_table(gpiod_lookup_tables[i]); + if (dev_info->init) { + ret = dev_info->init(); + if (ret < 0) { + x86_android_tablet_cleanup(); + return ret; + } + exit_handler = dev_info->exit; + } + i2c_clients = kcalloc(dev_info->i2c_client_count, sizeof(*i2c_clients), GFP_KERNEL); if (!i2c_clients) { x86_android_tablet_cleanup(); -- cgit From 442bf564eb0c4577d98a77e87caa10f704dddcad Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 10 Jan 2022 11:39:52 +0100 Subject: platform/x86: x86-android-tablets: Constify the gpiod_lookup_tables arrays The individual gpiod_lookup_table structs cannot be const because they contain a list-head which gets used when registering them. But the array of pointers to the gpiod_lookup_table-s used by a board can be const, constify these. 
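For illustration, the resulting idiom in isolation (consumer name, pin and con_id are made up): the table itself must stay writable because gpiod_add_lookup_table() links its embedded list head, but the NULL-terminated array of pointers to it never changes and can be const.

	static struct gpiod_lookup_table example_gpios_table = {
		.dev_id = "example-consumer",		/* hypothetical consumer */
		.table = {
			GPIO_LOOKUP("INT33FC:02", 3, "enable", GPIO_ACTIVE_HIGH),
			{ }
		},
	};

	/* Only the array of pointers is const - it is never modified. */
	static struct gpiod_lookup_table * const example_board_gpios[] = {
		&example_gpios_table,
		NULL
	};
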
Signed-off-by: Hans de Goede Reviewed-By: Lubomir Rintel Link: https://lore.kernel.org/r/20220110103952.48760-3-hdegoede@redhat.com --- drivers/platform/x86/x86-android-tablets.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c index fe4dba5997fe..e1b22bb3dbd8 100644 --- a/drivers/platform/x86/x86-android-tablets.c +++ b/drivers/platform/x86/x86-android-tablets.c @@ -146,7 +146,7 @@ struct x86_serdev_info { struct x86_dev_info { char *invalid_aei_gpiochip; const char * const *modules; - struct gpiod_lookup_table **gpiod_lookup_tables; + struct gpiod_lookup_table * const *gpiod_lookup_tables; const struct x86_i2c_client_info *i2c_client_info; const struct platform_device_info *pdev_info; const struct x86_serdev_info *serdev_info; @@ -306,7 +306,7 @@ static struct gpiod_lookup_table asus_me176c_goodix_gpios = { }, }; -static struct gpiod_lookup_table *asus_me176c_gpios[] = { +static struct gpiod_lookup_table * const asus_me176c_gpios[] = { &int3496_gpo2_pin22_gpios, &asus_me176c_goodix_gpios, NULL @@ -410,7 +410,7 @@ static const struct x86_i2c_client_info asus_tf103c_i2c_clients[] __initconst = }, }; -static struct gpiod_lookup_table *asus_tf103c_gpios[] = { +static struct gpiod_lookup_table * const asus_tf103c_gpios[] = { &int3496_gpo2_pin22_gpios, NULL }; @@ -565,7 +565,7 @@ static struct gpiod_lookup_table whitelabel_tm800a550l_goodix_gpios = { }, }; -static struct gpiod_lookup_table *whitelabel_tm800a550l_gpios[] = { +static struct gpiod_lookup_table * const whitelabel_tm800a550l_gpios[] = { &whitelabel_tm800a550l_goodix_gpios, NULL }; @@ -675,7 +675,7 @@ static int serdev_count; static struct i2c_client **i2c_clients; static struct platform_device **pdevs; static struct serdev_device **serdevs; -static struct gpiod_lookup_table **gpiod_lookup_tables; +static struct gpiod_lookup_table * const *gpiod_lookup_tables; static void (*exit_handler)(void); static __init int x86_instantiate_i2c_client(const struct x86_dev_info *dev_info, -- cgit From 5de2ffd5acd33368e472dd3255a51cac528c730e Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Mon, 10 Jan 2022 07:35:12 +0100 Subject: platform/x86: x86-android-tablets: Fix the buttons on CZC P10T tablet This switches the P10T tablet to "Android" mode, where the Home button sends a single sancode instead of a Windows-specific key combination and the other button doesn't disable the Wi-Fi. Signed-off-by: Lubomir Rintel Link: https://lore.kernel.org/r/20220110063512.273252-1-lkundrak@v3.sk Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/x86-android-tablets.c | 51 ++++++++++++++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c index e1b22bb3dbd8..bd1ba676bcc0 100644 --- a/drivers/platform/x86/x86-android-tablets.c +++ b/drivers/platform/x86/x86-android-tablets.c @@ -496,6 +496,39 @@ static const struct x86_dev_info chuwi_hi8_info __initconst = { .i2c_client_count = ARRAY_SIZE(chuwi_hi8_i2c_clients), }; +#define CZC_EC_EXTRA_PORT 0x68 +#define CZC_EC_ANDROID_KEYS 0x63 + +static int __init czc_p10t_init(void) +{ + /* + * The device boots up in "Windows 7" mode, when the home button sends a + * Windows specific key sequence (Left Meta + D) and the second button + * sends an unknown one while also toggling the Radio Kill Switch. + * This is a surprising behavior when the second button is labeled "Back". 
+ * + * The vendor-supplied Android-x86 build switches the device to a "Android" + * mode by writing value 0x63 to the I/O port 0x68. This just seems to just + * set bit 6 on address 0x96 in the EC region; switching the bit directly + * seems to achieve the same result. It uses a "p10t_switcher" to do the + * job. It doesn't seem to be able to do anything else, and no other use + * of the port 0x68 is known. + * + * In the Android mode, the home button sends just a single scancode, + * which can be handled in Linux userspace more reasonably and the back + * button only sends a scancode without toggling the kill switch. + * The scancode can then be mapped either to Back or RF Kill functionality + * in userspace, depending on how the button is labeled on that particular + * model. + */ + outb(CZC_EC_ANDROID_KEYS, CZC_EC_EXTRA_PORT); + return 0; +} + +static const struct x86_dev_info czc_p10t __initconst = { + .init = czc_p10t_init, +}; + /* * Whitelabel (sold as various brands) TM800A550L tablets. * These tablet's DSDT contains a whole bunch of bogus ACPI I2C devices @@ -647,6 +680,24 @@ static const struct dmi_system_id x86_android_tablet_ids[] __initconst = { }, .driver_data = (void *)&chuwi_hi8_info, }, + { + /* CZC P10T */ + .ident = "CZC ODEON TPC-10 (\"P10T\")", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "CZC"), + DMI_MATCH(DMI_PRODUCT_NAME, "ODEON*TPC-10"), + }, + .driver_data = (void *)&czc_p10t, + }, + { + /* A variant of CZC P10T */ + .ident = "ViewSonic ViewPad 10", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "ViewSonic"), + DMI_MATCH(DMI_PRODUCT_NAME, "VPAD10"), + }, + .driver_data = (void *)&czc_p10t, + }, { /* Whitelabel (sold as various brands) TM800A550L */ .matches = { -- cgit From 17f6736a020ef195f4855e807c76d2360310d143 Mon Sep 17 00:00:00 2001 From: Lubomir Rintel Date: Mon, 10 Jan 2022 07:36:29 +0100 Subject: platform/x86: x86-android-tablets: Trivial typo fix for MODULE_AUTHOR Bring balance to the quoting of Hans' e-mail address. Signed-off-by: Lubomir Rintel Link: https://lore.kernel.org/r/20220110063629.273364-1-lkundrak@v3.sk Signed-off-by: Hans de Goede --- drivers/platform/x86/x86-android-tablets.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/x86-android-tablets.c b/drivers/platform/x86/x86-android-tablets.c index bd1ba676bcc0..9360a8a92486 100644 --- a/drivers/platform/x86/x86-android-tablets.c +++ b/drivers/platform/x86/x86-android-tablets.c @@ -950,6 +950,6 @@ static __init int x86_android_tablet_init(void) module_init(x86_android_tablet_init); module_exit(x86_android_tablet_cleanup); -MODULE_AUTHOR("Hans de Goede "); MODULE_DESCRIPTION("X86 Android tablets DSDT fixups driver"); MODULE_LICENSE("GPL"); -- cgit From c197e969e3082b9c19175d2f013a0dbd3ce52236 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Sat, 15 Jan 2022 15:08:49 +0100 Subject: platform/surface: Reinstate platform dependency Microsoft Surface platform-specific devices are only present on Microsoft Surface platforms, which are currently limited to arm64 and x86. Hence add a dependency on ARM64 || X86, to prevent asking the user about drivers for these devices when configuring a kernel for an architecture that does not support Microsoft Surface platforms. 
Fixes: 272479928172edf0 ("platform: surface: Propagate ACPI Dependency") Signed-off-by: Geert Uytterhoeven Acked-by: Maximilian Luz Link: https://lore.kernel.org/r/20220115140849.269479-1-geert@linux-m68k.org Signed-off-by: Hans de Goede --- drivers/platform/surface/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/platform/surface/Kconfig b/drivers/platform/surface/Kconfig index 5f0578e25f71..463f1ec5c14e 100644 --- a/drivers/platform/surface/Kconfig +++ b/drivers/platform/surface/Kconfig @@ -5,6 +5,7 @@ menuconfig SURFACE_PLATFORMS bool "Microsoft Surface Platform-Specific Device Drivers" + depends on ARM64 || X86 || COMPILE_TEST default y help Say Y here to get to see options for platform-specific device drivers -- cgit From 512eb73cfd1208898cf10cb06094e0ee0bb53b58 Mon Sep 17 00:00:00 2001 From: Yuka Kawajiri Date: Wed, 12 Jan 2022 00:40:21 +0900 Subject: platform/x86: touchscreen_dmi: Add info for the RWC NANOTE P8 AY07J 2-in-1 Add touchscreen info for RWC NANOTE P8 (AY07J) 2-in-1. Signed-off-by: Yuka Kawajiri Link: https://lore.kernel.org/r/20220111154019.4599-1-yukx00@gmail.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/touchscreen_dmi.c | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/drivers/platform/x86/touchscreen_dmi.c b/drivers/platform/x86/touchscreen_dmi.c index 494f23052678..bc97bfa8e8a6 100644 --- a/drivers/platform/x86/touchscreen_dmi.c +++ b/drivers/platform/x86/touchscreen_dmi.c @@ -770,6 +770,21 @@ static const struct ts_dmi_data predia_basic_data = { .properties = predia_basic_props, }; +static const struct property_entry rwc_nanote_p8_props[] = { + PROPERTY_ENTRY_U32("touchscreen-min-y", 46), + PROPERTY_ENTRY_U32("touchscreen-size-x", 1728), + PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), + PROPERTY_ENTRY_BOOL("touchscreen-inverted-y"), + PROPERTY_ENTRY_STRING("firmware-name", "gsl1680-rwc-nanote-p8.fw"), + PROPERTY_ENTRY_U32("silead,max-fingers", 10), + { } +}; + +static const struct ts_dmi_data rwc_nanote_p8_data = { + .acpi_name = "MSSL1680:00", + .properties = rwc_nanote_p8_props, +}; + static const struct property_entry schneider_sct101ctm_props[] = { PROPERTY_ENTRY_U32("touchscreen-size-x", 1715), PROPERTY_ENTRY_U32("touchscreen-size-y", 1140), @@ -1394,6 +1409,15 @@ const struct dmi_system_id touchscreen_dmi_table[] = { DMI_EXACT_MATCH(DMI_BOARD_NAME, "0E57"), }, }, + { + /* RWC NANOTE P8 */ + .driver_data = (void *)&rwc_nanote_p8_data, + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Default string"), + DMI_MATCH(DMI_PRODUCT_NAME, "AY07J"), + DMI_MATCH(DMI_PRODUCT_SKU, "0001") + }, + }, { /* Schneider SCT101CTM */ .driver_data = (void *)&schneider_sct101ctm_data, -- cgit From b288420e773f5a9db77115b9cc3767a8ada16648 Mon Sep 17 00:00:00 2001 From: Alexander Kobel Date: Wed, 12 Jan 2022 12:18:27 +0100 Subject: platform/x86: thinkpad_acpi: Add quirk for ThinkPads without a fan Some ThinkPad models, like the X1 Tablet 1st and 2nd Gen, are passively cooled without any fan. Currently, an entry in /proc/acpi/ibm/fan is nevertheless created, and misleadingly shows status: enabled speed: 65535 level: auto This patch adds a TPACPI_FAN_NOFAN quirk definition and corresponding handling to not initialize a fan interface at all. For the time being, the quirk is only applied for X1 Tablet 2nd Gen (types 20JB, 20JC; EC N1O...); further models (such as Gen1, types 20GG and 20GH) can be added easily once tested. Tested on a 20JCS00C00, BIOS N1OET58W (1.43), EC N1OHT34W. 
Signed-off-by: Alexander Kobel Link: https://lore.kernel.org/r/12d4b825-a2b9-8cb7-6ed3-db4d66f46a60@a-kobel.de Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/thinkpad_acpi.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 098180fb1cfc..33f611af6e51 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8679,9 +8679,10 @@ static const struct attribute_group fan_driver_attr_group = { .attrs = fan_driver_attributes, }; -#define TPACPI_FAN_Q1 0x0001 /* Unitialized HFSP */ -#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */ -#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */ +#define TPACPI_FAN_Q1 0x0001 /* Uninitialized HFSP */ +#define TPACPI_FAN_2FAN 0x0002 /* EC 0x31 bit 0 selects fan2 */ +#define TPACPI_FAN_2CTL 0x0004 /* selects fan2 control */ +#define TPACPI_FAN_NOFAN 0x0008 /* no fan available */ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_QEC_IBM('1', 'Y', TPACPI_FAN_Q1), @@ -8702,6 +8703,7 @@ static const struct tpacpi_quirk fan_quirk_table[] __initconst = { TPACPI_Q_LNV3('N', '4', '0', TPACPI_FAN_2CTL), /* P1 / X1 Extreme (4nd gen) */ TPACPI_Q_LNV3('N', '3', '0', TPACPI_FAN_2CTL), /* P15 (1st gen) / P15v (1st gen) */ TPACPI_Q_LNV3('N', '3', '2', TPACPI_FAN_2CTL), /* X1 Carbon (9th gen) */ + TPACPI_Q_LNV3('N', '1', 'O', TPACPI_FAN_NOFAN), /* X1 Tablet (2nd gen) */ }; static int __init fan_init(struct ibm_init_struct *iibm) @@ -8730,6 +8732,11 @@ static int __init fan_init(struct ibm_init_struct *iibm) quirks = tpacpi_check_quirks(fan_quirk_table, ARRAY_SIZE(fan_quirk_table)); + if (quirks & TPACPI_FAN_NOFAN) { + pr_info("No integrated ThinkPad fan available\n"); + return -ENODEV; + } + if (gfan_handle) { /* 570, 600e/x, 770e, 770x */ fan_status_access_mode = TPACPI_FAN_RD_ACPI_GFAN; -- cgit From a29012ab23163f78087a7e77719f05d201088700 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Wed, 12 Jan 2022 00:23:09 +0100 Subject: platform/x86: intel_crystal_cove_charger: Fix IRQ masking / unmasking The driver as originally submitted accidentally relied on Android having run before and Android having unmasked the 2nd level IRQ-mask for the charger IRQ. This worked since these are PMIC registers which are only reset when the battery is fully drained or disconnected. Fix the charger IRQ no longer working after loss of battery power by properly setting the 2nd level IRQ-mask for the charger IRQ. Note this removes the need to enable/disable our parent IRQ which just sets the mask bit in the 1st level IRQ-mask register, setting one of the 2 level masks is enough to stop the IRQ from getting reported. 
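The register write is deferred to irq_bus_sync_unlock() rather than done in the mask/unmask callbacks for the usual PMIC reason: mask/unmask run in atomic context with the interrupt descriptor lock held, while the PMIC register access through regmap may sleep. A stripped-down sketch of that pattern (names invented, only the shape matters):

	static void example_irq_mask(struct irq_data *d)
	{
		struct example_chip *chip = irq_data_get_irq_chip_data(d);

		chip->new_mask |= BIT(d->hwirq);	/* cache only, no I/O */
	}

	static void example_irq_bus_sync_unlock(struct irq_data *d)
	{
		struct example_chip *chip = irq_data_get_irq_chip_data(d);

		if (chip->new_mask != chip->mask) {
			/* sleeping context, safe to touch the PMIC */
			regmap_write(chip->regmap, EXAMPLE_MASK_REG, chip->new_mask);
			chip->mask = chip->new_mask;
		}
		mutex_unlock(&chip->buslock);
	}
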
Fixes: 761db353d9e2 ("platform/x86: Add intel_crystal_cove_charger driver") Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20220111232309.377642-1-hdegoede@redhat.com --- drivers/platform/x86/intel/crystal_cove_charger.c | 26 +++++++++++------------ 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/drivers/platform/x86/intel/crystal_cove_charger.c b/drivers/platform/x86/intel/crystal_cove_charger.c index 0374bc742513..e4299cfa2205 100644 --- a/drivers/platform/x86/intel/crystal_cove_charger.c +++ b/drivers/platform/x86/intel/crystal_cove_charger.c @@ -17,6 +17,7 @@ #include #define CHGRIRQ_REG 0x0a +#define MCHGRIRQ_REG 0x17 struct crystal_cove_charger_data { struct mutex buslock; /* irq_bus_lock */ @@ -25,8 +26,8 @@ struct crystal_cove_charger_data { struct irq_domain *irq_domain; int irq; int charger_irq; - bool irq_enabled; - bool irq_is_enabled; + u8 mask; + u8 new_mask; }; static irqreturn_t crystal_cove_charger_irq(int irq, void *data) @@ -53,13 +54,9 @@ static void crystal_cove_charger_irq_bus_sync_unlock(struct irq_data *data) { struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data); - if (charger->irq_is_enabled != charger->irq_enabled) { - if (charger->irq_enabled) - enable_irq(charger->irq); - else - disable_irq(charger->irq); - - charger->irq_is_enabled = charger->irq_enabled; + if (charger->mask != charger->new_mask) { + regmap_write(charger->regmap, MCHGRIRQ_REG, charger->new_mask); + charger->mask = charger->new_mask; } mutex_unlock(&charger->buslock); @@ -69,14 +66,14 @@ static void crystal_cove_charger_irq_unmask(struct irq_data *data) { struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data); - charger->irq_enabled = true; + charger->new_mask &= ~BIT(data->hwirq); } static void crystal_cove_charger_irq_mask(struct irq_data *data) { struct crystal_cove_charger_data *charger = irq_data_get_irq_chip_data(data); - charger->irq_enabled = false; + charger->new_mask |= BIT(data->hwirq); } static void crystal_cove_charger_rm_irq_domain(void *data) @@ -130,10 +127,13 @@ static int crystal_cove_charger_probe(struct platform_device *pdev) irq_set_nested_thread(charger->charger_irq, true); irq_set_noprobe(charger->charger_irq); + /* Mask the single 2nd level IRQ before enabling the 1st level IRQ */ + charger->mask = charger->new_mask = BIT(0); + regmap_write(charger->regmap, MCHGRIRQ_REG, charger->mask); + ret = devm_request_threaded_irq(&pdev->dev, charger->irq, NULL, crystal_cove_charger_irq, - IRQF_ONESHOT | IRQF_NO_AUTOEN, - KBUILD_MODNAME, charger); + IRQF_ONESHOT, KBUILD_MODNAME, charger); if (ret) return dev_err_probe(&pdev->dev, ret, "requesting irq\n"); -- cgit From 17da2d5f93692086dd096a975225ffd5622d0bf8 Mon Sep 17 00:00:00 2001 From: Srinivas Pandruvada Date: Tue, 11 Jan 2022 18:25:21 -0800 Subject: platform/x86: ISST: Fix possible circular locking dependency detected As reported: [ 256.104522] ====================================================== [ 256.113783] WARNING: possible circular locking dependency detected [ 256.120093] 5.16.0-rc6-yocto-standard+ #99 Not tainted [ 256.125362] ------------------------------------------------------ [ 256.131673] intel-speed-sel/844 is trying to acquire lock: [ 256.137290] ffffffffc036f0d0 (punit_misc_dev_lock){+.+.}-{3:3}, at: isst_if_open+0x18/0x90 [isst_if_common] [ 256.147171] [ 256.147171] but task is already holding lock: [ 256.153135] ffffffff8ee7cb50 (misc_mtx){+.+.}-{3:3}, at: misc_open+0x2a/0x170 [ 256.160407] [ 256.160407] which lock already depends on 
the new lock. [ 256.160407] [ 256.168712] [ 256.168712] the existing dependency chain (in reverse order) is: [ 256.176327] [ 256.176327] -> #1 (misc_mtx){+.+.}-{3:3}: [ 256.181946] lock_acquire+0x1e6/0x330 [ 256.186265] __mutex_lock+0x9b/0x9b0 [ 256.190497] mutex_lock_nested+0x1b/0x20 [ 256.195075] misc_register+0x32/0x1a0 [ 256.199390] isst_if_cdev_register+0x65/0x180 [isst_if_common] [ 256.205878] isst_if_probe+0x144/0x16e [isst_if_mmio] ... [ 256.241976] [ 256.241976] -> #0 (punit_misc_dev_lock){+.+.}-{3:3}: [ 256.248552] validate_chain+0xbc6/0x1750 [ 256.253131] __lock_acquire+0x88c/0xc10 [ 256.257618] lock_acquire+0x1e6/0x330 [ 256.261933] __mutex_lock+0x9b/0x9b0 [ 256.266165] mutex_lock_nested+0x1b/0x20 [ 256.270739] isst_if_open+0x18/0x90 [isst_if_common] [ 256.276356] misc_open+0x100/0x170 [ 256.280409] chrdev_open+0xa5/0x1e0 ... The call sequence suggested that misc_device /dev file can be opened before misc device is yet to be registered, which is done only once. Here punit_misc_dev_lock was used as common lock, to protect the registration by multiple ISST HW drivers, one time setup, prevent duplicate registry of misc device and prevent load/unload when device is open. We can split into locks: - One which just prevent duplicate call to misc_register() and one time setup. Also never call again if the misc_register() failed or required one time setup is failed. This lock is not shared with any misc device callbacks. - The other lock protects registry, load and unload of HW drivers. Sequence in isst_if_cdev_register() - Register callbacks under punit_misc_dev_open_lock - Call isst_misc_reg() which registers misc_device on the first registry which is under punit_misc_dev_reg_lock, which is not shared with callbacks. Sequence in isst_if_cdev_unregister Just opposite of isst_if_cdev_register Reported-and-tested-by: Liwei Song Signed-off-by: Srinivas Pandruvada Link: https://lore.kernel.org/r/20220112022521.54669-1-srinivas.pandruvada@linux.intel.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- .../x86/intel/speed_select_if/isst_if_common.c | 97 ++++++++++++++-------- 1 file changed, 63 insertions(+), 34 deletions(-) diff --git a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c index c9a85eb2e860..e8424e70d81d 100644 --- a/drivers/platform/x86/intel/speed_select_if/isst_if_common.c +++ b/drivers/platform/x86/intel/speed_select_if/isst_if_common.c @@ -596,7 +596,10 @@ static long isst_if_def_ioctl(struct file *file, unsigned int cmd, return ret; } -static DEFINE_MUTEX(punit_misc_dev_lock); +/* Lock to prevent module registration when already opened by user space */ +static DEFINE_MUTEX(punit_misc_dev_open_lock); +/* Lock to allow one share misc device for all ISST interace */ +static DEFINE_MUTEX(punit_misc_dev_reg_lock); static int misc_usage_count; static int misc_device_ret; static int misc_device_open; @@ -606,7 +609,7 @@ static int isst_if_open(struct inode *inode, struct file *file) int i, ret = 0; /* Fail open, if a module is going away */ - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -628,7 +631,7 @@ static int isst_if_open(struct inode *inode, struct file *file) } else { misc_device_open++; } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return ret; } @@ -637,7 +640,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) { 
int i; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); misc_device_open--; for (i = 0; i < ISST_IF_DEV_MAX; ++i) { struct isst_if_cmd_cb *cb = &punit_callbacks[i]; @@ -645,7 +648,7 @@ static int isst_if_relase(struct inode *inode, struct file *f) if (cb->registered) module_put(cb->owner); } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return 0; } @@ -662,6 +665,43 @@ static struct miscdevice isst_if_char_driver = { .fops = &isst_if_char_driver_ops, }; +static int isst_misc_reg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_device_ret) + goto unlock_exit; + + if (!misc_usage_count) { + misc_device_ret = isst_if_cpu_info_init(); + if (misc_device_ret) + goto unlock_exit; + + misc_device_ret = misc_register(&isst_if_char_driver); + if (misc_device_ret) { + isst_if_cpu_info_exit(); + goto unlock_exit; + } + } + misc_usage_count++; + +unlock_exit: + mutex_unlock(&punit_misc_dev_reg_lock); + + return misc_device_ret; +} + +static void isst_misc_unreg(void) +{ + mutex_lock(&punit_misc_dev_reg_lock); + if (misc_usage_count) + misc_usage_count--; + if (!misc_usage_count && !misc_device_ret) { + misc_deregister(&isst_if_char_driver); + isst_if_cpu_info_exit(); + } + mutex_unlock(&punit_misc_dev_reg_lock); +} + /** * isst_if_cdev_register() - Register callback for IOCTL * @device_type: The device type this callback handling. @@ -679,38 +719,31 @@ static struct miscdevice isst_if_char_driver = { */ int isst_if_cdev_register(int device_type, struct isst_if_cmd_cb *cb) { - if (misc_device_ret) - return misc_device_ret; + int ret; if (device_type >= ISST_IF_DEV_MAX) return -EINVAL; - mutex_lock(&punit_misc_dev_lock); + mutex_lock(&punit_misc_dev_open_lock); + /* Device is already open, we don't want to add new callbacks */ if (misc_device_open) { - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); return -EAGAIN; } - if (!misc_usage_count) { - int ret; - - misc_device_ret = misc_register(&isst_if_char_driver); - if (misc_device_ret) - goto unlock_exit; - - ret = isst_if_cpu_info_init(); - if (ret) { - misc_deregister(&isst_if_char_driver); - misc_device_ret = ret; - goto unlock_exit; - } - } memcpy(&punit_callbacks[device_type], cb, sizeof(*cb)); punit_callbacks[device_type].registered = 1; - misc_usage_count++; -unlock_exit: - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); - return misc_device_ret; + ret = isst_misc_reg(); + if (ret) { + /* + * No need of mutex as the misc device register failed + * as no one can open device yet. Hence no contention. 
+ */ + punit_callbacks[device_type].registered = 0; + return ret; + } + return 0; } EXPORT_SYMBOL_GPL(isst_if_cdev_register); @@ -725,16 +758,12 @@ EXPORT_SYMBOL_GPL(isst_if_cdev_register); */ void isst_if_cdev_unregister(int device_type) { - mutex_lock(&punit_misc_dev_lock); - misc_usage_count--; + isst_misc_unreg(); + mutex_lock(&punit_misc_dev_open_lock); punit_callbacks[device_type].registered = 0; if (device_type == ISST_IF_DEV_MBOX) isst_delete_hash(); - if (!misc_usage_count && !misc_device_ret) { - misc_deregister(&isst_if_char_driver); - isst_if_cpu_info_exit(); - } - mutex_unlock(&punit_misc_dev_lock); + mutex_unlock(&punit_misc_dev_open_lock); } EXPORT_SYMBOL_GPL(isst_if_cdev_unregister); -- cgit From f7086daab3b540c89951b9b4c00fc49111f7cfa6 Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 17 Jan 2022 12:26:43 +0100 Subject: platform/x86: amd-pmc: Make amd_pmc_stb_debugfs_fops static amd_pmc_stb_debugfs_fops is not used outside of amd-pmc.c, make it static. Cc: Sanket Goswami Reported-by: kernel test robot Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20220117112644.260168-1-hdegoede@redhat.com --- drivers/platform/x86/amd-pmc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index f794343d6aaa..85b680297934 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -226,7 +226,7 @@ static int amd_pmc_stb_debugfs_release(struct inode *inode, struct file *filp) return 0; } -const struct file_operations amd_pmc_stb_debugfs_fops = { +static const struct file_operations amd_pmc_stb_debugfs_fops = { .owner = THIS_MODULE, .open = amd_pmc_stb_debugfs_open, .read = amd_pmc_stb_debugfs_read, -- cgit From f8c28b93d2628610cf793b3528f6f40fd1c7cd5b Mon Sep 17 00:00:00 2001 From: Hans de Goede Date: Mon, 17 Jan 2022 12:26:44 +0100 Subject: platform/x86: asus-tf103c-dock: Make 2 global structs static tf103c_dock_hid_ll_driver and tf103c_dock_pm_ops are not used outside of the driver, make them both static. Reported-by: kernel test robot Signed-off-by: Hans de Goede Link: https://lore.kernel.org/r/20220117112644.260168-2-hdegoede@redhat.com --- drivers/platform/x86/asus-tf103c-dock.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/platform/x86/asus-tf103c-dock.c b/drivers/platform/x86/asus-tf103c-dock.c index d4ef8f362ee6..6fd0c9fea82d 100644 --- a/drivers/platform/x86/asus-tf103c-dock.c +++ b/drivers/platform/x86/asus-tf103c-dock.c @@ -250,7 +250,7 @@ static int tf103c_dock_hid_raw_request(struct hid_device *hid, u8 reportnum, return 0; } -struct hid_ll_driver tf103c_dock_hid_ll_driver = { +static struct hid_ll_driver tf103c_dock_hid_ll_driver = { .parse = tf103c_dock_hid_parse, .start = tf103c_dock_hid_start, .stop = tf103c_dock_hid_stop, @@ -921,7 +921,7 @@ static int __maybe_unused tf103c_dock_resume(struct device *dev) return 0; } -SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume); +static SIMPLE_DEV_PM_OPS(tf103c_dock_pm_ops, tf103c_dock_suspend, tf103c_dock_resume); static const struct acpi_device_id tf103c_dock_acpi_match[] = { {"NPCE69A"}, -- cgit From fe6959a680a4c50f12dbb362c90f9d7157fea334 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 19 Jan 2022 14:21:50 +0200 Subject: drm/i915: Nuke dg2_ddi_pre_enable_dp() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit dg2_ddi_pre_enable_dp() has outlived its usefulness so eliminate it. 
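A worked example makes the decode concrete; the raw register value below is illustrative, picked so that it reproduces the version strings quoted above:

	#include <stdio.h>

	int main(void)
	{
		unsigned int val = 0x04451800;	/* illustrative SMU version register value */

		/* old (wrong) decode: the program byte is folded into the major version */
		printf("old: %u.%u.%u\n",
		       (val >> 16) & 0xffff, (val >> 8) & 0xff, val & 0xff);	/* 1093.24.0 */

		/* new decode: bits 31:24 = program, 23:16 = major version */
		printf("new: program %u, version %u.%u.%u\n",
		       (val >> 24) & 0xff, (val >> 16) & 0xff,
		       (val >> 8) & 0xff, val & 0xff);				/* 4, 69.24.0 */

		return 0;
	}
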
The one thing that tgl_ddi_pre_enable_dp() is missing that we need is intel_ddi_config_transcoder_dp2(). So we'll bring that over. tgl_ddi_pre_enable_dp() does also have a few things that dg2_ddi_pre_enable_dp() didn't have: - icl_program_mg_dp_mode() -> nop due to intel_phy_is_tc()==false on DG2 - intel_ddi_power_up_lanes() -> nop due to intel_phy_is_combo()==false on DG2 - intel_ddi_mso_configure() -> only matters for MSO panels Another slight difference is that dg2_ddi_pre_enable_dp() was missing a bigjoiner check around intel_dsc_enable(), which tgl_ddi_pre_enable_dp() does have. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220119122150.12941-1-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_ddi.c | 117 ++----------------------------- 1 file changed, 4 insertions(+), 113 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 4e93eac926a5..2f20abc5122d 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2289,116 +2289,6 @@ static void intel_ddi_mso_configure(const struct intel_crtc_state *crtc_state) OVERLAP_PIXELS_MASK, dss1); } -static void dg2_ddi_pre_enable_dp(struct intel_atomic_state *state, - struct intel_encoder *encoder, - const struct intel_crtc_state *crtc_state, - const struct drm_connector_state *conn_state) -{ - struct intel_dp *intel_dp = enc_to_intel_dp(encoder); - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_digital_port *dig_port = enc_to_dig_port(encoder); - bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); - - intel_dp_set_link_params(intel_dp, crtc_state->port_clock, - crtc_state->lane_count); - - /* - * We only configure what the register value will be here. Actual - * enabling happens during link training farther down. - */ - intel_ddi_init_dp_buf_reg(encoder, crtc_state); - - /* - * 1. Enable Power Wells - * - * This was handled at the beginning of intel_atomic_commit_tail(), - * before we called down into this function. - */ - - /* 2. Enable Panel Power if PPS is required */ - intel_pps_on(intel_dp); - - /* - * 3. Enable the port PLL. - */ - intel_ddi_enable_clock(encoder, crtc_state); - - /* 4. Enable IO power */ - if (!intel_tc_port_in_tbt_alt_mode(dig_port)) - dig_port->ddi_io_wakeref = intel_display_power_get(dev_priv, - dig_port->ddi_io_power_domain); - - /* - * 5. The rest of the below are substeps under the bspec's "Enable and - * Train Display Port" step. Note that steps that are specific to - * MST will be handled by intel_mst_pre_enable_dp() before/after it - * calls into this function. Also intel_mst_pre_enable_dp() only calls - * us when active_mst_links==0, so any steps designated for "single - * stream or multi-stream master transcoder" can just be performed - * unconditionally here. - */ - - /* - * 5.a Configure Transcoder Clock Select to direct the Port clock to the - * Transcoder. - */ - intel_ddi_enable_pipe_clock(encoder, crtc_state); - - /* 5.b Configure transcoder for DP 2.0 128b/132b */ - intel_ddi_config_transcoder_dp2(encoder, crtc_state); - - /* - * 5.c Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST - * Transport Select - */ - intel_ddi_config_transcoder_func(encoder, crtc_state); - - /* - * 5.d Configure & enable DP_TP_CTL with link training pattern 1 - * selected - * - * This will be handled by the intel_dp_start_link_train() farther - * down this function. 
- */ - - /* 5.e Configure voltage swing and related IO settings */ - encoder->set_signal_levels(encoder, crtc_state); - - if (!is_mst) - intel_dp_set_power(intel_dp, DP_SET_POWER_D0); - - intel_dp_configure_protocol_converter(intel_dp, crtc_state); - intel_dp_sink_set_decompression_state(intel_dp, crtc_state, true); - /* - * DDI FEC: "anticipates enabling FEC encoding sets the FEC_READY bit - * in the FEC_CONFIGURATION register to 1 before initiating link - * training - */ - intel_dp_sink_set_fec_ready(intel_dp, crtc_state); - intel_dp_check_frl_training(intel_dp); - intel_dp_pcon_dsc_configure(intel_dp, crtc_state); - - /* - * 5.h Follow DisplayPort specification training sequence (see notes for - * failure handling) - * 5.i If DisplayPort multi-stream - Set DP_TP_CTL link training to Idle - * Pattern, wait for 5 idle patterns (DP_TP_STATUS Min_Idles_Sent) - * (timeout after 800 us) - */ - intel_dp_start_link_train(intel_dp, crtc_state); - - /* 5.j Set DP_TP_CTL link training to Normal */ - if (!is_trans_port_sync_mode(crtc_state)) - intel_dp_stop_link_train(intel_dp, crtc_state); - - /* 5.k Configure and enable FEC if needed */ - intel_ddi_enable_fec(encoder, crtc_state); - - intel_dsc_dp_pps_write(encoder, crtc_state); - - intel_dsc_enable(crtc_state); -} - static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, struct intel_encoder *encoder, const struct intel_crtc_state *crtc_state, @@ -2472,6 +2362,9 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, */ intel_ddi_enable_pipe_clock(encoder, crtc_state); + if (HAS_DP20(dev_priv)) + intel_ddi_config_transcoder_dp2(encoder, crtc_state); + /* * 7.b Configure TRANS_DDI_FUNC_CTL DDI Select, DDI Mode Select & MST * Transport Select @@ -2612,9 +2505,7 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - if (IS_DG2(dev_priv)) - dg2_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); - else if (DISPLAY_VER(dev_priv) >= 12) + if (DISPLAY_VER(dev_priv) >= 12) tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); else hsw_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); -- cgit From b8fb0d9b47660ddb8a8256412784aad7cee9f21a Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Thu, 20 Jan 2022 11:44:39 -0600 Subject: platform/x86: amd-pmc: Correct usage of SMU version Yellow carp has been outputting versions like `1093.24.0`, but this is supposed to be 69.24.0. That is the MSB is being interpreted incorrectly. The MSB is not part of the major version, but has generally been treated that way thus far. It's actually the program, and used to distinguish between two programs from a similar family but different codebase. 
Link: https://patchwork.freedesktop.org/patch/469993/ Signed-off-by: Mario Limonciello Link: https://lore.kernel.org/r/20220120174439.12770-1-mario.limonciello@amd.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/amd-pmc.c | 13 ++++++++----- 1 file changed, 8 insertions(+), 5 deletions(-) diff --git a/drivers/platform/x86/amd-pmc.c b/drivers/platform/x86/amd-pmc.c index 85b680297934..4c72ba68b315 100644 --- a/drivers/platform/x86/amd-pmc.c +++ b/drivers/platform/x86/amd-pmc.c @@ -124,9 +124,10 @@ struct amd_pmc_dev { u32 cpu_id; u32 active_ips; /* SMU version information */ - u16 major; - u16 minor; - u16 rev; + u8 smu_program; + u8 major; + u8 minor; + u8 rev; struct device *dev; struct pci_dev *rdev; struct mutex lock; /* generic mutex lock */ @@ -180,11 +181,13 @@ static int amd_pmc_get_smu_version(struct amd_pmc_dev *dev) if (rc) return rc; - dev->major = (val >> 16) & GENMASK(15, 0); + dev->smu_program = (val >> 24) & GENMASK(7, 0); + dev->major = (val >> 16) & GENMASK(7, 0); dev->minor = (val >> 8) & GENMASK(7, 0); dev->rev = (val >> 0) & GENMASK(7, 0); - dev_dbg(dev->dev, "SMU version is %u.%u.%u\n", dev->major, dev->minor, dev->rev); + dev_dbg(dev->dev, "SMU program %u version is %u.%u.%u\n", + dev->smu_program, dev->major, dev->minor, dev->rev); return 0; } -- cgit From 9decff5f403f9a48f639736ec0271e2870cadbb6 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 26 Dec 2021 15:32:45 +0100 Subject: optee: Use bitmap_free() to free bitmap kfree() and bitmap_free() are the same. But using the latter is more consistent when freeing memory allocated with bitmap_zalloc(). Signed-off-by: Christophe JAILLET Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/optee/notif.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/tee/optee/notif.c b/drivers/tee/optee/notif.c index a28fa03dcd0e..05212842b0a5 100644 --- a/drivers/tee/optee/notif.c +++ b/drivers/tee/optee/notif.c @@ -121,5 +121,5 @@ int optee_notif_init(struct optee *optee, u_int max_key) void optee_notif_uninit(struct optee *optee) { - kfree(optee->notif.bitmap); + bitmap_free(optee->notif.bitmap); } -- cgit From abc8dc34d1f6e34ed346c6e3fc554127e421b769 Mon Sep 17 00:00:00 2001 From: Jerome Forissier Date: Thu, 13 Jan 2022 16:27:13 +0100 Subject: tee: optee: do not check memref size on return from Secure World Commit c650b8dc7a79 ("tee: optee: do not check memref size on return from Secure World") was mistakenly lost in commit 4602c5842f64 ("optee: refactor driver with internal callbacks"). Remove the unwanted code again. 
Fixes: 4602c5842f64 ("optee: refactor driver with internal callbacks") Signed-off-by: Jerome Forissier Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/optee/smc_abi.c | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/drivers/tee/optee/smc_abi.c b/drivers/tee/optee/smc_abi.c index 449d6a72d289..dc40ae8b83b6 100644 --- a/drivers/tee/optee/smc_abi.c +++ b/drivers/tee/optee/smc_abi.c @@ -75,16 +75,6 @@ static int from_msg_param_tmp_mem(struct tee_param *p, u32 attr, p->u.memref.shm_offs = mp->u.tmem.buf_ptr - pa; p->u.memref.shm = shm; - /* Check that the memref is covered by the shm object */ - if (p->u.memref.size) { - size_t o = p->u.memref.shm_offs + - p->u.memref.size - 1; - - rc = tee_shm_get_pa(shm, o, NULL); - if (rc) - return rc; - } - return 0; } -- cgit From 4064c461148ab129dfe5eaeea129b4af6cf4b9b7 Mon Sep 17 00:00:00 2001 From: Jens Wiklander Date: Tue, 28 Dec 2021 21:25:57 +0100 Subject: optee: add error checks in optee_ffa_do_call_with_arg() Adds error checking in optee_ffa_do_call_with_arg() for correctness. Fixes: 4615e5a34b95 ("optee: add FF-A support") Reviewed-by: Sumit Garg Signed-off-by: Jens Wiklander --- drivers/tee/optee/ffa_abi.c | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/tee/optee/ffa_abi.c b/drivers/tee/optee/ffa_abi.c index 20a1b1a3d965..0775759a29c0 100644 --- a/drivers/tee/optee/ffa_abi.c +++ b/drivers/tee/optee/ffa_abi.c @@ -619,9 +619,18 @@ static int optee_ffa_do_call_with_arg(struct tee_context *ctx, .data2 = (u32)(shm->sec_world_id >> 32), .data3 = shm->offset, }; - struct optee_msg_arg *arg = tee_shm_get_va(shm, 0); - unsigned int rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); - struct optee_msg_arg *rpc_arg = tee_shm_get_va(shm, rpc_arg_offs); + struct optee_msg_arg *arg; + unsigned int rpc_arg_offs; + struct optee_msg_arg *rpc_arg; + + arg = tee_shm_get_va(shm, 0); + if (IS_ERR(arg)) + return PTR_ERR(arg); + + rpc_arg_offs = OPTEE_MSG_GET_ARG_SIZE(arg->num_params); + rpc_arg = tee_shm_get_va(shm, rpc_arg_offs); + if (IS_ERR(rpc_arg)) + return PTR_ERR(rpc_arg); return optee_ffa_yielding_call(ctx, &data, rpc_arg); } -- cgit From 17dd7b896abd2c81bbc76ed55899314b1c285677 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:29:57 +0200 Subject: drm/i915: nuke local versions of WARN_ON/WARN_ON_ONCE In general, we should avoid redefining kernel macros like this. It can get confusing, and what gets used will depend on whether the header is included or not. Moreover, we should prefer drm_WARN_ON() and drm_WARN_ON_ONCE() anyway, which include the stringified error condition in the message. 
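
As a rough sketch (not part of the patch), the preferred device-based
variants are used like this:

    /* ties the warning to a specific drm device and stringifies the condition */
    drm_WARN_ON(&i915->drm, some_condition);
    drm_WARN_ON_ONCE(&i915->drm, some_condition);

where some_condition stands in for whatever is being asserted; the core
kernel WARN_ON()/WARN_ON_ONCE() remain available unmodified.
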
Signed-off-by: Jani Nikula Reviewed-by: Lucas De Marchi Link: https://patchwork.freedesktop.org/patch/msgid/20220121132957.3778555-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/i915_utils.h | 15 --------------- 1 file changed, 15 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index 7a5925072466..bfafd0afd117 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -37,21 +37,6 @@ struct timer_list; #define FDO_BUG_URL "https://gitlab.freedesktop.org/drm/intel/-/wikis/How-to-file-i915-bugs" -#undef WARN_ON -/* Many gcc seem to no see through this and fall over :( */ -#if 0 -#define WARN_ON(x) ({ \ - bool __i915_warn_cond = (x); \ - if (__builtin_constant_p(__i915_warn_cond)) \ - BUILD_BUG_ON(__i915_warn_cond); \ - WARN(__i915_warn_cond, "WARN_ON(" #x ")"); }) -#else -#define WARN_ON(x) WARN((x), "%s", "WARN_ON(" __stringify(x) ")") -#endif - -#undef WARN_ON_ONCE -#define WARN_ON_ONCE(x) WARN_ONCE((x), "%s", "WARN_ON_ONCE(" __stringify(x) ")") - #define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \ __stringify(x), (long)(x)) -- cgit From c5274e86da5fe7297fc28a4e12bd29defed1f435 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:00:30 +0200 Subject: drm/i915/snps: convert to drm device based logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefer drm device based logging. Do some dev_priv->i915 conversions while at it. Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/ca6908452a63bd74a9c9d75ecd295182c80c7205.1642769982.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_snps_phy.c | 29 ++++++++++++++------------- 1 file changed, 15 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_snps_phy.c b/drivers/gpu/drm/i915/display/intel_snps_phy.c index 718bfdbae9c8..8573a458811a 100644 --- a/drivers/gpu/drm/i915/display/intel_snps_phy.c +++ b/drivers/gpu/drm/i915/display/intel_snps_phy.c @@ -24,18 +24,18 @@ * since it is not handled by the shared DPLL framework as on other platforms. */ -void intel_snps_phy_wait_for_calibration(struct drm_i915_private *dev_priv) +void intel_snps_phy_wait_for_calibration(struct drm_i915_private *i915) { enum phy phy; for_each_phy_masked(phy, ~0) { - if (!intel_phy_is_snps(dev_priv, phy)) + if (!intel_phy_is_snps(i915, phy)) continue; - if (intel_de_wait_for_clear(dev_priv, ICL_PHY_MISC(phy), + if (intel_de_wait_for_clear(i915, ICL_PHY_MISC(phy), DG2_PHY_DP_TX_ACK_MASK, 25)) - DRM_ERROR("SNPS PHY %c failed to calibrate after 25ms.\n", - phy); + drm_err(&i915->drm, "SNPS PHY %c failed to calibrate after 25ms.\n", + phy); } } @@ -776,6 +776,7 @@ intel_mpllb_tables_get(struct intel_crtc_state *crtc_state, int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state, struct intel_encoder *encoder) { + struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct intel_mpllb_state * const *tables; int i; @@ -787,8 +788,8 @@ int intel_mpllb_calc_state(struct intel_crtc_state *crtc_state, * until we have a proper algorithm under a valid * license. */ - DRM_DEBUG_KMS("Can't support HDMI link rate %d\n", - crtc_state->port_clock); + drm_dbg_kms(&i915->drm, "Can't support HDMI link rate %d\n", + crtc_state->port_clock); return -EINVAL; } } @@ -855,7 +856,7 @@ void intel_mpllb_enable(struct intel_encoder *encoder, * dp_mpllb_state interface signal. 
*/ if (intel_de_wait_for_set(dev_priv, enable_reg, PLL_LOCK, 5)) - DRM_ERROR("Port %c PLL not locked\n", phy_name(phy)); + drm_dbg_kms(&dev_priv->drm, "Port %c PLL not locked\n", phy_name(phy)); /* * 11. If the frequency will result in a change to the voltage @@ -868,8 +869,8 @@ void intel_mpllb_enable(struct intel_encoder *encoder, void intel_mpllb_disable(struct intel_encoder *encoder) { - struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - enum phy phy = intel_port_to_phy(dev_priv, encoder->port); + struct drm_i915_private *i915 = to_i915(encoder->base.dev); + enum phy phy = intel_port_to_phy(i915, encoder->port); i915_reg_t enable_reg = (phy <= PHY_D ? DG2_PLL_ENABLE(phy) : MG_PLL_ENABLE(0)); @@ -882,21 +883,21 @@ void intel_mpllb_disable(struct intel_encoder *encoder) */ /* 2. Software programs DPLL_ENABLE [PLL Enable] to "0" */ - intel_uncore_rmw(&dev_priv->uncore, enable_reg, PLL_ENABLE, 0); + intel_uncore_rmw(&i915->uncore, enable_reg, PLL_ENABLE, 0); /* * 4. Software programs SNPS_PHY_MPLLB_DIV dp_mpllb_force_en to "0". * This will allow the PLL to stop running. */ - intel_uncore_rmw(&dev_priv->uncore, SNPS_PHY_MPLLB_DIV(phy), + intel_uncore_rmw(&i915->uncore, SNPS_PHY_MPLLB_DIV(phy), SNPS_PHY_MPLLB_FORCE_EN, 0); /* * 5. Software polls DPLL_ENABLE [PLL Lock] for PHY acknowledgment * (dp_txX_ack) that the new transmitter setting request is completed. */ - if (intel_de_wait_for_clear(dev_priv, enable_reg, PLL_LOCK, 5)) - DRM_ERROR("Port %c PLL not locked\n", phy_name(phy)); + if (intel_de_wait_for_clear(i915, enable_reg, PLL_LOCK, 5)) + drm_err(&i915->drm, "Port %c PLL not locked\n", phy_name(phy)); /* * 6. If the frequency will result in a change to the voltage -- cgit From 51f2d00909c6153d23edf2344f6b57d45e391945 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:00:31 +0200 Subject: drm/i915/pps: convert to drm device based logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefer drm device based logging. 
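
The conversion pattern in this and the neighbouring logging patches is
essentially the following (illustrative sketch with placeholder messages,
not the exact driver code):

    /* before: driver-wide macros with no device context */
    DRM_DEBUG_KMS("example message\n");
    DRM_ERROR("example error\n");

    /* after: drm device based, so log lines identify the device */
    drm_dbg_kms(&i915->drm, "example message\n");
    drm_err(&i915->drm, "example error\n");
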
Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/3caf86f20680478763321e8e3a5fbfa30ab06ec3.1642769982.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_pps.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index e9c679bb1b2e..9c986e8932f8 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -1131,16 +1131,20 @@ intel_pps_readout_hw_state(struct intel_dp *intel_dp, struct edp_power_seq *seq) } static void -intel_pps_dump_state(const char *state_name, const struct edp_power_seq *seq) +intel_pps_dump_state(struct intel_dp *intel_dp, const char *state_name, + const struct edp_power_seq *seq) { - DRM_DEBUG_KMS("%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", - state_name, - seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + + drm_dbg_kms(&i915->drm, "%s t1_t3 %d t8 %d t9 %d t10 %d t11_t12 %d\n", + state_name, + seq->t1_t3, seq->t8, seq->t9, seq->t10, seq->t11_t12); } static void intel_pps_verify_state(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct edp_power_seq hw; struct edp_power_seq *sw = &intel_dp->pps.pps_delays; @@ -1148,9 +1152,9 @@ intel_pps_verify_state(struct intel_dp *intel_dp) if (hw.t1_t3 != sw->t1_t3 || hw.t8 != sw->t8 || hw.t9 != sw->t9 || hw.t10 != sw->t10 || hw.t11_t12 != sw->t11_t12) { - DRM_ERROR("PPS state mismatch\n"); - intel_pps_dump_state("sw", sw); - intel_pps_dump_state("hw", &hw); + drm_err(&i915->drm, "PPS state mismatch\n"); + intel_pps_dump_state(intel_dp, "sw", sw); + intel_pps_dump_state(intel_dp, "hw", &hw); } } @@ -1168,7 +1172,7 @@ static void pps_init_delays(struct intel_dp *intel_dp) intel_pps_readout_hw_state(intel_dp, &cur); - intel_pps_dump_state("cur", &cur); + intel_pps_dump_state(intel_dp, "cur", &cur); vbt = dev_priv->vbt.edp.pps; /* On Toshiba Satellite P50-C-18C system the VBT T12 delay @@ -1200,7 +1204,7 @@ static void pps_init_delays(struct intel_dp *intel_dp) * too. */ spec.t11_t12 = (510 + 100) * 10; - intel_pps_dump_state("vbt", &vbt); + intel_pps_dump_state(intel_dp, "vbt", &vbt); /* Use the max of the register settings and vbt. If both are * unset, fall back to the spec limits. */ -- cgit From 0bd6c4a1310336af511519a8a853ecff2120d11d Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:00:32 +0200 Subject: drm/i915/hotplug: convert to drm device based logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefer drm device based logging. 
Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/a8276434c0a899009be05cb987fdbf80d25fd175.1642769982.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_hotplug.c | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 955f6d07b0e1..912b7003dcfa 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -281,13 +281,13 @@ intel_encoder_hotplug(struct intel_encoder *encoder, ret = true; if (ret) { - DRM_DEBUG_KMS("[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n", - connector->base.base.id, - connector->base.name, - drm_get_connector_status_name(old_status), - drm_get_connector_status_name(connector->base.status), - old_epoch_counter, - connector->base.epoch_counter); + drm_dbg_kms(dev, "[CONNECTOR:%d:%s] status updated from %s to %s (epoch counter %llu->%llu)\n", + connector->base.base.id, + connector->base.name, + drm_get_connector_status_name(old_status), + drm_get_connector_status_name(connector->base.status), + old_epoch_counter, + connector->base.epoch_counter); return INTEL_HOTPLUG_CHANGED; } return INTEL_HOTPLUG_UNCHANGED; -- cgit From 9d0bfa7ac97c629542caa860bca903af62b86326 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:00:33 +0200 Subject: drm/i915/dp: convert to drm device based logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefer drm device based logging. Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/b1cffaa70fcc614574f2dce4461e28be7a407e30.1642769982.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_dp.c | 35 +++++++++++++++++++-------------- 1 file changed, 20 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index c94ad95442b3..d6f11fe4130a 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -704,7 +704,7 @@ static u16 intel_dp_dsc_get_output_bpp(struct drm_i915_private *i915, i915->max_cdclk_freq * 48 / intel_dp_mode_to_fec_clock(mode_clock); - DRM_DEBUG_KMS("Max big joiner bpp: %u\n", max_bpp_bigjoiner); + drm_dbg_kms(&i915->drm, "Max big joiner bpp: %u\n", max_bpp_bigjoiner); bits_per_pixel = min(bits_per_pixel, max_bpp_bigjoiner); } @@ -2918,7 +2918,8 @@ out: } static ssize_t -intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_infoframe, +intel_dp_hdr_metadata_infoframe_sdp_pack(struct drm_i915_private *i915, + const struct hdmi_drm_infoframe *drm_infoframe, struct dp_sdp *sdp, size_t size) { @@ -2934,12 +2935,12 @@ intel_dp_hdr_metadata_infoframe_sdp_pack(const struct hdmi_drm_infoframe *drm_in len = hdmi_drm_infoframe_pack_only(drm_infoframe, buf, sizeof(buf)); if (len < 0) { - DRM_DEBUG_KMS("buffer size is smaller than hdr metadata infoframe\n"); + drm_dbg_kms(&i915->drm, "buffer size is smaller than hdr metadata infoframe\n"); return -ENOSPC; } if (len != infoframe_size) { - DRM_DEBUG_KMS("wrong static hdr metadata size\n"); + drm_dbg_kms(&i915->drm, "wrong static hdr metadata size\n"); return -ENOSPC; } @@ -3012,7 +3013,8 @@ static void intel_write_dp_sdp(struct intel_encoder *encoder, sizeof(sdp)); break; case HDMI_PACKET_TYPE_GAMUT_METADATA: - len = 
intel_dp_hdr_metadata_infoframe_sdp_pack(&crtc_state->infoframes.drm.drm, + len = intel_dp_hdr_metadata_infoframe_sdp_pack(dev_priv, + &crtc_state->infoframes.drm.drm, &sdp, sizeof(sdp)); break; default: @@ -3420,22 +3422,22 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, switch (data->phy_pattern) { case DP_PHY_TEST_PATTERN_NONE: - DRM_DEBUG_KMS("Disable Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Disable Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), 0x0); break; case DP_PHY_TEST_PATTERN_D10_2: - DRM_DEBUG_KMS("Set D10.2 Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Set D10.2 Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_D10_2); break; case DP_PHY_TEST_PATTERN_ERROR_COUNT: - DRM_DEBUG_KMS("Set Error Count Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Set Error Count Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_SCRAMBLED_0); break; case DP_PHY_TEST_PATTERN_PRBS7: - DRM_DEBUG_KMS("Set PRBS7 Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Set PRBS7 Phy Test Pattern\n"); intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_PRBS7); break; @@ -3445,7 +3447,8 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, * current firmware of DPR-100 could not set it, so hardcoding * now for complaince test. */ - DRM_DEBUG_KMS("Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); + drm_dbg_kms(&dev_priv->drm, + "Set 80Bit Custom Phy Test Pattern 0x3e0f83e0 0x0f83e0f8 0x0000f83e\n"); pattern_val = 0x3e0f83e0; intel_de_write(dev_priv, DDI_DP_COMP_PAT(pipe, 0), pattern_val); pattern_val = 0x0f83e0f8; @@ -3462,7 +3465,7 @@ static void intel_dp_phy_pattern_update(struct intel_dp *intel_dp, * current firmware of DPR-100 could not set it, so hardcoding * now for complaince test. 
*/ - DRM_DEBUG_KMS("Set HBR2 compliance Phy Test Pattern\n"); + drm_dbg_kms(&dev_priv->drm, "Set HBR2 compliance Phy Test Pattern\n"); pattern_val = 0xFB; intel_de_write(dev_priv, DDI_DP_COMP_CTL(pipe), DDI_DP_COMP_CTL_ENABLE | DDI_DP_COMP_CTL_HBR2 | @@ -3531,13 +3534,14 @@ intel_dp_autotest_phy_ddi_enable(struct intel_dp *intel_dp, static void intel_dp_process_phy_request(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_dp_phy_test_params *data = &intel_dp->compliance.test_data.phytest; u8 link_status[DP_LINK_STATUS_SIZE]; if (drm_dp_dpcd_read_phy_link_status(&intel_dp->aux, DP_PHY_DPRX, link_status) < 0) { - DRM_DEBUG_KMS("failed to get link status\n"); + drm_dbg_kms(&i915->drm, "failed to get link status\n"); return; } @@ -3562,11 +3566,12 @@ static void intel_dp_process_phy_request(struct intel_dp *intel_dp, static u8 intel_dp_autotest_phy_pattern(struct intel_dp *intel_dp) { + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct drm_dp_phy_test_params *data = &intel_dp->compliance.test_data.phytest; if (drm_dp_get_phy_test_pattern(&intel_dp->aux, data)) { - DRM_DEBUG_KMS("DP Phy Test pattern AUX read failure\n"); + drm_dbg_kms(&i915->drm, "DP Phy Test pattern AUX read failure\n"); return DP_TEST_NAK; } @@ -5074,8 +5079,8 @@ static void intel_dp_modeset_retry_work_fn(struct work_struct *work) intel_connector = container_of(work, typeof(*intel_connector), modeset_retry_work); connector = &intel_connector->base; - DRM_DEBUG_KMS("[CONNECTOR:%d:%s]\n", connector->base.id, - connector->name); + drm_dbg_kms(connector->dev, "[CONNECTOR:%d:%s]\n", connector->base.id, + connector->name); /* Grab the locks before changing connector property*/ mutex_lock(&connector->dev->mode_config.mutex); -- cgit From 5acbdcd1b12ecba04f1481004b6ce5b40c64b211 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:00:34 +0200 Subject: drm/i915/plane: convert to drm device based logging and WARN MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefer drm device based logging and WARN. 
Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/9742b56ee0935a6b833f108ca8f72a29935853df.1642769982.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 5 +++-- drivers/gpu/drm/i915/display/skl_universal_plane.c | 10 ++++++---- 2 files changed, 9 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index 89005628cc3a..c8bbbc7f8c66 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -601,6 +601,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, int min_scale, int max_scale, bool can_position) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_rect *src = &plane_state->uapi.src; struct drm_rect *dst = &plane_state->uapi.dst; @@ -619,7 +620,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, hscale = drm_rect_calc_hscale(src, dst, min_scale, max_scale); vscale = drm_rect_calc_vscale(src, dst, min_scale, max_scale); if (hscale < 0 || vscale < 0) { - DRM_DEBUG_KMS("Invalid scaling of plane\n"); + drm_dbg_kms(&i915->drm, "Invalid scaling of plane\n"); drm_rect_debug_print("src: ", src, true); drm_rect_debug_print("dst: ", dst, false); return -ERANGE; @@ -644,7 +645,7 @@ int intel_atomic_plane_check_clipping(struct intel_plane_state *plane_state, if (!can_position && plane_state->uapi.visible && !drm_rect_equals(dst, &clip)) { - DRM_DEBUG_KMS("Plane must cover entire CRTC\n"); + drm_dbg_kms(&i915->drm, "Plane must cover entire CRTC\n"); drm_rect_debug_print("dst: ", dst, false); drm_rect_debug_print("clip: ", &clip, false); return -EINVAL; diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index ed6a9bbcf218..3ee3f5bf974b 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -961,6 +961,7 @@ static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, static u32 skl_surf_address(const struct intel_plane_state *plane_state, int color_plane) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; u32 offset = plane_state->view.color_plane[color_plane].offset; @@ -969,11 +970,11 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, * The DPT object contains only one vma, so the VMA's offset * within the DPT is always 0. 
*/ - WARN_ON(plane_state->dpt_vma->node.start); - WARN_ON(offset & 0x1fffff); + drm_WARN_ON(&i915->drm, plane_state->dpt_vma->node.start); + drm_WARN_ON(&i915->drm, offset & 0x1fffff); return offset >> 9; } else { - WARN_ON(offset & 0xfff); + drm_WARN_ON(&i915->drm, offset & 0xfff); return offset; } } @@ -1350,6 +1351,7 @@ static int skl_plane_check_dst_coordinates(const struct intel_crtc_state *crtc_s static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; int src_w = drm_rect_width(&plane_state->uapi.src) >> 16; @@ -1359,7 +1361,7 @@ static int skl_plane_check_nv12_rotation(const struct intel_plane_state *plane_s src_w & 3 && (rotation == DRM_MODE_ROTATE_270 || rotation == (DRM_MODE_REFLECT_X | DRM_MODE_ROTATE_90))) { - DRM_DEBUG_KMS("src width must be multiple of 4 for rotated planar YUV\n"); + drm_dbg_kms(&i915->drm, "src width must be multiple of 4 for rotated planar YUV\n"); return -EINVAL; } -- cgit From eb8d73aa63cde11e43ab0619308a5356a691850b Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:00:35 +0200 Subject: drm/i915/sprite: convert to drm device based logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefer drm device based logging. Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/f2a3b656c8c63bc9474b5d9cb5b5c018cde28546.1642769982.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_sprite.c | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index 7ffca5669ab9..2d71294aaceb 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -53,6 +53,7 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; struct drm_rect *src = &plane_state->uapi.src; u32 src_x, src_y, src_w, src_h, hsub, vsub; @@ -94,14 +95,14 @@ int intel_plane_check_src_coordinates(struct intel_plane_state *plane_state) hsub = vsub = max(hsub, vsub); if (src_x % hsub || src_w % hsub) { - DRM_DEBUG_KMS("src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", - src_x, src_w, hsub, yesno(rotated)); + drm_dbg_kms(&i915->drm, "src x/w (%u, %u) must be a multiple of %u (rotated: %s)\n", + src_x, src_w, hsub, yesno(rotated)); return -EINVAL; } if (src_y % vsub || src_h % vsub) { - DRM_DEBUG_KMS("src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", - src_y, src_h, vsub, yesno(rotated)); + drm_dbg_kms(&i915->drm, "src y/h (%u, %u) must be a multiple of %u (rotated: %s)\n", + src_y, src_h, vsub, yesno(rotated)); return -EINVAL; } @@ -1332,6 +1333,7 @@ static int g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state) { + struct drm_i915_private *i915 = to_i915(plane_state->uapi.plane->dev); const struct drm_framebuffer *fb = plane_state->hw.fb; const struct drm_rect *src = &plane_state->uapi.src; const struct drm_rect *dst = &plane_state->uapi.dst; @@ -1357,7 +1359,7 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, if (adjusted_mode->flags & 
DRM_MODE_FLAG_INTERLACE) { if (src_h & 1) { - DRM_DEBUG_KMS("Source height must be even with interlaced modes\n"); + drm_dbg_kms(&i915->drm, "Source height must be even with interlaced modes\n"); return -EINVAL; } min_height = 6; @@ -1369,20 +1371,20 @@ g4x_sprite_check_scaling(struct intel_crtc_state *crtc_state, if (src_w < min_width || src_h < min_height || src_w > 2048 || src_h > 2048) { - DRM_DEBUG_KMS("Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n", - src_w, src_h, min_width, min_height, 2048, 2048); + drm_dbg_kms(&i915->drm, "Source dimensions (%dx%d) exceed hardware limits (%dx%d - %dx%d)\n", + src_w, src_h, min_width, min_height, 2048, 2048); return -EINVAL; } if (width_bytes > 4096) { - DRM_DEBUG_KMS("Fetch width (%d) exceeds hardware max with scaling (%u)\n", - width_bytes, 4096); + drm_dbg_kms(&i915->drm, "Fetch width (%d) exceeds hardware max with scaling (%u)\n", + width_bytes, 4096); return -EINVAL; } if (stride > 4096) { - DRM_DEBUG_KMS("Stride (%u) exceeds hardware max with scaling (%u)\n", - stride, 4096); + drm_dbg_kms(&i915->drm, "Stride (%u) exceeds hardware max with scaling (%u)\n", + stride, 4096); return -EINVAL; } -- cgit From 15d641c41796fa1e8c8965ea457aac737a28a88e Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:00:36 +0200 Subject: drm/i915/lspcon: convert to drm device based logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefer drm device based logging. Do some related dev_priv->i915 and dp->intel_dp renames while at it. Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/8f83a4de60be1a4a964aa4334204db95d2db3689.1642769982.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_lspcon.c | 144 +++++++++++++++------------- 1 file changed, 77 insertions(+), 67 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index 05d2d750fa53..6cc91d731ab0 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -78,11 +78,12 @@ static const char *lspcon_mode_name(enum drm_lspcon_mode mode) static bool lspcon_detect_vendor(struct intel_lspcon *lspcon) { struct intel_dp *dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(dp); struct drm_dp_dpcd_ident *ident; u32 vendor_oui; if (drm_dp_read_desc(&dp->aux, &dp->desc, drm_dp_is_branch(dp->dpcd))) { - DRM_ERROR("Can't read description\n"); + drm_err(&i915->drm, "Can't read description\n"); return false; } @@ -93,16 +94,16 @@ static bool lspcon_detect_vendor(struct intel_lspcon *lspcon) switch (vendor_oui) { case LSPCON_VENDOR_MCA_OUI: lspcon->vendor = LSPCON_VENDOR_MCA; - DRM_DEBUG_KMS("Vendor: Mega Chips\n"); + drm_dbg_kms(&i915->drm, "Vendor: Mega Chips\n"); break; case LSPCON_VENDOR_PARADE_OUI: lspcon->vendor = LSPCON_VENDOR_PARADE; - DRM_DEBUG_KMS("Vendor: Parade Tech\n"); + drm_dbg_kms(&i915->drm, "Vendor: Parade Tech\n"); break; default: - DRM_ERROR("Invalid/Unknown vendor OUI\n"); + drm_err(&i915->drm, "Invalid/Unknown vendor OUI\n"); return false; } @@ -119,21 +120,19 @@ static u32 get_hdr_status_reg(struct intel_lspcon *lspcon) void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) { - struct intel_digital_port *dig_port = - container_of(lspcon, struct intel_digital_port, lspcon); - struct drm_device *dev = dig_port->base.base.dev; - struct intel_dp *dp = lspcon_to_intel_dp(lspcon); + struct intel_dp 
*intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 hdr_caps; int ret; - ret = drm_dp_dpcd_read(&dp->aux, get_hdr_status_reg(lspcon), + ret = drm_dp_dpcd_read(&intel_dp->aux, get_hdr_status_reg(lspcon), &hdr_caps, 1); if (ret < 0) { - drm_dbg_kms(dev, "HDR capability detection failed\n"); + drm_dbg_kms(&i915->drm, "HDR capability detection failed\n"); lspcon->hdr_supported = false; } else if (hdr_caps & 0x1) { - drm_dbg_kms(dev, "LSPCON capable of HDR\n"); + drm_dbg_kms(&i915->drm, "LSPCON capable of HDR\n"); lspcon->hdr_supported = true; } } @@ -141,11 +140,12 @@ void lspcon_detect_hdr_capability(struct intel_lspcon *lspcon) static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum drm_lspcon_mode current_mode; struct i2c_adapter *adapter = &intel_dp->aux.ddc; if (drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, ¤t_mode)) { - DRM_DEBUG_KMS("Error reading LSPCON mode\n"); + drm_dbg_kms(&i915->drm, "Error reading LSPCON mode\n"); return DRM_LSPCON_MODE_INVALID; } return current_mode; @@ -154,22 +154,24 @@ static enum drm_lspcon_mode lspcon_get_current_mode(struct intel_lspcon *lspcon) static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, enum drm_lspcon_mode mode) { + struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); enum drm_lspcon_mode current_mode; current_mode = lspcon_get_current_mode(lspcon); if (current_mode == mode) goto out; - DRM_DEBUG_KMS("Waiting for LSPCON mode %s to settle\n", - lspcon_mode_name(mode)); + drm_dbg_kms(&i915->drm, "Waiting for LSPCON mode %s to settle\n", + lspcon_mode_name(mode)); wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, 400); if (current_mode != mode) - DRM_ERROR("LSPCON mode hasn't settled\n"); + drm_err(&i915->drm, "LSPCON mode hasn't settled\n"); out: - DRM_DEBUG_KMS("Current LSPCON mode %s\n", - lspcon_mode_name(current_mode)); + drm_dbg_kms(&i915->drm, "Current LSPCON mode %s\n", + lspcon_mode_name(current_mode)); return current_mode; } @@ -178,44 +180,47 @@ static int lspcon_change_mode(struct intel_lspcon *lspcon, enum drm_lspcon_mode mode) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); int err; enum drm_lspcon_mode current_mode; struct i2c_adapter *adapter = &intel_dp->aux.ddc; err = drm_lspcon_get_mode(intel_dp->aux.drm_dev, adapter, ¤t_mode); if (err) { - DRM_ERROR("Error reading LSPCON mode\n"); + drm_err(&i915->drm, "Error reading LSPCON mode\n"); return err; } if (current_mode == mode) { - DRM_DEBUG_KMS("Current mode = desired LSPCON mode\n"); + drm_dbg_kms(&i915->drm, "Current mode = desired LSPCON mode\n"); return 0; } err = drm_lspcon_set_mode(intel_dp->aux.drm_dev, adapter, mode); if (err < 0) { - DRM_ERROR("LSPCON mode change failed\n"); + drm_err(&i915->drm, "LSPCON mode change failed\n"); return err; } lspcon->mode = mode; - DRM_DEBUG_KMS("LSPCON mode changed done\n"); + drm_dbg_kms(&i915->drm, "LSPCON mode changed done\n"); return 0; } static bool lspcon_wake_native_aux_ch(struct intel_lspcon *lspcon) { + struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); u8 rev; if (drm_dp_dpcd_readb(&lspcon_to_intel_dp(lspcon)->aux, DP_DPCD_REV, &rev) != 1) { - DRM_DEBUG_KMS("Native AUX CH down\n"); + drm_dbg_kms(&i915->drm, "Native 
AUX CH down\n"); return false; } - DRM_DEBUG_KMS("Native AUX CH up, DPCD version: %d.%d\n", - rev >> 4, rev & 0xf); + drm_dbg_kms(&i915->drm, "Native AUX CH up, DPCD version: %d.%d\n", + rev >> 4, rev & 0xf); return true; } @@ -225,6 +230,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) int retry; enum drm_dp_dual_mode_type adaptor_type; struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct i2c_adapter *adapter = &intel_dp->aux.ddc; enum drm_lspcon_mode expected_mode; @@ -242,13 +248,13 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) } if (adaptor_type != DRM_DP_DUAL_MODE_LSPCON) { - DRM_DEBUG_KMS("No LSPCON detected, found %s\n", - drm_dp_get_dual_mode_type_name(adaptor_type)); + drm_dbg_kms(&i915->drm, "No LSPCON detected, found %s\n", + drm_dp_get_dual_mode_type_name(adaptor_type)); return false; } /* Yay ... got a LSPCON device */ - DRM_DEBUG_KMS("LSPCON detected\n"); + drm_dbg_kms(&i915->drm, "LSPCON detected\n"); lspcon->mode = lspcon_wait_mode(lspcon, expected_mode); /* @@ -258,7 +264,7 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) */ if (lspcon->mode != DRM_LSPCON_MODE_PCON) { if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON) < 0) { - DRM_ERROR("LSPCON mode change to PCON failed\n"); + drm_err(&i915->drm, "LSPCON mode change to PCON failed\n"); return false; } } @@ -268,13 +274,14 @@ static bool lspcon_probe(struct intel_lspcon *lspcon) static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) { struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); unsigned long start = jiffies; while (1) { if (intel_digital_port_connected(&dig_port->base)) { - DRM_DEBUG_KMS("LSPCON recovering in PCON mode after %u ms\n", - jiffies_to_msecs(jiffies - start)); + drm_dbg_kms(&i915->drm, "LSPCON recovering in PCON mode after %u ms\n", + jiffies_to_msecs(jiffies - start)); return; } @@ -284,7 +291,7 @@ static void lspcon_resume_in_pcon_wa(struct intel_lspcon *lspcon) usleep_range(10000, 15000); } - DRM_DEBUG_KMS("LSPCON DP descriptor mismatch after resume\n"); + drm_dbg_kms(&i915->drm, "LSPCON DP descriptor mismatch after resume\n"); } static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) @@ -301,7 +308,7 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) ret = drm_dp_dpcd_read(aux, LSPCON_PARADE_AVI_IF_CTRL, &avi_if_ctrl, 1); if (ret < 0) { - DRM_ERROR("Failed to read AVI IF control\n"); + drm_err(aux->drm_dev, "Failed to read AVI IF control\n"); return false; } @@ -309,7 +316,7 @@ static bool lspcon_parade_fw_ready(struct drm_dp_aux *aux) return true; } - DRM_ERROR("Parade FW not ready to accept AVI IF\n"); + drm_err(aux->drm_dev, "Parade FW not ready to accept AVI IF\n"); return false; } @@ -324,8 +331,8 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, while (block_count < 4) { if (!lspcon_parade_fw_ready(aux)) { - DRM_DEBUG_KMS("LSPCON FW not ready, block %d\n", - block_count); + drm_dbg_kms(aux->drm_dev, "LSPCON FW not ready, block %d\n", + block_count); return false; } @@ -333,8 +340,8 @@ static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, data = avi_buf + block_count * 8; ret = drm_dp_dpcd_write(aux, reg, data, 8); if (ret < 0) { - DRM_ERROR("Failed to write AVI IF block %d\n", - block_count); + drm_err(aux->drm_dev, "Failed to write AVI IF block %d\n", + block_count); return false; } @@ -348,15 +355,15 @@ 
static bool _lspcon_parade_write_infoframe_blocks(struct drm_dp_aux *aux, avi_if_ctrl = LSPCON_PARADE_AVI_IF_KICKOFF | block_count; ret = drm_dp_dpcd_write(aux, reg, &avi_if_ctrl, 1); if (ret < 0) { - DRM_ERROR("Failed to update (0x%x), block %d\n", - reg, block_count); + drm_err(aux->drm_dev, "Failed to update (0x%x), block %d\n", + reg, block_count); return false; } block_count++; } - DRM_DEBUG_KMS("Wrote AVI IF blocks successfully\n"); + drm_dbg_kms(aux->drm_dev, "Wrote AVI IF blocks successfully\n"); return true; } @@ -378,14 +385,14 @@ static bool _lspcon_write_avi_infoframe_parade(struct drm_dp_aux *aux, */ if (len > LSPCON_PARADE_AVI_IF_DATA_SIZE - 1) { - DRM_ERROR("Invalid length of infoframes\n"); + drm_err(aux->drm_dev, "Invalid length of infoframes\n"); return false; } memcpy(&avi_if[1], frame, len); if (!_lspcon_parade_write_infoframe_blocks(aux, avi_if)) { - DRM_DEBUG_KMS("Failed to write infoframe blocks\n"); + drm_dbg_kms(aux->drm_dev, "Failed to write infoframe blocks\n"); return false; } @@ -412,7 +419,7 @@ static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, mdelay(50); continue; } else { - DRM_ERROR("DPCD write failed at:0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD write failed at:0x%x\n", reg); return false; } } @@ -423,7 +430,7 @@ static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, reg = LSPCON_MCA_AVI_IF_CTRL; ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } @@ -433,19 +440,19 @@ static bool _lspcon_write_avi_infoframe_mca(struct drm_dp_aux *aux, ret = drm_dp_dpcd_write(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } val = 0; ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } if (val == LSPCON_MCA_AVI_IF_HANDLED) - DRM_DEBUG_KMS("AVI IF handled by FW\n"); + drm_dbg_kms(aux->drm_dev, "AVI IF handled by FW\n"); return true; } @@ -457,6 +464,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, { bool ret = true; struct intel_dp *intel_dp = enc_to_intel_dp(encoder); + struct drm_i915_private *i915 = dp_to_i915(intel_dp); struct intel_lspcon *lspcon = enc_to_intel_lspcon(encoder); switch (type) { @@ -469,7 +477,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, frame, len); break; case HDMI_PACKET_TYPE_GAMUT_METADATA: - drm_dbg_kms(encoder->base.dev, "Update HDR metadata for lspcon\n"); + drm_dbg_kms(&i915->drm, "Update HDR metadata for lspcon\n"); /* It uses the legacy hsw implementation for the same */ hsw_write_infoframe(encoder, crtc_state, type, frame, len); break; @@ -478,7 +486,7 @@ void lspcon_write_infoframe(struct intel_encoder *encoder, } if (!ret) { - DRM_ERROR("Failed to write infoframes\n"); + drm_err(&i915->drm, "Failed to write infoframes\n"); return; } } @@ -504,11 +512,12 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, u8 buf[VIDEO_DIP_DATA_SIZE]; struct intel_digital_port *dig_port = enc_to_dig_port(encoder); struct intel_lspcon *lspcon = &dig_port->lspcon; + struct drm_i915_private *i915 = to_i915(encoder->base.dev); const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; if (!lspcon->active) { - DRM_ERROR("Writing infoframes while LSPCON disabled ?\n"); + 
drm_err(&i915->drm, "Writing infoframes while LSPCON disabled ?\n"); return; } @@ -518,7 +527,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, conn_state->connector, adjusted_mode); if (ret < 0) { - DRM_ERROR("couldn't fill AVI infoframe\n"); + drm_err(&i915->drm, "couldn't fill AVI infoframe\n"); return; } @@ -559,7 +568,7 @@ void lspcon_set_infoframes(struct intel_encoder *encoder, ret = hdmi_infoframe_pack(&frame, buf, sizeof(buf)); if (ret < 0) { - DRM_ERROR("Failed to pack AVI IF\n"); + drm_err(&i915->drm, "Failed to pack AVI IF\n"); return; } @@ -575,7 +584,7 @@ static bool _lspcon_read_avi_infoframe_enabled_mca(struct drm_dp_aux *aux) ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } @@ -590,7 +599,7 @@ static bool _lspcon_read_avi_infoframe_enabled_parade(struct drm_dp_aux *aux) ret = drm_dp_dpcd_read(aux, reg, &val, 1); if (ret < 0) { - DRM_ERROR("DPCD read failed, address 0x%x\n", reg); + drm_err(aux->drm_dev, "DPCD read failed, address 0x%x\n", reg); return false; } @@ -634,31 +643,32 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon) bool lspcon_init(struct intel_digital_port *dig_port) { - struct intel_dp *dp = &dig_port->dp; + struct intel_dp *intel_dp = &dig_port->dp; struct intel_lspcon *lspcon = &dig_port->lspcon; - struct drm_connector *connector = &dp->attached_connector->base; + struct drm_i915_private *i915 = dp_to_i915(intel_dp); + struct drm_connector *connector = &intel_dp->attached_connector->base; lspcon->active = false; lspcon->mode = DRM_LSPCON_MODE_INVALID; if (!lspcon_probe(lspcon)) { - DRM_ERROR("Failed to probe lspcon\n"); + drm_err(&i915->drm, "Failed to probe lspcon\n"); return false; } - if (drm_dp_read_dpcd_caps(&dp->aux, dp->dpcd) != 0) { - DRM_ERROR("LSPCON DPCD read failed\n"); + if (drm_dp_read_dpcd_caps(&intel_dp->aux, intel_dp->dpcd) != 0) { + drm_err(&i915->drm, "LSPCON DPCD read failed\n"); return false; } if (!lspcon_detect_vendor(lspcon)) { - DRM_ERROR("LSPCON vendor detection failed\n"); + drm_err(&i915->drm, "LSPCON vendor detection failed\n"); return false; } connector->ycbcr_420_allowed = true; lspcon->active = true; - DRM_DEBUG_KMS("Success: LSPCON init\n"); + drm_dbg_kms(&i915->drm, "Success: LSPCON init\n"); return true; } @@ -674,16 +684,16 @@ void lspcon_resume(struct intel_digital_port *dig_port) { struct intel_lspcon *lspcon = &dig_port->lspcon; struct drm_device *dev = dig_port->base.base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); + struct drm_i915_private *i915 = to_i915(dev); enum drm_lspcon_mode expected_mode; - if (!intel_bios_is_lspcon_present(dev_priv, dig_port->base.port)) + if (!intel_bios_is_lspcon_present(i915, dig_port->base.port)) return; if (!lspcon->active) { if (!lspcon_init(dig_port)) { - DRM_ERROR("LSPCON init failed on port %c\n", - port_name(dig_port->base.port)); + drm_err(&i915->drm, "LSPCON init failed on port %c\n", + port_name(dig_port->base.port)); return; } } @@ -699,7 +709,7 @@ void lspcon_resume(struct intel_digital_port *dig_port) return; if (lspcon_change_mode(lspcon, DRM_LSPCON_MODE_PCON)) - DRM_ERROR("LSPCON resume failed\n"); + drm_err(&i915->drm, "LSPCON resume failed\n"); else - DRM_DEBUG_KMS("LSPCON resume success\n"); + drm_dbg_kms(&i915->drm, "LSPCON resume success\n"); } -- cgit From 140f70aeef07e4516a338b275e36eb5f8cfb463a Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:00:37 +0200 Subject: 
drm/i915/cdclk: update intel_dump_cdclk_config() logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Gather some intel_dump_cdclk_config() changes together to avoid extra churn: Rename to intel_cdclk_dump_config() to following naming conventions. Pass in i915. Use i915 for struct drm_device based logging. Switch to KMS drm debug class. Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/80469a83a74912ad69c4518d9cc68f07d65e9aaf.1642769982.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 21 +++++++++++---------- drivers/gpu/drm/i915/display/intel_cdclk.h | 3 ++- drivers/gpu/drm/i915/display/intel_display.c | 2 +- drivers/gpu/drm/i915/display/intel_display_power.c | 2 +- 4 files changed, 15 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 7e20967307df..c4b48b831ced 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1156,7 +1156,7 @@ static void skl_sanitize_cdclk(struct drm_i915_private *dev_priv) goto sanitize; intel_update_cdclk(dev_priv); - intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); /* Is PLL enabled and locked ? */ if (dev_priv->cdclk.hw.vco == 0 || @@ -1817,7 +1817,7 @@ static void bxt_sanitize_cdclk(struct drm_i915_private *dev_priv) int cdclk, clock, vco; intel_update_cdclk(dev_priv); - intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); if (dev_priv->cdclk.hw.vco == 0 || dev_priv->cdclk.hw.cdclk == dev_priv->cdclk.hw.bypass) @@ -2057,13 +2057,14 @@ static bool intel_cdclk_changed(const struct intel_cdclk_config *a, a->voltage_level != b->voltage_level; } -void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, +void intel_cdclk_dump_config(struct drm_i915_private *i915, + const struct intel_cdclk_config *cdclk_config, const char *context) { - DRM_DEBUG_DRIVER("%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", - context, cdclk_config->cdclk, cdclk_config->vco, - cdclk_config->ref, cdclk_config->bypass, - cdclk_config->voltage_level); + drm_dbg_kms(&i915->drm, "%s %d kHz, VCO %d kHz, ref %d kHz, bypass %d kHz, voltage level %d\n", + context, cdclk_config->cdclk, cdclk_config->vco, + cdclk_config->ref, cdclk_config->bypass, + cdclk_config->voltage_level); } /** @@ -2087,7 +2088,7 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, if (drm_WARN_ON_ONCE(&dev_priv->drm, !dev_priv->cdclk_funcs->set_cdclk)) return; - intel_dump_cdclk_config(cdclk_config, "Changing CDCLK to"); + intel_cdclk_dump_config(dev_priv, cdclk_config, "Changing CDCLK to"); for_each_intel_encoder_with_psr(&dev_priv->drm, encoder) { struct intel_dp *intel_dp = enc_to_intel_dp(encoder); @@ -2130,8 +2131,8 @@ static void intel_set_cdclk(struct drm_i915_private *dev_priv, if (drm_WARN(&dev_priv->drm, intel_cdclk_changed(&dev_priv->cdclk.hw, cdclk_config), "cdclk state doesn't match!\n")) { - intel_dump_cdclk_config(&dev_priv->cdclk.hw, "[hw state]"); - intel_dump_cdclk_config(cdclk_config, "[sw state]"); + intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "[hw state]"); + intel_cdclk_dump_config(dev_priv, cdclk_config, "[sw state]"); } } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.h 
b/drivers/gpu/drm/i915/display/intel_cdclk.h index 71dd84740ae3..df66f66fbad0 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.h +++ b/drivers/gpu/drm/i915/display/intel_cdclk.h @@ -62,7 +62,8 @@ bool intel_cdclk_needs_modeset(const struct intel_cdclk_config *a, const struct intel_cdclk_config *b); void intel_set_cdclk_pre_plane_update(struct intel_atomic_state *state); void intel_set_cdclk_post_plane_update(struct intel_atomic_state *state); -void intel_dump_cdclk_config(const struct intel_cdclk_config *cdclk_config, +void intel_cdclk_dump_config(struct drm_i915_private *i915, + const struct intel_cdclk_config *cdclk_config, const char *context); int intel_modeset_calc_cdclk(struct intel_atomic_state *state); void intel_cdclk_get_cdclk(struct drm_i915_private *dev_priv, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index f8c7a2855139..8537d2373bb6 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -9478,7 +9478,7 @@ void intel_modeset_init_hw(struct drm_i915_private *i915) cdclk_state = to_intel_cdclk_state(i915->cdclk.obj.state); intel_update_cdclk(i915); - intel_dump_cdclk_config(&i915->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(i915, &i915->cdclk.hw, "Current CDCLK"); cdclk_state->logical = cdclk_state->actual = i915->cdclk.hw; } diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index ee4617299e64..369317805d24 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -5580,7 +5580,7 @@ static void hsw_restore_lcpll(struct drm_i915_private *dev_priv) intel_uncore_forcewake_put(&dev_priv->uncore, FORCEWAKE_ALL); intel_update_cdclk(dev_priv); - intel_dump_cdclk_config(&dev_priv->cdclk.hw, "Current CDCLK"); + intel_cdclk_dump_config(dev_priv, &dev_priv->cdclk.hw, "Current CDCLK"); } /* -- cgit From c9b06cc26f1daace605238adb4720560078b0eb6 Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Fri, 21 Jan 2022 15:00:38 +0200 Subject: drm/i915/cdclk: convert to drm device based logging MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Prefer drm device based logging. 
Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/0074a45193873aea0becdf7cc87c0f06754ab706.1642769982.git.jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_cdclk.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index c4b48b831ced..4b140a014ca8 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -1625,7 +1625,7 @@ static void adlp_cdclk_pll_crawl(struct drm_i915_private *dev_priv, int vco) /* Timeout 200us */ if (intel_de_wait_for_set(dev_priv, BXT_DE_PLL_ENABLE, BXT_DE_PLL_LOCK | BXT_DE_PLL_FREQ_REQ_ACK, 1)) - DRM_ERROR("timeout waiting for FREQ change request ack\n"); + drm_err(&dev_priv->drm, "timeout waiting for FREQ change request ack\n"); val &= ~BXT_DE_PLL_FREQ_REQ; intel_de_write(dev_priv, BXT_DE_PLL_ENABLE, val); -- cgit From bdac3bbd0dc63873a9c606b8e4f814e6d61d288d Mon Sep 17 00:00:00 2001 From: Nicolas Frattaroli Date: Fri, 26 Nov 2021 16:43:42 +0100 Subject: spi: spi-rockchip: Add rk3568-spi compatible This adds a compatible string for the SPI controller found on the RK3566 and RK3568 SoCs. Signed-off-by: Nicolas Frattaroli Link: https://lore.kernel.org/r/20211126154344.724316-2-frattaroli.nicolas@gmail.com Signed-off-by: Mark Brown --- Documentation/devicetree/bindings/spi/spi-rockchip.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml index 7f987e79337c..52a78a2e362e 100644 --- a/Documentation/devicetree/bindings/spi/spi-rockchip.yaml +++ b/Documentation/devicetree/bindings/spi/spi-rockchip.yaml @@ -33,6 +33,7 @@ properties: - rockchip,rk3328-spi - rockchip,rk3368-spi - rockchip,rk3399-spi + - rockchip,rk3568-spi - rockchip,rv1126-spi - const: rockchip,rk3066-spi -- cgit From 77311237eaffa240af6eae1d511b61e77a20a2ef Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 12 Jan 2022 22:58:46 +0200 Subject: pinctrl: Place correctly CONFIG_PINCTRL_ST in the Makefile Keep Makefile entries ordered in the same way as Kconfig ones. Reported-by: Linus Torvalds Signed-off-by: Andy Shevchenko --- drivers/pinctrl/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/pinctrl/Makefile b/drivers/pinctrl/Makefile index 08c364d611f5..f64d29f614ec 100644 --- a/drivers/pinctrl/Makefile +++ b/drivers/pinctrl/Makefile @@ -42,9 +42,9 @@ obj-$(CONFIG_PINCTRL_PISTACHIO) += pinctrl-pistachio.o obj-$(CONFIG_PINCTRL_RK805) += pinctrl-rk805.o obj-$(CONFIG_PINCTRL_ROCKCHIP) += pinctrl-rockchip.o obj-$(CONFIG_PINCTRL_SINGLE) += pinctrl-single.o +obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o obj-$(CONFIG_PINCTRL_STARFIVE) += pinctrl-starfive.o obj-$(CONFIG_PINCTRL_STMFX) += pinctrl-stmfx.o -obj-$(CONFIG_PINCTRL_ST) += pinctrl-st.o obj-$(CONFIG_PINCTRL_SX150X) += pinctrl-sx150x.o obj-$(CONFIG_PINCTRL_TB10X) += pinctrl-tb10x.o obj-$(CONFIG_PINCTRL_THUNDERBAY) += pinctrl-thunderbay.o -- cgit From e986f0e602f19ecb7880b04dd1db415ed9bca3f6 Mon Sep 17 00:00:00 2001 From: Łukasz Bartosik Date: Mon, 24 Jan 2022 13:55:29 +0100 Subject: pinctrl: intel: fix unexpected interrupt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ASUS Chromebook C223 with Celeron N3350 crashes sometimes during cold booot. 
Inspection of the kernel log showed that it gets into an inifite loop logging the following message: ->handle_irq(): 000000009cdb51e8, handle_bad_irq+0x0/0x251 ->irq_data.chip(): 000000005ec212a7, 0xffffa043009d8e7 ->action(): 00000 IRQ_NOPROBE set unexpected IRQ trap at vector 7c The issue happens during cold boot but only if cold boot happens at most several dozen seconds after Chromebook is powered off. For longer intervals between power off and power on (cold boot) the issue does not reproduce. The unexpected interrupt is sourced from INT3452 GPIO pin which is used for SD card detect. Investigation relevealed that when the interval between power off and power on (cold boot) is less than several dozen seconds then values of INT3452 GPIO interrupt enable and interrupt pending registers survive power off and power on sequence and interrupt for SD card detect pin is enabled and pending during probe of SD controller which causes the unexpected IRQ message. "Intel Pentium and Celeron Processor N- and J- Series" volume 3 doc mentions that GPIO interrupt enable and status registers default value is 0x0. The fix clears INT3452 GPIO interrupt enabled and interrupt pending registers in its probe function. Fixes: 7981c0015af2 ("pinctrl: intel: Add Intel Sunrisepoint pin controller and GPIO support") Signed-off-by: Łukasz Bartosik Signed-off-by: Andy Shevchenko --- drivers/pinctrl/intel/pinctrl-intel.c | 54 ++++++++++++++++++++++------------- 1 file changed, 34 insertions(+), 20 deletions(-) diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index 85750974d182..e9bb98cb9112 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -1216,6 +1216,39 @@ static irqreturn_t intel_gpio_irq(int irq, void *data) return IRQ_RETVAL(ret); } +static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) +{ + int i; + + for (i = 0; i < pctrl->ncommunities; i++) { + const struct intel_community *community; + void __iomem *base; + unsigned int gpp; + + community = &pctrl->communities[i]; + base = community->regs; + + for (gpp = 0; gpp < community->ngpps; gpp++) { + /* Mask and clear all interrupts */ + writel(0, base + community->ie_offset + gpp * 4); + writel(0xffff, base + community->is_offset + gpp * 4); + } + } +} + +static int intel_gpio_irq_init_hw(struct gpio_chip *gc) +{ + struct intel_pinctrl *pctrl = gpiochip_get_data(gc); + + /* + * Make sure the interrupt lines are in a proper state before + * further configuration. 
+ */ + intel_gpio_irq_init(pctrl); + + return 0; +} + static int intel_gpio_add_community_ranges(struct intel_pinctrl *pctrl, const struct intel_community *community) { @@ -1320,6 +1353,7 @@ static int intel_gpio_probe(struct intel_pinctrl *pctrl, int irq) girq->num_parents = 0; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_bad_irq; + girq->init_hw = intel_gpio_irq_init_hw; ret = devm_gpiochip_add_data(pctrl->dev, &pctrl->chip, pctrl); if (ret) { @@ -1695,26 +1729,6 @@ int intel_pinctrl_suspend_noirq(struct device *dev) } EXPORT_SYMBOL_GPL(intel_pinctrl_suspend_noirq); -static void intel_gpio_irq_init(struct intel_pinctrl *pctrl) -{ - size_t i; - - for (i = 0; i < pctrl->ncommunities; i++) { - const struct intel_community *community; - void __iomem *base; - unsigned int gpp; - - community = &pctrl->communities[i]; - base = community->regs; - - for (gpp = 0; gpp < community->ngpps; gpp++) { - /* Mask and clear all interrupts */ - writel(0, base + community->ie_offset + gpp * 4); - writel(0xffff, base + community->is_offset + gpp * 4); - } - } -} - static bool intel_gpio_update_reg(void __iomem *reg, u32 mask, u32 value) { u32 curr, updated; -- cgit From e12963c453263d5321a2c610e98cbc731233b685 Mon Sep 17 00:00:00 2001 From: Andy Shevchenko Date: Wed, 19 Jan 2022 20:19:15 +0200 Subject: pinctrl: intel: Fix a glitch when updating IRQ flags on a preconfigured line The commit af7e3eeb84e2 ("pinctrl: intel: Disable input and output buffer when switching to GPIO") hadn't taken into account an update of the IRQ flags scenario. When updating the IRQ flags on the preconfigured line the ->irq_set_type() is called again. In such case the sequential Rx buffer configuration changes may trigger a falling or rising edge interrupt that may lead, on some platforms, to an undesired event. This may happen because each of intel_gpio_set_gpio_mode() and __intel_gpio_set_direction() updates the pad configuration with a different value of the GPIORXDIS bit. Notable, that the intel_gpio_set_gpio_mode() is called only for the pads that are configured as an input. Due to this fact, integrate the logic of __intel_gpio_set_direction() call into the intel_gpio_set_gpio_mode() so that the Rx buffer won't be disabled and immediately re-enabled. 
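Sketched as a single helper (illustrative only; the function name is made up, while the PADCFG0_* bits, readl() and writel() are the ones used in the diff below), the point of the fix is that the pad is switched to GPIO input mode in one read-modify-write, so the RX buffer is never disabled and then re-enabled by a second write:

  static void example_set_gpio_input_mode(void __iomem *padcfg0)
  {
          u32 value = readl(padcfg0);

          value &= ~PADCFG0_PMODE_MASK;
          value |= PADCFG0_PMODE_GPIO;

          /* Keep RX enabled and disable TX in the same update, so there is
           * no intermediate "RX disabled" state to trigger a spurious edge. */
          value &= ~PADCFG0_GPIORXDIS;
          value |= PADCFG0_GPIOTXDIS;

          writel(value, padcfg0);
  }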
Fixes: af7e3eeb84e2 ("pinctrl: intel: Disable input and output buffer when switching to GPIO") Reported-by: Kane Chen Signed-off-by: Andy Shevchenko Acked-by: Mika Westerberg Tested-by: Grace Kao --- drivers/pinctrl/intel/pinctrl-intel.c | 10 ++-------- 1 file changed, 2 insertions(+), 8 deletions(-) diff --git a/drivers/pinctrl/intel/pinctrl-intel.c b/drivers/pinctrl/intel/pinctrl-intel.c index e9bb98cb9112..826d494f3cc6 100644 --- a/drivers/pinctrl/intel/pinctrl-intel.c +++ b/drivers/pinctrl/intel/pinctrl-intel.c @@ -451,8 +451,8 @@ static void intel_gpio_set_gpio_mode(void __iomem *padcfg0) value &= ~PADCFG0_PMODE_MASK; value |= PADCFG0_PMODE_GPIO; - /* Disable input and output buffers */ - value |= PADCFG0_GPIORXDIS; + /* Disable TX buffer and enable RX (this will be input) */ + value &= ~PADCFG0_GPIORXDIS; value |= PADCFG0_GPIOTXDIS; /* Disable SCI/SMI/NMI generation */ @@ -497,9 +497,6 @@ static int intel_gpio_request_enable(struct pinctrl_dev *pctldev, intel_gpio_set_gpio_mode(padcfg0); - /* Disable TX buffer and enable RX (this will be input) */ - __intel_gpio_set_direction(padcfg0, true); - raw_spin_unlock_irqrestore(&pctrl->lock, flags); return 0; @@ -1115,9 +1112,6 @@ static int intel_gpio_irq_type(struct irq_data *d, unsigned int type) intel_gpio_set_gpio_mode(reg); - /* Disable TX buffer and enable RX (this will be input) */ - __intel_gpio_set_direction(reg, true); - value = readl(reg); value &= ~(PADCFG0_RXEVCFG_MASK | PADCFG0_RXINV); -- cgit From e33f42b20bcb2f55cb1eeeab9956a503dcf36107 Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Thu, 13 Jan 2022 13:18:45 +0800 Subject: erofs: fix fsdax partition offset handling After seeking time on testing today upstream fsdax, I found it actually doesn't work well as below: [ 186.492983] ------------[ cut here ]------------ [ 186.493629] WARNING: CPU: 1 PID: 205 at fs/iomap/iter.c:33 iomap_iter+0x2f6/0x310 The problem is that m_dax_part_off should be applied to physical addresses and very sorry about that I didn't catch this eariler. Anyway, let's fix it up now. Also, I need to find a way to set up a standalone testcase to look after this later. Link: https://lore.kernel.org/r/20220113051845.244461-1-hsiangkao@linux.alibaba.com Fixes: de2051147771 ("fsdax: shift partition offset handling into the file systems") Reviewed-by: Christoph Hellwig Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/data.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/fs/erofs/data.c b/fs/erofs/data.c index fa7ddb7ad980..226a57c57ee6 100644 --- a/fs/erofs/data.c +++ b/fs/erofs/data.c @@ -252,12 +252,10 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, return ret; iomap->offset = map.m_la; - if (flags & IOMAP_DAX) { + if (flags & IOMAP_DAX) iomap->dax_dev = mdev.m_daxdev; - iomap->offset += mdev.m_dax_part_off; - } else { + else iomap->bdev = mdev.m_bdev; - } iomap->length = map.m_llen; iomap->flags = 0; iomap->private = NULL; @@ -284,6 +282,8 @@ static int erofs_iomap_begin(struct inode *inode, loff_t offset, loff_t length, } else { iomap->type = IOMAP_MAPPED; iomap->addr = mdev.m_pa; + if (flags & IOMAP_DAX) + iomap->addr += mdev.m_dax_part_off; } return 0; } -- cgit From 7865827c432bf9885ee26e5767697c3d9e21a82c Mon Sep 17 00:00:00 2001 From: Gao Xiang Date: Fri, 21 Jan 2022 17:14:12 +0800 Subject: erofs: avoid unnecessary z_erofs_decompressqueue_work() declaration Just code rearrange. No logic changes. 
Link: https://lore.kernel.org/r/20220121091412.86086-1-hsiangkao@linux.alibaba.com Reviewed-by: Yue Hu Reviewed-by: Chao Yu Signed-off-by: Gao Xiang --- fs/erofs/zdata.c | 113 +++++++++++++++++++++++++++---------------------------- 1 file changed, 56 insertions(+), 57 deletions(-) diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c index 498b7666efe8..423bc1a61da5 100644 --- a/fs/erofs/zdata.c +++ b/fs/erofs/zdata.c @@ -810,68 +810,11 @@ static bool z_erofs_get_sync_decompress_policy(struct erofs_sb_info *sbi, return false; } -static void z_erofs_decompressqueue_work(struct work_struct *work); -static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, - bool sync, int bios) -{ - struct erofs_sb_info *const sbi = EROFS_SB(io->sb); - - /* wake up the caller thread for sync decompression */ - if (sync) { - unsigned long flags; - - spin_lock_irqsave(&io->u.wait.lock, flags); - if (!atomic_add_return(bios, &io->pending_bios)) - wake_up_locked(&io->u.wait); - spin_unlock_irqrestore(&io->u.wait.lock, flags); - return; - } - - if (atomic_add_return(bios, &io->pending_bios)) - return; - /* Use workqueue and sync decompression for atomic contexts only */ - if (in_atomic() || irqs_disabled()) { - queue_work(z_erofs_workqueue, &io->u.work); - /* enable sync decompression for readahead */ - if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) - sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON; - return; - } - z_erofs_decompressqueue_work(&io->u.work); -} - static bool z_erofs_page_is_invalidated(struct page *page) { return !page->mapping && !z_erofs_is_shortlived_page(page); } -static void z_erofs_decompressqueue_endio(struct bio *bio) -{ - tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private); - struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t); - blk_status_t err = bio->bi_status; - struct bio_vec *bvec; - struct bvec_iter_all iter_all; - - bio_for_each_segment_all(bvec, bio, iter_all) { - struct page *page = bvec->bv_page; - - DBG_BUGON(PageUptodate(page)); - DBG_BUGON(z_erofs_page_is_invalidated(page)); - - if (err) - SetPageError(page); - - if (erofs_page_is_managed(EROFS_SB(q->sb), page)) { - if (!err) - SetPageUptodate(page); - unlock_page(page); - } - } - z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1); - bio_put(bio); -} - static int z_erofs_decompress_pcluster(struct super_block *sb, struct z_erofs_pcluster *pcl, struct page **pagepool) @@ -1123,6 +1066,35 @@ static void z_erofs_decompressqueue_work(struct work_struct *work) kvfree(bgq); } +static void z_erofs_decompress_kickoff(struct z_erofs_decompressqueue *io, + bool sync, int bios) +{ + struct erofs_sb_info *const sbi = EROFS_SB(io->sb); + + /* wake up the caller thread for sync decompression */ + if (sync) { + unsigned long flags; + + spin_lock_irqsave(&io->u.wait.lock, flags); + if (!atomic_add_return(bios, &io->pending_bios)) + wake_up_locked(&io->u.wait); + spin_unlock_irqrestore(&io->u.wait.lock, flags); + return; + } + + if (atomic_add_return(bios, &io->pending_bios)) + return; + /* Use workqueue and sync decompression for atomic contexts only */ + if (in_atomic() || irqs_disabled()) { + queue_work(z_erofs_workqueue, &io->u.work); + /* enable sync decompression for readahead */ + if (sbi->opt.sync_decompress == EROFS_SYNC_DECOMPRESS_AUTO) + sbi->opt.sync_decompress = EROFS_SYNC_DECOMPRESS_FORCE_ON; + return; + } + z_erofs_decompressqueue_work(&io->u.work); +} + static struct page *pickup_page_for_submission(struct z_erofs_pcluster *pcl, unsigned int nr, struct page **pagepool, @@ 
-1300,6 +1272,33 @@ static void move_to_bypass_jobqueue(struct z_erofs_pcluster *pcl, qtail[JQ_BYPASS] = &pcl->next; } +static void z_erofs_decompressqueue_endio(struct bio *bio) +{ + tagptr1_t t = tagptr_init(tagptr1_t, bio->bi_private); + struct z_erofs_decompressqueue *q = tagptr_unfold_ptr(t); + blk_status_t err = bio->bi_status; + struct bio_vec *bvec; + struct bvec_iter_all iter_all; + + bio_for_each_segment_all(bvec, bio, iter_all) { + struct page *page = bvec->bv_page; + + DBG_BUGON(PageUptodate(page)); + DBG_BUGON(z_erofs_page_is_invalidated(page)); + + if (err) + SetPageError(page); + + if (erofs_page_is_managed(EROFS_SB(q->sb), page)) { + if (!err) + SetPageUptodate(page); + unlock_page(page); + } + } + z_erofs_decompress_kickoff(q, tagptr_unfold_tags(t), -1); + bio_put(bio); +} + static void z_erofs_submit_queue(struct super_block *sb, struct z_erofs_decompress_frontend *f, struct page **pagepool, -- cgit From 40c67c291a93f8846c4a972c9ef1b7ba4544c8d0 Mon Sep 17 00:00:00 2001 From: Jiasheng Jiang Date: Wed, 12 Jan 2022 16:31:56 +0800 Subject: mmc: sdhci-of-esdhc: Check for error num after setting mask Because of the possible failure of the dma_supported(), the dma_set_mask_and_coherent() may return error num. Therefore, it should be better to check it and return the error if fails. And since the sdhci_setup_host() has already checked the return value of the enable_dma, we need not check it in sdhci_resume_host() again. Fixes: 5552d7ad596c ("mmc: sdhci-of-esdhc: set proper dma mask for ls104x chips") Signed-off-by: Jiasheng Jiang Acked-by: Adrian Hunter Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20220112083156.1124782-1-jiasheng@iscas.ac.cn Signed-off-by: Ulf Hansson --- drivers/mmc/host/sdhci-of-esdhc.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c index a593b1fbd69e..0f3658b36513 100644 --- a/drivers/mmc/host/sdhci-of-esdhc.c +++ b/drivers/mmc/host/sdhci-of-esdhc.c @@ -524,12 +524,16 @@ static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) static int esdhc_of_enable_dma(struct sdhci_host *host) { + int ret; u32 value; struct device *dev = mmc_dev(host->mmc); if (of_device_is_compatible(dev->of_node, "fsl,ls1043a-esdhc") || - of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) - dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + of_device_is_compatible(dev->of_node, "fsl,ls1046a-esdhc")) { + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40)); + if (ret) + return ret; + } value = sdhci_readl(host, ESDHC_DMA_SYSCTL); -- cgit From 4d315357b3d6c315a7260420c6c6fc076e58d14b Mon Sep 17 00:00:00 2001 From: Jiasheng Jiang Date: Wed, 19 Jan 2022 20:00:06 +0800 Subject: mmc: sh_mmcif: Check for null res pointer If there is no suitable resource, platform_get_resource() will return NULL. Therefore in order to avoid the dereference of the NULL pointer, it should be better to check the 'res'. 
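The pattern in miniature (illustrative helper with made-up names; platform_get_resource() and IORESOURCE_MEM are the real kernel API): validate the returned resource before anything dereferences it:

  static int example_get_mmio_base(struct platform_device *pdev,
                                   resource_size_t *base)
  {
          struct resource *res;

          res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
          if (!res)
                  return -EINVAL; /* no memory resource, don't touch res->start */

          *base = res->start;
          return 0;
  }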
Signed-off-by: Jiasheng Jiang Cc: stable@vger.kernel.org # v5.16+ Link: https://lore.kernel.org/r/20220119120006.1426964-1-jiasheng@iscas.ac.cn Signed-off-by: Ulf Hansson --- drivers/mmc/host/sh_mmcif.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c index bcc595c70a9f..104dcd702870 100644 --- a/drivers/mmc/host/sh_mmcif.c +++ b/drivers/mmc/host/sh_mmcif.c @@ -405,6 +405,9 @@ static int sh_mmcif_dma_slave_config(struct sh_mmcif_host *host, struct dma_slave_config cfg = { 0, }; res = platform_get_resource(host->pd, IORESOURCE_MEM, 0); + if (!res) + return -EINVAL; + cfg.direction = direction; if (direction == DMA_DEV_TO_MEM) { -- cgit From 379f56c24e698f14242f532b1d0a0f1747725e08 Mon Sep 17 00:00:00 2001 From: Andrey Skvortsov Date: Sat, 15 Jan 2022 15:14:46 +0300 Subject: mmc: core: Wait for command setting 'Power Off Notification' bit to complete SD card is allowed to signal busy on DAT0 up to 1s after the CMD49. According to SD spec (version 6.0 section 5.8.1.3) first host waits until busy of CMD49 is released and only then polls Power Management Status register up to 1s until the card indicates ready to power off. Without waiting for busy before polling status register sometimes card becomes unresponsive and system fails to suspend: [ 205.907459] Freezing remaining freezable tasks ... (elapsed 0.001 seconds) done. [ 206.421274] sunxi-mmc 1c0f000.mmc: data error, sending stop command [ 206.421321] sunxi-mmc 1c0f000.mmc: send stop command failed [ 206.421347] mmc0: error -110 reading status reg of PM func [ 206.421366] PM: dpm_run_callback(): mmc_bus_suspend+0x0/0x74 returns -110 [ 206.421402] mmcblk mmc0:aaaa: PM: failed to suspend async: error -110 [ 206.437064] PM: Some devices failed to suspend, or early wake event detected Tested with Sandisk Extreme PRO A2 64GB on Allwinner A64 system. Signed-off-by: Andrey Skvortsov Fixes: 2c5d42769038 ("mmc: core: Add support for Power Off Notification for SD cards") Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20220115121447.641524-1-andrej.skvortzov@gmail.com Signed-off-by: Ulf Hansson --- drivers/mmc/core/sd.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c index 45f578793980..bd87012c220c 100644 --- a/drivers/mmc/core/sd.c +++ b/drivers/mmc/core/sd.c @@ -67,7 +67,7 @@ static const unsigned int sd_au_size[] = { __res & __mask; \ }) -#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 2000 +#define SD_POWEROFF_NOTIFY_TIMEOUT_MS 1000 #define SD_WRITE_EXTR_SINGLE_TIMEOUT_MS 1000 struct sd_busy_data { @@ -1664,6 +1664,12 @@ static int sd_poweroff_notify(struct mmc_card *card) goto out; } + /* Find out when the command is completed. 
*/ + err = mmc_poll_for_busy(card, SD_WRITE_EXTR_SINGLE_TIMEOUT_MS, false, + MMC_BUSY_EXTR_SINGLE); + if (err) + goto out; + cb_data.card = card; cb_data.reg_buf = reg_buf; err = __mmc_poll_for_busy(card->host, SD_POWEROFF_NOTIFY_TIMEOUT_MS, -- cgit From 82880283d7fcd0a1d20964a56d6d1a5cc0df0713 Mon Sep 17 00:00:00 2001 From: Sergei Trofimovich Date: Thu, 20 Jan 2022 23:37:48 +0000 Subject: objtool: Fix truncated string warning MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On GCC 12, the build fails due to a possible truncated string: check.c: In function 'validate_call': check.c:2865:58: error: '%d' directive output may be truncated writing between 1 and 10 bytes into a region of size 9 [-Werror=format-truncation=] 2865 | snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx); | ^~ In theory it's a valid bug: static char pvname[16]; int idx; ... idx = (rel->addend / sizeof(void *)); snprintf(pvname, sizeof(pvname), "pv_ops[%d]", idx); There are only 7 chars for %d while it could take up to 9, so the printed "pv_ops[%d]" string could get truncated. In reality the bug should never happen, because pv_ops only has ~80 entries, so 7 chars for the integer is more than enough. Still, it's worth fixing. Bump the buffer size by 2 bytes to silence the warning. [ jpoimboe: changed size to 19; massaged changelog ] Fixes: db2b0c5d7b6f ("objtool: Support pv_opsindirect calls for noinstr") Reported-by: Adam Borowski Reported-by: Martin Liška Signed-off-by: Sergei Trofimovich Signed-off-by: Josh Poimboeuf Link: https://lore.kernel.org/r/20220120233748.2062559-1-slyich@gmail.com --- tools/objtool/check.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/objtool/check.c b/tools/objtool/check.c index c2d2ab9a2861..7c33ec67c4a9 100644 --- a/tools/objtool/check.c +++ b/tools/objtool/check.c @@ -2854,7 +2854,7 @@ static inline bool func_uaccess_safe(struct symbol *func) static inline const char *call_dest_name(struct instruction *insn) { - static char pvname[16]; + static char pvname[19]; struct reloc *rel; int idx; -- cgit From 165216533dda560f2620ce8f61381a9ee0ca57ba Mon Sep 17 00:00:00 2001 From: Aswath Govindraju Date: Thu, 23 Dec 2021 17:46:49 +0530 Subject: arm64: dts: ti: k3-j721s2: Move aliases to board dts Aliases are board specific and should be in board dts files. So, move aliases to board dts and trim the list to interfaces that are actually enabled. 
Signed-off-by: Aswath Govindraju Signed-off-by: Vignesh Raghavendra Signed-off-by: Nishanth Menon Reviewed-by: Kishon Vijay Abraham I Link: https://lore.kernel.org/r/20211223121650.26868-2-vigneshr@ti.com --- .../boot/dts/ti/k3-j721s2-common-proc-board.dts | 10 ++++++++++ arch/arm64/boot/dts/ti/k3-j721s2.dtsi | 22 ---------------------- 2 files changed, 10 insertions(+), 22 deletions(-) diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts index a5a24f9f46c5..708c14338eb7 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts +++ b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts @@ -19,6 +19,16 @@ bootargs = "console=ttyS10,115200n8 earlycon=ns16550a,mmio32,2880000"; }; + aliases { + serial1 = &mcu_uart0; + serial10 = &main_uart8; + mmc0 = &main_sdhci0; + mmc1 = &main_sdhci1; + can0 = &main_mcan16; + can1 = &mcu_mcan0; + can2 = &mcu_mcan1; + }; + evm_12v0: fixedregulator-evm12v0 { /* main supply */ compatible = "regulator-fixed"; diff --git a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi index 80d3cae03e88..fe5234c40f6c 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2.dtsi +++ b/arch/arm64/boot/dts/ti/k3-j721s2.dtsi @@ -21,28 +21,6 @@ #address-cells = <2>; #size-cells = <2>; - aliases { - serial0 = &wkup_uart0; - serial1 = &mcu_uart0; - serial2 = &main_uart0; - serial3 = &main_uart1; - serial4 = &main_uart2; - serial5 = &main_uart3; - serial6 = &main_uart4; - serial7 = &main_uart5; - serial8 = &main_uart6; - serial9 = &main_uart7; - serial10 = &main_uart8; - serial11 = &main_uart9; - mmc0 = &main_sdhci0; - mmc1 = &main_sdhci1; - can0 = &main_mcan16; - can1 = &mcu_mcan0; - can2 = &mcu_mcan1; - can3 = &main_mcan3; - can4 = &main_mcan5; - }; - chosen { }; cpus { -- cgit From aee744a37aaf277e74557e683cc524fbe6daeef7 Mon Sep 17 00:00:00 2001 From: Aswath Govindraju Date: Thu, 23 Dec 2021 17:46:50 +0530 Subject: arm64: dts: ti: k3-j721s2-common-proc-board: Alias console uart to serial2 On J721s2 Linux console is on main_uart8 but to be consistent with other J7 family of devices, alias it to ttyS2 (serial2). This also eliminates need to have higher number of 8250 runtime UARTs. Signed-off-by: Aswath Govindraju Signed-off-by: Vignesh Raghavendra Signed-off-by: Nishanth Menon Reviewed-by: Kishon Vijay Abraham I Link: https://lore.kernel.org/r/20211223121650.26868-3-vigneshr@ti.com --- arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts index 708c14338eb7..b210cc07c539 100644 --- a/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts +++ b/arch/arm64/boot/dts/ti/k3-j721s2-common-proc-board.dts @@ -15,13 +15,13 @@ model = "Texas Instruments J721S2 EVM"; chosen { - stdout-path = "serial10:115200n8"; - bootargs = "console=ttyS10,115200n8 earlycon=ns16550a,mmio32,2880000"; + stdout-path = "serial2:115200n8"; + bootargs = "console=ttyS2,115200n8 earlycon=ns16550a,mmio32,2880000"; }; aliases { serial1 = &mcu_uart0; - serial10 = &main_uart8; + serial2 = &main_uart8; mmc0 = &main_sdhci0; mmc1 = &main_sdhci1; can0 = &main_mcan16; -- cgit From 926fd9f23b27ca6587492c3f58f4c7f4cd01dad5 Mon Sep 17 00:00:00 2001 From: Eric Biggers Date: Thu, 13 Jan 2022 11:44:38 -0800 Subject: ima: fix reference leak in asymmetric_verify() Don't leak a reference to the key if its algorithm is unknown. 
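Reduced to its shape (sketch with placeholder names; verify_signature() and key_put() are the real calls used in the diff below): once the key reference is held, every exit path, including the unknown-algorithm case, must go through key_put():

  static int example_verify(struct key *key,
                            struct public_key_signature *pks, bool algo_known)
  {
          int ret;

          if (!algo_known) {
                  ret = -ENOPKG;
                  goto out;       /* previously returned here, leaking the ref */
          }

          ret = verify_signature(key, pks);
  out:
          key_put(key);
          return ret;
  }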
Fixes: 947d70597236 ("ima: Support EC keys for signature verification") Cc: # v5.13+ Signed-off-by: Eric Biggers Reviewed-by: Stefan Berger Reviewed-by: Tianjia Zhang Signed-off-by: Mimi Zohar --- security/integrity/digsig_asymmetric.c | 15 +++++++++------ 1 file changed, 9 insertions(+), 6 deletions(-) diff --git a/security/integrity/digsig_asymmetric.c b/security/integrity/digsig_asymmetric.c index 23240d793b07..895f4b9ce8c6 100644 --- a/security/integrity/digsig_asymmetric.c +++ b/security/integrity/digsig_asymmetric.c @@ -109,22 +109,25 @@ int asymmetric_verify(struct key *keyring, const char *sig, pk = asymmetric_key_public_key(key); pks.pkey_algo = pk->pkey_algo; - if (!strcmp(pk->pkey_algo, "rsa")) + if (!strcmp(pk->pkey_algo, "rsa")) { pks.encoding = "pkcs1"; - else if (!strncmp(pk->pkey_algo, "ecdsa-", 6)) + } else if (!strncmp(pk->pkey_algo, "ecdsa-", 6)) { /* edcsa-nist-p192 etc. */ pks.encoding = "x962"; - else if (!strcmp(pk->pkey_algo, "ecrdsa") || - !strcmp(pk->pkey_algo, "sm2")) + } else if (!strcmp(pk->pkey_algo, "ecrdsa") || + !strcmp(pk->pkey_algo, "sm2")) { pks.encoding = "raw"; - else - return -ENOPKG; + } else { + ret = -ENOPKG; + goto out; + } pks.digest = (u8 *)data; pks.digest_size = datalen; pks.s = hdr->sig; pks.s_size = siglen; ret = verify_signature(key, &pks); +out: key_put(key); pr_debug("%s() = %d\n", __func__, ret); return ret; -- cgit From 63ee956f69d8c181e5251c7ce58b84c1edec0f6a Mon Sep 17 00:00:00 2001 From: Alexei Starovoitov Date: Mon, 24 Jan 2022 20:20:51 -0800 Subject: bpf: Fix renaming task_getsecid_subj->current_getsecid_subj. The commit 6326948f940d missed renaming of task->current LSM hook in BTF_ID. Fix it to silence build warning: WARN: resolve_btfids: unresolved symbol bpf_lsm_task_getsecid_subj Fixes: 6326948f940d ("lsm: security_task_getsecid_subj() -> security_current_getsecid_subj()") Acked-by: Andrii Nakryiko Signed-off-by: Alexei Starovoitov --- kernel/bpf/bpf_lsm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/bpf/bpf_lsm.c b/kernel/bpf/bpf_lsm.c index 06062370c3b8..9e4ecc990647 100644 --- a/kernel/bpf/bpf_lsm.c +++ b/kernel/bpf/bpf_lsm.c @@ -207,7 +207,7 @@ BTF_ID(func, bpf_lsm_socket_socketpair) BTF_ID(func, bpf_lsm_syslog) BTF_ID(func, bpf_lsm_task_alloc) -BTF_ID(func, bpf_lsm_task_getsecid_subj) +BTF_ID(func, bpf_lsm_current_getsecid_subj) BTF_ID(func, bpf_lsm_task_getsecid_obj) BTF_ID(func, bpf_lsm_task_prctl) BTF_ID(func, bpf_lsm_task_setscheduler) -- cgit From cf5b64f7f10b28bebb9b7c9d25e7aee5cbe43918 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 24 Jan 2022 15:24:09 +0300 Subject: drm/i915/overlay: Prevent divide by zero bugs in scaling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Smatch detected a divide by zero bug in check_overlay_scaling(). drivers/gpu/drm/i915/display/intel_overlay.c:976 check_overlay_scaling() error: potential divide by zero bug '/ rec->dst_height'. drivers/gpu/drm/i915/display/intel_overlay.c:980 check_overlay_scaling() error: potential divide by zero bug '/ rec->dst_width'. Prevent this by ensuring that the dst height and width are non-zero. 
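The guard itself is tiny (illustrative fragment with generic parameter names): reject a zero destination width or height before any scaling ratio is computed from it:

  static int example_check_dst_size(unsigned int dst_width,
                                    unsigned int dst_height)
  {
          if (dst_width == 0 || dst_height == 0)
                  return -EINVAL; /* would otherwise divide by zero in scaling */
          return 0;
  }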
Fixes: 02e792fbaadb ("drm/i915: implement drmmode overlay support v4") Signed-off-by: Dan Carpenter Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124122409.GA31673@kili --- drivers/gpu/drm/i915/display/intel_overlay.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 23f30fdb3519..be0a74b0bb64 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -960,6 +960,9 @@ static int check_overlay_dst(struct intel_overlay *overlay, const struct intel_crtc_state *pipe_config = overlay->crtc->config; + if (rec->dst_height == 0 || rec->dst_width == 0) + return -EINVAL; + if (rec->dst_x < pipe_config->pipe_src_w && rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w && rec->dst_y < pipe_config->pipe_src_h && -- cgit From 817f7c9335ec01e0f5e8caffc4f1dcd5e458a4c0 Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 24 Jan 2022 15:32:51 +0000 Subject: ASoC: ops: Reject out of bounds values in snd_soc_put_volsw() We don't currently validate that the values being set are within the range we advertised to userspace as being valid, do so and reject any values that are out of range. Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20220124153253.3548853-2-broonie@kernel.org Signed-off-by: Mark Brown --- sound/soc/soc-ops.c | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index 08eaa9ddf191..fbe5d326b0f2 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -316,13 +316,27 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (sign_bit) mask = BIT(sign_bit + 1) - 1; - val = ((ucontrol->value.integer.value[0] + min) & mask); + val = ucontrol->value.integer.value[0]; + if (mc->platform_max && val > mc->platform_max) + return -EINVAL; + if (val > max - min) + return -EINVAL; + if (val < 0) + return -EINVAL; + val = (val + min) & mask; if (invert) val = max - val; val_mask = mask << shift; val = val << shift; if (snd_soc_volsw_is_stereo(mc)) { - val2 = ((ucontrol->value.integer.value[1] + min) & mask); + val2 = ucontrol->value.integer.value[1]; + if (mc->platform_max && val2 > mc->platform_max) + return -EINVAL; + if (val2 > max - min) + return -EINVAL; + if (val2 < 0) + return -EINVAL; + val2 = (val2 + min) & mask; if (invert) val2 = max - val2; if (reg == reg2) { -- cgit From 4f1e50d6a9cf9c1b8c859d449b5031cacfa8404e Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 24 Jan 2022 15:32:52 +0000 Subject: ASoC: ops: Reject out of bounds values in snd_soc_put_volsw_sx() We don't currently validate that the values being set are within the range we advertised to userspace as being valid, do so and reject any values that are out of range. 
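The checks added across these three ASoC patches share one shape (sketched here with generic names; platform_max, min and max correspond to the mc-> fields used in the diffs): a control value is rejected unless it lies in [0, max - min] and under any platform_max cap:

  static int example_validate_ctl_value(long val, int min, int max,
                                        int platform_max)
  {
          if (platform_max && val > platform_max)
                  return -EINVAL;
          if (val > max - min)
                  return -EINVAL;
          if (val < 0)
                  return -EINVAL;
          return 0;
  }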
Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20220124153253.3548853-3-broonie@kernel.org Signed-off-by: Mark Brown --- sound/soc/soc-ops.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index fbe5d326b0f2..c31e63b27193 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -423,8 +423,15 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, int err = 0; unsigned int val, val_mask; + val = ucontrol->value.integer.value[0]; + if (mc->platform_max && val > mc->platform_max) + return -EINVAL; + if (val > max - min) + return -EINVAL; + if (val < 0) + return -EINVAL; val_mask = mask << shift; - val = (ucontrol->value.integer.value[0] + min) & mask; + val = (val + min) & mask; val = val << shift; err = snd_soc_component_update_bits(component, reg, val_mask, val); -- cgit From 4cf28e9ae6e2e11a044be1bcbcfa1b0d8675fe4d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Mon, 24 Jan 2022 15:32:53 +0000 Subject: ASoC: ops: Reject out of bounds values in snd_soc_put_xr_sx() We don't currently validate that the values being set are within the range we advertised to userspace as being valid, do so and reject any values that are out of range. Signed-off-by: Mark Brown Cc: stable@vger.kernel.org Link: https://lore.kernel.org/r/20220124153253.3548853-4-broonie@kernel.org Signed-off-by: Mark Brown --- sound/soc/soc-ops.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index c31e63b27193..dc0e7c8d31f3 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -879,6 +879,8 @@ int snd_soc_put_xr_sx(struct snd_kcontrol *kcontrol, long val = ucontrol->value.integer.value[0]; unsigned int i; + if (val < mc->min || val > mc->max) + return -EINVAL; if (invert) val = max - val; val &= mask; -- cgit From 0cea730cac824edf78ffd3302938ed5fe2b9d50d Mon Sep 17 00:00:00 2001 From: Padmanabha Srinivasaiah Date: Fri, 31 Dec 2021 20:54:03 +0100 Subject: staging: vc04_services: Fix RCU dereference check In service_callback path RCU dereferenced pointer struct vchiq_service need to be accessed inside rcu read-critical section. Also userdata/user_service part of vchiq_service is accessed around different synchronization mechanism, getting an extra reference to a pointer keeps sematics simpler and avoids prolonged graceperiod. Accessing vchiq_service with rcu_read_[lock/unlock] fixes below issue. [ 32.201659] ============================= [ 32.201664] WARNING: suspicious RCU usage [ 32.201670] 5.15.11-rt24-v8+ #3 Not tainted [ 32.201680] ----------------------------- [ 32.201685] drivers/staging/vc04_services/interface/vchiq_arm/vchiq_core.h:529 suspicious rcu_dereference_check() usage! [ 32.201695] [ 32.201695] other info that might help us debug this: [ 32.201695] [ 32.201700] [ 32.201700] rcu_scheduler_active = 2, debug_locks = 1 [ 32.201708] no locks held by vchiq-slot/0/98. 
[ 32.201715] [ 32.201715] stack backtrace: [ 32.201723] CPU: 1 PID: 98 Comm: vchiq-slot/0 Not tainted 5.15.11-rt24-v8+ #3 [ 32.201733] Hardware name: Raspberry Pi 4 Model B Rev 1.4 (DT) [ 32.201739] Call trace: [ 32.201742] dump_backtrace+0x0/0x1b8 [ 32.201772] show_stack+0x20/0x30 [ 32.201784] dump_stack_lvl+0x8c/0xb8 [ 32.201799] dump_stack+0x18/0x34 [ 32.201808] lockdep_rcu_suspicious+0xe4/0xf8 [ 32.201817] service_callback+0x124/0x400 [ 32.201830] slot_handler_func+0xf60/0x1e20 [ 32.201839] kthread+0x19c/0x1a8 [ 32.201849] ret_from_fork+0x10/0x20 Tested-by: Stefan Wahren Signed-off-by: Padmanabha Srinivasaiah Link: https://lore.kernel.org/r/20211231195406.5479-1-treasure4paddy@gmail.com Signed-off-by: Greg Kroah-Hartman --- .../vc04_services/interface/vchiq_arm/vchiq_arm.c | 20 ++++++++++++++++++-- 1 file changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c index 6759a6261500..3a2e4582db8e 100644 --- a/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c +++ b/drivers/staging/vc04_services/interface/vchiq_arm/vchiq_arm.c @@ -1058,15 +1058,27 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, DEBUG_TRACE(SERVICE_CALLBACK_LINE); + rcu_read_lock(); service = handle_to_service(handle); - if (WARN_ON(!service)) + if (WARN_ON(!service)) { + rcu_read_unlock(); return VCHIQ_SUCCESS; + } user_service = (struct user_service *)service->base.userdata; instance = user_service->instance; - if (!instance || instance->closing) + if (!instance || instance->closing) { + rcu_read_unlock(); return VCHIQ_SUCCESS; + } + + /* + * As hopping around different synchronization mechanism, + * taking an extra reference results in simpler implementation. + */ + vchiq_service_get(service); + rcu_read_unlock(); vchiq_log_trace(vchiq_arm_log_level, "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx", @@ -1097,6 +1109,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, bulk_userdata); if (status != VCHIQ_SUCCESS) { DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return status; } } @@ -1105,10 +1118,12 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, if (wait_for_completion_interruptible(&user_service->remove_event)) { vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__); DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return VCHIQ_RETRY; } else if (instance->closing) { vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__); DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); return VCHIQ_ERROR; } DEBUG_TRACE(SERVICE_CALLBACK_LINE); @@ -1137,6 +1152,7 @@ service_callback(enum vchiq_reason reason, struct vchiq_header *header, header = NULL; } DEBUG_TRACE(SERVICE_CALLBACK_LINE); + vchiq_service_put(service); if (skip_completion) return VCHIQ_SUCCESS; -- cgit From 426aca16e903b387a0b0001d62207a745c67cfd3 Mon Sep 17 00:00:00 2001 From: Uwe Kleine-König Date: Tue, 18 Jan 2022 19:13:37 +0100 Subject: staging: fbtft: Fix error path in fbtft_driver_module_init() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If registering the platform driver fails, the function must not return without undoing the spi driver registration first. 
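Outside of the macro, the pattern looks like this (sketch; the example_* driver objects are hypothetical, the register/unregister calls are the real SPI and platform driver API):

  static int __init example_module_init(void)
  {
          int ret;

          ret = spi_register_driver(&example_spi_driver);
          if (ret < 0)
                  return ret;

          ret = platform_driver_register(&example_platform_driver);
          if (ret < 0)
                  /* undo the first registration before reporting failure */
                  spi_unregister_driver(&example_spi_driver);

          return ret;
  }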
Fixes: c296d5f9957c ("staging: fbtft: core support") Signed-off-by: Uwe Kleine-König Link: https://lore.kernel.org/r/20220118181338.207943-1-u.kleine-koenig@pengutronix.de Signed-off-by: Greg Kroah-Hartman --- drivers/staging/fbtft/fbtft.h | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/drivers/staging/fbtft/fbtft.h b/drivers/staging/fbtft/fbtft.h index 4cdec34e23d2..55677efc0138 100644 --- a/drivers/staging/fbtft/fbtft.h +++ b/drivers/staging/fbtft/fbtft.h @@ -334,7 +334,10 @@ static int __init fbtft_driver_module_init(void) \ ret = spi_register_driver(&fbtft_driver_spi_driver); \ if (ret < 0) \ return ret; \ - return platform_driver_register(&fbtft_driver_platform_driver); \ + ret = platform_driver_register(&fbtft_driver_platform_driver); \ + if (ret < 0) \ + spi_unregister_driver(&fbtft_driver_spi_driver); \ + return ret; \ } \ \ static void __exit fbtft_driver_module_exit(void) \ -- cgit From f26d04331360d42dbd6b58448bd98e4edbfbe1c5 Mon Sep 17 00:00:00 2001 From: Paul Moore Date: Thu, 13 Jan 2022 18:54:38 -0500 Subject: audit: improve audit queue handling when "audit=1" on cmdline When an admin enables audit at early boot via the "audit=1" kernel command line the audit queue behavior is slightly different; the audit subsystem goes to greater lengths to avoid dropping records, which unfortunately can result in problems when the audit daemon is forcibly stopped for an extended period of time. This patch makes a number of changes designed to improve the audit queuing behavior so that leaving the audit daemon in a stopped state for an extended period does not cause a significant impact to the system. - kauditd_send_queue() is now limited to looping through the passed queue only once per call. This not only prevents the function from looping indefinitely when records are returned to the current queue, it also allows any recovery handling in kauditd_thread() to take place when kauditd_send_queue() returns. - Transient netlink send errors seen as -EAGAIN now cause the record to be returned to the retry queue instead of going to the hold queue. The intention of the hold queue is to store, perhaps for an extended period of time, the events which led up to the audit daemon going offline. The retry queue remains a temporary queue intended to protect against transient issues between the kernel and the audit daemon. - The retry queue is now limited by the audit_backlog_limit setting, the same as the other queues. This allows admins to bound the size of all of the audit queues on the system. - kauditd_rehold_skb() now returns records to the end of the hold queue to ensure ordering is preserved in the face of recent changes to kauditd_send_queue(). 
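The queueing rule described in the list above boils down to a few lines (simplified sketch, not the kernel code; audit_backlog_limit, skb_queue_len(), skb_queue_tail(), audit_log_lost() and kfree_skb() are the real symbols used in the diff below):

  static void example_bounded_enqueue(struct sk_buff_head *queue,
                                      struct sk_buff *skb)
  {
          if (!audit_backlog_limit ||
              skb_queue_len(queue) < audit_backlog_limit) {
                  skb_queue_tail(queue, skb);
                  return;
          }
          /* over the limit: account the loss and drop the record */
          audit_log_lost("audit queue overflow");
          kfree_skb(skb);
  }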
Cc: stable@vger.kernel.org Fixes: 5b52330bbfe63 ("audit: fix auditd/kernel connection state tracking") Fixes: f4b3ee3c85551 ("audit: improve robustness of the audit queue handling") Reported-by: Gaosheng Cui Tested-by: Gaosheng Cui Reviewed-by: Richard Guy Briggs Signed-off-by: Paul Moore --- kernel/audit.c | 62 ++++++++++++++++++++++++++++++++++++++++------------------ 1 file changed, 43 insertions(+), 19 deletions(-) diff --git a/kernel/audit.c b/kernel/audit.c index e4bbe2c70c26..7690c29d4ee4 100644 --- a/kernel/audit.c +++ b/kernel/audit.c @@ -541,20 +541,22 @@ static void kauditd_printk_skb(struct sk_buff *skb) /** * kauditd_rehold_skb - Handle a audit record send failure in the hold queue * @skb: audit record + * @error: error code (unused) * * Description: * This should only be used by the kauditd_thread when it fails to flush the * hold queue. */ -static void kauditd_rehold_skb(struct sk_buff *skb) +static void kauditd_rehold_skb(struct sk_buff *skb, __always_unused int error) { - /* put the record back in the queue at the same place */ - skb_queue_head(&audit_hold_queue, skb); + /* put the record back in the queue */ + skb_queue_tail(&audit_hold_queue, skb); } /** * kauditd_hold_skb - Queue an audit record, waiting for auditd * @skb: audit record + * @error: error code * * Description: * Queue the audit record, waiting for an instance of auditd. When this @@ -564,19 +566,31 @@ static void kauditd_rehold_skb(struct sk_buff *skb) * and queue it, if we have room. If we want to hold on to the record, but we * don't have room, record a record lost message. */ -static void kauditd_hold_skb(struct sk_buff *skb) +static void kauditd_hold_skb(struct sk_buff *skb, int error) { /* at this point it is uncertain if we will ever send this to auditd so * try to send the message via printk before we go any further */ kauditd_printk_skb(skb); /* can we just silently drop the message? */ - if (!audit_default) { - kfree_skb(skb); - return; + if (!audit_default) + goto drop; + + /* the hold queue is only for when the daemon goes away completely, + * not -EAGAIN failures; if we are in a -EAGAIN state requeue the + * record on the retry queue unless it's full, in which case drop it + */ + if (error == -EAGAIN) { + if (!audit_backlog_limit || + skb_queue_len(&audit_retry_queue) < audit_backlog_limit) { + skb_queue_tail(&audit_retry_queue, skb); + return; + } + audit_log_lost("kauditd retry queue overflow"); + goto drop; } - /* if we have room, queue the message */ + /* if we have room in the hold queue, queue the message */ if (!audit_backlog_limit || skb_queue_len(&audit_hold_queue) < audit_backlog_limit) { skb_queue_tail(&audit_hold_queue, skb); @@ -585,24 +599,32 @@ static void kauditd_hold_skb(struct sk_buff *skb) /* we have no other options - drop the message */ audit_log_lost("kauditd hold queue overflow"); +drop: kfree_skb(skb); } /** * kauditd_retry_skb - Queue an audit record, attempt to send again to auditd * @skb: audit record + * @error: error code (unused) * * Description: * Not as serious as kauditd_hold_skb() as we still have a connected auditd, * but for some reason we are having problems sending it audit records so * queue the given record and attempt to resend. 
*/ -static void kauditd_retry_skb(struct sk_buff *skb) +static void kauditd_retry_skb(struct sk_buff *skb, __always_unused int error) { - /* NOTE: because records should only live in the retry queue for a - * short period of time, before either being sent or moved to the hold - * queue, we don't currently enforce a limit on this queue */ - skb_queue_tail(&audit_retry_queue, skb); + if (!audit_backlog_limit || + skb_queue_len(&audit_retry_queue) < audit_backlog_limit) { + skb_queue_tail(&audit_retry_queue, skb); + return; + } + + /* we have to drop the record, send it via printk as a last effort */ + kauditd_printk_skb(skb); + audit_log_lost("kauditd retry queue overflow"); + kfree_skb(skb); } /** @@ -640,7 +662,7 @@ static void auditd_reset(const struct auditd_connection *ac) /* flush the retry queue to the hold queue, but don't touch the main * queue since we need to process that normally for multicast */ while ((skb = skb_dequeue(&audit_retry_queue))) - kauditd_hold_skb(skb); + kauditd_hold_skb(skb, -ECONNREFUSED); } /** @@ -714,16 +736,18 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, struct sk_buff_head *queue, unsigned int retry_limit, void (*skb_hook)(struct sk_buff *skb), - void (*err_hook)(struct sk_buff *skb)) + void (*err_hook)(struct sk_buff *skb, int error)) { int rc = 0; - struct sk_buff *skb; + struct sk_buff *skb = NULL; + struct sk_buff *skb_tail; unsigned int failed = 0; /* NOTE: kauditd_thread takes care of all our locking, we just use * the netlink info passed to us (e.g. sk and portid) */ - while ((skb = skb_dequeue(queue))) { + skb_tail = skb_peek_tail(queue); + while ((skb != skb_tail) && (skb = skb_dequeue(queue))) { /* call the skb_hook for each skb we touch */ if (skb_hook) (*skb_hook)(skb); @@ -731,7 +755,7 @@ static int kauditd_send_queue(struct sock *sk, u32 portid, /* can we send to anyone via unicast? */ if (!sk) { if (err_hook) - (*err_hook)(skb); + (*err_hook)(skb, -ECONNREFUSED); continue; } @@ -745,7 +769,7 @@ retry: rc == -ECONNREFUSED || rc == -EPERM) { sk = NULL; if (err_hook) - (*err_hook)(skb); + (*err_hook)(skb, rc); if (rc == -EAGAIN) rc = 0; /* continue to drain the queue */ -- cgit From 235528072f28b3b0a1446279b7eaddda36dbf743 Mon Sep 17 00:00:00 2001 From: Michał Winiarski Date: Thu, 13 Jan 2022 00:36:57 +0100 Subject: kunit: tool: Import missing importlib.abc MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Python 3.10.0 contains: 9e09849d20 ("bpo-41006: importlib.util no longer imports typing (GH-20938)") It causes importlib.util to no longer import importlib.abs, which leads to the following error when trying to use kunit with qemu: AttributeError: module 'importlib' has no attribute 'abc'. Did you mean: '_abc'? Add the missing import. Signed-off-by: Michał Winiarski Reviewed-by: Daniel Latypov Reviewed-by: Brendan Higgins Signed-off-by: Shuah Khan --- tools/testing/kunit/kunit_kernel.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tools/testing/kunit/kunit_kernel.py b/tools/testing/kunit/kunit_kernel.py index 44bbe54f25f1..3c4196cef3ed 100644 --- a/tools/testing/kunit/kunit_kernel.py +++ b/tools/testing/kunit/kunit_kernel.py @@ -6,6 +6,7 @@ # Author: Felix Guo # Author: Brendan Higgins +import importlib.abc import importlib.util import logging import subprocess -- cgit From f034cc1301e7d83d4ec428dd6b8ffb57ca446efb Mon Sep 17 00:00:00 2001 From: "Nícolas F. R. A. 
Prado" Date: Wed, 12 Jan 2022 14:41:42 -0500 Subject: selftests: rtc: Increase test timeout so that all tests run MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The timeout setting for the rtc kselftest is currently 90 seconds. This setting is used by the kselftest runner to stop running a test if it takes longer than the assigned value. However, two of the test cases inside rtc set alarms. These alarms are set to the next beginning of the minute, so each of these test cases may take up to, in the worst case, 60 seconds. In order to allow for all test cases in rtc to run, even in the worst case, when using the kselftest runner, the timeout value should be increased to at least 120. Set it to 180, so there's some additional slack. Correct operation can be tested by running the following command right after the start of a minute (low second count), and checking that all test cases run: ./run_kselftest.sh -c rtc Signed-off-by: Nícolas F. R. A. Prado Acked-by: Alexandre Belloni Signed-off-by: Shuah Khan --- tools/testing/selftests/rtc/settings | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/rtc/settings b/tools/testing/selftests/rtc/settings index ba4d85f74cd6..a953c96aa16e 100644 --- a/tools/testing/selftests/rtc/settings +++ b/tools/testing/selftests/rtc/settings @@ -1 +1 @@ -timeout=90 +timeout=180 -- cgit From 40d70d4d60974c28054a60316f2aec8810833526 Mon Sep 17 00:00:00 2001 From: "Nícolas F. R. A. Prado" Date: Fri, 14 Jan 2022 18:21:26 -0500 Subject: selftests: cpufreq: Write test output to stdout as well MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use 'tee' to send the test output to stdout in addition to the current output file. This makes the output easier to handle in automated test systems and is superior to only later dumping the output file contents to stdout, since this way the test output can be interleaved with other log messages, like from the kernel, so that chronology is preserved, making it easier to detect issues. Signed-off-by: Nícolas F. R. A. Prado Acked-by: Viresh Kumar Signed-off-by: Shuah Khan --- tools/testing/selftests/cpufreq/main.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/cpufreq/main.sh b/tools/testing/selftests/cpufreq/main.sh index 31f8c9a76c5f..60ce18ed0666 100755 --- a/tools/testing/selftests/cpufreq/main.sh +++ b/tools/testing/selftests/cpufreq/main.sh @@ -194,5 +194,5 @@ prerequisite # Run requested functions clear_dumps $OUTFILE -do_test >> $OUTFILE.txt +do_test | tee -a $OUTFILE.txt dmesg_dumps $OUTFILE -- cgit From 92d25637a3a45904292c93f1863c6bbda4e3e38f Mon Sep 17 00:00:00 2001 From: Li Zhijian Date: Fri, 17 Dec 2021 17:29:55 +0800 Subject: kselftest: signal all child processes We have some many cases that will create child process as well, such as pidfd_wait. Previously, we will signal/kill the parent process when it is time out, but this signal will not be sent to its child process. In such case, if child process doesn't terminate itself, ksefltest framework will hang forever. Here we group all its child processes so that kill() can signal all of them in timeout. 
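The mechanism in a few lines of userspace C (illustrative sketch, not the harness code; setpgrp() and the negative-pid form of kill() are standard POSIX):

  #include <signal.h>
  #include <sys/types.h>
  #include <unistd.h>

  static void example_run(void (*test_fn)(void))
  {
          pid_t pid = fork();

          if (pid < 0)
                  return;                 /* fork failed */
          if (pid == 0) {
                  setpgrp();              /* child becomes its own group leader */
                  test_fn();              /* grandchildren stay in the same group */
                  _exit(0);
          }

          /* ... later, when the per-test timeout fires ... */
          kill(-pid, SIGKILL);            /* negative pid: signal the whole group */
  }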
Fixed change log: Shuah Khan Suggested-by: yang xu Signed-off-by: Li Zhijian Acked-by: Christian Brauner Signed-off-by: Shuah Khan --- tools/testing/selftests/kselftest_harness.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/kselftest_harness.h b/tools/testing/selftests/kselftest_harness.h index 471eaa7b3a3f..11779405dc80 100644 --- a/tools/testing/selftests/kselftest_harness.h +++ b/tools/testing/selftests/kselftest_harness.h @@ -877,7 +877,8 @@ static void __timeout_handler(int sig, siginfo_t *info, void *ucontext) } t->timed_out = true; - kill(t->pid, SIGKILL); + // signal process group + kill(-(t->pid), SIGKILL); } void __wait_for_test(struct __test_metadata *t) @@ -987,6 +988,7 @@ void __run_test(struct __fixture_metadata *f, ksft_print_msg("ERROR SPAWNING TEST CHILD\n"); t->passed = 0; } else if (t->pid == 0) { + setpgrp(); t->fn(t, variant); if (t->skip) _exit(255); -- cgit From 901abf367d3eecd54f21829ced48c20f53c74c57 Mon Sep 17 00:00:00 2001 From: shaoyunl Date: Thu, 20 Jan 2022 10:39:41 -0500 Subject: drm/amdgpu: Disable FRU EEPROM access for SRIOV VF acces the EEPROM is blocked by security policy, we might need other way to get SKUs info for VF v2: squash in compilation fix from Luben Signed-off-by: shaoyunl Acked-by: Alex Deucher Reviewed-by: Kent Russell Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 2a786e788627..60e7e637eaa3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -40,6 +40,12 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev) */ struct atom_context *atom_ctx = adev->mode_info.atom_context; + /* The i2c access is blocked on VF + * TODO: Need other way to get the info + */ + if (amdgpu_sriov_vf(adev)) + return false; + /* VBIOS is of the format ###-DXXXYY-##. For SKU identification, * we can use just the "DXXX" portion. If there were more models, we * could convert the 3 characters to a hex integer and use a switch -- cgit From 828904660a2e0a31d5c8a2ce75711f7123896bd5 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Tue, 18 Jan 2022 19:10:42 -0600 Subject: drm/amd: Fix MSB of SMU version printing Yellow carp has been outputting versions like `1093.24.0`, but this is supposed to be 69.24.0. That is the MSB is being interpreted incorrectly. The MSB is not part of the major version, but has generally been treated that way thus far. It's actually the program, and used to distinguish between two programs from a similar family but different codebase. 
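A worked decode makes the off-by-a-byte visible (the raw value below is hypothetical, but it is the one consistent with the 1093.24.0 versus 69.24.0 numbers quoted above):

  uint32_t ver = 0x04451800;                  /* hypothetical reported value */
  uint8_t smu_program = (ver >> 24) & 0xff;   /* 0x04 = 4  */
  uint8_t smu_major   = (ver >> 16) & 0xff;   /* 0x45 = 69 */
  uint8_t smu_minor   = (ver >>  8) & 0xff;   /* 0x18 = 24 */
  uint8_t smu_debug   =  ver        & 0xff;   /* 0x00 = 0  */
  /* old decode: smu_major = (ver >> 16) & 0xffff = 0x0445 = 1093 -> "1093.24.0" */
  /* new decode: program 4, firmware version "69.24.0" */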
Reviewed-by: Lijo Lazar Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 10 +++++----- drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 10 +++++----- drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 10 +++++----- drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 14 +++++++------- 4 files changed, 22 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 85f06396d184..e5e69fcc3af3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -1425,8 +1425,7 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) struct drm_amdgpu_info_firmware fw_info; struct drm_amdgpu_query_fw query_fw; struct atom_context *ctx = adev->mode_info.atom_context; - uint8_t smu_minor, smu_debug; - uint16_t smu_major; + uint8_t smu_program, smu_major, smu_minor, smu_debug; int ret, i; static const char *ta_fw_name[TA_FW_TYPE_MAX_INDEX] = { @@ -1572,11 +1571,12 @@ static int amdgpu_debugfs_firmware_info_show(struct seq_file *m, void *unused) ret = amdgpu_firmware_info(&fw_info, &query_fw, adev); if (ret) return ret; - smu_major = (fw_info.ver >> 16) & 0xffff; + smu_program = (fw_info.ver >> 24) & 0xff; + smu_major = (fw_info.ver >> 16) & 0xff; smu_minor = (fw_info.ver >> 8) & 0xff; smu_debug = (fw_info.ver >> 0) & 0xff; - seq_printf(m, "SMC feature version: %u, firmware version: 0x%08x (%d.%d.%d)\n", - fw_info.feature, fw_info.ver, smu_major, smu_minor, smu_debug); + seq_printf(m, "SMC feature version: %u, program: %d, firmware version: 0x%08x (%d.%d.%d)\n", + fw_info.feature, smu_program, fw_info.ver, smu_major, smu_minor, smu_debug); /* SDMA */ query_fw.fw_type = AMDGPU_INFO_FW_SDMA; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index 7029e5deb6b3..e94a400db669 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -225,15 +225,15 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t if_version = 0xff, smu_version = 0xff; - uint16_t smu_major; - uint8_t smu_minor, smu_debug; + uint8_t smu_program, smu_major, smu_minor, smu_debug; int ret = 0; ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); if (ret) return ret; - smu_major = (smu_version >> 16) & 0xffff; + smu_program = (smu_version >> 24) & 0xff; + smu_major = (smu_version >> 16) & 0xff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; if (smu->is_apu) @@ -287,9 +287,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu) */ if (if_version != smu->smc_driver_if_version) { dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " - "smu fw version = 0x%08x (%d.%d.%d)\n", + "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n", smu->smc_driver_if_version, if_version, - smu_version, smu_major, smu_minor, smu_debug); + smu_program, smu_version, smu_major, smu_minor, smu_debug); dev_warn(smu->adev->dev, "SMU driver if version not matched\n"); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c index 9c91e79c955f..56a02bc60cee 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c @@ -74,15 +74,15 @@ int smu_v12_0_check_fw_version(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; 
uint32_t if_version = 0xff, smu_version = 0xff; - uint16_t smu_major; - uint8_t smu_minor, smu_debug; + uint8_t smu_program, smu_major, smu_minor, smu_debug; int ret = 0; ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); if (ret) return ret; - smu_major = (smu_version >> 16) & 0xffff; + smu_program = (smu_version >> 24) & 0xff; + smu_major = (smu_version >> 16) & 0xff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; if (smu->is_apu) @@ -98,9 +98,9 @@ int smu_v12_0_check_fw_version(struct smu_context *smu) */ if (if_version != smu->smc_driver_if_version) { dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " - "smu fw version = 0x%08x (%d.%d.%d)\n", + "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", smu->smc_driver_if_version, if_version, - smu_version, smu_major, smu_minor, smu_debug); + smu_program, smu_version, smu_major, smu_minor, smu_debug); dev_warn(smu->adev->dev, "SMU driver if version not matched\n"); } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index f66d8b9135ca..d9e3ebfeee73 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -198,15 +198,15 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) { struct amdgpu_device *adev = smu->adev; uint32_t if_version = 0xff, smu_version = 0xff; - uint16_t smu_major; - uint8_t smu_minor, smu_debug; + uint8_t smu_program, smu_major, smu_minor, smu_debug; int ret = 0; ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version); if (ret) return ret; - smu_major = (smu_version >> 16) & 0xffff; + smu_program = (smu_version >> 24) & 0xff; + smu_major = (smu_version >> 16) & 0xff; smu_minor = (smu_version >> 8) & 0xff; smu_debug = (smu_version >> 0) & 0xff; if (smu->is_apu) @@ -229,8 +229,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) /* only for dGPU w/ SMU13*/ if (adev->pm.fw) - dev_dbg(adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n", - smu_version, smu_major, smu_minor, smu_debug); + dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n", + smu_program, smu_version, smu_major, smu_minor, smu_debug); /* * 1. if_version mismatch is not critical as our fw is designed @@ -242,9 +242,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu) */ if (if_version != smu->smc_driver_if_version) { dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, " - "smu fw version = 0x%08x (%d.%d.%d)\n", + "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n", smu->smc_driver_if_version, if_version, - smu_version, smu_major, smu_minor, smu_debug); + smu_program, smu_version, smu_major, smu_minor, smu_debug); dev_warn(adev->dev, "SMU driver if version not matched\n"); } -- cgit From 6a6c2ab687c8eabaec4a55a4f13eb5ee68181403 Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Thu, 20 Jan 2022 14:01:29 +0800 Subject: drm/amdgpu: enable amdgpu_dc module parameter It doesn't work under IP discovery mode. Make it work! 
Signed-off-by: Lang Yu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index e6a26b554254..8dd15a7b8dcc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -846,8 +846,14 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) { if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) { amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block); + return 0; + } + + if (!amdgpu_device_has_dc_support(adev)) + return 0; + #if defined(CONFIG_DRM_AMD_DC) - } else if (adev->ip_versions[DCE_HWIP][0]) { + if (adev->ip_versions[DCE_HWIP][0]) { switch (adev->ip_versions[DCE_HWIP][0]) { case IP_VERSION(1, 0, 0): case IP_VERSION(1, 0, 1): @@ -882,8 +888,8 @@ static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev) adev->ip_versions[DCI_HWIP][0]); return -EINVAL; } -#endif } +#endif return 0; } -- cgit From 239d6de307b0dd1d48ec9b935b57531f07f6db11 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Thu, 20 Jan 2022 15:03:37 +0800 Subject: drm/amdgpu: suppress the warning about enum value 'AMD_IP_BLOCK_TYPE_NUM' Suppress the warning below on building htmldocs: drivers/gpu/drm/amd/include/amd_shared.h:103: warning: Enum value 'AMD_IP_BLOCK_TYPE_NUM' not described in enum 'amd_ip_block_type' Fixes: 6ee27ee27ba8 ("drm/amd/pm: avoid duplicate powergate/ungate setting") Signed-off-by: Evan Quan Reviewed-by: Guchun Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/include/amd_shared.h | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h index fb6ad56ad6f1..fe4e585781bb 100644 --- a/drivers/gpu/drm/amd/include/amd_shared.h +++ b/drivers/gpu/drm/amd/include/amd_shared.h @@ -83,6 +83,7 @@ enum amd_apu_flags { * @AMD_IP_BLOCK_TYPE_VCN: Video Core/Codec Next * @AMD_IP_BLOCK_TYPE_MES: Micro-Engine Scheduler * @AMD_IP_BLOCK_TYPE_JPEG: JPEG Engine +* @AMD_IP_BLOCK_TYPE_NUM: Total number of IP block types */ enum amd_ip_block_type { AMD_IP_BLOCK_TYPE_COMMON, -- cgit From e0638c7abc4d3d3c46e8f2fc07e02c3877c3c402 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Nov 2021 11:05:28 +0800 Subject: drm/amd/pm: drop unneeded lock protection smu->mutex As all those APIs are already protected either by adev->pm.mutex or smu->message_lock. 
Signed-off-by: Evan Quan Reviewed-by: Guchun Chen Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 316 ++------------------- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 - drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 4 +- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 4 +- .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 4 +- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 4 +- 6 files changed, 34 insertions(+), 299 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 828cb932f6a9..eaaa5b033d46 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -55,8 +55,7 @@ static int smu_force_smuclk_levels(struct smu_context *smu, uint32_t mask); static int smu_handle_task(struct smu_context *smu, enum amd_dpm_forced_level level, - enum amd_pp_task task_id, - bool lock_needed); + enum amd_pp_task task_id); static int smu_reset(struct smu_context *smu); static int smu_set_fan_speed_pwm(void *handle, u32 speed); static int smu_set_fan_control_mode(void *handle, u32 value); @@ -68,36 +67,22 @@ static int smu_sys_get_pp_feature_mask(void *handle, char *buf) { struct smu_context *smu = handle; - int size = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - - size = smu_get_pp_feature_mask(smu, buf); - - mutex_unlock(&smu->mutex); - - return size; + return smu_get_pp_feature_mask(smu, buf); } static int smu_sys_set_pp_feature_mask(void *handle, uint64_t new_mask) { struct smu_context *smu = handle; - int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - - ret = smu_set_pp_feature_mask(smu, new_mask); - - mutex_unlock(&smu->mutex); - - return ret; + return smu_set_pp_feature_mask(smu, new_mask); } int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value) @@ -117,16 +102,12 @@ int smu_set_soft_freq_range(struct smu_context *smu, { int ret = 0; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_soft_freq_limited_range) ret = smu->ppt_funcs->set_soft_freq_limited_range(smu, clk_type, min, max); - mutex_unlock(&smu->mutex); - return ret; } @@ -140,16 +121,12 @@ int smu_get_dpm_freq_range(struct smu_context *smu, if (!min && !max) return -EINVAL; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_dpm_ultimate_freq) ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu, clk_type, min, max); - mutex_unlock(&smu->mutex); - return ret; } @@ -482,7 +459,6 @@ static int smu_sys_get_pp_table(void *handle, { struct smu_context *smu = handle; struct smu_table_context *smu_table = &smu->smu_table; - uint32_t powerplay_table_size; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; @@ -490,18 +466,12 @@ static int smu_sys_get_pp_table(void *handle, if (!smu_table->power_play_table && !smu_table->hardcode_pptable) return -EINVAL; - mutex_lock(&smu->mutex); - if (smu_table->hardcode_pptable) *table = smu_table->hardcode_pptable; else *table = smu_table->power_play_table; - powerplay_table_size = smu_table->power_play_table_size; - - mutex_unlock(&smu->mutex); - - return powerplay_table_size; + return smu_table->power_play_table_size; } static int smu_sys_set_pp_table(void *handle, @@ -521,12 +491,10 @@ static int smu_sys_set_pp_table(void *handle, return -EIO; } - mutex_lock(&smu->mutex); - if (!smu_table->hardcode_pptable) - smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL); if 
(!smu_table->hardcode_pptable) { - ret = -ENOMEM; - goto failed; + smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL); + if (!smu_table->hardcode_pptable) + return -ENOMEM; } memcpy(smu_table->hardcode_pptable, buf, size); @@ -545,8 +513,6 @@ static int smu_sys_set_pp_table(void *handle, smu->uploading_custom_pp_table = false; -failed: - mutex_unlock(&smu->mutex); return ret; } @@ -633,7 +599,6 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; smu->is_apu = false; - mutex_init(&smu->mutex); mutex_init(&smu->smu_baco.mutex); smu->smu_baco.state = SMU_BACO_STATE_EXIT; smu->smu_baco.platform_support = false; @@ -736,8 +701,7 @@ static int smu_late_init(void *handle) smu_handle_task(smu, smu->smu_dpm.dpm_level, - AMD_PP_TASK_COMPLETE_INIT, - false); + AMD_PP_TASK_COMPLETE_INIT); smu_restore_dpm_user_profile(smu); @@ -1013,12 +977,8 @@ static void smu_interrupt_work_fn(struct work_struct *work) struct smu_context *smu = container_of(work, struct smu_context, interrupt_work); - mutex_lock(&smu->mutex); - if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work) smu->ppt_funcs->interrupt_work(smu); - - mutex_unlock(&smu->mutex); } static int smu_sw_init(void *handle) @@ -1632,8 +1592,6 @@ static int smu_display_configuration_change(void *handle, if (!display_config) return -EINVAL; - mutex_lock(&smu->mutex); - smu_set_min_dcef_deep_sleep(smu, display_config->min_dcef_deep_sleep_set_clk / 100); @@ -1642,8 +1600,6 @@ static int smu_display_configuration_change(void *handle, num_of_active_display++; } - mutex_unlock(&smu->mutex); - return 0; } @@ -1766,22 +1722,18 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu, static int smu_handle_task(struct smu_context *smu, enum amd_dpm_forced_level level, - enum amd_pp_task task_id, - bool lock_needed) + enum amd_pp_task task_id) { int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - if (lock_needed) - mutex_lock(&smu->mutex); - switch (task_id) { case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE: ret = smu_pre_display_config_changed(smu); if (ret) - goto out; + return ret; ret = smu_adjust_power_state_dynamic(smu, level, false); break; case AMD_PP_TASK_COMPLETE_INIT: @@ -1792,10 +1744,6 @@ static int smu_handle_task(struct smu_context *smu, break; } -out: - if (lock_needed) - mutex_unlock(&smu->mutex); - return ret; } @@ -1806,7 +1754,7 @@ static int smu_handle_dpm_task(void *handle, struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm = &smu->smu_dpm; - return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true); + return smu_handle_task(smu, smu_dpm->dpm_level, task_id); } @@ -1825,8 +1773,6 @@ static int smu_switch_power_profile(void *handle, if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) return -EINVAL; - mutex_lock(&smu->mutex); - if (!en) { smu->workload_mask &= ~(1 << smu->workload_prority[type]); index = fls(smu->workload_mask); @@ -1843,8 +1789,6 @@ static int smu_switch_power_profile(void *handle, smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM) smu_bump_power_profile_mode(smu, &workload, 0); - mutex_unlock(&smu->mutex); - return 0; } @@ -1852,7 +1796,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle) { struct smu_context *smu = handle; struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm); - enum amd_dpm_forced_level level; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; @@ -1860,11 +1803,7 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle) if (!smu->is_apu 
&& !smu_dpm_ctx->dpm_context) return -EINVAL; - mutex_lock(&(smu->mutex)); - level = smu_dpm_ctx->dpm_level; - mutex_unlock(&(smu->mutex)); - - return level; + return smu_dpm_ctx->dpm_level; } static int smu_force_performance_level(void *handle, @@ -1880,19 +1819,12 @@ static int smu_force_performance_level(void *handle, if (!smu->is_apu && !smu_dpm_ctx->dpm_context) return -EINVAL; - mutex_lock(&smu->mutex); - ret = smu_enable_umd_pstate(smu, &level); - if (ret) { - mutex_unlock(&smu->mutex); + if (ret) return ret; - } ret = smu_handle_task(smu, level, - AMD_PP_TASK_READJUST_POWER_STATE, - false); - - mutex_unlock(&smu->mutex); + AMD_PP_TASK_READJUST_POWER_STATE); /* reset user dpm clock state */ if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { @@ -1906,16 +1838,11 @@ static int smu_force_performance_level(void *handle, static int smu_set_display_count(void *handle, uint32_t count) { struct smu_context *smu = handle; - int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - ret = smu_init_display_count(smu, count); - mutex_unlock(&smu->mutex); - - return ret; + return smu_init_display_count(smu, count); } static int smu_force_smuclk_levels(struct smu_context *smu, @@ -1933,8 +1860,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu, return -EINVAL; } - mutex_lock(&smu->mutex); - if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) { ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask); if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { @@ -1943,8 +1868,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu, } } - mutex_unlock(&smu->mutex); - return ret; } @@ -2003,14 +1926,10 @@ static int smu_set_mp1_state(void *handle, if (!smu->pm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs && smu->ppt_funcs->set_mp1_state) ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state); - mutex_unlock(&smu->mutex); - return ret; } @@ -2026,14 +1945,10 @@ static int smu_set_df_cstate(void *handle, if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate) return 0; - mutex_lock(&smu->mutex); - ret = smu->ppt_funcs->set_df_cstate(smu, state); if (ret) dev_err(smu->adev->dev, "[SetDfCstate] failed!\n"); - mutex_unlock(&smu->mutex); - return ret; } @@ -2047,38 +1962,25 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en) if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down) return 0; - mutex_lock(&smu->mutex); - ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en); if (ret) dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n"); - mutex_unlock(&smu->mutex); - return ret; } int smu_write_watermarks_table(struct smu_context *smu) { - int ret = 0; - if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - - ret = smu_set_watermarks_table(smu, NULL); - - mutex_unlock(&smu->mutex); - - return ret; + return smu_set_watermarks_table(smu, NULL); } static int smu_set_watermarks_for_clock_ranges(void *handle, struct pp_smu_wm_range_sets *clock_ranges) { struct smu_context *smu = handle; - int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; @@ -2086,13 +1988,7 @@ static int smu_set_watermarks_for_clock_ranges(void *handle, if (smu->disable_watermark) return 0; - mutex_lock(&smu->mutex); - - ret = smu_set_watermarks_table(smu, clock_ranges); - - mutex_unlock(&smu->mutex); - - return ret; + return smu_set_watermarks_table(smu, clock_ranges); } int smu_set_ac_dc(struct 
smu_context *smu) @@ -2106,14 +2002,12 @@ int smu_set_ac_dc(struct smu_context *smu) if (smu->dc_controlled_by_gpio) return 0; - mutex_lock(&smu->mutex); ret = smu_set_power_source(smu, smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC : SMU_POWER_SOURCE_DC); if (ret) dev_err(smu->adev->dev, "Failed to switch to %s mode!\n", smu->adev->pm.ac_power ? "AC" : "DC"); - mutex_unlock(&smu->mutex); return ret; } @@ -2200,13 +2094,9 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled) { int ret = 0; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_gfx_cgpg) ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled); - mutex_unlock(&smu->mutex); - return ret; } @@ -2224,8 +2114,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) if (speed == U32_MAX) return -EINVAL; - mutex_lock(&smu->mutex); - ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed); if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM; @@ -2236,8 +2124,6 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed) smu->user_dpm_profile.fan_speed_pwm = 0; } - mutex_unlock(&smu->mutex); - return ret; } @@ -2293,8 +2179,6 @@ int smu_get_power_limit(void *handle, break; } - mutex_lock(&smu->mutex); - if (limit_type != SMU_DEFAULT_PPT_LIMIT) { if (smu->ppt_funcs->get_ppt_limit) ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level); @@ -2328,8 +2212,6 @@ int smu_get_power_limit(void *handle, } } - mutex_unlock(&smu->mutex); - return ret; } @@ -2342,21 +2224,16 @@ static int smu_set_power_limit(void *handle, uint32_t limit) if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - limit &= (1<<24)-1; if (limit_type != SMU_DEFAULT_PPT_LIMIT) - if (smu->ppt_funcs->set_power_limit) { - ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit); - goto out; - } + if (smu->ppt_funcs->set_power_limit) + return smu->ppt_funcs->set_power_limit(smu, limit_type, limit); if (limit > smu->max_power_limit) { dev_err(smu->adev->dev, "New power limit (%d) is over the max allowed %d\n", limit, smu->max_power_limit); - ret = -EINVAL; - goto out; + return -EINVAL; } if (!limit) @@ -2368,9 +2245,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit) smu->user_dpm_profile.power_limit = limit; } -out: - mutex_unlock(&smu->mutex); - return ret; } @@ -2381,13 +2255,9 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->print_clk_levels) ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf); - mutex_unlock(&smu->mutex); - return ret; } @@ -2444,14 +2314,10 @@ static int smu_od_edit_dpm_table(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->od_edit_dpm_table) { ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size); } - mutex_unlock(&smu->mutex); - return ret; } @@ -2475,8 +2341,6 @@ static int smu_read_sensor(void *handle, size_val = *size_arg; size = &size_val; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->read_sensor) if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size)) goto unlock; @@ -2517,8 +2381,6 @@ static int smu_read_sensor(void *handle, } unlock: - mutex_unlock(&smu->mutex); - // assign uint32_t to int *size_arg = size_val; @@ -2528,7 +2390,6 @@ unlock: static int smu_get_power_profile_mode(void *handle, char *buf) { 
struct smu_context *smu = handle; - int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->get_power_profile_mode) @@ -2536,13 +2397,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf) if (!buf) return -EINVAL; - mutex_lock(&smu->mutex); - - ret = smu->ppt_funcs->get_power_profile_mode(smu, buf); - - mutex_unlock(&smu->mutex); - - return ret; + return smu->ppt_funcs->get_power_profile_mode(smu, buf); } static int smu_set_power_profile_mode(void *handle, @@ -2550,19 +2405,12 @@ static int smu_set_power_profile_mode(void *handle, uint32_t param_size) { struct smu_context *smu = handle; - int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled || !smu->ppt_funcs->set_power_profile_mode) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - - smu_bump_power_profile_mode(smu, param, param_size); - - mutex_unlock(&smu->mutex); - - return ret; + return smu_bump_power_profile_mode(smu, param, param_size); } @@ -2579,12 +2427,8 @@ static int smu_get_fan_control_mode(void *handle, u32 *fan_mode) if (!fan_mode) return -EINVAL; - mutex_lock(&smu->mutex); - *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu); - mutex_unlock(&smu->mutex); - return 0; } @@ -2602,8 +2446,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value) if (value == U32_MAX) return -EINVAL; - mutex_lock(&smu->mutex); - ret = smu->ppt_funcs->set_fan_control_mode(smu, value); if (ret) goto out; @@ -2620,8 +2462,6 @@ static int smu_set_fan_control_mode(void *handle, u32 value) } out: - mutex_unlock(&smu->mutex); - return ret; } @@ -2639,12 +2479,8 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed) if (!speed) return -EINVAL; - mutex_lock(&smu->mutex); - ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed); - mutex_unlock(&smu->mutex); - return ret; } @@ -2662,8 +2498,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed) if (speed == U32_MAX) return -EINVAL; - mutex_lock(&smu->mutex); - ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed); if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) { smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM; @@ -2674,8 +2508,6 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed) smu->user_dpm_profile.fan_speed_rpm = 0; } - mutex_unlock(&smu->mutex); - return ret; } @@ -2693,30 +2525,19 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed) if (!speed) return -EINVAL; - mutex_lock(&smu->mutex); - ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed); - mutex_unlock(&smu->mutex); - return ret; } static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk) { struct smu_context *smu = handle; - int ret = 0; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - - ret = smu_set_min_dcef_deep_sleep(smu, clk); - - mutex_unlock(&smu->mutex); - - return ret; + return smu_set_min_dcef_deep_sleep(smu, clk); } static int smu_get_clock_by_type_with_latency(void *handle, @@ -2730,8 +2551,6 @@ static int smu_get_clock_by_type_with_latency(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_clock_by_type_with_latency) { switch (type) { case amd_pp_sys_clock: @@ -2748,15 +2567,12 @@ static int smu_get_clock_by_type_with_latency(void *handle, break; default: dev_err(smu->adev->dev, "Invalid clock type!\n"); - mutex_unlock(&smu->mutex); return -EINVAL; } ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks); } - mutex_unlock(&smu->mutex); - 
return ret; } @@ -2769,13 +2585,9 @@ static int smu_display_clock_voltage_request(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->display_clock_voltage_request) ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req); - mutex_unlock(&smu->mutex); - return ret; } @@ -2789,13 +2601,9 @@ static int smu_display_disable_memory_clock_switch(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->display_disable_memory_clock_switch) ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch); - mutex_unlock(&smu->mutex); - return ret; } @@ -2808,13 +2616,9 @@ static int smu_set_xgmi_pstate(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->set_xgmi_pstate) ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate); - mutex_unlock(&smu->mutex); - if(ret) dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n"); @@ -2824,21 +2628,16 @@ static int smu_set_xgmi_pstate(void *handle, static int smu_get_baco_capability(void *handle, bool *cap) { struct smu_context *smu = handle; - int ret = 0; *cap = false; if (!smu->pm_enabled) return 0; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support) *cap = smu->ppt_funcs->baco_is_support(smu); - mutex_unlock(&smu->mutex); - - return ret; + return 0; } static int smu_baco_set_state(void *handle, int state) @@ -2850,20 +2649,11 @@ static int smu_baco_set_state(void *handle, int state) return -EOPNOTSUPP; if (state == 0) { - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->baco_exit) ret = smu->ppt_funcs->baco_exit(smu); - - mutex_unlock(&smu->mutex); } else if (state == 1) { - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->baco_enter) ret = smu->ppt_funcs->baco_enter(smu); - - mutex_unlock(&smu->mutex); - } else { return -EINVAL; } @@ -2882,13 +2672,9 @@ bool smu_mode1_reset_is_support(struct smu_context *smu) if (!smu->pm_enabled) return false; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support) ret = smu->ppt_funcs->mode1_reset_is_support(smu); - mutex_unlock(&smu->mutex); - return ret; } @@ -2899,13 +2685,9 @@ bool smu_mode2_reset_is_support(struct smu_context *smu) if (!smu->pm_enabled) return false; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support) ret = smu->ppt_funcs->mode2_reset_is_support(smu); - mutex_unlock(&smu->mutex); - return ret; } @@ -2916,13 +2698,9 @@ int smu_mode1_reset(struct smu_context *smu) if (!smu->pm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->mode1_reset) ret = smu->ppt_funcs->mode1_reset(smu); - mutex_unlock(&smu->mutex); - return ret; } @@ -2934,13 +2712,9 @@ static int smu_mode2_reset(void *handle) if (!smu->pm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->mode2_reset) ret = smu->ppt_funcs->mode2_reset(smu); - mutex_unlock(&smu->mutex); - if (ret) dev_err(smu->adev->dev, "Mode2 reset failed!\n"); @@ -2956,13 +2730,9 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc) ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks); - mutex_unlock(&smu->mutex); - return ret; } @@ -2976,13 +2746,9 @@ static int 
smu_get_uclk_dpm_states(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_uclk_dpm_states) ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states); - mutex_unlock(&smu->mutex); - return ret; } @@ -2994,13 +2760,9 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle) if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_current_power_state) pm_state = smu->ppt_funcs->get_current_power_state(smu); - mutex_unlock(&smu->mutex); - return pm_state; } @@ -3013,20 +2775,15 @@ static int smu_get_dpm_clock_table(void *handle, if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->get_dpm_clock_table) ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table); - mutex_unlock(&smu->mutex); - return ret; } static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) { struct smu_context *smu = handle; - ssize_t size; if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; @@ -3034,13 +2791,7 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table) if (!smu->ppt_funcs->get_gpu_metrics) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - - size = smu->ppt_funcs->get_gpu_metrics(smu, table); - - mutex_unlock(&smu->mutex); - - return size; + return smu->ppt_funcs->get_gpu_metrics(smu, table); } static int smu_enable_mgpu_fan_boost(void *handle) @@ -3051,13 +2802,9 @@ static int smu_enable_mgpu_fan_boost(void *handle) if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled) return -EOPNOTSUPP; - mutex_lock(&smu->mutex); - if (smu->ppt_funcs->enable_mgpu_fan_boost) ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu); - mutex_unlock(&smu->mutex); - return ret; } @@ -3067,10 +2814,8 @@ static int smu_gfx_state_change_set(void *handle, struct smu_context *smu = handle; int ret = 0; - mutex_lock(&smu->mutex); if (smu->ppt_funcs->gfx_state_change_set) ret = smu->ppt_funcs->gfx_state_change_set(smu, state); - mutex_unlock(&smu->mutex); return ret; } @@ -3079,10 +2824,8 @@ int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable) { int ret = 0; - mutex_lock(&smu->mutex); if (smu->ppt_funcs->smu_handle_passthrough_sbr) ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable); - mutex_unlock(&smu->mutex); return ret; } @@ -3091,11 +2834,9 @@ int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc) { int ret = -EOPNOTSUPP; - mutex_lock(&smu->mutex); if (smu->ppt_funcs && smu->ppt_funcs->get_ecc_info) ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc); - mutex_unlock(&smu->mutex); return ret; @@ -3112,12 +2853,10 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size) *addr = NULL; *size = 0; - mutex_lock(&smu->mutex); if (memory_pool->bo) { *addr = memory_pool->cpu_addr; *size = memory_pool->size; } - mutex_unlock(&smu->mutex); return 0; } @@ -3181,11 +2920,8 @@ int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event, { int ret = -EINVAL; - if (smu->ppt_funcs->wait_for_event) { - mutex_lock(&smu->mutex); + if (smu->ppt_funcs->wait_for_event) ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg); - mutex_unlock(&smu->mutex); - } return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 3fdab6a44901..00760f3c6da5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ 
b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -488,7 +488,6 @@ struct smu_context const struct cmn2asic_mapping *table_map; const struct cmn2asic_mapping *pwr_src_map; const struct cmn2asic_mapping *workload_map; - struct mutex mutex; struct mutex sensor_lock; struct mutex metrics_lock; struct mutex message_lock; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index d3963bfe5c89..1b265b8c7996 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2118,9 +2118,9 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap, } } } - mutex_lock(&smu->mutex); + mutex_lock(&adev->pm.mutex); r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); - mutex_unlock(&smu->mutex); + mutex_unlock(&adev->pm.mutex); if (r) goto fail; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 37e11716e919..5cdf88f3bb6c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2826,9 +2826,9 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap, } } } - mutex_lock(&smu->mutex); + mutex_lock(&adev->pm.mutex); r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); - mutex_unlock(&smu->mutex); + mutex_unlock(&adev->pm.mutex); if (r) goto fail; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index c29353e05fd2..5c0ad4dd6543 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -3497,9 +3497,9 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, } } } - mutex_lock(&smu->mutex); + mutex_lock(&adev->pm.mutex); r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); - mutex_unlock(&smu->mutex); + mutex_unlock(&adev->pm.mutex); if (r) goto fail; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index ac8ba5e0e697..3085a07e70d2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1521,9 +1521,9 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, } } } - mutex_lock(&smu->mutex); + mutex_lock(&adev->pm.mutex); r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true); - mutex_unlock(&smu->mutex); + mutex_unlock(&adev->pm.mutex); if (r) goto fail; -- cgit From 974d5ef0dd9d251dd4571e30d5e79e7e4940d424 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Nov 2021 13:15:57 +0800 Subject: drm/amd/pm: drop unneeded vcn/jpeg_gate_lock As those related APIs are already protected by adev->pm.mutex. 
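The gate state itself lives in atomic_t (vcn_gated/jpeg_gated), and the power-up/power-down sequencing is ordered by adev->pm.mutex at the dpm entry points, so the dedicated per-gate mutexes no longer protect anything the outer lock does not. The core pattern the helpers follow is roughly this (a sketch reconstructed from context; the hunks below elide the function bodies):

    /* skip if the block is already in the requested state */
    if (atomic_read(&power_gate->vcn_gated) ^ enable)
        return 0;

    ret = smu->ppt_funcs->dpm_set_vcn_enable(smu, enable);
    if (!ret)
        atomic_set(&power_gate->vcn_gated, !enable);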
Signed-off-by: Evan Quan Reviewed-by: Guchun Chen Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 63 +++++---------------------- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 2 - 2 files changed, 11 insertions(+), 54 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index eaaa5b033d46..8ac87733d211 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -158,8 +158,8 @@ static u32 smu_get_sclk(void *handle, bool low) return clk_freq * 100; } -static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu, - bool enable) +static int smu_dpm_set_vcn_enable(struct smu_context *smu, + bool enable) { struct smu_power_context *smu_power = &smu->smu_power; struct smu_power_gate *power_gate = &smu_power->power_gate; @@ -178,24 +178,8 @@ static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu, return ret; } -static int smu_dpm_set_vcn_enable(struct smu_context *smu, - bool enable) -{ - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; - int ret = 0; - - mutex_lock(&power_gate->vcn_gate_lock); - - ret = smu_dpm_set_vcn_enable_locked(smu, enable); - - mutex_unlock(&power_gate->vcn_gate_lock); - - return ret; -} - -static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu, - bool enable) +static int smu_dpm_set_jpeg_enable(struct smu_context *smu, + bool enable) { struct smu_power_context *smu_power = &smu->smu_power; struct smu_power_gate *power_gate = &smu_power->power_gate; @@ -214,22 +198,6 @@ static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu, return ret; } -static int smu_dpm_set_jpeg_enable(struct smu_context *smu, - bool enable) -{ - struct smu_power_context *smu_power = &smu->smu_power; - struct smu_power_gate *power_gate = &smu_power->power_gate; - int ret = 0; - - mutex_lock(&power_gate->jpeg_gate_lock); - - ret = smu_dpm_set_jpeg_enable_locked(smu, enable); - - mutex_unlock(&power_gate->jpeg_gate_lock); - - return ret; -} - /** * smu_dpm_set_power_gate - power gate/ungate the specific IP block * @@ -620,32 +588,25 @@ static int smu_set_default_dpm_table(struct smu_context *smu) if (!smu->ppt_funcs->set_default_dpm_table) return 0; - mutex_lock(&power_gate->vcn_gate_lock); - mutex_lock(&power_gate->jpeg_gate_lock); - vcn_gate = atomic_read(&power_gate->vcn_gated); jpeg_gate = atomic_read(&power_gate->jpeg_gated); - ret = smu_dpm_set_vcn_enable_locked(smu, true); + ret = smu_dpm_set_vcn_enable(smu, true); if (ret) - goto err0_out; + return ret; - ret = smu_dpm_set_jpeg_enable_locked(smu, true); + ret = smu_dpm_set_jpeg_enable(smu, true); if (ret) - goto err1_out; + goto err_out; ret = smu->ppt_funcs->set_default_dpm_table(smu); if (ret) dev_err(smu->adev->dev, "Failed to setup default dpm clock tables!\n"); - smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate); -err1_out: - smu_dpm_set_vcn_enable_locked(smu, !vcn_gate); -err0_out: - mutex_unlock(&power_gate->jpeg_gate_lock); - mutex_unlock(&power_gate->vcn_gate_lock); - + smu_dpm_set_jpeg_enable(smu, !jpeg_gate); +err_out: + smu_dpm_set_vcn_enable(smu, !vcn_gate); return ret; } @@ -1007,8 +968,6 @@ static int smu_sw_init(void *handle) atomic_set(&smu->smu_power.power_gate.vcn_gated, 1); atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1); - mutex_init(&smu->smu_power.power_gate.vcn_gate_lock); - mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock); smu->workload_mask = 1 << 
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT]; smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 00760f3c6da5..c3efe4fea5e0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -376,8 +376,6 @@ struct smu_power_gate { bool vce_gated; atomic_t vcn_gated; atomic_t jpeg_gated; - struct mutex vcn_gate_lock; - struct mutex jpeg_gate_lock; }; struct smu_power_context { -- cgit From da11407f066c28c49bb7a4ff6a6b742b7a18d7ca Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Nov 2021 13:37:26 +0800 Subject: drm/amd/pm: drop unneeded smu->metrics_lock As all those related APIs are already well protected by adev->pm.mutex and smu->message_lock. Signed-off-by: Evan Quan Reviewed-by: Guchun Chen Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1 - drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 - drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 14 +-- .../drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 10 +- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 112 ++++++--------------- .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 27 ++--- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 28 ++---- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 14 +-- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 23 ++--- .../gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 10 +- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 21 +--- drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 4 - 12 files changed, 70 insertions(+), 195 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 8ac87733d211..f10a0665a286 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -956,7 +956,6 @@ static int smu_sw_init(void *handle) bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); mutex_init(&smu->sensor_lock); - mutex_init(&smu->metrics_lock); mutex_init(&smu->message_lock); INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index c3efe4fea5e0..63ed807c96f5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -487,7 +487,6 @@ struct smu_context const struct cmn2asic_mapping *pwr_src_map; const struct cmn2asic_mapping *workload_map; struct mutex sensor_lock; - struct mutex metrics_lock; struct mutex message_lock; uint64_t pool_size; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 1b265b8c7996..5a8b2d549c2b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -602,15 +602,11 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu, SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -693,8 +689,6 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } diff 
--git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 2238ee19c222..7ae6b1bd648a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -150,13 +150,9 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu, SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, NULL, false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, NULL, false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -200,8 +196,6 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 5cdf88f3bb6c..68ead70673c5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -546,15 +546,11 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu, (SmuMetrics_legacy_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -624,8 +620,6 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } @@ -638,15 +632,11 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu, (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -719,8 +709,6 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } @@ -733,15 +721,11 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu, (SmuMetrics_NV12_legacy_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -811,8 +795,6 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } @@ -825,15 +807,11 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu, (SmuMetrics_NV12_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -906,8 +884,6 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } @@ -2708,20 +2684,14 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu, SmuMetrics_legacy_t metrics; int ret = 
0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - true); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + true); + if (ret) return ret; - } memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_legacy_t)); - mutex_unlock(&smu->metrics_lock); - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); gpu_metrics->temperature_edge = metrics.TemperatureEdge; @@ -2901,20 +2871,14 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu, SmuMetrics_t metrics; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - true); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + true); + if (ret) return ret; - } memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t)); - mutex_unlock(&smu->metrics_lock); - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); gpu_metrics->temperature_edge = metrics.TemperatureEdge; @@ -2979,20 +2943,14 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu, SmuMetrics_NV12_legacy_t metrics; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - true); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + true); + if (ret) return ret; - } memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_legacy_t)); - mutex_unlock(&smu->metrics_lock); - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); gpu_metrics->temperature_edge = metrics.TemperatureEdge; @@ -3060,20 +3018,14 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu, SmuMetrics_NV12_t metrics; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - true); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + true); + if (ret) return ret; - } memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t)); - mutex_unlock(&smu->metrics_lock); - smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); gpu_metrics->temperature_edge = metrics.TemperatureEdge; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 5c0ad4dd6543..73579996a1ab 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -539,15 +539,11 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu, uint16_t average_gfx_activity; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -647,8 +643,6 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } @@ -3580,14 +3574,11 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, uint16_t average_gfx_activity; int ret = 0; - mutex_lock(&smu->metrics_lock); - ret = smu_cmn_get_metrics_table_locked(smu, - &metrics_external, - true); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + &metrics_external, + true); + if (ret) return ret; - } smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3); @@ -3677,8 +3668,6 @@ static ssize_t 
sienna_cichlid_get_gpu_metrics(struct smu_context *smu, smu_v11_0_get_current_pcie_link_speed(smu); } - mutex_unlock(&smu->metrics_lock); - gpu_metrics->system_clock_counter = ktime_get_boottime_ns(); *table = (void *)gpu_metrics; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index 5cb07ed227fb..c736adca6fbb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -273,15 +273,11 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -335,8 +331,6 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } @@ -348,15 +342,11 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu, SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -410,8 +400,6 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index 25c4b135f830..d75508085578 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -1128,15 +1128,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu, SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_AVERAGE_GFXCLK: @@ -1201,8 +1197,6 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 3085a07e70d2..05b7d8e38196 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -571,15 +571,11 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu, SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, - NULL, - false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, + NULL, + false); + if (ret) return ret; - } switch (member) { case METRICS_CURR_GFXCLK: @@ -653,8 +649,6 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } @@ -1594,17 +1588,14 @@ static void aldebaran_get_unique_id(struct smu_context *smu) uint32_t upper32 = 0, lower32 = 0; int ret; - 
mutex_lock(&smu->metrics_lock); - ret = smu_cmn_get_metrics_table_locked(smu, NULL, false); + ret = smu_cmn_get_metrics_table(smu, NULL, false); if (ret) - goto out_unlock; + goto out; upper32 = metrics->PublicSerialNumUpper32; lower32 = metrics->PublicSerialNumLower32; -out_unlock: - mutex_unlock(&smu->metrics_lock); - +out: adev->unique_id = ((uint64_t)upper32 << 32) | lower32; if (adev->serial[0] == '\0') sprintf(adev->serial, "%016llx", adev->unique_id); diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index caf1775d48ef..451d30dcc639 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -310,13 +310,9 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table; int ret = 0; - mutex_lock(&smu->metrics_lock); - - ret = smu_cmn_get_metrics_table_locked(smu, NULL, false); - if (ret) { - mutex_unlock(&smu->metrics_lock); + ret = smu_cmn_get_metrics_table(smu, NULL, false); + if (ret) return ret; - } switch (member) { case METRICS_AVERAGE_GFXCLK: @@ -387,8 +383,6 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu, break; } - mutex_unlock(&smu->metrics_lock); - return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index ee1a312fd497..849e4403cc0e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -964,9 +964,9 @@ int smu_cmn_write_pptable(struct smu_context *smu) true); } -int smu_cmn_get_metrics_table_locked(struct smu_context *smu, - void *metrics_table, - bool bypass_cache) +int smu_cmn_get_metrics_table(struct smu_context *smu, + void *metrics_table, + bool bypass_cache) { struct smu_table_context *smu_table= &smu->smu_table; uint32_t table_size = @@ -994,21 +994,6 @@ int smu_cmn_get_metrics_table_locked(struct smu_context *smu, return 0; } -int smu_cmn_get_metrics_table(struct smu_context *smu, - void *metrics_table, - bool bypass_cache) -{ - int ret = 0; - - mutex_lock(&smu->metrics_lock); - ret = smu_cmn_get_metrics_table_locked(smu, - metrics_table, - bypass_cache); - mutex_unlock(&smu->metrics_lock); - - return ret; -} - void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev) { struct metrics_table_header *header = (struct metrics_table_header *)table; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h index 67a25da79256..f0b4fb2a0960 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h @@ -101,10 +101,6 @@ int smu_cmn_write_watermarks_table(struct smu_context *smu); int smu_cmn_write_pptable(struct smu_context *smu); -int smu_cmn_get_metrics_table_locked(struct smu_context *smu, - void *metrics_table, - bool bypass_cache); - int smu_cmn_get_metrics_table(struct smu_context *smu, void *metrics_table, bool bypass_cache); -- cgit From 56383e8f4d296a33cc5b2a11864025d8205e9438 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Nov 2021 13:51:34 +0800 Subject: drm/amd/pm: drop unneeded smu->sensor_lock As all those related APIs are already well protected by adev->pm.mutex and smu->message_lock. 
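All sensor reads funnel through a single wrapper on the amdgpu_dpm side, which (per the rest of this series) takes adev->pm.mutex for the whole call, and any SMU message issued while servicing the read is still serialized by smu->message_lock. A sketch of the assumed caller, for illustration only (this patch does not touch it):

    int amdgpu_dpm_read_sensor(struct amdgpu_device *adev,
                               enum amd_pp_sensors sensor,
                               void *data, int *size)
    {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
        int ret;

        if (!data || !size)
            return -EINVAL;

        mutex_lock(&adev->pm.mutex);
        ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
                                    sensor, data, size);
        mutex_unlock(&adev->pm.mutex);

        return ret;
    }

With that outer lock in place, the per-call smu->sensor_lock in each ASIC's read_sensor implementation was a third, redundant level of locking.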
Signed-off-by: Evan Quan Reviewed-by: Guchun Chen Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1 - drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 - drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 2 -- drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 4 ---- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 2 -- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 2 -- drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 2 -- drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 2 -- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 2 -- drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 2 -- 10 files changed, 20 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index f10a0665a286..57f3d26b03fe 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -955,7 +955,6 @@ static int smu_sw_init(void *handle) bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX); bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); - mutex_init(&smu->sensor_lock); mutex_init(&smu->message_lock); INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 63ed807c96f5..2cef7ff46010 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -486,7 +486,6 @@ struct smu_context const struct cmn2asic_mapping *table_map; const struct cmn2asic_mapping *pwr_src_map; const struct cmn2asic_mapping *workload_map; - struct mutex sensor_lock; struct mutex message_lock; uint64_t pool_size; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 5a8b2d549c2b..2c78d04d5611 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -1113,7 +1113,6 @@ static int arcturus_read_sensor(struct smu_context *smu, if (!data || !size) return -EINVAL; - mutex_lock(&smu->sensor_lock); switch (sensor) { case AMDGPU_PP_SENSOR_MAX_FAN_RPM: *(uint32_t *)data = pptable->FanMaximumRpm; @@ -1174,7 +1173,6 @@ static int arcturus_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; break; } - mutex_unlock(&smu->sensor_lock); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 7ae6b1bd648a..2acd7470431e 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -209,8 +209,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu, if (!data || !size) return -EINVAL; - mutex_lock(&smu->sensor_lock); - switch (sensor) { case AMDGPU_PP_SENSOR_GFX_SCLK: ret = cyan_skillfish_get_smu_metrics_data(smu, @@ -261,8 +259,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu, break; } - mutex_unlock(&smu->sensor_lock); - return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 68ead70673c5..84834c24a7e9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -1964,7 +1964,6 @@ static int navi10_read_sensor(struct smu_context *smu, if(!data || !size) return -EINVAL; - mutex_lock(&smu->sensor_lock); switch (sensor) { case 
AMDGPU_PP_SENSOR_MAX_FAN_RPM: *(uint32_t *)data = pptable->FanMaximumRpm; @@ -2024,7 +2023,6 @@ static int navi10_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; break; } - mutex_unlock(&smu->sensor_lock); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 73579996a1ab..651fe748e423 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -1619,7 +1619,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu, if(!data || !size) return -EINVAL; - mutex_lock(&smu->sensor_lock); switch (sensor) { case AMDGPU_PP_SENSOR_MAX_FAN_RPM: GET_PPTABLE_MEMBER(FanMaximumRpm, &temp); @@ -1680,7 +1679,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; break; } - mutex_unlock(&smu->sensor_lock); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c index c736adca6fbb..721027917f81 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c @@ -1494,7 +1494,6 @@ static int vangogh_read_sensor(struct smu_context *smu, if (!data || !size) return -EINVAL; - mutex_lock(&smu->sensor_lock); switch (sensor) { case AMDGPU_PP_SENSOR_GPU_LOAD: ret = vangogh_common_get_smu_metrics_data(smu, @@ -1556,7 +1555,6 @@ static int vangogh_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; break; } - mutex_unlock(&smu->sensor_lock); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c index d75508085578..e99e7b2bf25b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c @@ -1209,7 +1209,6 @@ static int renoir_read_sensor(struct smu_context *smu, if (!data || !size) return -EINVAL; - mutex_lock(&smu->sensor_lock); switch (sensor) { case AMDGPU_PP_SENSOR_GPU_LOAD: ret = renoir_get_smu_metrics_data(smu, @@ -1277,7 +1276,6 @@ static int renoir_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; break; } - mutex_unlock(&smu->sensor_lock); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 05b7d8e38196..8c02adbf446a 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1141,7 +1141,6 @@ static int aldebaran_read_sensor(struct smu_context *smu, if (!data || !size) return -EINVAL; - mutex_lock(&smu->sensor_lock); switch (sensor) { case AMDGPU_PP_SENSOR_MEM_LOAD: case AMDGPU_PP_SENSOR_GPU_LOAD: @@ -1180,7 +1179,6 @@ static int aldebaran_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; break; } - mutex_unlock(&smu->sensor_lock); return ret; } diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c index 451d30dcc639..bd24a2632214 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c @@ -395,7 +395,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu, if (!data || !size) return -EINVAL; - mutex_lock(&smu->sensor_lock); switch (sensor) { case AMDGPU_PP_SENSOR_GPU_LOAD: ret = yellow_carp_get_smu_metrics_data(smu, @@ -463,7 +462,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu, ret = -EOPNOTSUPP; break; } - 
mutex_unlock(&smu->sensor_lock); return ret; } -- cgit From 1c4dba5e14c0085d412429d50cbcf8e9e2a18924 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Nov 2021 14:02:25 +0800 Subject: drm/amd/pm: drop unneeded smu_baco->mutex As those APIs related are already well protected by adev->pm.mutex. Signed-off-by: Evan Quan Reviewed-by: Guchun Chen Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1 - drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 - drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 19 +++++-------------- 3 files changed, 5 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 57f3d26b03fe..fe155a7874b0 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -567,7 +567,6 @@ static int smu_early_init(void *handle) smu->adev = adev; smu->pm_enabled = !!amdgpu_dpm; smu->is_apu = false; - mutex_init(&smu->smu_baco.mutex); smu->smu_baco.state = SMU_BACO_STATE_EXIT; smu->smu_baco.platform_support = false; smu->user_dpm_profile.fan_mode = -1; diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 2cef7ff46010..79b2a817491c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -428,7 +428,6 @@ enum smu_baco_state struct smu_baco_context { - struct mutex mutex; uint32_t state; bool platform_support; }; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c index e94a400db669..b58a4c2823c2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c @@ -1617,13 +1617,8 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu) enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu) { struct smu_baco_context *smu_baco = &smu->smu_baco; - enum smu_baco_state baco_state; - mutex_lock(&smu_baco->mutex); - baco_state = smu_baco->state; - mutex_unlock(&smu_baco->mutex); - - return baco_state; + return smu_baco->state; } #define D3HOT_BACO_SEQUENCE 0 @@ -1640,8 +1635,6 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) if (smu_v11_0_baco_get_state(smu) == state) return 0; - mutex_lock(&smu_baco->mutex); - if (state == SMU_BACO_STATE_ENTER) { switch (adev->ip_versions[MP1_HWIP][0]) { case IP_VERSION(11, 0, 7): @@ -1682,18 +1675,16 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state) } else { ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL); if (ret) - goto out; + return ret; /* clear vbios scratch 6 and 7 for coming asic reinit */ WREG32(adev->bios_scratch_reg_offset + 6, 0); WREG32(adev->bios_scratch_reg_offset + 7, 0); } - if (ret) - goto out; - smu_baco->state = state; -out: - mutex_unlock(&smu_baco->mutex); + if (!ret) + smu_baco->state = state; + return ret; } -- cgit From 1f2cf08aa010594036ccfb19d207e5b80b5bb7a0 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Nov 2021 14:36:32 +0800 Subject: drm/amd/pm: drop unneeded feature->mutex As all those related APIs are already well protected by adev->pm.mutex. 
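One detail worth spelling out: test_bit() on a single bit is safe on its own, but bitmap_or()/bitmap_andnot() are plain, non-atomic word operations, so concurrent updates to feature->enabled still need one serializing lock. After this patch that lock is adev->pm.mutex, held by the dpm entry points around calls such as the following (illustrative pattern, not a hunk from this patch):

    mutex_lock(&adev->pm.mutex);
    ret = smu_cmn_feature_update_enable_state(smu, feature_mask, enabled);
    mutex_unlock(&adev->pm.mutex);

which is why dropping feature->mutex does not reintroduce a race.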
Signed-off-by: Evan Quan Reviewed-by: Guchun Chen Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 1 - drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 1 - drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 20 ++++++++------------ drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 19 ++----------------- 4 files changed, 10 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index fe155a7874b0..5ace30434e60 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -949,7 +949,6 @@ static int smu_sw_init(void *handle) smu->pool_size = adev->pm.smu_prv_buffer_size; smu->smu_feature.feature_num = SMU_FEATURE_MAX; - mutex_init(&smu->smu_feature.mutex); bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX); bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX); bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 79b2a817491c..18f24db7d202 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -391,7 +391,6 @@ struct smu_feature DECLARE_BITMAP(supported, SMU_FEATURE_MAX); DECLARE_BITMAP(allowed, SMU_FEATURE_MAX); DECLARE_BITMAP(enabled, SMU_FEATURE_MAX); - struct mutex mutex; }; struct smu_clocks { diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c index d9e3ebfeee73..1754a3720179 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c @@ -722,25 +722,21 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu) int ret = 0; uint32_t feature_mask[2]; - mutex_lock(&feature->mutex); - if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64) - goto failed; + if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || + feature->feature_num < 64) + return -EINVAL; bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64); ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh, feature_mask[1], NULL); if (ret) - goto failed; - - ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow, - feature_mask[0], NULL); - if (ret) - goto failed; + return ret; -failed: - mutex_unlock(&feature->mutex); - return ret; + return smu_cmn_send_smc_msg_with_param(smu, + SMU_MSG_SetAllowedFeaturesMaskLow, + feature_mask[0], + NULL); } int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c index 849e4403cc0e..2c1eff9fe4f2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c @@ -481,7 +481,6 @@ int smu_cmn_feature_is_supported(struct smu_context *smu, { struct smu_feature *feature = &smu->smu_feature; int feature_id; - int ret = 0; feature_id = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_FEATURE, @@ -491,11 +490,7 @@ int smu_cmn_feature_is_supported(struct smu_context *smu, WARN_ON(feature_id > feature->feature_num); - mutex_lock(&feature->mutex); - ret = test_bit(feature_id, feature->supported); - mutex_unlock(&feature->mutex); - - return ret; + return test_bit(feature_id, feature->supported); } int smu_cmn_feature_is_enabled(struct smu_context *smu, @@ -504,7 +499,6 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu, struct smu_feature 
*feature = &smu->smu_feature; struct amdgpu_device *adev = smu->adev; int feature_id; - int ret = 0; if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH) return 1; @@ -517,11 +511,7 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu, WARN_ON(feature_id > feature->feature_num); - mutex_lock(&feature->mutex); - ret = test_bit(feature_id, feature->enabled); - mutex_unlock(&feature->mutex); - - return ret; + return test_bit(feature_id, feature->enabled); } bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu, @@ -666,14 +656,12 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu, return ret; } - mutex_lock(&feature->mutex); if (enabled) bitmap_or(feature->enabled, feature->enabled, (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); else bitmap_andnot(feature->enabled, feature->enabled, (unsigned long *)(&feature_mask), SMU_FEATURE_MAX); - mutex_unlock(&feature->mutex); return ret; } @@ -843,11 +831,8 @@ int smu_cmn_disable_all_features_with_exception(struct smu_context *smu, } if (no_hw_disablement) { - mutex_lock(&feature->mutex); bitmap_andnot(feature->enabled, feature->enabled, (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX); - mutex_unlock(&feature->mutex); - return 0; } else { return smu_cmn_feature_update_enable_state(smu, -- cgit From a746c77e5ee86829c03dfaf718e18b589f849be3 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 29 Nov 2021 14:57:00 +0800 Subject: drm/amd/pm: drop unneeded hwmgr->smu_lock As all those related APIs are already well protected by adev->pm.mutex. Signed-off-by: Evan Quan Reviewed-by: Guchun Chen Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 278 ++++------------------- drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h | 1 - 2 files changed, 38 insertions(+), 241 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c index 76c26ae368f9..a2da46bf3985 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c +++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c @@ -50,7 +50,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev) hwmgr->adev = adev; hwmgr->not_vf = !amdgpu_sriov_vf(adev); hwmgr->device = amdgpu_cgs_create_device(adev); - mutex_init(&hwmgr->smu_lock); mutex_init(&hwmgr->msg_lock); hwmgr->chip_family = adev->family; hwmgr->chip_id = adev->asic_type; @@ -178,12 +177,9 @@ static int pp_late_init(void *handle) struct amdgpu_device *adev = handle; struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; - if (hwmgr && hwmgr->pm_en) { - mutex_lock(&hwmgr->smu_lock); + if (hwmgr && hwmgr->pm_en) hwmgr_handle_task(hwmgr, AMD_PP_TASK_COMPLETE_INIT, NULL); - mutex_unlock(&hwmgr->smu_lock); - } if (adev->pm.smu_prv_buffer_size != 0) pp_reserve_vram_for_smu(adev); @@ -345,11 +341,9 @@ static int pp_dpm_force_performance_level(void *handle, if (level == hwmgr->dpm_level) return 0; - mutex_lock(&hwmgr->smu_lock); pp_dpm_en_umd_pstate(hwmgr, &level); hwmgr->request_dpm_level = level; hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -358,21 +352,16 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level( void *handle) { struct pp_hwmgr *hwmgr = handle; - enum amd_dpm_forced_level level; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - level = hwmgr->dpm_level; - mutex_unlock(&hwmgr->smu_lock); - return level; + return hwmgr->dpm_level; } static uint32_t pp_dpm_get_sclk(void *handle, bool low) 
{ struct pp_hwmgr *hwmgr = handle; - uint32_t clk = 0; if (!hwmgr || !hwmgr->pm_en) return 0; @@ -381,16 +370,12 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low) pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } - mutex_lock(&hwmgr->smu_lock); - clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low); - mutex_unlock(&hwmgr->smu_lock); - return clk; + return hwmgr->hwmgr_func->get_sclk(hwmgr, low); } static uint32_t pp_dpm_get_mclk(void *handle, bool low) { struct pp_hwmgr *hwmgr = handle; - uint32_t clk = 0; if (!hwmgr || !hwmgr->pm_en) return 0; @@ -399,10 +384,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low) pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } - mutex_lock(&hwmgr->smu_lock); - clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low); - mutex_unlock(&hwmgr->smu_lock); - return clk; + return hwmgr->hwmgr_func->get_mclk(hwmgr, low); } static void pp_dpm_powergate_vce(void *handle, bool gate) @@ -416,9 +398,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate) pr_info_ratelimited("%s was not implemented.\n", __func__); return; } - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->powergate_vce(hwmgr, gate); - mutex_unlock(&hwmgr->smu_lock); } static void pp_dpm_powergate_uvd(void *handle, bool gate) @@ -432,25 +412,18 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate) pr_info_ratelimited("%s was not implemented.\n", __func__); return; } - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate); - mutex_unlock(&hwmgr->smu_lock); } static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id, enum amd_pm_state_type *user_state) { - int ret = 0; struct pp_hwmgr *hwmgr = handle; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr_handle_task(hwmgr, task_id, user_state); - mutex_unlock(&hwmgr->smu_lock); - - return ret; + return hwmgr_handle_task(hwmgr, task_id, user_state); } static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) @@ -462,8 +435,6 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - state = hwmgr->current_ps; switch (state->classification.ui_label) { @@ -483,7 +454,6 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle) pm_type = POWER_STATE_TYPE_DEFAULT; break; } - mutex_unlock(&hwmgr->smu_lock); return pm_type; } @@ -501,9 +471,7 @@ static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode) if (mode == U32_MAX) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -521,16 +489,13 @@ static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode) if (!fan_mode) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); *fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr); - mutex_unlock(&hwmgr->smu_lock); return 0; } static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EOPNOTSUPP; @@ -541,16 +506,12 @@ static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed) if (speed == U32_MAX) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed); } static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t 
*speed) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EOPNOTSUPP; @@ -561,16 +522,12 @@ static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed) if (!speed) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed); } static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EOPNOTSUPP; @@ -581,16 +538,12 @@ static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm) if (!rpm) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm); } static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EOPNOTSUPP; @@ -601,10 +554,7 @@ static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm) if (rpm == U32_MAX) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm); } static int pp_dpm_get_pp_num_states(void *handle, @@ -618,8 +568,6 @@ static int pp_dpm_get_pp_num_states(void *handle, if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - data->nums = hwmgr->num_ps; for (i = 0; i < hwmgr->num_ps; i++) { @@ -642,23 +590,18 @@ static int pp_dpm_get_pp_num_states(void *handle, data->states[i] = POWER_STATE_TYPE_DEFAULT; } } - mutex_unlock(&hwmgr->smu_lock); return 0; } static int pp_dpm_get_pp_table(void *handle, char **table) { struct pp_hwmgr *hwmgr = handle; - int size = 0; if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); *table = (char *)hwmgr->soft_pp_table; - size = hwmgr->soft_pp_table_size; - mutex_unlock(&hwmgr->smu_lock); - return size; + return hwmgr->soft_pp_table_size; } static int amd_powerplay_reset(void *handle) @@ -685,13 +628,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) if (!hwmgr || !hwmgr->pm_en) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); if (!hwmgr->hardcode_pp_table) { hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table, hwmgr->soft_pp_table_size, GFP_KERNEL); if (!hwmgr->hardcode_pp_table) - goto err; + return ret; } memcpy(hwmgr->hardcode_pp_table, buf, size); @@ -700,17 +642,11 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size) ret = amd_powerplay_reset(handle); if (ret) - goto err; + return ret; - if (hwmgr->hwmgr_func->avfs_control) { + if (hwmgr->hwmgr_func->avfs_control) ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false); - if (ret) - goto err; - } - mutex_unlock(&hwmgr->smu_lock); - return 0; -err: - mutex_unlock(&hwmgr->smu_lock); + return ret; } @@ -718,7 +654,6 @@ static int pp_dpm_force_clock_level(void *handle, enum pp_clock_type type, uint32_t mask) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -733,17 +668,13 @@ static int pp_dpm_force_clock_level(void *handle, return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return 
hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask); } static int pp_dpm_print_clock_levels(void *handle, enum pp_clock_type type, char *buf) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -752,16 +683,12 @@ static int pp_dpm_print_clock_levels(void *handle, pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf); } static int pp_dpm_get_sclk_od(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -770,16 +697,12 @@ static int pp_dpm_get_sclk_od(void *handle) pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->get_sclk_od(hwmgr); } static int pp_dpm_set_sclk_od(void *handle, uint32_t value) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -789,16 +712,12 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value) return 0; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value); } static int pp_dpm_get_mclk_od(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -807,16 +726,12 @@ static int pp_dpm_get_mclk_od(void *handle) pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->get_mclk_od(hwmgr); } static int pp_dpm_set_mclk_od(void *handle, uint32_t value) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -825,17 +740,13 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value) pr_info_ratelimited("%s was not implemented.\n", __func__); return 0; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value); } static int pp_dpm_read_sensor(void *handle, int idx, void *value, int *size) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en || !value) return -EINVAL; @@ -854,10 +765,7 @@ static int pp_dpm_read_sensor(void *handle, int idx, *((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM; return 0; default: - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size); } } @@ -877,36 +785,28 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx) static int pp_get_power_profile_mode(void *handle, char *buf) { struct pp_hwmgr *hwmgr = handle; - int ret; if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode) return -EOPNOTSUPP; if (!buf) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf); } static int 
pp_set_power_profile_mode(void *handle, long *input, uint32_t size) { struct pp_hwmgr *hwmgr = handle; - int ret = -EOPNOTSUPP; if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode) - return ret; + return -EOPNOTSUPP; if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) { pr_debug("power profile setting is for manual dpm mode only.\n"); return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size); } static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size) @@ -971,8 +871,6 @@ static int pp_dpm_switch_power_profile(void *handle, if (!(type < PP_SMC_POWER_PROFILE_CUSTOM)) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - if (!en) { hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]); index = fls(hwmgr->workload_mask); @@ -987,15 +885,12 @@ static int pp_dpm_switch_power_profile(void *handle, if (type == PP_SMC_POWER_PROFILE_COMPUTE && hwmgr->hwmgr_func->disable_power_features_for_compute_performance) { - if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) { - mutex_unlock(&hwmgr->smu_lock); + if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) return -EINVAL; - } } if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1025,10 +920,8 @@ static int pp_set_power_limit(void *handle, uint32_t limit) if (limit > max_power_limit) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->set_power_limit(hwmgr, limit); hwmgr->power_limit = limit; - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1045,8 +938,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, if (power_type != PP_PWR_TYPE_SUSTAINED) return -EOPNOTSUPP; - mutex_lock(&hwmgr->smu_lock); - switch (pp_limit_level) { case PP_PWR_LIMIT_CURRENT: *limit = hwmgr->power_limit; @@ -1066,8 +957,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit, break; } - mutex_unlock(&hwmgr->smu_lock); - return ret; } @@ -1079,9 +968,7 @@ static int pp_display_configuration_change(void *handle, if (!hwmgr || !hwmgr->pm_en) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); phm_store_dal_configuration_data(hwmgr, display_config); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1089,15 +976,11 @@ static int pp_get_display_power_level(void *handle, struct amd_pp_simple_clock_info *output) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en ||!output) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = phm_get_dal_power_level(hwmgr, output); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return phm_get_dal_power_level(hwmgr, output); } static int pp_get_current_clocks(void *handle, @@ -1111,8 +994,6 @@ static int pp_get_current_clocks(void *handle, if (!hwmgr || !hwmgr->pm_en) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - phm_get_dal_power_level(hwmgr, &simple_clocks); if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, @@ -1125,7 +1006,6 @@ static int pp_get_current_clocks(void *handle, if (ret) { pr_debug("Error in phm_get_clock_info \n"); - mutex_unlock(&hwmgr->smu_lock); return -EINVAL; } @@ -1148,14 +1028,12 @@ static int pp_get_current_clocks(void *handle, clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk; clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk; } 
- mutex_unlock(&hwmgr->smu_lock); return 0; } static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -1163,10 +1041,7 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc if (clocks == NULL) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = phm_get_clock_by_type(hwmgr, type, clocks); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return phm_get_clock_by_type(hwmgr, type, clocks); } static int pp_get_clock_by_type_with_latency(void *handle, @@ -1174,15 +1049,11 @@ static int pp_get_clock_by_type_with_latency(void *handle, struct pp_clock_levels_with_latency *clocks) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en ||!clocks) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks); - mutex_unlock(&hwmgr->smu_lock); - return ret; + return phm_get_clock_by_type_with_latency(hwmgr, type, clocks); } static int pp_get_clock_by_type_with_voltage(void *handle, @@ -1190,50 +1061,34 @@ static int pp_get_clock_by_type_with_voltage(void *handle, struct pp_clock_levels_with_voltage *clocks) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en ||!clocks) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - - ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks); - - mutex_unlock(&hwmgr->smu_lock); - return ret; + return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks); } static int pp_set_watermarks_for_clocks_ranges(void *handle, void *clock_ranges) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en || !clock_ranges) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = phm_set_watermarks_for_clocks_ranges(hwmgr, - clock_ranges); - mutex_unlock(&hwmgr->smu_lock); - - return ret; + return phm_set_watermarks_for_clocks_ranges(hwmgr, + clock_ranges); } static int pp_display_clock_voltage_request(void *handle, struct pp_display_clock_request *clock) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en ||!clock) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = phm_display_clock_voltage_request(hwmgr, clock); - mutex_unlock(&hwmgr->smu_lock); - - return ret; + return phm_display_clock_voltage_request(hwmgr, clock); } static int pp_get_display_mode_validation_clocks(void *handle, @@ -1247,12 +1102,9 @@ static int pp_get_display_mode_validation_clocks(void *handle, clocks->level = PP_DAL_POWERLEVEL_7; - mutex_lock(&hwmgr->smu_lock); - if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState)) ret = phm_get_max_high_clocks(hwmgr, clocks); - mutex_unlock(&hwmgr->smu_lock); return ret; } @@ -1364,9 +1216,7 @@ static int pp_notify_smu_enable_pwe(void *handle) return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->smus_notify_pwe(hwmgr); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1382,9 +1232,7 @@ static int pp_enable_mgpu_fan_boost(void *handle) hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL) return 0; - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1401,9 +1249,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock) return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1420,9 
+1266,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock) return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1439,9 +1283,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock) return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1449,16 +1291,11 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock) static int pp_set_active_display_count(void *handle, uint32_t count) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; - mutex_lock(&hwmgr->smu_lock); - ret = phm_set_active_display_count(hwmgr, count); - mutex_unlock(&hwmgr->smu_lock); - - return ret; + return phm_set_active_display_count(hwmgr, count); } static int pp_get_asic_baco_capability(void *handle, bool *cap) @@ -1473,9 +1310,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap) !hwmgr->hwmgr_func->get_asic_baco_capability) return 0; - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1490,9 +1325,7 @@ static int pp_get_asic_baco_state(void *handle, int *state) if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state) return 0; - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1508,9 +1341,7 @@ static int pp_set_asic_baco_state(void *handle, int state) !hwmgr->hwmgr_func->set_asic_baco_state) return 0; - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1518,7 +1349,6 @@ static int pp_set_asic_baco_state(void *handle, int state) static int pp_get_ppfeature_status(void *handle, char *buf) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en || !buf) return -EINVAL; @@ -1528,17 +1358,12 @@ static int pp_get_ppfeature_status(void *handle, char *buf) return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf); - mutex_unlock(&hwmgr->smu_lock); - - return ret; + return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf); } static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -1548,17 +1373,12 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks) return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks); - mutex_unlock(&hwmgr->smu_lock); - - return ret; + return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks); } static int pp_asic_reset_mode_2(void *handle) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ -1568,17 +1388,12 @@ static int pp_asic_reset_mode_2(void *handle) return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2); - mutex_unlock(&hwmgr->smu_lock); - - return ret; + return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2); } static int pp_smu_i2c_bus_access(void *handle, bool acquire) { struct pp_hwmgr *hwmgr = handle; - int ret = 0; if (!hwmgr || !hwmgr->pm_en) return -EINVAL; @@ 
-1588,11 +1403,7 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire) return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); - ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire); - mutex_unlock(&hwmgr->smu_lock); - - return ret; + return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire); } static int pp_set_df_cstate(void *handle, enum pp_df_cstate state) @@ -1605,9 +1416,7 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state) if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate) return 0; - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->set_df_cstate(hwmgr, state); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1622,9 +1431,7 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate) if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate) return 0; - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1632,7 +1439,6 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate) static ssize_t pp_get_gpu_metrics(void *handle, void **table) { struct pp_hwmgr *hwmgr = handle; - ssize_t size; if (!hwmgr) return -EINVAL; @@ -1640,11 +1446,7 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table) if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics) return -EOPNOTSUPP; - mutex_lock(&hwmgr->smu_lock); - size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table); - mutex_unlock(&hwmgr->smu_lock); - - return size; + return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table); } static int pp_gfx_state_change_set(void *handle, uint32_t state) @@ -1659,9 +1461,7 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state) return -EINVAL; } - mutex_lock(&hwmgr->smu_lock); hwmgr->hwmgr_func->gfx_state_change(hwmgr, state); - mutex_unlock(&hwmgr->smu_lock); return 0; } @@ -1675,12 +1475,10 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size) *addr = NULL; *size = 0; - mutex_lock(&hwmgr->smu_lock); if (adev->pm.smu_prv_buffer) { amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr); *size = adev->pm.smu_prv_buffer_size; } - mutex_unlock(&hwmgr->smu_lock); return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h index 03226baea65e..4f7f2f455301 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h +++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h @@ -748,7 +748,6 @@ struct pp_hwmgr { bool not_vf; bool pm_en; bool pp_one_vf; - struct mutex smu_lock; struct mutex msg_lock; uint32_t pp_table_version; -- cgit From 83a3766b147053e542f3c91c121cb9594239e644 Mon Sep 17 00:00:00 2001 From: Rajib Mahapatra Date: Mon, 10 Jan 2022 13:15:19 +0530 Subject: drm/amd/display: Not to call dpcd_set_source_specific_data during resume. [Why] During resume path, dpcd_set_source_specific_data is taking extra time when core_link_write_dpcd fails on DP_SOURCE_OUI+0x03 and DP_SOURCE_MINIMUM_HBLANK_SUPPORTED. Here,aux->transfer fails with multiple retries and consume significant amount time during S0i3 resume. [How] Not to call dpcd_set_source_specific_data during resume path when there is no oled panel connected and achieve faster resume during S0i3. 
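In code terms the fix is one extra condition on the resume path: the OUI and brightness DPCD writes are only attempted when the eDP sink is an OLED panel that actually consumes them, so non-OLED systems skip the aux transfers and their retries entirely. Condensed from the hunk that follows:

    /* Resume path, condensed from the diff below: only rewrite OUI and
     * brightness when an OLED eDP sink needs it; otherwise skip the
     * slow, retry-prone aux transactions. */
    if (link->connector_signal == SIGNAL_TYPE_EDP &&
        link->dpcd_sink_ext_caps.bits.oled == 1) {
            dpcd_set_source_specific_data(link);
            msleep(post_oui_delay);
            dc_link_set_default_brightness_aux(link);
    }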
Reviewed-by: Hersen Wu Signed-off-by: Rajib Mahapatra Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 0c64dea4fdd8..e5d63b0d2594 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -862,7 +862,8 @@ static bool dc_link_detect_helper(struct dc_link *link, (!link->dc->config.allow_edp_hotplug_detection)) && link->local_sink) { // need to re-write OUI and brightness in resume case - if (link->connector_signal == SIGNAL_TYPE_EDP) { + if (link->connector_signal == SIGNAL_TYPE_EDP && + (link->dpcd_sink_ext_caps.bits.oled == 1)) { dpcd_set_source_specific_data(link); msleep(post_oui_delay); dc_link_set_default_brightness_aux(link); -- cgit From f9130b81aea2de3fb6d356e9495a384b2d35b1d1 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Fri, 21 Jan 2022 16:43:33 +0800 Subject: drm/amdgpu: drop WARN_ON in amdgpu_gart_bind/unbind MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit NULL pointer check has guarded it already. calltrace: amdgpu_ttm_gart_bind+0x49/0xa0 [amdgpu] amdgpu_ttm_alloc_gart+0x13f/0x180 [amdgpu] amdgpu_bo_create_reserved+0x139/0x2c0 [amdgpu] ? amdgpu_ttm_debugfs_init+0x120/0x120 [amdgpu] amdgpu_bo_create_kernel+0x17/0x80 [amdgpu] amdgpu_ttm_init+0x542/0x5e0 [amdgpu] Fixes: 1b08dfb889b2c5 ("drm/amdgpu: remove gart.ready flag") Signed-off-by: Guchun Chen Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c index 53cc844346f0..91d8207336c1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gart.c @@ -161,7 +161,7 @@ void amdgpu_gart_unbind(struct amdgpu_device *adev, uint64_t offset, uint64_t flags = 0; int idx; - if (WARN_ON(!adev->gart.ptr)) + if (!adev->gart.ptr) return; if (!drm_dev_enter(adev_to_drm(adev), &idx)) @@ -241,7 +241,7 @@ void amdgpu_gart_bind(struct amdgpu_device *adev, uint64_t offset, int pages, dma_addr_t *dma_addr, uint64_t flags) { - if (WARN_ON(!adev->gart.ptr)) + if (!adev->gart.ptr) return; amdgpu_gart_map(adev, offset, pages, dma_addr, flags, adev->gart.ptr); -- cgit From a685572c91b08e2e5143e52f4c5bbdd3d22271b3 Mon Sep 17 00:00:00 2001 From: Guchun Chen Date: Fri, 21 Jan 2022 10:40:40 +0800 Subject: drm/amd/pm: use dev_*** to print output in multiple GPUs In multiple GPU configuration, when failed to send a SMU message, it's hard to figure out which GPU has such problem. So it's not comfortable to user. 
[40190.142181] amdgpu: [powerplay] last message was failed ret is 65535 [40190.242420] amdgpu: [powerplay] failed to send message 201 ret is 65535 [40190.392763] amdgpu: [powerplay] last message was failed ret is 65535 [40190.492997] amdgpu: [powerplay] failed to send message 200 ret is 65535 [40190.743575] amdgpu: [powerplay] last message was failed ret is 65535 [40190.843812] amdgpu: [powerplay] failed to send message 282 ret is 65535 Signed-off-by: Guchun Chen Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c | 4 +++- drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c | 4 ++-- drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c | 11 +++++++---- drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c | 2 +- drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c | 4 ++-- 5 files changed, 15 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c index 93a1c7248e26..5ca3c422f7d4 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c @@ -208,6 +208,7 @@ static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr, static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { + struct amdgpu_device *adev = hwmgr->adev; int ret; cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0); @@ -218,7 +219,8 @@ static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret != 1) - pr_info("\n failed to send message %x ret is %d\n", msg, ret); + dev_info(adev->dev, + "failed to send message %x ret is %d\n", msg,ret); return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c index 47b34c6ca924..88a5641465dc 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c @@ -87,7 +87,7 @@ static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) smu10_send_msg_to_smc_without_waiting(hwmgr, msg); if (smu10_wait_for_response(hwmgr) == 0) - printk("Failed to send Message %x.\n", msg); + dev_err(adev->dev, "Failed to send Message %x.\n", msg); return 0; } @@ -108,7 +108,7 @@ static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, if (smu10_wait_for_response(hwmgr) == 0) - printk("Failed to send Message %x.\n", msg); + dev_err(adev->dev, "Failed to send Message %x.\n", msg); return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c index aae25243eb10..5a010cd38303 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c @@ -165,6 +165,7 @@ bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr) int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) { + struct amdgpu_device *adev = hwmgr->adev; int ret; PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0); @@ -172,9 +173,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret == 0xFE) - pr_debug("last message was not supported\n"); + dev_dbg(adev->dev, "last message was not supported\n"); else if (ret != 1) - pr_info("\n last message was failed ret is %d\n", ret); + dev_info(adev->dev, + "\nlast message was failed ret is %d\n", ret); 
cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0); cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg); @@ -184,9 +186,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP); if (ret == 0xFE) - pr_debug("message %x was not supported\n", msg); + dev_dbg(adev->dev, "message %x was not supported\n", msg); else if (ret != 1) - pr_info("\n failed to send message %x ret is %d \n", msg, ret); + dev_dbg(adev->dev, + "failed to send message %x ret is %d \n", msg, ret); return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c index 23e5de3c4ec1..8c9bf4940dc1 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c @@ -126,7 +126,7 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) ret = smu9_wait_for_response(hwmgr); if (ret != 1) - pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret); + dev_err(adev->dev, "Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret); return 0; } diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c index 741fbc87467f..9ad26c285ecd 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c @@ -115,7 +115,7 @@ static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg) ret = vega20_wait_for_response(hwmgr); if (ret != PPSMC_Result_OK) - pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret); + dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret); return (ret == PPSMC_Result_OK) ? 0 : -EIO; } @@ -143,7 +143,7 @@ static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr, ret = vega20_wait_for_response(hwmgr); if (ret != PPSMC_Result_OK) - pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret); + dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret); return (ret == PPSMC_Result_OK) ? 0 : -EIO; } -- cgit From 25c6aefceee60850bf78e16ae9d7fcc4a9d20884 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 20 Jan 2022 12:17:07 -0500 Subject: drm/amdgpu: filter out radeon secondary ids as well MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Older radeon boards (r2xx-r5xx) had secondary PCI functions which we solely there for supporting multi-head on OSs with special requirements. Add them to the unsupported list as well so we don't attempt to bind to them. The driver would fail to bind to them anyway, but this does so in a cleaner way that should not confuse the user. 
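The diff that follows only adds device IDs to the table; the consumer is the PCI probe routine, which walks the deny-list before doing any hardware setup and refuses to bind. That check is not part of this hunk, so the sketch below is reconstructed rather than quoted, with a hypothetical function name:

    /* Sketch of how the deny-list is consumed at probe time (reconstructed,
     * not quoted from this patch): refuse to bind before any hw setup. */
    static int example_pci_probe(struct pci_dev *pdev,
                                 const struct pci_device_id *ent)
    {
            int i;

            for (i = 0; i < ARRAY_SIZE(amdgpu_unsupported_pciidlist); i++) {
                    if (amdgpu_unsupported_pciidlist[i] == pdev->device)
                            return -ENODEV; /* leave the secondary function unbound */
            }

            /* ... normal amdgpu initialization would continue here ... */
            return 0;
    }

Bailing out this early is what turns the old noisy bind failure into a clean, silent skip.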
Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 81 +++++++++++++++++++++++++++++++++ 1 file changed, 81 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index cc6585193236..1a691a801928 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1497,6 +1497,87 @@ static const u16 amdgpu_unsupported_pciidlist[] = { 0x99A0, 0x99A2, 0x99A4, + /* radeon secondary ids */ + 0x3171, + 0x3e70, + 0x4164, + 0x4165, + 0x4166, + 0x4168, + 0x4170, + 0x4171, + 0x4172, + 0x4173, + 0x496e, + 0x4a69, + 0x4a6a, + 0x4a6b, + 0x4a70, + 0x4a74, + 0x4b69, + 0x4b6b, + 0x4b6c, + 0x4c6e, + 0x4e64, + 0x4e65, + 0x4e66, + 0x4e67, + 0x4e68, + 0x4e69, + 0x4e6a, + 0x4e71, + 0x4f73, + 0x5569, + 0x556b, + 0x556d, + 0x556f, + 0x5571, + 0x5854, + 0x5874, + 0x5940, + 0x5941, + 0x5b72, + 0x5b73, + 0x5b74, + 0x5b75, + 0x5d44, + 0x5d45, + 0x5d6d, + 0x5d6f, + 0x5d72, + 0x5d77, + 0x5e6b, + 0x5e6d, + 0x7120, + 0x7124, + 0x7129, + 0x712e, + 0x712f, + 0x7162, + 0x7163, + 0x7166, + 0x7167, + 0x7172, + 0x7173, + 0x71a0, + 0x71a1, + 0x71a3, + 0x71a7, + 0x71bb, + 0x71e0, + 0x71e1, + 0x71e2, + 0x71e6, + 0x71e7, + 0x71f2, + 0x7269, + 0x726b, + 0x726e, + 0x72a0, + 0x72a8, + 0x72b1, + 0x72b3, + 0x793f, }; static const struct pci_device_id pciidlist[] = { -- cgit From 5e0c8ddf029e0a8533bfb04e4542b46356cbcade Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 20 Jan 2022 12:52:13 -0500 Subject: drm/amdgpu/display: adjust msleep limit in dp_wait_for_training_aux_rd_interval Some architectures (e.g., ARM) have relatively low udelay limits. On most architectures, anything longer than 2000us is not recommended. Change the check to align with other similar checks in DC. Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index cb2f96690b4a..5831eef78fb1 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -202,7 +202,7 @@ void dp_wait_for_training_aux_rd_interval( uint32_t wait_in_micro_secs) { #if defined(CONFIG_DRM_AMD_DC_DCN) - if (wait_in_micro_secs > 16000) + if (wait_in_micro_secs > 1000) msleep(wait_in_micro_secs/1000); else udelay(wait_in_micro_secs); -- cgit From 0fc2549d55a238da5e4c1b1ae20ebc3856922334 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 20 Jan 2022 12:57:33 -0500 Subject: drm/amdgpu/display: use msleep rather than udelay for long delays Some architectures (e.g., ARM) throw an compilation error if the udelay is too long. In general udelays of longer than 2000us are not recommended on any architecture. Switch to msleep in these cases. 
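Both delay patches apply the same rule of thumb: busy-wait only for genuinely short intervals and sleep for anything in the millisecond range, since very long udelay() calls are unreliable, or will not even build, on some architectures. An illustrative helper capturing that split, not code from either patch:

    /* Illustrative helper (not part of the patch): choose msleep vs udelay
     * by interval length, mirroring the threshold check from the previous
     * patch and the udelay(5000) -> msleep(5) conversions below. */
    static inline void example_delay_us(unsigned int us)
    {
            if (us > 1000)
                    msleep(us / 1000);      /* long wait: allow scheduling */
            else
                    udelay(us);             /* short wait: busy-wait is fine */
    }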
Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 5831eef78fb1..6f6929435885 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -6935,7 +6935,7 @@ bool dpcd_write_128b_132b_sst_payload_allocation_table( } } retries++; - udelay(5000); + msleep(5); } if (!result && retries == max_retries) { @@ -6987,7 +6987,7 @@ bool dpcd_poll_for_allocation_change_trigger(struct dc_link *link) break; } - udelay(5000); + msleep(5); } if (result == ACT_FAILED) { -- cgit From dfced44f122c500004a48ecc8db516bb6a295a1b Mon Sep 17 00:00:00 2001 From: Xin Xiong Date: Fri, 21 Jan 2022 15:46:23 -0500 Subject: drm/amd/amdgpu/amdgpu_cs: fix refcount leak of a dma_fence obj MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This issue takes place in an error path in amdgpu_cs_fence_to_handle_ioctl(). When `info->in.what` falls into default case, the function simply returns -EINVAL, forgetting to decrement the reference count of a dma_fence obj, which is bumped earlier by amdgpu_cs_get_fence(). This may result in reference count leaks. Fix it by decreasing the refcount of specific object before returning the error code. Reviewed-by: Christian König Signed-off-by: Xin Xiong Signed-off-by: Xin Tan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 06d07502a1f6..a34be65c9eaa 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -1509,6 +1509,7 @@ int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data, return 0; default: + dma_fence_put(fence); return -EINVAL; } } -- cgit From 1418b9c38270f4c7843627cb0e9047b19eb3365a Mon Sep 17 00:00:00 2001 From: Jiapeng Chong Date: Fri, 21 Jan 2022 19:48:00 +0800 Subject: drm/amd/pm: remove useless if Clean the following coccicheck warning: ./drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c:7035:2-4: WARNING: possible condition with no effect (if == else). Reported-by: Abaci Robot Signed-off-by: Jiapeng Chong Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c index 23ff0d812e4b..7427c50409d4 100644 --- a/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c +++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c @@ -7032,10 +7032,7 @@ static int si_power_control_set_level(struct amdgpu_device *adev) ret = si_resume_smc(adev); if (ret) return ret; - ret = si_set_sw_state(adev); - if (ret) - return ret; - return 0; + return si_set_sw_state(adev); } static void si_set_vce_clock(struct amdgpu_device *adev, -- cgit From 5d5c6dba2b43e28845d7d7ed32a36802329a5f52 Mon Sep 17 00:00:00 2001 From: Yongzhi Liu Date: Fri, 21 Jan 2022 11:26:13 +0000 Subject: drm/amd/display: Fix memory leak [why] Resource release is needed on the error handling path to prevent memory leak. [how] Fix this by adding kfree on the error handling path. 
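Every hunk below has the same shape: the debugfs read handler allocates rd_buf up front, and each early return taken after that allocation must now free it instead of leaking. A minimal sketch of the fixed pattern, with an illustrative handler name and the details trimmed:

    /* Minimal sketch of the fixed pattern (illustrative handler name):
     * any exit taken after the allocation has to release rd_buf. */
    static ssize_t example_debugfs_read(struct file *f, char __user *buf,
                                        size_t size, loff_t *pos)
    {
            const uint32_t rd_buf_size = 32;
            ssize_t result = 0;
            char *rd_buf;
            int r;

            rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL);
            if (!rd_buf)
                    return -ENOMEM;

            while (size) {
                    if (*pos >= rd_buf_size)
                            break;

                    r = put_user(*(rd_buf + result), buf);
                    if (r) {
                            kfree(rd_buf);  /* previously leaked on this path */
                            return r;       /* r == -EFAULT */
                    }

                    buf++;
                    size--;
                    *pos += 1;
                    result++;
            }

            kfree(rd_buf);
            return result;
    }

A single cleanup label reached by goto would avoid repeating the kfree at every exit, but the patch keeps the driver's existing early-return style and simply adds the missing frees.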
Reviewed-by: Harry Wentland Signed-off-by: Yongzhi Liu Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 80 ++++++++++++++++------ 1 file changed, 60 insertions(+), 20 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 26719efa5396..12d437d9a0e4 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -227,8 +227,10 @@ static ssize_t dp_link_settings_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -389,8 +391,10 @@ static ssize_t dp_phy_settings_read(struct file *f, char __user *buf, break; r = put_user((*(rd_buf + result)), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1359,8 +1363,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1376,8 +1382,10 @@ static ssize_t dp_dsc_clock_en_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1546,8 +1554,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1563,8 +1573,10 @@ static ssize_t dp_dsc_slice_width_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1731,8 +1743,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1748,8 +1762,10 @@ static ssize_t dp_dsc_slice_height_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -1912,8 +1928,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -1929,8 +1947,10 @@ static ssize_t dp_dsc_bits_per_pixel_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2088,8 +2108,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2105,8 +2127,10 @@ static ssize_t dp_dsc_pic_width_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2145,8 +2169,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2162,8 +2188,10 @@ static ssize_t dp_dsc_pic_height_read(struct file *f, char __user *buf, break; r = 
put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2217,8 +2245,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2234,8 +2264,10 @@ static ssize_t dp_dsc_chunk_size_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -2289,8 +2321,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, break; } - if (!pipe_ctx) + if (!pipe_ctx) { + kfree(rd_buf); return -ENXIO; + } dsc = pipe_ctx->stream_res.dsc; if (dsc) @@ -2306,8 +2340,10 @@ static ssize_t dp_dsc_slice_bpg_offset_read(struct file *f, char __user *buf, break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; @@ -3459,8 +3495,10 @@ static ssize_t dcc_en_bits_read( dc->hwss.get_dcc_en_bits(dc, dcc_en_bits); rd_buf = kcalloc(rd_buf_size, sizeof(char), GFP_KERNEL); - if (!rd_buf) + if (!rd_buf) { + kfree(dcc_en_bits); return -ENOMEM; + } for (i = 0; i < num_pipes; i++) offset += snprintf(rd_buf + offset, rd_buf_size - offset, @@ -3473,8 +3511,10 @@ static ssize_t dcc_en_bits_read( if (*pos >= rd_buf_size) break; r = put_user(*(rd_buf + result), buf); - if (r) + if (r) { + kfree(rd_buf); return r; /* r = -EFAULT */ + } buf += 1; size -= 1; *pos += 1; -- cgit From 5bb1465fbdc291593a7c385cede0416ff6873280 Mon Sep 17 00:00:00 2001 From: Yang Li Date: Fri, 21 Jan 2022 06:10:21 +0000 Subject: drm/amd/display: clean up some inconsistent indenting Eliminate the follow smatch warning: drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c:3415 bw_calcs() warn: inconsistent indenting Reported-by: Abaci Robot Signed-off-by: Yang Li Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c | 48 +++++++++++----------- 1 file changed, 23 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c index ff5bb152ef49..8f136db39f3e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c @@ -3411,35 +3411,33 @@ bool bw_calcs(struct dc_context *ctx, calcs_output->stutter_exit_wm_ns[5].c_mark = bw_fixed_to_int(bw_mul(data-> stutter_exit_watermark[9], bw_int_to_fixed(1000))); - - calcs_output->stutter_entry_wm_ns[0].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[4], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[1].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[5], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[2].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[6], bw_int_to_fixed(1000))); - if (ctx->dc->caps.max_slave_planes) { - calcs_output->stutter_entry_wm_ns[3].c_mark = + calcs_output->stutter_entry_wm_ns[0].c_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[0], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].c_mark = + stutter_entry_watermark[4], bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[1].c_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[1], bw_int_to_fixed(1000))); - } else { - calcs_output->stutter_entry_wm_ns[3].c_mark = + stutter_entry_watermark[5], 
bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[2].c_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[7], bw_int_to_fixed(1000))); - calcs_output->stutter_entry_wm_ns[4].c_mark = + stutter_entry_watermark[6], bw_int_to_fixed(1000))); + if (ctx->dc->caps.max_slave_planes) { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[0], + bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[1], + bw_int_to_fixed(1000))); + } else { + calcs_output->stutter_entry_wm_ns[3].c_mark = + bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[7], + bw_int_to_fixed(1000))); + calcs_output->stutter_entry_wm_ns[4].c_mark = + bw_fixed_to_int(bw_mul(data->stutter_entry_watermark[8], + bw_int_to_fixed(1000))); + } + calcs_output->stutter_entry_wm_ns[5].c_mark = bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[8], bw_int_to_fixed(1000))); - } - calcs_output->stutter_entry_wm_ns[5].c_mark = - bw_fixed_to_int(bw_mul(data-> - stutter_entry_watermark[9], bw_int_to_fixed(1000))); - + stutter_entry_watermark[9], bw_int_to_fixed(1000))); calcs_output->urgent_wm_ns[0].c_mark = bw_fixed_to_int(bw_mul(data-> urgent_watermark[4], bw_int_to_fixed(1000))); -- cgit From 29dbcac82f96d06608f3658aacd3e14efb7ac0cd Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 18 Jan 2022 16:04:02 +0800 Subject: drm/amdgpu: add helper to query rlcg reg access flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Query rlc indirect register access approach specified by sriov host driver per ip blocks Signed-off-by: Hawking Zhang Reviewed-by: Zhou, Peng Ju Acked-by: Christian König Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 35 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 8 ++++++++ 2 files changed, 43 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 07bc0f504713..a40e4fcdfa46 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -820,3 +820,38 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, } } } + +bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, + u32 hwip, bool write, u32 *rlcg_flag) +{ + bool ret = false; + + switch (hwip) { + case GC_HWIP: + if (amdgpu_sriov_reg_indirect_gc(adev)) { + *rlcg_flag = + write ? 
AMDGPU_RLCG_GC_WRITE : AMDGPU_RLCG_GC_READ; + ret = true; + /* only in new version, AMDGPU_REGS_NO_KIQ and + * AMDGPU_REGS_RLC are enabled simultaneously */ + } else if ((acc_flags & AMDGPU_REGS_RLC) && + !(acc_flags & AMDGPU_REGS_NO_KIQ)) { + *rlcg_flag = AMDGPU_RLCG_GC_WRITE_LEGACY; + ret = true; + } + break; + case MMHUB_HWIP: + if (amdgpu_sriov_reg_indirect_mmhub(adev) && + (acc_flags & AMDGPU_REGS_RLC) && write) { + *rlcg_flag = AMDGPU_RLCG_MMHUB_WRITE; + ret = true; + } + break; + default: + dev_err(adev->dev, + "indirect registers access through rlcg is not supported\n"); + ret = false; + break; + } + return ret; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 9adfb8d63280..404a06e57f30 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -32,6 +32,12 @@ #define AMDGPU_PASSTHROUGH_MODE (1 << 3) /* thw whole GPU is pass through for VM */ #define AMDGPU_SRIOV_CAPS_RUNTIME (1 << 4) /* is out of full access mode */ +/* flags for indirect register access path supported by rlcg for sriov */ +#define AMDGPU_RLCG_GC_WRITE_LEGACY (0x8 << 28) +#define AMDGPU_RLCG_GC_WRITE (0x0 << 28) +#define AMDGPU_RLCG_GC_READ (0x1 << 28) +#define AMDGPU_RLCG_MMHUB_WRITE (0x2 << 28) + /* all asic after AI use this offset */ #define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5 /* tonga/fiji use this offset */ @@ -321,4 +327,6 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, struct amdgpu_video_codec_info *encode, uint32_t encode_array_size, struct amdgpu_video_codec_info *decode, uint32_t decode_array_size); +bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, + u32 hwip, bool write, u32 *rlcg_flag); #endif -- cgit From 97d1a3b967a3cbeb0dd29a8b5bcd4ac1fd9ccd9b Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 16 Jan 2022 16:45:54 +0800 Subject: drm/amdgpu: switch to get_rlcg_reg_access_flag for gfx9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch to common helper to query rlcg access flag specified by sriov host driver for gfx9 Signed-off-by: Hawking Zhang Reviewed-by: Zhou, Peng Ju Acked-by: Christian König Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 33 ++++----------------------------- 1 file changed, 4 insertions(+), 29 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index e12f9f5c3beb..17704cd99aaf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -63,9 +63,6 @@ #define mmGCEA_PROBE_MAP 0x070c #define mmGCEA_PROBE_MAP_BASE_IDX 0 -#define GFX9_RLCG_GC_WRITE_OLD (0x8 << 28) -#define GFX9_RLCG_GC_WRITE (0x0 << 28) -#define GFX9_RLCG_GC_READ (0x1 << 28) #define GFX9_RLCG_VFGATE_DISABLED 0x4000000 #define GFX9_RLCG_WRONG_OPERATION_TYPE 0x2000000 #define GFX9_RLCG_NOT_IN_RANGE 0x1000000 @@ -815,35 +812,12 @@ static u32 gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint3 return ret; } -static bool gfx_v9_0_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, - int write, u32 *rlcg_flag) -{ - - switch (hwip) { - case GC_HWIP: - if (amdgpu_sriov_reg_indirect_gc(adev)) { - *rlcg_flag = write ? 
GFX9_RLCG_GC_WRITE : GFX9_RLCG_GC_READ; - - return true; - /* only in new version, AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */ - } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ) && write) { - *rlcg_flag = GFX9_RLCG_GC_WRITE_OLD; - return true; - } - - break; - default: - return false; - } - - return false; -} - static u32 gfx_v9_0_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip) { u32 rlcg_flag; - if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag)) + if (!amdgpu_sriov_runtime(adev) && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag)) return gfx_v9_0_rlcg_rw(adev, offset, 0, rlcg_flag); if (acc_flags & AMDGPU_REGS_NO_KIQ) @@ -857,7 +831,8 @@ static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset, { u32 rlcg_flag; - if (!amdgpu_sriov_runtime(adev) && gfx_v9_0_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) { + if (!amdgpu_sriov_runtime(adev) && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) { gfx_v9_0_rlcg_rw(adev, offset, value, rlcg_flag); return; } -- cgit From 7bbe43f8a4e7775daf6ca62807e0023b0642a20b Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Sun, 16 Jan 2022 17:04:09 +0800 Subject: drm/amdgpu: switch to get_rlcg_reg_access_flag for gfx10 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Switch to common helper to query rlcg access flag specified by sriov host driver for gfx10 Signed-off-by: Hawking Zhang Reviewed-by: Zhou, Peng Ju Acked-by: Christian König Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 41 ++-------------------------------- 1 file changed, 2 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index dbe7442fb25c..588c922573e9 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -180,11 +180,6 @@ #define mmRLC_SPARE_INT_0_Sienna_Cichlid 0x4ca5 #define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX 1 -#define GFX_RLCG_GC_WRITE_OLD (0x8 << 28) -#define GFX_RLCG_GC_WRITE (0x0 << 28) -#define GFX_RLCG_GC_READ (0x1 << 28) -#define GFX_RLCG_MMHUB_WRITE (0x2 << 28) - #define RLCG_ERROR_REPORT_ENABLED(adev) \ (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev)) @@ -1463,38 +1458,6 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = SOC15_REG_GOLDEN_VALUE(GC, 0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000) }; -static bool gfx_v10_get_rlcg_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, - int write, u32 *rlcg_flag) -{ - switch (hwip) { - case GC_HWIP: - if (amdgpu_sriov_reg_indirect_gc(adev)) { - *rlcg_flag = write ? 
GFX_RLCG_GC_WRITE : GFX_RLCG_GC_READ; - - return true; - /* only in new version, AMDGPU_REGS_NO_KIQ and AMDGPU_REGS_RLC enabled simultaneously */ - } else if ((acc_flags & AMDGPU_REGS_RLC) && !(acc_flags & AMDGPU_REGS_NO_KIQ)) { - *rlcg_flag = GFX_RLCG_GC_WRITE_OLD; - - return true; - } - - break; - case MMHUB_HWIP: - if (amdgpu_sriov_reg_indirect_mmhub(adev) && - (acc_flags & AMDGPU_REGS_RLC) && write) { - *rlcg_flag = GFX_RLCG_MMHUB_WRITE; - return true; - } - - break; - default: - DRM_DEBUG("Not program register by RLCG\n"); - } - - return false; -} - static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag) { static void *scratch_reg0; @@ -1575,7 +1538,7 @@ static void gfx_v10_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value u32 rlcg_flag; if (!amdgpu_sriov_runtime(adev) && - gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 1, &rlcg_flag)) { + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) { gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag); return; } @@ -1591,7 +1554,7 @@ static u32 gfx_v10_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_fl u32 rlcg_flag; if (!amdgpu_sriov_runtime(adev) && - gfx_v10_get_rlcg_flag(adev, acc_flags, hwip, 0, &rlcg_flag)) + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag)) return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag); if (acc_flags & AMDGPU_REGS_NO_KIQ) -- cgit From b12252b0538183d8b88bd4a8d8d05a808c46472c Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 18 Jan 2022 16:11:02 +0800 Subject: drm/amdgpu: add structures for rlcg indirect reg access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Add structures that are used to cache registers offsets for rlcg indirect reg access ctrl and flag availability of such interface Signed-off-by: Hawking Zhang Reviewed-by: Zhou, Peng Ju Acked-by: Christian König Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index 00afd0dcae86..286b2347d063 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -132,6 +132,16 @@ struct amdgpu_rlc_funcs { bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg); }; +struct amdgpu_rlcg_reg_access_ctrl { + uint32_t scratch_reg0; + uint32_t scratch_reg1; + uint32_t scratch_reg2; + uint32_t scratch_reg3; + uint32_t grbm_cntl; + uint32_t grbm_idx; + uint32_t spare_int; +}; + struct amdgpu_rlc { /* for power gating */ struct amdgpu_bo *save_restore_obj; @@ -191,6 +201,10 @@ struct amdgpu_rlc { struct amdgpu_bo *rlc_toc_bo; uint64_t rlc_toc_gpu_addr; void *rlc_toc_buf; + + bool rlcg_reg_access_supported; + /* registers for rlcg indirect reg access */ + struct amdgpu_rlcg_reg_access_ctrl reg_access_ctrl; }; void amdgpu_gfx_rlc_enter_safe_mode(struct amdgpu_device *adev); -- cgit From 4819732f5986ed8b3d88bf4272d2d5bf1ccff9da Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 18 Jan 2022 21:28:04 +0800 Subject: drm/amdgpu: init rlcg_reg_access_ctrl for gfx9 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initialize all the register offsets that will be used in rlcg indirect reg access path for gfx9 in sw_init phase Signed-off-by: Hawking Zhang Reviewed-by: Zhou, Peng Ju Acked-by: Christian König Reviewed-by: Lijo Lazar Signed-off-by: 
Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 30 ++++++++++++++++++++++++++---- 1 file changed, 26 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index 17704cd99aaf..c7bccf1a28b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -1983,6 +1983,21 @@ static int gfx_v9_0_cp_jump_table_num(struct amdgpu_device *adev) return 4; } +static void gfx_v9_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) +{ + struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; + + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; + reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); + reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1); + reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2); + reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3); + reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL); + reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX); + reg_access_ctrl->spare_int = SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT); + adev->gfx.rlc.rlcg_reg_access_supported = true; +} + static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) { const struct cs_section_def *cs_data; @@ -2023,6 +2038,9 @@ static int gfx_v9_0_rlc_init(struct amdgpu_device *adev) if (adev->gfx.rlc.funcs->update_spm_vmid) adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + /* init rlcg reg access ctrl */ + gfx_v9_0_init_rlcg_reg_access_ctrl(adev); + return 0; } @@ -2432,10 +2450,14 @@ static int gfx_v9_0_sw_init(void *handle) return r; } - r = adev->gfx.rlc.funcs->init(adev); - if (r) { - DRM_ERROR("Failed to init rlc BOs!\n"); - return r; + if (adev->gfx.rlc.funcs) { + if (adev->gfx.rlc.funcs->init) { + r = adev->gfx.rlc.funcs->init(adev); + if (r) { + dev_err(adev->dev, "Failed to init rlc BOs!\n"); + return r; + } + } } r = gfx_v9_0_mec_init(adev); -- cgit From f8f96b17f0fa302b246e35773074f39e43add023 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 18 Jan 2022 21:33:05 +0800 Subject: drm/amdgpu: init rlcg_reg_access_ctrl for gfx10 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Initialize all the register offsets that will be used in rlcg indirect reg access path for gfx10 in sw_init phase Signed-off-by: Hawking Zhang Reviewed-by: Zhou, Peng Ju Acked-by: Christian König Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 38 ++++++++++++++++++++++++++++++---- 1 file changed, 34 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 588c922573e9..3fb484214d3a 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -4411,6 +4411,30 @@ static void gfx_v10_0_rlc_fini(struct amdgpu_device *adev) (void **)&adev->gfx.rlc.cp_table_ptr); } +static void gfx_v10_0_init_rlcg_reg_access_ctrl(struct amdgpu_device *adev) +{ + struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; + + reg_access_ctrl = &adev->gfx.rlc.reg_access_ctrl; + reg_access_ctrl->scratch_reg0 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG0); + reg_access_ctrl->scratch_reg1 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG1); + reg_access_ctrl->scratch_reg2 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG2); + reg_access_ctrl->scratch_reg3 = SOC15_REG_OFFSET(GC, 0, mmSCRATCH_REG3); + reg_access_ctrl->grbm_cntl = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_CNTL); + 
reg_access_ctrl->grbm_idx = SOC15_REG_OFFSET(GC, 0, mmGRBM_GFX_INDEX); + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(10, 3, 0): + reg_access_ctrl->spare_int = + SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT_0_Sienna_Cichlid); + break; + default: + reg_access_ctrl->spare_int = + SOC15_REG_OFFSET(GC, 0, mmRLC_SPARE_INT); + break; + } + adev->gfx.rlc.rlcg_reg_access_supported = true; +} + static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) { const struct cs_section_def *cs_data; @@ -4431,6 +4455,8 @@ static int gfx_v10_0_rlc_init(struct amdgpu_device *adev) if (adev->gfx.rlc.funcs->update_spm_vmid) adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf); + gfx_v10_0_init_rlcg_reg_access_ctrl(adev); + return 0; } @@ -4828,10 +4854,14 @@ static int gfx_v10_0_sw_init(void *handle) if (r) return r; - r = gfx_v10_0_rlc_init(adev); - if (r) { - DRM_ERROR("Failed to init rlc BOs!\n"); - return r; + if (adev->gfx.rlc.funcs) { + if (adev->gfx.rlc.funcs->init) { + r = adev->gfx.rlc.funcs->init(adev); + if (r) { + dev_err(adev->dev, "Failed to init rlc BOs!\n"); + return r; + } + } } r = gfx_v10_0_mec_init(adev); -- cgit From 5d447e296701484f3df5b31a7a078cbf1e3a9cc9 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 18 Jan 2022 21:44:06 +0800 Subject: drm/amdgpu: add helper for rlcg indirect reg access MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The helper will be used to access registers from sriov guest in full access time Signed-off-by: Hawking Zhang Reviewed-by: Zhou, Peng Ju Acked-by: Christian König Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 111 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 14 +++- 2 files changed, 124 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index a40e4fcdfa46..8c27d31f3e53 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -855,3 +855,114 @@ bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_fl } return ret; } + +static u32 amdgpu_virt_rlcg_reg_rw(struct amdgpu_device *adev, u32 offset, u32 v, u32 flag) +{ + struct amdgpu_rlcg_reg_access_ctrl *reg_access_ctrl; + uint32_t timeout = 50000; + uint32_t i, tmp; + uint32_t ret = 0; + static void *scratch_reg0; + static void *scratch_reg1; + static void *scratch_reg2; + static void *scratch_reg3; + static void *spare_int; + + if (!adev->gfx.rlc.rlcg_reg_access_supported) { + dev_err(adev->dev, + "indirect registers access through rlcg is not available\n"); + return 0; + } + + scratch_reg0 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg0; + scratch_reg1 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg1; + scratch_reg2 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg2; + scratch_reg3 = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->scratch_reg3; + if (reg_access_ctrl->spare_int) + spare_int = (void __iomem *)adev->rmmio + 4 * reg_access_ctrl->spare_int; + + if (offset == reg_access_ctrl->grbm_cntl) { + /* if the target reg offset is grbm_cntl, write to scratch_reg2 */ + writel(v, scratch_reg2); + writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); + } else if (offset == reg_access_ctrl->grbm_idx) { + /* if the target reg offset is grbm_idx, write to scratch_reg3 */ + writel(v, scratch_reg3); + writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); + } else { + /* + * 
SCRATCH_REG0 = read/write value + * SCRATCH_REG1[30:28] = command + * SCRATCH_REG1[19:0] = address in dword + * SCRATCH_REG1[26:24] = Error reporting + */ + writel(v, scratch_reg0); + writel((offset | flag), scratch_reg1); + if (reg_access_ctrl->spare_int) + writel(1, spare_int); + + for (i = 0; i < timeout; i++) { + tmp = readl(scratch_reg1); + if (!(tmp & flag)) + break; + udelay(10); + } + + if (i >= timeout) { + if (amdgpu_sriov_rlcg_error_report_enabled(adev)) { + if (tmp & AMDGPU_RLCG_VFGATE_DISABLED) { + dev_err(adev->dev, + "vfgate is disabled, rlcg failed to program reg: 0x%05x\n", offset); + } else if (tmp & AMDGPU_RLCG_WRONG_OPERATION_TYPE) { + dev_err(adev->dev, + "wrong operation type, rlcg failed to program reg: 0x%05x\n", offset); + } else if (tmp & AMDGPU_RLCG_REG_NOT_IN_RANGE) { + dev_err(adev->dev, + "regiser is not in range, rlcg failed to program reg: 0x%05x\n", offset); + } else { + dev_err(adev->dev, + "unknown error type, rlcg failed to program reg: 0x%05x\n", offset); + } + } else { + dev_err(adev->dev, + "timeout: rlcg faled to program reg: 0x%05x\n", offset); + } + } + } + + ret = readl(scratch_reg0); + return ret; +} + +void amdgpu_sriov_wreg(struct amdgpu_device *adev, + u32 offset, u32 value, + u32 acc_flags, u32 hwip) +{ + u32 rlcg_flag; + + if (!amdgpu_sriov_runtime(adev) && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) { + amdgpu_virt_rlcg_reg_rw(adev, offset, value, rlcg_flag); + return; + } + + if (acc_flags & AMDGPU_REGS_NO_KIQ) + WREG32_NO_KIQ(offset, value); + else + WREG32(offset, value); +} + +u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, + u32 offset, u32 acc_flags, u32 hwip) +{ + u32 rlcg_flag; + + if (!amdgpu_sriov_runtime(adev) && + amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag)) + return amdgpu_virt_rlcg_reg_rw(adev, offset, 0, rlcg_flag); + + if (acc_flags & AMDGPU_REGS_NO_KIQ) + return RREG32_NO_KIQ(offset); + else + return RREG32(offset); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 404a06e57f30..dbfa3ba445c3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -38,6 +38,11 @@ #define AMDGPU_RLCG_GC_READ (0x1 << 28) #define AMDGPU_RLCG_MMHUB_WRITE (0x2 << 28) +/* error code for indirect register access path supported by rlcg for sriov */ +#define AMDGPU_RLCG_VFGATE_DISABLED 0x4000000 +#define AMDGPU_RLCG_WRONG_OPERATION_TYPE 0x2000000 +#define AMDGPU_RLCG_REG_NOT_IN_RANGE 0x1000000 + /* all asic after AI use this offset */ #define mmRCC_IOV_FUNC_IDENTIFIER 0xDE5 /* tonga/fiji use this offset */ @@ -281,6 +286,9 @@ struct amdgpu_video_codec_info; (amdgpu_sriov_vf((adev)) && \ ((adev)->virt.reg_access & (AMDGIM_FEATURE_GC_REG_RLC_EN))) +#define amdgpu_sriov_rlcg_error_report_enabled(adev) \ + (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev)) + #define amdgpu_passthrough(adev) \ ((adev)->virt.caps & AMDGPU_PASSTHROUGH_MODE) @@ -299,7 +307,6 @@ static inline bool is_virtual_machine(void) ((!amdgpu_in_reset(adev)) && adev->virt.tdr_debug) #define amdgpu_sriov_is_normal(adev) \ ((!amdgpu_in_reset(adev)) && (!adev->virt.tdr_debug)) - bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev); void amdgpu_virt_init_setting(struct amdgpu_device *adev); void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev, @@ -329,4 +336,9 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, struct amdgpu_video_codec_info *decode, 
uint32_t decode_array_size); bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, u32 hwip, bool write, u32 *rlcg_flag); +void amdgpu_sriov_wreg(struct amdgpu_device *adev, + u32 offset, u32 value, + u32 acc_flags, u32 hwip); +u32 amdgpu_sriov_rreg(struct amdgpu_device *adev, + u32 offset, u32 acc_flags, u32 hwip); #endif -- cgit From 1b2dc99e2dc6f35f55f0487e12fc9166fbd023ed Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Tue, 18 Jan 2022 21:47:39 +0800 Subject: drm/amdgpu: switch to amdgpu_sriov_rreg/wreg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of ip specific implementation for rlcg indirect register access Signed-off-by: Hawking Zhang Reviewed-by: Zhou, Peng Ju Acked-by: Christian König Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 2 +- drivers/gpu/drm/amd/amdgpu/soc15_common.h | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index fcde99c69c47..5159c6dedc04 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -566,7 +566,7 @@ void amdgpu_mm_wreg_mmio_rlc(struct amdgpu_device *adev, adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->is_rlcg_access_range) { if (adev->gfx.rlc.funcs->is_rlcg_access_range(adev, reg)) - return adev->gfx.rlc.funcs->sriov_wreg(adev, reg, v, 0, 0); + return amdgpu_sriov_wreg(adev, reg, v, 0, 0); } else if ((reg * 4) >= adev->rmmio_size) { adev->pcie_wreg(adev, reg * 4, v); } else { diff --git a/drivers/gpu/drm/amd/amdgpu/soc15_common.h b/drivers/gpu/drm/amd/amdgpu/soc15_common.h index 473767e03676..acce8c2e0328 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15_common.h +++ b/drivers/gpu/drm/amd/amdgpu/soc15_common.h @@ -28,13 +28,13 @@ #define SOC15_REG_OFFSET(ip, inst, reg) (adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg) #define __WREG32_SOC15_RLC__(reg, value, flag, hwip) \ - ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_wreg) ? \ - adev->gfx.rlc.funcs->sriov_wreg(adev, reg, value, flag, hwip) : \ + ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \ + amdgpu_sriov_wreg(adev, reg, value, flag, hwip) : \ WREG32(reg, value)) #define __RREG32_SOC15_RLC__(reg, flag, hwip) \ - ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.funcs->sriov_rreg) ? \ - adev->gfx.rlc.funcs->sriov_rreg(adev, reg, flag, hwip) : \ + ((amdgpu_sriov_vf(adev) && adev->gfx.rlc.funcs && adev->gfx.rlc.rlcg_reg_access_supported) ? \ + amdgpu_sriov_rreg(adev, reg, flag, hwip) : \ RREG32(reg)) #define WREG32_FIELD15(ip, idx, reg, field, val) \ -- cgit From 381519dff88845bbe522e7446ec1e32e351c670d Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 17 Jan 2022 14:33:23 +0800 Subject: drm/amdgpu: retire rlc callbacks sriov_rreg/wreg MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Not needed anymore. 
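To summarize the shape of the path that replaces them (a sketch, not code from the series: the wrapper function below is hypothetical, while the macro and helpers it relies on are the ones added or rewired in the patches above):

/*
 * Hypothetical caller. __WREG32_SOC15_RLC__, amdgpu_sriov_wreg() and
 * amdgpu_virt_rlcg_reg_rw() are the pieces introduced/rewired by this series.
 */
static void example_program_gc_reg(struct amdgpu_device *adev, u32 reg, u32 v)
{
	/*
	 * On an SR-IOV VF where gfx_v9_0/gfx_v10_0_init_rlcg_reg_access_ctrl()
	 * has set adev->gfx.rlc.rlcg_reg_access_supported, this expands to
	 * amdgpu_sriov_wreg(adev, reg, v, AMDGPU_REGS_RLC, GC_HWIP);
	 * otherwise it falls back to a plain WREG32().
	 */
	__WREG32_SOC15_RLC__(reg, v, AMDGPU_REGS_RLC, GC_HWIP);
}

amdgpu_sriov_wreg() asks amdgpu_virt_get_rlcg_reg_access_flag() for the command (AMDGPU_RLCG_GC_WRITE, AMDGPU_RLCG_MMHUB_WRITE, ...), and amdgpu_virt_rlcg_reg_rw() encodes the request exactly as its comment describes: the value goes to SCRATCH_REG0, SCRATCH_REG1 carries the dword offset in bits [19:0] and the command in bits [30:28], spare_int is rung, and the guest polls SCRATCH_REG1 until the RLC clears the command bits or raises one of the error bits in [26:24].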
Signed-off-by: Hawking Zhang Reviewed-by: Zhou, Peng Ju Acked-by: Christian König Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h | 2 - drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 2 - drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c | 114 ------------------------------- drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 106 ---------------------------- 5 files changed, 3 insertions(+), 226 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h index 286b2347d063..3f671a62b009 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_rlc.h @@ -127,8 +127,6 @@ struct amdgpu_rlc_funcs { void (*reset)(struct amdgpu_device *adev); void (*start)(struct amdgpu_device *adev); void (*update_spm_vmid)(struct amdgpu_device *adev, unsigned vmid); - void (*sriov_wreg)(struct amdgpu_device *adev, u32 offset, u32 v, u32 acc_flags, u32 hwip); - u32 (*sriov_rreg)(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip); bool (*is_rlcg_access_range)(struct amdgpu_device *adev, uint32_t reg); }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 8c27d31f3e53..80c25176c993 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -821,8 +821,9 @@ void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, } } -bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, - u32 hwip, bool write, u32 *rlcg_flag) +static bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, + u32 acc_flags, u32 hwip, + bool write, u32 *rlcg_flag) { bool ret = false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index dbfa3ba445c3..c5edd84c1c12 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -334,8 +334,6 @@ enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *ad void amdgpu_virt_update_sriov_video_codec(struct amdgpu_device *adev, struct amdgpu_video_codec_info *encode, uint32_t encode_array_size, struct amdgpu_video_codec_info *decode, uint32_t decode_array_size); -bool amdgpu_virt_get_rlcg_reg_access_flag(struct amdgpu_device *adev, u32 acc_flags, - u32 hwip, bool write, u32 *rlcg_flag); void amdgpu_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip); diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 3fb484214d3a..f54e106e2b86 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -56,10 +56,6 @@ #define GFX10_NUM_GFX_RINGS_Sienna_Cichlid 1 #define GFX10_MEC_HPD_SIZE 2048 -#define RLCG_VFGATE_DISABLED 0x4000000 -#define RLCG_WRONG_OPERATION_TYPE 0x2000000 -#define RLCG_NOT_IN_RANGE 0x1000000 - #define F32_CE_PROGRAM_RAM_SIZE 65536 #define RLCG_UCODE_LOADING_START_ADDRESS 0x00002000L @@ -180,9 +176,6 @@ #define mmRLC_SPARE_INT_0_Sienna_Cichlid 0x4ca5 #define mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX 1 -#define RLCG_ERROR_REPORT_ENABLED(adev) \ - (amdgpu_sriov_reg_indirect_mmhub(adev) || amdgpu_sriov_reg_indirect_gc(adev)) - MODULE_FIRMWARE("amdgpu/navi10_ce.bin"); MODULE_FIRMWARE("amdgpu/navi10_pfp.bin"); MODULE_FIRMWARE("amdgpu/navi10_me.bin"); @@ -1458,111 +1451,6 @@ static const struct soc15_reg_golden golden_settings_gc_10_1_2[] = SOC15_REG_GOLDEN_VALUE(GC, 
0, mmUTCL1_CTRL, 0xffffffff, 0x00c00000) }; -static u32 gfx_v10_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag) -{ - static void *scratch_reg0; - static void *scratch_reg1; - static void *scratch_reg2; - static void *scratch_reg3; - static void *spare_int; - static uint32_t grbm_cntl; - static uint32_t grbm_idx; - uint32_t i = 0; - uint32_t retries = 50000; - u32 ret = 0; - u32 tmp; - - scratch_reg0 = adev->rmmio + - (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0) * 4; - scratch_reg1 = adev->rmmio + - (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1) * 4; - scratch_reg2 = adev->rmmio + - (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG2) * 4; - scratch_reg3 = adev->rmmio + - (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG3) * 4; - - if (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(10, 3, 0)) { - spare_int = adev->rmmio + - (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_0_Sienna_Cichlid_BASE_IDX] - + mmRLC_SPARE_INT_0_Sienna_Cichlid) * 4; - } else { - spare_int = adev->rmmio + - (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT) * 4; - } - - grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; - grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; - - if (offset == grbm_cntl || offset == grbm_idx) { - if (offset == grbm_cntl) - writel(v, scratch_reg2); - else if (offset == grbm_idx) - writel(v, scratch_reg3); - - writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); - } else { - writel(v, scratch_reg0); - writel(offset | flag, scratch_reg1); - writel(1, spare_int); - - for (i = 0; i < retries; i++) { - tmp = readl(scratch_reg1); - if (!(tmp & flag)) - break; - - udelay(10); - } - - if (i >= retries) { - if (RLCG_ERROR_REPORT_ENABLED(adev)) { - if (tmp & RLCG_VFGATE_DISABLED) - pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset); - else if (tmp & RLCG_WRONG_OPERATION_TYPE) - pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset); - else if (tmp & RLCG_NOT_IN_RANGE) - pr_err("The register is not in range, program reg:0x%05x failed!\n", offset); - else - pr_err("Unknown error type, program reg:0x%05x failed!\n", offset); - } else - pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset); - } - } - - ret = readl(scratch_reg0); - - return ret; -} - -static void gfx_v10_sriov_wreg(struct amdgpu_device *adev, u32 offset, u32 value, u32 acc_flags, u32 hwip) -{ - u32 rlcg_flag; - - if (!amdgpu_sriov_runtime(adev) && - amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) { - gfx_v10_rlcg_rw(adev, offset, value, rlcg_flag); - return; - } - - if (acc_flags & AMDGPU_REGS_NO_KIQ) - WREG32_NO_KIQ(offset, value); - else - WREG32(offset, value); -} - -static u32 gfx_v10_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip) -{ - u32 rlcg_flag; - - if (!amdgpu_sriov_runtime(adev) && - amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag)) - return gfx_v10_rlcg_rw(adev, offset, 0, rlcg_flag); - - if (acc_flags & AMDGPU_REGS_NO_KIQ) - return RREG32_NO_KIQ(offset); - else - return RREG32(offset); -} - static const struct soc15_reg_golden golden_settings_gc_10_1_nv14[] = { /* Pending on emulation bring up */ @@ -8370,8 +8258,6 @@ static const struct amdgpu_rlc_funcs gfx_v10_0_rlc_funcs_sriov = { .reset = gfx_v10_0_rlc_reset, .start = gfx_v10_0_rlc_start, .update_spm_vmid = gfx_v10_0_update_spm_vmid, - 
.sriov_wreg = gfx_v10_sriov_wreg, - .sriov_rreg = gfx_v10_sriov_rreg, .is_rlcg_access_range = gfx_v10_0_is_rlcg_access_range, }; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c index c7bccf1a28b4..ca7b886c6ce6 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c @@ -63,10 +63,6 @@ #define mmGCEA_PROBE_MAP 0x070c #define mmGCEA_PROBE_MAP_BASE_IDX 0 -#define GFX9_RLCG_VFGATE_DISABLED 0x4000000 -#define GFX9_RLCG_WRONG_OPERATION_TYPE 0x2000000 -#define GFX9_RLCG_NOT_IN_RANGE 0x1000000 - MODULE_FIRMWARE("amdgpu/vega10_ce.bin"); MODULE_FIRMWARE("amdgpu/vega10_pfp.bin"); MODULE_FIRMWARE("amdgpu/vega10_me.bin"); @@ -743,106 +739,6 @@ static const u32 GFX_RLC_SRM_INDEX_CNTL_DATA_OFFSETS[] = mmRLC_SRM_INDEX_CNTL_DATA_7 - mmRLC_SRM_INDEX_CNTL_DATA_0, }; -static u32 gfx_v9_0_rlcg_rw(struct amdgpu_device *adev, u32 offset, u32 v, uint32_t flag) -{ - static void *scratch_reg0; - static void *scratch_reg1; - static void *scratch_reg2; - static void *scratch_reg3; - static void *spare_int; - static uint32_t grbm_cntl; - static uint32_t grbm_idx; - uint32_t i = 0; - uint32_t retries = 50000; - u32 ret = 0; - u32 tmp; - - scratch_reg0 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG0_BASE_IDX] + mmSCRATCH_REG0)*4; - scratch_reg1 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG1_BASE_IDX] + mmSCRATCH_REG1)*4; - scratch_reg2 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG2_BASE_IDX] + mmSCRATCH_REG2)*4; - scratch_reg3 = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmSCRATCH_REG3_BASE_IDX] + mmSCRATCH_REG3)*4; - spare_int = adev->rmmio + (adev->reg_offset[GC_HWIP][0][mmRLC_SPARE_INT_BASE_IDX] + mmRLC_SPARE_INT)*4; - - grbm_cntl = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_CNTL_BASE_IDX] + mmGRBM_GFX_CNTL; - grbm_idx = adev->reg_offset[GC_HWIP][0][mmGRBM_GFX_INDEX_BASE_IDX] + mmGRBM_GFX_INDEX; - - if (offset == grbm_cntl || offset == grbm_idx) { - if (offset == grbm_cntl) - writel(v, scratch_reg2); - else if (offset == grbm_idx) - writel(v, scratch_reg3); - - writel(v, ((void __iomem *)adev->rmmio) + (offset * 4)); - } else { - /* - * SCRATCH_REG0 = read/write value - * SCRATCH_REG1[30:28] = command - * SCRATCH_REG1[19:0] = address in dword - * SCRATCH_REG1[26:24] = Error reporting - */ - writel(v, scratch_reg0); - writel(offset | flag, scratch_reg1); - writel(1, spare_int); - - for (i = 0; i < retries; i++) { - tmp = readl(scratch_reg1); - if (!(tmp & flag)) - break; - - udelay(10); - } - - if (i >= retries) { - if (amdgpu_sriov_reg_indirect_gc(adev)) { - if (tmp & GFX9_RLCG_VFGATE_DISABLED) - pr_err("The vfgate is disabled, program reg:0x%05x failed!\n", offset); - else if (tmp & GFX9_RLCG_WRONG_OPERATION_TYPE) - pr_err("Wrong operation type, program reg:0x%05x failed!\n", offset); - else if (tmp & GFX9_RLCG_NOT_IN_RANGE) - pr_err("The register is not in range, program reg:0x%05x failed!\n", offset); - else - pr_err("Unknown error type, program reg:0x%05x failed!\n", offset); - } else - pr_err("timeout: rlcg program reg:0x%05x failed!\n", offset); - } - } - - ret = readl(scratch_reg0); - - return ret; -} - -static u32 gfx_v9_0_sriov_rreg(struct amdgpu_device *adev, u32 offset, u32 acc_flags, u32 hwip) -{ - u32 rlcg_flag; - - if (!amdgpu_sriov_runtime(adev) && - amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, false, &rlcg_flag)) - return gfx_v9_0_rlcg_rw(adev, offset, 0, rlcg_flag); - - if (acc_flags & AMDGPU_REGS_NO_KIQ) - return RREG32_NO_KIQ(offset); - else - return RREG32(offset); 
-} - -static void gfx_v9_0_sriov_wreg(struct amdgpu_device *adev, u32 offset, - u32 value, u32 acc_flags, u32 hwip) -{ - u32 rlcg_flag; - - if (!amdgpu_sriov_runtime(adev) && - amdgpu_virt_get_rlcg_reg_access_flag(adev, acc_flags, hwip, true, &rlcg_flag)) { - gfx_v9_0_rlcg_rw(adev, offset, value, rlcg_flag); - return; - } - - if (acc_flags & AMDGPU_REGS_NO_KIQ) - WREG32_NO_KIQ(offset, value); - else - WREG32(offset, value); -} - #define VEGA10_GB_ADDR_CONFIG_GOLDEN 0x2a114042 #define VEGA12_GB_ADDR_CONFIG_GOLDEN 0x24104041 #define RAVEN_GB_ADDR_CONFIG_GOLDEN 0x24000042 @@ -5268,8 +5164,6 @@ static const struct amdgpu_rlc_funcs gfx_v9_0_rlc_funcs = { .reset = gfx_v9_0_rlc_reset, .start = gfx_v9_0_rlc_start, .update_spm_vmid = gfx_v9_0_update_spm_vmid, - .sriov_wreg = gfx_v9_0_sriov_wreg, - .sriov_rreg = gfx_v9_0_sriov_rreg, .is_rlcg_access_range = gfx_v9_0_is_rlcg_access_range, }; -- cgit From 04022982fc5ddac6cc783d66846f2464fe4985fb Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Mon, 10 Jan 2022 17:31:27 +0800 Subject: drm/amdgpu: switch to common helper to read bios from rom create a common helper function for soc15 and onwards to read bios image from rom Signed-off-by: Hawking Zhang Reviewed-by: Lijo Lazar Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c | 38 ++++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/nv.c | 34 +--------------------------- drivers/gpu/drm/amd/amdgpu/soc15.c | 37 ++----------------------------- 4 files changed, 43 insertions(+), 69 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 8658312764bc..845c92dc73f6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -372,7 +372,8 @@ int amdgpu_device_ip_block_add(struct amdgpu_device *adev, */ bool amdgpu_get_bios(struct amdgpu_device *adev); bool amdgpu_read_bios(struct amdgpu_device *adev); - +bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev, + u8 *bios, u32 length_bytes); /* * Clocks */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c index 27b19503773b..0eddca795e96 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bios.c @@ -464,3 +464,41 @@ success: adev->is_atom_fw = (adev->asic_type >= CHIP_VEGA10) ? 
true : false; return true; } + +/* helper function for soc15 and onwards to read bios from rom */ +bool amdgpu_soc15_read_bios_from_rom(struct amdgpu_device *adev, + u8 *bios, u32 length_bytes) +{ + u32 *dw_ptr; + u32 i, length_dw; + u32 rom_index_offset; + u32 rom_data_offset; + + if (bios == NULL) + return false; + if (length_bytes == 0) + return false; + /* APU vbios image is part of sbios image */ + if (adev->flags & AMD_IS_APU) + return false; + if (!adev->smuio.funcs || + !adev->smuio.funcs->get_rom_index_offset || + !adev->smuio.funcs->get_rom_data_offset) + return false; + + dw_ptr = (u32 *)bios; + length_dw = ALIGN(length_bytes, 4) / 4; + + rom_index_offset = + adev->smuio.funcs->get_rom_index_offset(adev); + rom_data_offset = + adev->smuio.funcs->get_rom_data_offset(adev); + + /* set rom index to 0 */ + WREG32(rom_index_offset, 0); + /* read out the rom data */ + for (i = 0; i < length_dw; i++) + dw_ptr[i] = RREG32(rom_data_offset); + + return true; +} diff --git a/drivers/gpu/drm/amd/amdgpu/nv.c b/drivers/gpu/drm/amd/amdgpu/nv.c index 8f0c92cbdc4a..f76834085b34 100644 --- a/drivers/gpu/drm/amd/amdgpu/nv.c +++ b/drivers/gpu/drm/amd/amdgpu/nv.c @@ -330,38 +330,6 @@ static bool nv_read_disabled_bios(struct amdgpu_device *adev) return false; } -static bool nv_read_bios_from_rom(struct amdgpu_device *adev, - u8 *bios, u32 length_bytes) -{ - u32 *dw_ptr; - u32 i, length_dw; - u32 rom_index_offset, rom_data_offset; - - if (bios == NULL) - return false; - if (length_bytes == 0) - return false; - /* APU vbios image is part of sbios image */ - if (adev->flags & AMD_IS_APU) - return false; - - dw_ptr = (u32 *)bios; - length_dw = ALIGN(length_bytes, 4) / 4; - - rom_index_offset = - adev->smuio.funcs->get_rom_index_offset(adev); - rom_data_offset = - adev->smuio.funcs->get_rom_data_offset(adev); - - /* set rom index to 0 */ - WREG32(rom_index_offset, 0); - /* read out the rom data */ - for (i = 0; i < length_dw; i++) - dw_ptr[i] = RREG32(rom_data_offset); - - return true; -} - static struct soc15_allowed_register_entry nv_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)}, { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)}, @@ -678,7 +646,7 @@ static int nv_update_umd_stable_pstate(struct amdgpu_device *adev, static const struct amdgpu_asic_funcs nv_asic_funcs = { .read_disabled_bios = &nv_read_disabled_bios, - .read_bios_from_rom = &nv_read_bios_from_rom, + .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom, .read_register = &nv_read_register, .reset = &nv_asic_reset, .reset_method = &nv_asic_reset_method, diff --git a/drivers/gpu/drm/amd/amdgpu/soc15.c b/drivers/gpu/drm/amd/amdgpu/soc15.c index 6c8fcc4e29f4..a0235f75dbcb 100644 --- a/drivers/gpu/drm/amd/amdgpu/soc15.c +++ b/drivers/gpu/drm/amd/amdgpu/soc15.c @@ -375,39 +375,6 @@ static bool soc15_read_disabled_bios(struct amdgpu_device *adev) return false; } -static bool soc15_read_bios_from_rom(struct amdgpu_device *adev, - u8 *bios, u32 length_bytes) -{ - u32 *dw_ptr; - u32 i, length_dw; - uint32_t rom_index_offset; - uint32_t rom_data_offset; - - if (bios == NULL) - return false; - if (length_bytes == 0) - return false; - /* APU vbios image is part of sbios image */ - if (adev->flags & AMD_IS_APU) - return false; - - dw_ptr = (u32 *)bios; - length_dw = ALIGN(length_bytes, 4) / 4; - - rom_index_offset = - adev->smuio.funcs->get_rom_index_offset(adev); - rom_data_offset = - adev->smuio.funcs->get_rom_data_offset(adev); - - /* set rom index to 0 */ - WREG32(rom_index_offset, 0); - /* read out the rom data */ - for (i = 0; i < 
length_dw; i++) - dw_ptr[i] = RREG32(rom_data_offset); - - return true; -} - static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = { { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)}, { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)}, @@ -925,7 +892,7 @@ static void soc15_pre_asic_init(struct amdgpu_device *adev) static const struct amdgpu_asic_funcs soc15_asic_funcs = { .read_disabled_bios = &soc15_read_disabled_bios, - .read_bios_from_rom = &soc15_read_bios_from_rom, + .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom, .read_register = &soc15_read_register, .reset = &soc15_asic_reset, .reset_method = &soc15_asic_reset_method, @@ -947,7 +914,7 @@ static const struct amdgpu_asic_funcs soc15_asic_funcs = static const struct amdgpu_asic_funcs vega20_asic_funcs = { .read_disabled_bios = &soc15_read_disabled_bios, - .read_bios_from_rom = &soc15_read_bios_from_rom, + .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom, .read_register = &soc15_read_register, .reset = &soc15_asic_reset, .reset_method = &soc15_asic_reset_method, -- cgit From d6dac2bc12bd968acfcec7a0c92c59d2e19dacc9 Mon Sep 17 00:00:00 2001 From: "Stanley.Yang" Date: Fri, 21 Jan 2022 16:50:48 +0800 Subject: drm/amdgpu: fix channel index mapping for SIENNA_CICHLID Pmfw read ecc info registers in the following order, umc0: ch_inst 0, 1, 2 ... 7 umc1: ch_inst 0, 1, 2 ... 7 The position of the register value stored in eccinfo table is calculated according to the below formula, channel_index = umc_inst * channel_in_umc + ch_inst Driver directly use the index of eccinfo table array as channel index, it's not correct, driver needs convert eccinfo table array index to channel index according to channel_idx_tbl. Reviewed-by: Tao Zhou Signed-off-by: Stanley.Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c index cd57f39df7d1..d70417196662 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c @@ -55,29 +55,36 @@ static inline uint32_t get_umc_v8_7_channel_index(struct amdgpu_device *adev, } static void umc_v8_7_ecc_info_query_correctable_error_count(struct amdgpu_device *adev, - uint32_t channel_index, + uint32_t umc_inst, uint32_t ch_inst, unsigned long *error_count) { uint64_t mc_umc_status; + uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + + eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst; + /* check for SRAM correctable error * MCUMC_STATUS is a 64 bit register */ - mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1) *error_count += 1; } static void umc_v8_7_ecc_info_querry_uncorrectable_error_count(struct amdgpu_device *adev, - uint32_t channel_index, + uint32_t umc_inst, uint32_t ch_inst, unsigned long *error_count) { uint64_t mc_umc_status; + uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst; + /* check the MCUMC_STATUS */ - mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; if ((REG_GET_FIELD(mc_umc_status, 
MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1) && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Deferred) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || @@ -94,20 +101,16 @@ static void umc_v8_7_ecc_info_query_ras_error_count(struct amdgpu_device *adev, uint32_t umc_inst = 0; uint32_t ch_inst = 0; - uint32_t channel_index = 0; /* TODO: driver needs to toggle DF Cstate to ensure * safe access of UMC registers. Will add the protection */ LOOP_UMC_INST_AND_CH(umc_inst, ch_inst) { - channel_index = get_umc_v8_7_channel_index(adev, - umc_inst, - ch_inst); umc_v8_7_ecc_info_query_correctable_error_count(adev, - channel_index, + umc_inst, ch_inst, &(err_data->ce_count)); umc_v8_7_ecc_info_querry_uncorrectable_error_count(adev, - channel_index, + umc_inst, ch_inst, &(err_data->ue_count)); } } @@ -120,12 +123,14 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev, uint64_t mc_umc_status, err_addr, retired_page; struct eeprom_table_record *err_rec; uint32_t channel_index; + uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); + eccinfo_table_idx = umc_inst * adev->umc.channel_inst_num + ch_inst; channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; - mc_umc_status = ras->umc_ecc.ecc[channel_index].mca_umc_status; + mc_umc_status = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_status; if (mc_umc_status == 0) return; @@ -140,7 +145,7 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev, (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, CECC) == 1)) { - err_addr = ras->umc_ecc.ecc[channel_index].mca_umc_addr; + err_addr = ras->umc_ecc.ecc[eccinfo_table_idx].mca_umc_addr; err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); /* translate umc channel address to soc pa, 3 parts are included */ -- cgit From 1f33bd18d703ecdf7b664168d640439e867d1605 Mon Sep 17 00:00:00 2001 From: yipechai Date: Tue, 18 Jan 2022 17:59:45 +0800 Subject: drm/amdgpu: Move xgmi ras initialization from .late_init to .early_init Move xgmi ras initialization from .late_init to .early_init, which let xgmi ras can be initialized only once. 
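In other words, the registration step moves out of a path that can be re-entered. A minimal sketch of the resulting ordering (the helper name is the one added in the hunks below; the probe/reset framing is my own summary, not wording from the patch):

/* sketch only — the real gmc_v9_0/gmc_v10_0_early_init() do more than this */
static int gmc_early_init_sketch(struct amdgpu_device *adev)
{
	/* ... set funcs, apertures, etc. (elided) ... */

	/* new: register adev->gmc.xgmi.ras exactly once, at probe time */
	return amdgpu_gmc_ras_early_init(adev);
}

amdgpu_gmc_ras_late_init(), which may run again later (the duplicate check reverted in the next patch existed precisely because registration used to sit in that re-runnable path), now only performs the per-block ras_late_init work.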
Signed-off-by: yipechai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 15 ++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 1 + drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 5 +++++ drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 5 +++++ 4 files changed, 21 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index e15f6b8a62ea..ec1203d49799 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -436,6 +436,16 @@ void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, } while (fault->timestamp < tmp); } +int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev) +{ + if (!adev->gmc.xgmi.connected_to_cpu) { + adev->gmc.xgmi.ras = &xgmi_ras; + amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block); + } + + return 0; +} + int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) { int r; @@ -452,11 +462,6 @@ int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev) return r; } - if (!adev->gmc.xgmi.connected_to_cpu) { - adev->gmc.xgmi.ras = &xgmi_ras; - amdgpu_ras_register_ras_block(adev, &adev->gmc.xgmi.ras->ras_block); - } - if (adev->gmc.xgmi.ras && adev->gmc.xgmi.ras->ras_block.ras_late_init) { r = adev->gmc.xgmi.ras->ras_block.ras_late_init(adev, NULL); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h index a5e8e0e08970..93505bb0a36c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h @@ -318,6 +318,7 @@ bool amdgpu_gmc_filter_faults(struct amdgpu_device *adev, uint16_t pasid, uint64_t timestamp); void amdgpu_gmc_filter_faults_remove(struct amdgpu_device *adev, uint64_t addr, uint16_t pasid); +int amdgpu_gmc_ras_early_init(struct amdgpu_device *adev); int amdgpu_gmc_ras_late_init(struct amdgpu_device *adev); void amdgpu_gmc_ras_fini(struct amdgpu_device *adev); int amdgpu_gmc_allocate_vm_inv_eng(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index 5e88655cdfa5..73ab0eebe4e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -719,6 +719,7 @@ static void gmc_v10_0_set_gfxhub_funcs(struct amdgpu_device *adev) static int gmc_v10_0_early_init(void *handle) { + int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; gmc_v10_0_set_mmhub_funcs(adev); @@ -734,6 +735,10 @@ static int gmc_v10_0_early_init(void *handle) adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + r = amdgpu_gmc_ras_early_init(adev); + if (r) + return r; + return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index de32dbca9ab8..600ff658ab1b 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1318,6 +1318,7 @@ static void gmc_v9_0_set_mca_funcs(struct amdgpu_device *adev) static int gmc_v9_0_early_init(void *handle) { + int r; struct amdgpu_device *adev = (struct amdgpu_device *)handle; /* ARCT and VEGA20 don't have XGMI defined in their IP discovery tables */ @@ -1347,6 +1348,10 @@ static int gmc_v9_0_early_init(void *handle) adev->gmc.private_aperture_end = adev->gmc.private_aperture_start + (4ULL << 30) - 1; + r = amdgpu_gmc_ras_early_init(adev); + if (r) + return r; + return 0; } -- cgit From e9287ef8d46cee538c9a71bb8978b2f7e975c452 Mon Sep 17 00:00:00 2001 From: 
yipechai Date: Wed, 19 Jan 2022 16:49:42 +0800 Subject: Revert "drm/amdgpu: No longer insert ras blocks into ras_list if it already exists in ras_list" This reverts commit df4f0041c6ef497e598a67e367db835489162754. Xgmi ras initialization had been moved from .late_init to early_init, the defect of repeated calling amdgpu_ras_register_ras_block had been fixed, so revert this patch. Signed-off-by: yipechai Reviewed-by: Tao Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 7 ------- 1 file changed, 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index 4992bc554c0c..d4e07d0acb66 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -2763,19 +2763,12 @@ int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, struct amdgpu_ras_block_object *ras_block_obj) { - struct amdgpu_ras_block_object *obj, *tmp; if (!adev || !ras_block_obj) return -EINVAL; if (!amdgpu_ras_asic_supported(adev)) return 0; - /* If the ras object is in ras_list, don't add it again */ - list_for_each_entry_safe(obj, tmp, &adev->ras_list, node) { - if (obj == ras_block_obj) - return 0; - } - INIT_LIST_HEAD(&ras_block_obj->node); list_add_tail(&ras_block_obj->node, &adev->ras_list); -- cgit From d435c1ed56b9f9347189924395588cfdf7489af5 Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Mon, 24 Jan 2022 14:25:34 +0800 Subject: drm/amd/pm: add get_dpm_ultimate_freq function for cyan skillfish Some clients(e.g., kfd) query sclk/mclk through this function. As cyan skillfish doesn't support dpm, for sclk, set min/max to CYAN_SKILLFISH_SCLK_MIN/CYAN_SKILLFISH_SCLK_MAX(to maintain the existing logic).For others, set both min and max to current value. 
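As a rough illustration of the intended behaviour (the caller below is hypothetical; the constants and helpers are the ones used in the patch):

	uint32_t min = 0, max = 0;

	/* sclk/gfxclk: fixed bounds, since cyan skillfish has no dpm */
	cyan_skillfish_get_dpm_ultimate_freq(smu, SMU_SCLK, &min, &max);
	/* min == CYAN_SKILLFISH_SCLK_MIN, max == CYAN_SKILLFISH_SCLK_MAX */

	/* any other clock type: both bounds collapse to the current value */
	cyan_skillfish_get_dpm_ultimate_freq(smu, SMU_MCLK, &min, &max);
	/* min == max == value from cyan_skillfish_get_current_clk_freq() */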
Before this patch: # /opt/rocm/opencl/bin/clinfo Max clock frequency: 0Mhz After this patch: # /opt/rocm/opencl/bin/clinfo Max clock frequency: 2000Mhz v2: - Maintain the existing min/max sclk logic.(Lijo) v3: - Avoid fetching metrics table twice.(Lijo) Signed-off-by: Lang Yu Reviewed-by: Lijo Lazar Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- .../drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 31 ++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index 2acd7470431e..d743984b68a2 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -542,6 +542,36 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu, return ret; } +static int cyan_skillfish_get_dpm_ultimate_freq(struct smu_context *smu, + enum smu_clk_type clk_type, + uint32_t *min, + uint32_t *max) +{ + int ret = 0; + uint32_t low, high; + + switch (clk_type) { + case SMU_GFXCLK: + case SMU_SCLK: + low = CYAN_SKILLFISH_SCLK_MIN; + high = CYAN_SKILLFISH_SCLK_MAX; + break; + default: + ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &low); + if (ret) + return ret; + high = low; + break; + } + + if (min) + *min = low; + if (max) + *max = high; + + return 0; +} + static const struct pptable_funcs cyan_skillfish_ppt_funcs = { .check_fw_status = smu_v11_0_check_fw_status, @@ -555,6 +585,7 @@ static const struct pptable_funcs cyan_skillfish_ppt_funcs = { .is_dpm_running = cyan_skillfish_is_dpm_running, .get_gpu_metrics = cyan_skillfish_get_gpu_metrics, .od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table, + .get_dpm_ultimate_freq = cyan_skillfish_get_dpm_ultimate_freq, .register_irq_handler = smu_v11_0_register_irq_handler, .notify_memory_pool_location = smu_v11_0_notify_memory_pool_location, .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param, -- cgit From 5b680dbfbf6899afaf8bbe59b859509149d59129 Mon Sep 17 00:00:00 2001 From: Lang Yu Date: Mon, 24 Jan 2022 14:34:48 +0800 Subject: drm/amd/pm: use existing fini_smc_tables function for cyan skillfish Remove redundant code and use general smu_v11_0_fini_smc_tables function. 
Signed-off-by: Lang Yu Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- .../gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 18 +----------------- 1 file changed, 1 insertion(+), 17 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c index d743984b68a2..2f57333e6071 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c @@ -125,22 +125,6 @@ static int cyan_skillfish_init_smc_tables(struct smu_context *smu) return smu_v11_0_init_smc_tables(smu); } -static int cyan_skillfish_finit_smc_tables(struct smu_context *smu) -{ - struct smu_table_context *smu_table = &smu->smu_table; - - kfree(smu_table->metrics_table); - smu_table->metrics_table = NULL; - - kfree(smu_table->gpu_metrics_table); - smu_table->gpu_metrics_table = NULL; - smu_table->gpu_metrics_table_size = 0; - - smu_table->metrics_time = 0; - - return 0; -} - static int cyan_skillfish_get_smu_metrics_data(struct smu_context *smu, MetricsMember_t member, @@ -579,7 +563,7 @@ static const struct pptable_funcs cyan_skillfish_ppt_funcs = { .init_power = smu_v11_0_init_power, .fini_power = smu_v11_0_fini_power, .init_smc_tables = cyan_skillfish_init_smc_tables, - .fini_smc_tables = cyan_skillfish_finit_smc_tables, + .fini_smc_tables = smu_v11_0_fini_smc_tables, .read_sensor = cyan_skillfish_read_sensor, .print_clk_levels = cyan_skillfish_print_clk_levels, .is_dpm_running = cyan_skillfish_is_dpm_running, -- cgit From c282d9512cdd167384fe0d233d13deea538658f2 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 19 Jan 2022 16:24:36 +0800 Subject: drm/amd/display: factor out dp detection link training and mst top detection [why] dc_link_detect_helper has multiple purpose: 1. Detect link and local sink. 2. Verify link capability by performing detection link training. 3. Start mst topology discovery and if succeeded fails the current interface. This is difficult to maintain as item 2 has become more destructive to current dc state. The change is to decouple these sequences to its own functions. Eventually only item 2 is a destructive method and we will redesign this sequence with a more unified policy across different encoder types. Tested-by: Daniel Wheeler Reviewed-by: Chris Park Acked-by: Rodrigo Siqueira Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 362 +++++++++++++-------- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 164 ++-------- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 16 - drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 7 +- 5 files changed, 259 insertions(+), 291 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index e5d63b0d2594..af2d7f3abfca 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -720,35 +720,8 @@ static bool detect_dp(struct dc_link *link, sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT; if (!detect_dp_sink_caps(link)) return false; - if (is_mst_supported(link)) { - sink_caps->signal = SIGNAL_TYPE_DISPLAY_PORT_MST; - link->type = dc_connection_mst_branch; - dal_ddc_service_set_transaction_type(link->ddc, - sink_caps->transaction_type); - -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock - * reports DSC support. 
- */ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && - link->type == dc_connection_mst_branch && - link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && - link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && - !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) - link->wa_flags.dpia_mst_dsc_always_on = true; -#endif - -#if defined(CONFIG_DRM_AMD_DC_HDCP) - /* In case of fallback to SST when topology discovery below fails - * HDCP caps will be querried again later by the upper layer (caller - * of this function). */ - query_hdcp_capability(SIGNAL_TYPE_DISPLAY_PORT_MST, link); -#endif - } - - if (link->type != dc_connection_mst_branch && - is_dp_branch_device(link)) + if (is_dp_branch_device(link)) /* DP SST branch */ link->type = dc_connection_sst_branch; } else { @@ -824,15 +797,215 @@ static bool wait_for_entering_dp_alt_mode(struct dc_link *link) return false; } -/* - * dc_link_detect() - Detect if a sink is attached to a given link +static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) +{ +#if defined(CONFIG_DRM_AMD_DC_DCN) + /* Apply work around for tunneled MST on certain USB4 docks. Always use DSC if dock + * reports DSC support. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && + link->type == dc_connection_mst_branch && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && + !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) + link->wa_flags.dpia_mst_dsc_always_on = true; +#endif +} + +static void revert_dpia_mst_dsc_always_on_wa(struct dc_link *link) +{ + /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + link->wa_flags.dpia_mst_dsc_always_on = false; +} + +static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason reason) +{ + DC_LOGGER_INIT(link->ctx->logger); + + LINK_INFO("link=%d, mst branch is now Connected\n", + link->link_index); + + apply_dpia_mst_dsc_always_on_wa(link); + link->type = dc_connection_mst_branch; + dm_helpers_dp_update_branch_info(link->ctx, link); + if (dm_helpers_dp_mst_start_top_mgr(link->ctx, + link, reason == DETECT_REASON_BOOT)) { + link_disconnect_sink(link); + } else { + link->type = dc_connection_sst_branch; + } + + return link->type == dc_connection_mst_branch; +} + +static void reset_cur_dp_mst_topology(struct dc_link *link) +{ + DC_LOGGER_INIT(link->ctx->logger); + + LINK_INFO("link=%d, mst branch is now Disconnected\n", + link->link_index); + + revert_dpia_mst_dsc_always_on_wa(link); + dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); + + link->mst_stream_alloc_table.stream_count = 0; + memset(link->mst_stream_alloc_table.stream_allocations, + 0, + sizeof(link->mst_stream_alloc_table.stream_allocations)); +} + +static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc, + enum dc_detect_reason reason) +{ + int i; + bool can_apply_seamless_boot = false; + + for (i = 0; i < dc->current_state->stream_count; i++) { + if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { + can_apply_seamless_boot = true; + break; + } + } + + return !can_apply_seamless_boot && reason != DETECT_REASON_BOOT; +} + +static void prepare_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ +#if defined(CONFIG_DRM_AMD_DC_DCN) + dc_z10_restore(dc); +#endif + clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); +} + +static void 
restore_phy_clocks_for_destructive_link_verification(const struct dc *dc) +{ + clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); +} + +static void set_all_streams_dpms_off_for_link(struct dc_link *link) +{ + int i; + struct pipe_ctx *pipe_ctx; + struct dc_stream_update stream_update; + bool dpms_off = true; + + memset(&stream_update, 0, sizeof(stream_update)); + stream_update.dpms_off = &dpms_off; + + for (i = 0; i < MAX_PIPES; i++) { + pipe_ctx = &link->dc->current_state->res_ctx.pipe_ctx[i]; + if (pipe_ctx && pipe_ctx->stream && !pipe_ctx->stream->dpms_off && + pipe_ctx->stream->link == link && !pipe_ctx->prev_odm_pipe) { + stream_update.stream = pipe_ctx->stream; + dc_commit_updates_for_stream(link->ctx->dc, NULL, 0, + pipe_ctx->stream, &stream_update, + link->ctx->dc->current_state); + } + } +} + +static void verify_link_capability_destructive(struct dc_link *link, + struct dc_sink *sink, + enum dc_detect_reason reason) +{ + struct link_resource link_res = { 0 }; + bool should_prepare_phy_clocks = + should_prepare_phy_clocks_for_link_verification(link->dc, reason); + + if (should_prepare_phy_clocks) + prepare_phy_clocks_for_destructive_link_verification(link->dc); + + + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + struct dc_link_settings known_limit_link_setting = + dp_get_max_link_cap(link); + + set_all_streams_dpms_off_for_link(link); +#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&known_limit_link_setting) == + DP_128b_132b_ENCODING) + link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt( + &link->dc->current_state->res_ctx, + link->dc->res_pool, + link); +#endif + dp_verify_link_cap_with_retries( + link, &link_res, &known_limit_link_setting, + LINK_TRAINING_MAX_VERIFY_RETRY); + } else { + ASSERT(0); + } + + if (should_prepare_phy_clocks) + restore_phy_clocks_for_destructive_link_verification(link->dc); +} + +static void verify_link_capability_non_destructive(struct dc_link *link) +{ + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + if (dc_is_embedded_signal(link->local_sink->sink_signal) || + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) + /* TODO - should we check link encoder's max link caps here? + * How do we know which link encoder to check from? 
+ */ + link->verified_link_cap = link->reported_link_cap; + else + link->verified_link_cap = dp_get_max_link_cap(link); + } +} + +static bool should_verify_link_capability_destructively(struct dc_link *link, + enum dc_detect_reason reason) +{ + bool destrictive = false; + struct dc_link_settings max_link_cap; + bool is_link_enc_unavailable = link->link_enc && + link->dc->res_pool->funcs->link_encs_assign && + !link_enc_cfg_is_link_enc_avail( + link->ctx->dc, + link->link_enc->preferred_engine, + link); + + if (dc_is_dp_signal(link->local_sink->sink_signal)) { + max_link_cap = dp_get_max_link_cap(link); + destrictive = true; + + if (link->dc->debug.skip_detection_link_training || + dc_is_embedded_signal(link->local_sink->sink_signal) || + link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { + destrictive = false; + } else if (dp_get_link_encoding_format(&max_link_cap) == + DP_8b_10b_ENCODING) { + if (link->dpcd_caps.is_mst_capable || + is_link_enc_unavailable) { + destrictive = false; + } + } + } + + return destrictive; +} + +static void verify_link_capability(struct dc_link *link, struct dc_sink *sink, + enum dc_detect_reason reason) +{ + if (should_verify_link_capability_destructively(link, reason)) + verify_link_capability_destructive(link, sink, reason); + else + verify_link_capability_non_destructive(link); +} + + +/** + * detect_link_and_local_sink() - Detect if a sink is attached to a given link * * link->local_sink is created or destroyed as needed. * - * This does not create remote sinks but will trigger DM - * to start MST detection if a branch is detected. + * This does not create remote sinks. */ -static bool dc_link_detect_helper(struct dc_link *link, +static bool detect_link_and_local_sink(struct dc_link *link, enum dc_detect_reason reason) { struct dc_sink_init_data sink_init_data = { 0 }; @@ -848,9 +1021,7 @@ static bool dc_link_detect_helper(struct dc_link *link, struct dpcd_caps prev_dpcd_caps; enum dc_connection_type new_connection_type = dc_connection_none; enum dc_connection_type pre_connection_type = dc_connection_none; - bool perform_dp_seamless_boot = false; const uint32_t post_oui_delay = 30; // 30ms - struct link_resource link_res = { 0 }; DC_LOGGER_INIT(link->ctx->logger); @@ -944,61 +1115,6 @@ static bool dc_link_detect_helper(struct dc_link *link, return false; } -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link->reported_link_cap) == DP_128b_132b_ENCODING) - link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt( - &link->dc->current_state->res_ctx, - link->dc->res_pool, - link); -#endif - - if (link->type == dc_connection_mst_branch) { - LINK_INFO("link=%d, mst branch is now Connected\n", - link->link_index); - /* Need to setup mst link_cap struct here - * otherwise dc_link_detect() will leave mst link_cap - * empty which leads to allocate_mst_payload() has "0" - * pbn_per_slot value leading to exception on dc_fixpt_div() - */ - dp_verify_mst_link_cap(link, &link_res); - - /* - * This call will initiate MST topology discovery. Which - * will detect MST ports and add new DRM connector DRM - * framework. Then read EDID via remote i2c over aux. In - * the end, will notify DRM detect result and save EDID - * into DRM framework. - * - * .detect is called by .fill_modes. - * .fill_modes is called by user mode ioctl - * DRM_IOCTL_MODE_GETCONNECTOR. - * - * .get_modes is called by .fill_modes. - * - * call .get_modes, AMDGPU DM implementation will create - * new dc_sink and add to dc_link. 
For long HPD plug - * in/out, MST has its own handle. - * - * Therefore, just after dc_create, link->sink is not - * created for MST until user mode app calls - * DRM_IOCTL_MODE_GETCONNECTOR. - * - * Need check ->sink usages in case ->sink = NULL - * TODO: s3 resume check - */ - - dm_helpers_dp_update_branch_info(link->ctx, link); - if (dm_helpers_dp_mst_start_top_mgr(link->ctx, - link, reason == DETECT_REASON_BOOT)) { - if (prev_sink) - dc_sink_release(prev_sink); - return false; - } else { - link->type = dc_connection_sst_branch; - sink_caps.signal = SIGNAL_TYPE_DISPLAY_PORT; - } - } - /* Active SST downstream branch device unplug*/ if (link->type == dc_connection_sst_branch && link->dpcd_caps.sink_count.bits.SINK_COUNT == 0) { @@ -1019,7 +1135,6 @@ static bool dc_link_detect_helper(struct dc_link *link, if (pre_connection_type == dc_connection_mst_branch && link->type != dc_connection_mst_branch) dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); - break; } @@ -1108,13 +1223,6 @@ static bool dc_link_detect_helper(struct dc_link *link, #if defined(CONFIG_DRM_AMD_DC_HDCP) query_hdcp_capability(sink->sink_signal, link); #endif - - // verify link cap for SST non-seamless boot - if (!perform_dp_seamless_boot) - dp_verify_link_cap_with_retries(link, - &link_res, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); } else { // If edid is the same, then discard new sink and revert back to original sink if (same_edid) { @@ -1174,27 +1282,6 @@ static bool dc_link_detect_helper(struct dc_link *link, } } else { /* From Connected-to-Disconnected. */ - if (link->type == dc_connection_mst_branch) { - LINK_INFO("link=%d, mst branch is now Disconnected\n", - link->link_index); - - /* Disable work around which keeps DSC on for tunneled MST on certain USB4 docks. 
*/ - if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) - link->wa_flags.dpia_mst_dsc_always_on = false; - - dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); - - link->mst_stream_alloc_table.stream_count = 0; - memset(link->mst_stream_alloc_table.stream_allocations, - 0, - sizeof(link->mst_stream_alloc_table.stream_allocations)); - } - -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) - reset_dp_hpo_stream_encoders_for_link(link); -#endif - link->type = dc_connection_none; sink_caps.signal = SIGNAL_TYPE_NONE; /* When we unplug a passive DP-HDMI dongle connection, dongle_max_pix_clk @@ -1219,33 +1306,26 @@ static bool dc_link_detect_helper(struct dc_link *link, bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) { - const struct dc *dc = link->dc; - bool ret; - bool can_apply_seamless_boot = false; - int i; - - for (i = 0; i < dc->current_state->stream_count; i++) { - if (dc->current_state->streams[i]->apply_seamless_boot_optimization) { - can_apply_seamless_boot = true; - break; - } - } + bool is_local_sink_detect_success; + bool is_remote_sink_detect_required = false; + enum dc_connection_type pre_link_type = link->type; -#if defined(CONFIG_DRM_AMD_DC_DCN) - dc_z10_restore(dc); -#endif + is_local_sink_detect_success = detect_link_and_local_sink(link, reason); - /* get out of low power state */ - if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT) - clk_mgr_exit_optimized_pwr_state(dc, dc->clk_mgr); + if (is_local_sink_detect_success && link->local_sink) + verify_link_capability(link, link->local_sink, reason); - ret = dc_link_detect_helper(link, reason); + if (is_local_sink_detect_success && link->local_sink && + dc_is_dp_signal(link->local_sink->sink_signal) && + link->dpcd_caps.is_mst_capable) + is_remote_sink_detect_required = discover_dp_mst_topology(link, reason); - /* Go back to power optimized state */ - if (!can_apply_seamless_boot && reason != DETECT_REASON_BOOT) - clk_mgr_optimize_pwr_state(dc, dc->clk_mgr); + if (is_local_sink_detect_success && + pre_link_type == dc_connection_mst_branch && + link->type != dc_connection_mst_branch) + reset_cur_dp_mst_topology(link); - return ret; + return is_local_sink_detect_success && !is_remote_sink_detect_required; } bool dc_link_get_hpd_state(struct dc_link *dc_link) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 6f6929435885..8dc03c41fabf 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -104,9 +104,6 @@ static bool decide_fallback_link_setting( struct dc_link_settings initial_link_settings, struct dc_link_settings *current_link_setting, enum link_training_result training_result); -static struct dc_link_settings get_common_supported_link_settings( - struct dc_link_settings link_setting_a, - struct dc_link_settings link_setting_b); static void maximize_lane_settings(const struct link_training_settings *lt_settings, struct dc_lane_settings lane_settings[LANE_COUNT_DP_MAX]); static void override_lane_settings(const struct link_training_settings *lt_settings, @@ -3186,13 +3183,11 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_ return false; } -static struct dc_link_settings get_max_link_cap(struct dc_link *link, - const struct link_resource *link_res) + +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) { struct dc_link_settings max_link_cap = {0}; -#if 
defined(CONFIG_DRM_AMD_DC_DCN) enum dc_link_rate lttpr_max_link_rate; -#endif struct link_encoder *link_enc = NULL; /* Links supporting dynamically assigned link encoder will be assigned next @@ -3210,13 +3205,6 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link, /* get max link encoder capability */ if (link_enc) link_enc->funcs->get_max_link_cap(link_enc, &max_link_cap); -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (max_link_cap.link_rate >= LINK_RATE_UHBR10) { - if (!link_res->hpo_dp_link_enc || - link->dc->debug.disable_uhbr) - max_link_cap.link_rate = LINK_RATE_HIGH3; - } -#endif /* Lower link settings based on sink's link cap */ if (link->reported_link_cap.lane_count < max_link_cap.lane_count) @@ -3236,22 +3224,21 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link, if (link->lttpr_mode != LTTPR_MODE_NON_LTTPR) { if (link->dpcd_caps.lttpr_caps.max_lane_count < max_link_cap.lane_count) max_link_cap.lane_count = link->dpcd_caps.lttpr_caps.max_lane_count; - -#if defined(CONFIG_DRM_AMD_DC_DCN) lttpr_max_link_rate = get_lttpr_max_link_rate(link); if (lttpr_max_link_rate < max_link_cap.link_rate) max_link_cap.link_rate = lttpr_max_link_rate; -#else - if (link->dpcd_caps.lttpr_caps.max_link_rate < max_link_cap.link_rate) - max_link_cap.link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; -#endif DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n", __func__, max_link_cap.lane_count, max_link_cap.link_rate); } + + if (dp_get_link_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING && + link->dc->debug.disable_uhbr) + max_link_cap.link_rate = LINK_RATE_HIGH3; + return max_link_cap; } @@ -3376,10 +3363,9 @@ bool dp_verify_link_cap( struct dc_link_settings *known_limit_link_setting, int *fail_count) { - struct dc_link_settings max_link_cap = {0}; struct dc_link_settings cur_link_setting = {0}; struct dc_link_settings *cur = &cur_link_setting; - struct dc_link_settings initial_link_settings = {0}; + struct dc_link_settings initial_link_settings = *known_limit_link_setting; bool success; bool skip_link_training; bool skip_video_pattern; @@ -3387,28 +3373,6 @@ bool dp_verify_link_cap( enum link_training_result status; union hpd_irq_data irq_data; - /* link training starts with the maximum common settings - * supported by both sink and ASIC. - */ - max_link_cap = get_max_link_cap(link, link_res); - initial_link_settings = get_common_supported_link_settings( - *known_limit_link_setting, - max_link_cap); - - /* Accept reported capabilities if link supports flexible encoder mapping or encoder already in use. */ - if (link->dc->debug.skip_detection_link_training || - link->is_dig_mapping_flexible) { - /* TODO - should we check link encoder's max link caps here? - * How do we know which link encoder to check from? 
- */ - link->verified_link_cap = *known_limit_link_setting; - return true; - } else if (link->link_enc && link->dc->res_pool->funcs->link_encs_assign && - !link_enc_cfg_is_link_enc_avail(link->ctx->dc, link->link_enc->preferred_engine, link)) { - link->verified_link_cap = initial_link_settings; - return true; - } - memset(&irq_data, 0, sizeof(irq_data)); success = false; skip_link_training = false; @@ -3420,10 +3384,6 @@ bool dp_verify_link_cap( core_link_write_dpcd(link, DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT, &grant, sizeof(grant)); } -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) - reset_dp_hpo_stream_encoders_for_link(link); -#endif /* TODO implement override and monitor patch later */ /* try to train the link from high to low to @@ -3540,79 +3500,32 @@ bool dp_verify_link_cap_with_retries( return success; } -bool dp_verify_mst_link_cap( - struct dc_link *link, const struct link_resource *link_res) -{ - struct dc_link_settings max_link_cap = {0}; - - if (dp_get_link_encoding_format(&link->reported_link_cap) == - DP_8b_10b_ENCODING) { - max_link_cap = get_max_link_cap(link, link_res); - link->verified_link_cap = get_common_supported_link_settings( - link->reported_link_cap, - max_link_cap); - } -#if defined(CONFIG_DRM_AMD_DC_DCN) - else if (dp_get_link_encoding_format(&link->reported_link_cap) == - DP_128b_132b_ENCODING) { - dp_verify_link_cap_with_retries(link, - link_res, - &link->reported_link_cap, - LINK_TRAINING_MAX_VERIFY_RETRY); - } -#endif - return true; -} - -static struct dc_link_settings get_common_supported_link_settings( - struct dc_link_settings link_setting_a, - struct dc_link_settings link_setting_b) +/* in DP compliance test, DPR-120 may have + * a random value in its MAX_LINK_BW dpcd field. + * We map it to the maximum supported link rate that + * is smaller than MAX_LINK_BW in this case. + */ +static enum dc_link_rate get_link_rate_from_max_link_bw( + uint8_t max_link_bw) { - struct dc_link_settings link_settings = {0}; + enum dc_link_rate link_rate; - link_settings.lane_count = - (link_setting_a.lane_count <= - link_setting_b.lane_count) ? - link_setting_a.lane_count : - link_setting_b.lane_count; - link_settings.link_rate = - (link_setting_a.link_rate <= - link_setting_b.link_rate) ? - link_setting_a.link_rate : - link_setting_b.link_rate; - link_settings.link_spread = LINK_SPREAD_DISABLED; - - /* in DP compliance test, DPR-120 may have - * a random value in its MAX_LINK_BW dpcd field. - * We map it to the maximum supported link rate that - * is smaller than MAX_LINK_BW in this case. 
- */ -#if defined(CONFIG_DRM_AMD_DC_DCN) - if (link_settings.link_rate > LINK_RATE_UHBR20) { - link_settings.link_rate = LINK_RATE_UHBR20; - } else if (link_settings.link_rate < LINK_RATE_UHBR20 && - link_settings.link_rate > LINK_RATE_UHBR13_5) { - link_settings.link_rate = LINK_RATE_UHBR13_5; - } else if (link_settings.link_rate < LINK_RATE_UHBR10 && - link_settings.link_rate > LINK_RATE_HIGH3) { -#else - if (link_settings.link_rate > LINK_RATE_HIGH3) { -#endif - link_settings.link_rate = LINK_RATE_HIGH3; - } else if (link_settings.link_rate < LINK_RATE_HIGH3 - && link_settings.link_rate > LINK_RATE_HIGH2) { - link_settings.link_rate = LINK_RATE_HIGH2; - } else if (link_settings.link_rate < LINK_RATE_HIGH2 - && link_settings.link_rate > LINK_RATE_HIGH) { - link_settings.link_rate = LINK_RATE_HIGH; - } else if (link_settings.link_rate < LINK_RATE_HIGH - && link_settings.link_rate > LINK_RATE_LOW) { - link_settings.link_rate = LINK_RATE_LOW; - } else if (link_settings.link_rate < LINK_RATE_LOW) { - link_settings.link_rate = LINK_RATE_UNKNOWN; + if (max_link_bw >= LINK_RATE_HIGH3) { + link_rate = LINK_RATE_HIGH3; + } else if (max_link_bw < LINK_RATE_HIGH3 + && max_link_bw >= LINK_RATE_HIGH2) { + link_rate = LINK_RATE_HIGH2; + } else if (max_link_bw < LINK_RATE_HIGH2 + && max_link_bw >= LINK_RATE_HIGH) { + link_rate = LINK_RATE_HIGH; + } else if (max_link_bw < LINK_RATE_HIGH + && max_link_bw >= LINK_RATE_LOW) { + link_rate = LINK_RATE_LOW; + } else { + link_rate = LINK_RATE_UNKNOWN; } - return link_settings; + return link_rate; } static inline bool reached_minimum_lane_count(enum dc_lane_count lane_count) @@ -5516,6 +5429,9 @@ static bool retrieve_link_cap(struct dc_link *link) read_dp_device_vendor_id(link); + /* TODO - decouple raw mst capability from policy decision */ + link->dpcd_caps.is_mst_capable = is_mst_supported(link); + get_active_converter_info(ds_port.byte, link); dp_wa_power_up_0010FA(link, dpcd_data, sizeof(dpcd_data)); @@ -5534,8 +5450,8 @@ static bool retrieve_link_cap(struct dc_link *link) link->reported_link_cap.lane_count = link->dpcd_caps.max_ln_count.bits.MAX_LANE_COUNT; - link->reported_link_cap.link_rate = dpcd_data[ - DP_MAX_LINK_RATE - DP_DPCD_REV]; + link->reported_link_cap.link_rate = get_link_rate_from_max_link_bw( + dpcd_data[DP_MAX_LINK_RATE - DP_DPCD_REV]); link->reported_link_cap.link_spread = link->dpcd_caps.max_down_spread.bits.MAX_DOWN_SPREAD ? LINK_SPREAD_05_DOWNSPREAD_30KHZ : LINK_SPREAD_DISABLED; @@ -5759,14 +5675,6 @@ bool dp_overwrite_extended_receiver_cap(struct dc_link *link) bool detect_dp_sink_caps(struct dc_link *link) { return retrieve_link_cap(link); - - /* dc init_hw has power encoder using default - * signal for connector. For native DP, no - * need to power up encoder again. If not native - * DP, hw_init may need check signal or power up - * encoder here. 
- */ - /* TODO save sink caps in link->sink */ } static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in_khz) @@ -5844,8 +5752,6 @@ void detect_edp_sink_caps(struct dc_link *link) } } } - link->verified_link_cap = link->reported_link_cap; - core_link_read_dpcd(link, DP_EDP_BACKLIGHT_ADJUSTMENT_CAP, &backlight_adj_cap, sizeof(backlight_adj_cap)); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 45d03d3a95c3..e2409c14df34 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -897,21 +897,5 @@ void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable) } } -void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link) -{ - const struct dc *dc = link->dc; - struct dc_state *state = dc->current_state; - uint8_t i; - - for (i = 0; i < MAX_PIPES; i++) { - if (state->res_ctx.pipe_ctx[i].stream_res.hpo_dp_stream_enc && - state->res_ctx.pipe_ctx[i].stream && - state->res_ctx.pipe_ctx[i].stream->link == link && - !state->res_ctx.pipe_ctx[i].stream->dpms_off) { - setup_dp_hpo_stream(&state->res_ctx.pipe_ctx[i], false); - } - } -} - #undef DC_LOGGER #endif diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 235e33f73913..5ba6abdbd3aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -1212,6 +1212,7 @@ struct dpcd_caps { bool is_branch_dev; /* Dongle's downstream count. */ union sink_count sink_count; + bool is_mst_capable; /* If dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER, indicates 'Frame Sequential-to-lllFrame Pack' conversion capability.*/ struct dc_dongle_caps dongle_caps; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index cd52813a8432..b18c8198f66d 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -54,6 +54,8 @@ enum { PEAK_FACTOR_X1000 = 1006, }; +struct dc_link_settings dp_get_max_link_cap(struct dc_link *link); + bool dp_verify_link_cap( struct dc_link *link, const struct link_resource *link_res, @@ -66,10 +68,6 @@ bool dp_verify_link_cap_with_retries( struct dc_link_settings *known_limit_link_setting, int attempts); -bool dp_verify_mst_link_cap( - struct dc_link *link, - const struct link_resource *link_res); - bool dp_validate_mode_timing( struct dc_link *link, const struct dc_crtc_timing *timing); @@ -222,7 +220,6 @@ void disable_dp_hpo_output(struct dc_link *link, enum signal_type signal); void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable); bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx); -void reset_dp_hpo_stream_encoders_for_link(struct dc_link *link); bool dp_retrieve_lttpr_cap(struct dc_link *link); void edp_panel_backlight_power_on(struct dc_link *link); -- cgit From 31d64b8219e057705d7a9debffcf6abbccb7781e Mon Sep 17 00:00:00 2001 From: Jimmy Kizito Date: Wed, 19 Jan 2022 16:24:37 +0800 Subject: drm/amd/display: Add work around to enforce TBT3 compatibility. [Why] According to the USB4 specification, FEC and DSC should be disabled when a USB4 DPIA operates in TBT3 compatibility mode. [How] Upon detecting that a USB4 DPIA is connected to a device that is known to operate in TBT3 mode, disable FEC and DSC support if they have been reported by the TBT3 device. 
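For reference, a minimal sketch of the work around described above. The helper name is illustrative only; the dpcd_caps fields and wa_flags bit match the hunks that follow, and the dpia_debug override bit is omitted for brevity:

	/* Illustrative sketch, not the exact driver code. */
	static void sketch_apply_tbt3_compat_wa(struct dc_link *link)
	{
		bool tbt3_branch =
			link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA &&
			link->dpcd_caps.is_branch_dev &&
			link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 &&
			link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10;
		bool reports_fec_or_dsc =
			link->dpcd_caps.fec_cap.bits.FEC_CAPABLE ||
			link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT;

		/* A TBT3 device is expected to report no FEC or DSC support to a
		 * USB4 DPIA; clear the capabilities as a work around if it does.
		 */
		if (tbt3_branch && reports_fec_or_dsc) {
			link->wa_flags.dpia_forced_tbt3_mode = true;
			memset(&link->dpcd_caps.dsc_caps, 0, sizeof(link->dpcd_caps.dsc_caps));
			memset(&link->dpcd_caps.fec_cap, 0, sizeof(link->dpcd_caps.fec_cap));
		} else {
			link->wa_flags.dpia_forced_tbt3_mode = false;
		}
	}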
Tested-by: Daniel Wheeler Reviewed-by: Meenakshikumar Somasundaram Acked-by: Rodrigo Siqueira Signed-off-by: Jimmy Kizito Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 1 + drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 22 ++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc.h | 13 +++++++------ drivers/gpu/drm/amd/display/dc/dc_link.h | 2 ++ .../drm/amd/display/include/ddc_service_types.h | 2 ++ 5 files changed, 34 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index af2d7f3abfca..81402c1d68ff 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -806,6 +806,7 @@ static void apply_dpia_mst_dsc_always_on_wa(struct dc_link *link) if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && link->type == dc_connection_mst_branch && link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_20 && link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT && !link->dc->debug.dpia_debug.bits.disable_mst_dsc_work_around) link->wa_flags.dpia_mst_dsc_always_on = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 8dc03c41fabf..5122c1de4492 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -5550,6 +5550,28 @@ static bool retrieve_link_cap(struct dc_link *link) link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); #endif + + /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode + * only if required. + */ + if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && +#if defined(CONFIG_DRM_AMD_DC_DCN3_1) + !link->dc->debug.dpia_debug.bits.disable_force_tbt3_work_around && +#endif + link->dpcd_caps.is_branch_dev && + link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && + link->dpcd_caps.branch_hw_revision == DP_BRANCH_HW_REV_10 && + (link->dpcd_caps.fec_cap.bits.FEC_CAPABLE || + link->dpcd_caps.dsc_caps.dsc_basic_caps.fields.dsc_support.DSC_SUPPORT)) { + /* A TBT3 device is expected to report no support for FEC or DSC to a USB4 DPIA. + * Clear FEC and DSC capabilities as a work around if that is not the case. 
+ */ + link->wa_flags.dpia_forced_tbt3_mode = true; + memset(&link->dpcd_caps.dsc_caps, '\0', sizeof(link->dpcd_caps.dsc_caps)); + memset(&link->dpcd_caps.fec_cap, '\0', sizeof(link->dpcd_caps.fec_cap)); + DC_LOG_DSC("Clear DSC SUPPORT for USB4 link(%d) in TBT3 compatibility mode", link->link_index); + } else + link->wa_flags.dpia_forced_tbt3_mode = false; } if (!dpcd_read_sink_ext_caps(link)) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 5ba6abdbd3aa..14abba495f4a 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -519,12 +519,13 @@ union root_clock_optimization_options { union dpia_debug_options { struct { - uint32_t disable_dpia:1; - uint32_t force_non_lttpr:1; - uint32_t extend_aux_rd_interval:1; - uint32_t disable_mst_dsc_work_around:1; - uint32_t hpd_delay_in_ms:12; - uint32_t reserved:16; + uint32_t disable_dpia:1; /* bit 0 */ + uint32_t force_non_lttpr:1; /* bit 1 */ + uint32_t extend_aux_rd_interval:1; /* bit 2 */ + uint32_t disable_mst_dsc_work_around:1; /* bit 3 */ + uint32_t hpd_delay_in_ms:12; /* bits 4-15 */ + uint32_t disable_force_tbt3_work_around:1; /* bit 16 */ + uint32_t reserved:15; } bits; uint32_t raw; }; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index c0e37ad0e26c..704323654468 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -197,6 +197,8 @@ struct dc_link { bool dp_mot_reset_segment; /* Some USB4 docks do not handle turning off MST DSC once it has been enabled. */ bool dpia_mst_dsc_always_on; + /* Forced DPIA into TBT3 compatibility mode. */ + bool dpia_forced_tbt3_mode; } wa_flags; struct link_mst_stream_allocation_table mst_stream_alloc_table; diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index a2b80514d83e..57f92bd2814f 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -34,6 +34,8 @@ #define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 #define DP_BRANCH_DEVICE_ID_00E04C 0x00E04C #define DP_BRANCH_DEVICE_ID_006037 0x006037 +#define DP_BRANCH_HW_REV_10 0x10 +#define DP_BRANCH_HW_REV_20 0x20 #define DP_DEVICE_ID_38EC11 0x38EC11 enum ddc_result { -- cgit From d9eb8fea6862e63421f7b9c93e32bef348488c41 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 19 Jan 2022 16:24:38 +0800 Subject: drm/amd/display: Drop DCN for DP2.x logic [Why & How] DCN guard is not necessary for DP2.x relevant logic. Drop them. 
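The change is mechanical: DP2.x call sites lose their CONFIG_DRM_AMD_DC_DCN guard, and where a guard carried an #else fallback, the DCN branch is kept and the fallback deleted. A representative example, taken from the update_psp_stream_config() hunk in this patch:

  Before (DP2.x path only built for DCN):

	#if defined(CONFIG_DRM_AMD_DC_DCN)
		if (is_dp_128b_132b_signal(pipe_ctx))
			config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;
	#endif

  After (guard dropped, DP2.x path always compiled in):

		if (is_dp_128b_132b_signal(pipe_ctx))
			config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst;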
v2: squash in fix for misplaced #endif (Alex) Tested-by: Daniel Wheeler Reviewed-by: Rodrigo Siqueira Reviewed-by: Jerry Zuo Acked-by: Rodrigo Siqueira Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 6 +- drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c | 2 - .../gpu/drm/amd/display/dc/bios/command_table2.c | 4 - drivers/gpu/drm/amd/display/dc/core/dc.c | 4 - drivers/gpu/drm/amd/display/dc/core/dc_link.c | 292 ++++----------------- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 249 ++---------------- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 78 +----- drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 14 - drivers/gpu/drm/amd/display/dc/dc.h | 13 +- drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 43 +-- drivers/gpu/drm/amd/display/dc/dc_link.h | 6 - drivers/gpu/drm/amd/display/dc/dc_stream.h | 6 - drivers/gpu/drm/amd/display/dc/dc_types.h | 6 - .../amd/display/dc/dce110/dce110_hw_sequencer.c | 54 +--- drivers/gpu/drm/amd/display/dc/inc/core_types.h | 12 - drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h | 4 +- drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h | 2 - .../gpu/drm/amd/display/dc/inc/hw/link_encoder.h | 2 - .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 2 - .../drm/amd/display/dc/inc/hw/timing_generator.h | 2 - .../drm/amd/display/dc/inc/hw_sequencer_private.h | 2 - drivers/gpu/drm/amd/display/dc/inc/resource.h | 6 - .../drm/amd/display/include/bios_parser_types.h | 2 - drivers/gpu/drm/amd/display/include/dpcd_defs.h | 4 - .../gpu/drm/amd/display/include/grph_object_defs.h | 2 - .../gpu/drm/amd/display/include/grph_object_id.h | 2 - .../drm/amd/display/include/link_service_types.h | 12 - 27 files changed, 83 insertions(+), 748 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 29f07c26d080..b32a68a3586a 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -797,16 +797,12 @@ void dm_helpers_mst_enable_stream_features(const struct dc_stream_state *stream) sizeof(new_downspread)); } -#if defined(CONFIG_DRM_AMD_DC_DCN) void dm_set_phyd32clk(struct dc_context *ctx, int freq_khz) { - // FPGA programming for this clock in diags framework that - // needs to go through dm layer, therefore leave dummy interace here + // TODO } - void dm_helpers_enable_periodic_detection(struct dc_context *ctx, bool enable) { /* TODO: add peridic detection implementation */ } -#endif diff --git a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c index 1e385d55e7fb..23a3b640f0ee 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/bios_parser2.c @@ -1692,7 +1692,6 @@ static enum bp_result bios_parser_get_encoder_cap_info( ATOM_ENCODER_CAP_RECORD_HBR3_EN) ? 1 : 0; info->HDMI_6GB_EN = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_HDMI6Gbps_EN) ? 1 : 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) info->IS_DP2_CAPABLE = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_DP2) ? 1 : 0; info->DP_UHBR10_EN = (record->encodercaps & @@ -1701,7 +1700,6 @@ static enum bp_result bios_parser_get_encoder_cap_info( ATOM_ENCODER_CAP_RECORD_UHBR13_5_EN) ? 1 : 0; info->DP_UHBR20_EN = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_UHBR20_EN) ? 1 : 0; -#endif info->DP_IS_USB_C = (record->encodercaps & ATOM_ENCODER_CAP_RECORD_USB_C_TYPE) ? 
1 : 0; diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c index 9afa5eb2e6d3..f52f7ff7ead4 100644 --- a/drivers/gpu/drm/amd/display/dc/bios/command_table2.c +++ b/drivers/gpu/drm/amd/display/dc/bios/command_table2.c @@ -338,12 +338,10 @@ static enum bp_result transmitter_control_v1_7( const struct command_table_helper *cmd = bp->cmd_helper; struct dmub_dig_transmitter_control_data_v1_7 dig_v1_7 = {0}; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_0; if (dc_is_dp_signal(cntl->signal)) hpo_instance = (uint8_t)cntl->hpo_engine_id - ENGINE_ID_HPO_DP_0; -#endif dig_v1_7.phyid = cmd->phy_id_to_atom(cntl->transmitter); dig_v1_7.action = (uint8_t)cntl->action; @@ -358,9 +356,7 @@ static enum bp_result transmitter_control_v1_7( dig_v1_7.hpdsel = cmd->hpd_sel_to_atom(cntl->hpd_sel); dig_v1_7.digfe_sel = cmd->dig_encoder_sel_to_atom(cntl->engine_id); dig_v1_7.connobj_id = (uint8_t)cntl->connector_obj_id.id; -#if defined(CONFIG_DRM_AMD_DC_DCN) dig_v1_7.HPO_instance = hpo_instance; -#endif dig_v1_7.symclk_units.symclk_10khz = cntl->pixel_clock/10; if (cntl->action == TRANSMITTER_CONTROL_ENABLE || diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c index 91c4874473d6..0b8811ef3717 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc.c @@ -2370,10 +2370,8 @@ static enum surface_update_type check_update_surfaces_for_stream( if (stream_update->dsc_config) su_flags->bits.dsc_changed = 1; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (stream_update->mst_bw_update) su_flags->bits.mst_bw = 1; -#endif if (su_flags->raw != 0) overall_type = UPDATE_TYPE_FULL; @@ -2752,14 +2750,12 @@ static void commit_planes_do_stream_update(struct dc *dc, if (stream_update->dsc_config) dp_update_dsc_config(pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (stream_update->mst_bw_update) { if (stream_update->mst_bw_update->is_increase) dc_link_increase_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); else dc_link_reduce_mst_payload(pipe_ctx, stream_update->mst_bw_update->mst_stream_bw); } -#endif if (stream_update->pending_test_pattern) { dc_link_dp_set_test_pattern(stream->link, diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 81402c1d68ff..84afe0d1a791 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -924,14 +924,12 @@ static void verify_link_capability_destructive(struct dc_link *link, dp_get_max_link_cap(link); set_all_streams_dpms_off_for_link(link); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&known_limit_link_setting) == DP_128b_132b_ENCODING) link_res.hpo_dp_link_enc = resource_get_hpo_dp_link_enc_for_det_lt( &link->dc->current_state->res_ctx, link->dc->res_pool, link); -#endif dp_verify_link_cap_with_retries( link, &link_res, &known_limit_link_setting, LINK_TRAINING_MAX_VERIFY_RETRY); @@ -1656,9 +1654,7 @@ static bool dc_link_construct_legacy(struct dc_link *link, } DC_LOG_DC("BIOS object table - DP_IS_USB_C: %d", link->link_enc->features.flags.bits.DP_IS_USB_C); -#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOG_DC("BIOS object table - IS_DP2_CAPABLE: %d", link->link_enc->features.flags.bits.IS_DP2_CAPABLE); -#endif /* Update link encoder tracking variables. These are used for the dynamic * assignment of link encoders to streams. 
@@ -1941,7 +1937,6 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) do_fallback = true; -#if defined(CONFIG_DRM_AMD_DC_DCN) /* * Temporary w/a to get DP2.0 link rates to work with SST. * TODO DP2.0 - Workaround: Remove w/a if and when the issue is resolved. @@ -1951,7 +1946,6 @@ static enum dc_status enable_link_dp(struct dc_state *state, link->dc->debug.set_mst_en_for_sst) { dp_enable_mst_on_sink(link, true); } -#endif if (pipe_ctx->stream->signal == SIGNAL_TYPE_EDP) { /*in case it is not on*/ @@ -1959,7 +1953,6 @@ static enum dc_status enable_link_dp(struct dc_state *state, link->dc->hwss.edp_wait_for_hpd_ready(link, true); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_128b_132b_ENCODING) { /* TODO - DP2.0 HW: calculate 32 symbol clock for HPO encoder */ } else { @@ -1969,13 +1962,6 @@ static enum dc_status enable_link_dp(struct dc_state *state, state->clk_mgr->funcs->update_clocks(state->clk_mgr, state, false); } -#else - pipe_ctx->stream_res.pix_clk_params.requested_sym_clk = - link_settings.link_rate * LINK_RATE_REF_FREQ_IN_KHZ; - if (state->clk_mgr && !apply_seamless_boot_optimization) - state->clk_mgr->funcs->update_clocks(state->clk_mgr, - state, false); -#endif // during mode switch we do DP_SET_POWER off then on, and OUI is lost dpcd_set_source_specific_data(link); @@ -2004,12 +1990,8 @@ static enum dc_status enable_link_dp(struct dc_state *state, else fec_enable = true; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) dp_set_fec_enable(link, fec_enable); -#else - dp_set_fec_enable(link, fec_enable); -#endif // during mode set we do DP_SET_POWER off then on, aux writes are lost if (link->dpcd_sink_ext_caps.bits.oled == 1 || @@ -2565,9 +2547,7 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_ if (dc_is_dp_signal(signal)) { /* SST DP, eDP */ -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_link_settings link_settings = link->cur_link_settings; -#endif if (dc_is_dp_sst_signal(signal)) dp_disable_link_phy(link, link_res, signal); else @@ -2575,15 +2555,10 @@ static void disable_link(struct dc_link *link, const struct link_resource *link_ if (dc_is_dp_sst_signal(signal) || link->mst_stream_alloc_table.stream_count == 0) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) { dp_set_fec_enable(link, false); dp_set_fec_ready(link, link_res, false); } -#else - dp_set_fec_enable(link, false); - dp_set_fec_ready(link, link_res, false); -#endif } } else { if (signal != SIGNAL_TYPE_VIRTUAL) @@ -2766,72 +2741,63 @@ static bool dp_active_dongle_validate_timing( break; } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dpcd_caps->dongle_type == DISPLAY_DONGLE_DP_HDMI_CONVERTER && dongle_caps->extendedCapValid == true) { -#else - if (dpcd_caps->dongle_type != DISPLAY_DONGLE_DP_HDMI_CONVERTER || - dongle_caps->extendedCapValid == false) - return true; -#endif - - /* Check Pixel Encoding */ - switch (timing->pixel_encoding) { - case PIXEL_ENCODING_RGB: - case PIXEL_ENCODING_YCBCR444: - break; - case PIXEL_ENCODING_YCBCR422: - if (!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) - return false; - break; - case PIXEL_ENCODING_YCBCR420: - if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) + /* Check Pixel Encoding */ + switch (timing->pixel_encoding) { + case PIXEL_ENCODING_RGB: + case PIXEL_ENCODING_YCBCR444: + break; + case PIXEL_ENCODING_YCBCR422: + if 
(!dongle_caps->is_dp_hdmi_ycbcr422_pass_through) + return false; + break; + case PIXEL_ENCODING_YCBCR420: + if (!dongle_caps->is_dp_hdmi_ycbcr420_pass_through) + return false; + break; + default: + /* Invalid Pixel Encoding*/ return false; - break; - default: - /* Invalid Pixel Encoding*/ - return false; - } + } - switch (timing->display_color_depth) { - case COLOR_DEPTH_666: - case COLOR_DEPTH_888: - /*888 and 666 should always be supported*/ - break; - case COLOR_DEPTH_101010: - if (dongle_caps->dp_hdmi_max_bpc < 10) - return false; - break; - case COLOR_DEPTH_121212: - if (dongle_caps->dp_hdmi_max_bpc < 12) + switch (timing->display_color_depth) { + case COLOR_DEPTH_666: + case COLOR_DEPTH_888: + /*888 and 666 should always be supported*/ + break; + case COLOR_DEPTH_101010: + if (dongle_caps->dp_hdmi_max_bpc < 10) + return false; + break; + case COLOR_DEPTH_121212: + if (dongle_caps->dp_hdmi_max_bpc < 12) + return false; + break; + case COLOR_DEPTH_141414: + case COLOR_DEPTH_161616: + default: + /* These color depths are currently not supported */ return false; - break; - case COLOR_DEPTH_141414: - case COLOR_DEPTH_161616: - default: - /* These color depths are currently not supported */ - return false; - } + } #if defined(CONFIG_DRM_AMD_DC_DCN) - if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter - struct dc_crtc_timing outputTiming = *timing; + if (dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps > 0) { // DP to HDMI FRL converter + struct dc_crtc_timing outputTiming = *timing; - if (timing->flags.DSC && !timing->dsc_cfg.is_frl) - /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ - outputTiming.flags.DSC = 0; - if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) - return false; - } else { // DP to HDMI TMDS converter + if (timing->flags.DSC && !timing->dsc_cfg.is_frl) + /* DP input has DSC, HDMI FRL output doesn't have DSC, remove DSC from output timing */ + outputTiming.flags.DSC = 0; + if (dc_bandwidth_in_kbps_from_timing(&outputTiming) > dongle_caps->dp_hdmi_frl_max_link_bw_in_kbps) + return false; + } else { // DP to HDMI TMDS converter + if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) + return false; + } +#else if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) return false; - } -#else - if (get_timing_pixel_clock_100hz(timing) > (dongle_caps->dp_hdmi_max_pixel_clk_in_khz * 10)) - return false; #endif - -#if defined(CONFIG_DRM_AMD_DC_DCN) } if (dpcd_caps->channel_coding_cap.bits.DP_128b_132b_SUPPORTED == 0 && @@ -2912,7 +2878,6 @@ static bool dp_active_dongle_validate_timing( return false; } } -#endif return true; } @@ -3385,9 +3350,7 @@ static struct fixed31_32 get_pbn_from_timing(struct pipe_ctx *pipe_ctx) static void update_mst_stream_alloc_table( struct dc_link *link, struct stream_encoder *stream_enc, -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_stream_encoder *hpo_dp_stream_enc, // TODO: Rename stream_enc to dio_stream_enc? 
-#endif const struct dp_mst_stream_allocation_table *proposed_table) { struct link_mst_stream_allocation work_table[MAX_CONTROLLER_NUM] = { 0 }; @@ -3423,9 +3386,7 @@ static void update_mst_stream_alloc_table( work_table[i].slot_count = proposed_table->stream_allocations[i].slot_count; work_table[i].stream_enc = stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) work_table[i].hpo_dp_stream_enc = hpo_dp_stream_enc; -#endif } } @@ -3436,7 +3397,7 @@ static void update_mst_stream_alloc_table( link->mst_stream_alloc_table.stream_allocations[i] = work_table[i]; } -#if defined(CONFIG_DRM_AMD_DC_DCN) + static void dc_log_vcp_x_y(const struct dc_link *link, struct fixed31_32 avg_time_slots_per_mtp) { const uint32_t VCP_Y_PRECISION = 1000; @@ -3538,7 +3499,6 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, */ return DC_OK; } -#endif /* convert link_mst_stream_alloc_table to dm dp_mst_stream_alloc_table * because stream_encoder is not exposed to dm @@ -3549,10 +3509,8 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct dc_link *link = stream->link; struct link_encoder *link_encoder = NULL; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; -#endif struct dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; struct fixed31_32 pbn; @@ -3578,17 +3536,12 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) stream->ctx, stream, &proposed_table, - true)) { + true)) update_mst_stream_alloc_table( -#if defined(CONFIG_DRM_AMD_DC_DCN) link, pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc, &proposed_table); -#else - link, pipe_ctx->stream_res.stream_enc, &proposed_table); -#endif - } else DC_LOG_WARNING("Failed to update" "MST allocation table for" @@ -3601,7 +3554,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { -#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " @@ -3614,17 +3566,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); -#else - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); -#endif } ASSERT(proposed_table.stream_count > 0); @@ -3644,7 +3585,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) } /* program DP source TX for payload */ -#if defined(CONFIG_DRM_AMD_DC_DCN) switch (dp_get_link_encoding_format(&link->cur_link_settings)) { case DP_8b_10b_ENCODING: link_encoder->funcs->update_mst_stream_allocation_table( @@ -3660,11 +3600,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } -#else - link_encoder->funcs->update_mst_stream_allocation_table( - link_encoder, - &link->mst_stream_alloc_table); -#endif /* 
send down message */ ret = dm_helpers_dp_mst_poll_for_allocation_change_trigger( @@ -3687,7 +3622,6 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) pbn = get_pbn_from_timing(pipe_ctx); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); -#if defined(CONFIG_DRM_AMD_DC_DCN) switch (dp_get_link_encoding_format(&link->cur_link_settings)) { case DP_8b_10b_ENCODING: stream_encoder->funcs->set_throttled_vcp_size( @@ -3704,17 +3638,11 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } -#else - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, - avg_time_slots_per_mtp); -#endif return DC_OK; } -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw_in_kbps) { struct dc_stream_state *stream = pipe_ctx->stream; @@ -3871,7 +3799,6 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t return DC_OK; } -#endif static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) { @@ -3879,10 +3806,8 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) struct dc_link *link = stream->link; struct link_encoder *link_encoder = NULL; struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; -#endif struct dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); int i; @@ -3904,7 +3829,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) */ /* slot X.Y */ -#if defined(CONFIG_DRM_AMD_DC_DCN) switch (dp_get_link_encoding_format(&link->cur_link_settings)) { case DP_8b_10b_ENCODING: stream_encoder->funcs->set_throttled_vcp_size( @@ -3921,11 +3845,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } -#else - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, - avg_time_slots_per_mtp); -#endif /* TODO: which component is responsible for remove payload table? 
*/ if (mst_mode) { @@ -3935,16 +3854,11 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) &proposed_table, false)) { -#if defined(CONFIG_DRM_AMD_DC_DCN) update_mst_stream_alloc_table( link, pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.hpo_dp_stream_enc, &proposed_table); -#else - update_mst_stream_alloc_table( - link, pipe_ctx->stream_res.stream_enc, &proposed_table); -#endif } else { DC_LOG_WARNING("Failed to update" @@ -3960,7 +3874,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_count); for (i = 0; i < MAX_CONTROLLER_NUM; i++) { -#if defined(CONFIG_DRM_AMD_DC_DCN) DC_LOG_MST("stream_enc[%d]: %p " "stream[%d].hpo_dp_stream_enc: %p " "stream[%d].vcp_id: %d " @@ -3973,17 +3886,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) link->mst_stream_alloc_table.stream_allocations[i].vcp_id, i, link->mst_stream_alloc_table.stream_allocations[i].slot_count); -#else - DC_LOG_MST("stream_enc[%d]: %p " - "stream[%d].vcp_id: %d " - "stream[%d].slot_count: %d\n", - i, - (void *) link->mst_stream_alloc_table.stream_allocations[i].stream_enc, - i, - link->mst_stream_alloc_table.stream_allocations[i].vcp_id, - i, - link->mst_stream_alloc_table.stream_allocations[i].slot_count); -#endif } if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) { @@ -4000,7 +3902,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) status, mst_alloc_slots, prev_mst_slots_in_use); } -#if defined(CONFIG_DRM_AMD_DC_DCN) switch (dp_get_link_encoding_format(&link->cur_link_settings)) { case DP_8b_10b_ENCODING: link_encoder->funcs->update_mst_stream_allocation_table( @@ -4016,11 +3917,6 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) DC_LOG_ERROR("Failure: unknown encoding format\n"); return DC_ERROR_UNEXPECTED; } -#else - link_encoder->funcs->update_mst_stream_allocation_table( - link_encoder, - &link->mst_stream_alloc_table); -#endif if (mst_mode) { dm_helpers_dp_mst_poll_for_allocation_change_trigger( @@ -4068,21 +3964,18 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) /* stream encoder index */ config.stream_enc_idx = pipe_ctx->stream_res.stream_enc->id - ENGINE_ID_DIGA; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) config.stream_enc_idx = pipe_ctx->stream_res.hpo_dp_stream_enc->id - ENGINE_ID_HPO_DP_0; -#endif /* dig back end */ config.dig_be = pipe_ctx->stream->link->link_enc_hw_inst; /* link encoder index */ config.link_enc_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) config.link_enc_idx = pipe_ctx->link_res.hpo_dp_link_enc->inst; -#endif + /* dio output index */ config.dio_output_idx = link_enc->transmitter - TRANSMITTER_UNIPHY_A; @@ -4097,9 +3990,7 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) config.assr_enabled = (panel_mode == DP_PANEL_MODE_EDP) ? 1 : 0; config.mst_enabled = (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) ? 1 : 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) config.dp2_enabled = is_dp_128b_132b_signal(pipe_ctx) ? 1 : 0; -#endif config.usb4_enabled = (pipe_ctx->stream->link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA) ? 
1 : 0; config.dpms_off = dpms_off; @@ -4111,7 +4002,6 @@ static void update_psp_stream_config(struct pipe_ctx *pipe_ctx, bool dpms_off) } #endif -#if defined(CONFIG_DRM_AMD_DC_DCN) static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pipe_ctx *pipe_ctx) { struct dc *dc = pipe_ctx->stream->ctx->dc; @@ -4191,7 +4081,6 @@ static void fpga_dp_hpo_enable_link_and_stream(struct dc_state *state, struct pi dc->hwss.unblank_stream(pipe_ctx, &stream->link->cur_link_settings); } -#endif void core_link_enable_stream( struct dc_state *state, @@ -4202,13 +4091,12 @@ void core_link_enable_stream( struct dc_link *link = stream->sink->link; enum dc_status status; struct link_encoder *link_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum otg_out_mux_dest otg_out_dest = OUT_MUX_DIO; struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; if (is_dp_128b_132b_signal(pipe_ctx)) vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; -#endif + DC_LOGGER_INIT(pipe_ctx->stream->ctx->logger); if (!IS_DIAG_DC(dc->ctx->dce_environment) && @@ -4221,12 +4109,8 @@ void core_link_enable_stream( link_enc = stream->link->link_enc; ASSERT(link_enc); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (!dc_is_virtual_signal(pipe_ctx->stream->signal) && !is_dp_128b_132b_signal(pipe_ctx)) { -#else - if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) { -#endif if (link_enc) link_enc->funcs->setup( link_enc, @@ -4237,7 +4121,6 @@ void core_link_enable_stream( stream->timing.timing_3d_format != TIMING_3D_FORMAT_NONE); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->set_stream_attribute( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -4255,14 +4138,6 @@ void core_link_enable_stream( stream->use_vsc_sdp_for_colorimetry, stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); } -#else - pipe_ctx->stream_res.stream_enc->funcs->dp_set_stream_attribute( - pipe_ctx->stream_res.stream_enc, - &stream->timing, - stream->output_color_space, - stream->use_vsc_sdp_for_colorimetry, - stream->link->dpcd_caps.dprx_feature.bits.SST_SPLIT_SDP_CAP); -#endif if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DP_STREAM_ATTR); @@ -4276,10 +4151,8 @@ void core_link_enable_stream( pipe_ctx->stream->link->link_state_valid = true; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (pipe_ctx->stream_res.tg->funcs->set_out_mux) pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, otg_out_dest); -#endif if (dc_is_dvi_signal(pipe_ctx->stream->signal)) pipe_ctx->stream_res.stream_enc->funcs->dvi_set_stream_attribute( @@ -4299,11 +4172,9 @@ void core_link_enable_stream( pipe_ctx->stream->apply_edp_fast_boot_optimization = false; -#if defined(CONFIG_DRM_AMD_DC_DCN) // Enable VPG before building infoframe if (vpg && vpg->funcs->vpg_poweron) vpg->funcs->vpg_poweron(vpg); -#endif resource_build_info_frame(pipe_ctx); dc->hwss.update_info_frame(pipe_ctx); @@ -4389,12 +4260,8 @@ void core_link_enable_stream( * as a workaround for the incorrect value being applied * from transmitter control. 
*/ -#if defined(CONFIG_DRM_AMD_DC_DCN) if (!(dc_is_virtual_signal(pipe_ctx->stream->signal) || is_dp_128b_132b_signal(pipe_ctx))) -#else - if (!dc_is_virtual_signal(pipe_ctx->stream->signal)) -#endif if (link_enc) link_enc->funcs->setup( link_enc, @@ -4413,11 +4280,9 @@ void core_link_enable_stream( if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) dc_link_allocate_mst_payload(pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && is_dp_128b_132b_signal(pipe_ctx)) dc_link_update_sst_payload(pipe_ctx, true); -#endif dc->hwss.unblank_stream(pipe_ctx, &pipe_ctx->stream->link->cur_link_settings); @@ -4434,11 +4299,9 @@ void core_link_enable_stream( dc->hwss.enable_audio_stream(pipe_ctx); } else { // if (IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { fpga_dp_hpo_enable_link_and_stream(state, pipe_ctx); } -#endif if (dc_is_dp_signal(pipe_ctx->stream->signal) || dc_is_virtual_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, true); @@ -4455,12 +4318,10 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) struct dc *dc = pipe_ctx->stream->ctx->dc; struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->sink->link; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct vpg *vpg = pipe_ctx->stream_res.stream_enc->vpg; if (is_dp_128b_132b_signal(pipe_ctx)) vpg = pipe_ctx->stream_res.hpo_dp_stream_enc->vpg; -#endif if (!IS_DIAG_DC(dc->ctx->dce_environment) && dc_is_virtual_signal(pipe_ctx->stream->signal)) @@ -4480,11 +4341,9 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST) deallocate_mst_payload(pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && is_dp_128b_132b_signal(pipe_ctx)) dc_link_update_sst_payload(pipe_ctx, false); -#endif if (dc_is_hdmi_signal(pipe_ctx->stream->signal)) { struct ext_hdmi_settings settings = {0}; @@ -4511,7 +4370,6 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) } } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (pipe_ctx->stream->signal == SIGNAL_TYPE_DISPLAY_PORT && !is_dp_128b_132b_signal(pipe_ctx)) { @@ -4528,27 +4386,18 @@ void core_link_disable_stream(struct pipe_ctx *pipe_ctx) dc->hwss.disable_stream(pipe_ctx); disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); } -#else - disable_link(pipe_ctx->stream->link, &pipe_ctx->link_res, pipe_ctx->stream->signal); - - dc->hwss.disable_stream(pipe_ctx); -#endif if (pipe_ctx->stream->timing.flags.DSC) { if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_set_dsc_enable(pipe_ctx, false); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { if (pipe_ctx->stream_res.tg->funcs->set_out_mux) pipe_ctx->stream_res.tg->funcs->set_out_mux(pipe_ctx->stream_res.tg, OUT_MUX_DIO); } -#endif -#if defined(CONFIG_DRM_AMD_DC_DCN) if (vpg && vpg->funcs->vpg_powerdown) vpg->funcs->vpg_powerdown(vpg); -#endif } void core_link_set_avmute(struct pipe_ctx *pipe_ctx, bool enable) @@ -4687,11 +4536,9 @@ void dc_link_set_preferred_training_settings(struct dc *dc, if (link_setting != NULL) { link->preferred_link_setting = *link_setting; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(link_setting) == DP_128b_132b_ENCODING) /* TODO: add dc update for acquiring link res */ skip_immediate_retrain = true; -#endif } else { link->preferred_link_setting.lane_count = 
LANE_COUNT_UNKNOWN; link->preferred_link_setting.link_rate = LINK_RATE_UNKNOWN; @@ -4733,7 +4580,6 @@ uint32_t dc_link_bandwidth_kbps( const struct dc_link *link, const struct dc_link_settings *link_setting) { -#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t total_data_bw_efficiency_x10000 = 0; uint32_t link_rate_per_lane_kbps = 0; @@ -4764,40 +4610,6 @@ uint32_t dc_link_bandwidth_kbps( /* overall effective link bandwidth = link rate per lane * lane count * total data bandwidth efficiency */ return link_rate_per_lane_kbps * link_setting->lane_count / 10000 * total_data_bw_efficiency_x10000; -#else - uint32_t link_bw_kbps = - link_setting->link_rate * LINK_RATE_REF_FREQ_IN_KHZ; /* bytes per sec */ - - link_bw_kbps *= 8; /* 8 bits per byte*/ - link_bw_kbps *= link_setting->lane_count; - - if (dc_link_should_enable_fec(link)) { - /* Account for FEC overhead. - * We have to do it based on caps, - * and not based on FEC being set ready, - * because FEC is set ready too late in - * the process to correctly be picked up - * by mode enumeration. - * - * There's enough zeros at the end of 'kbps' - * that make the below operation 100% precise - * for our purposes. - * 'long long' makes it work even for HDMI 2.1 - * max bandwidth (and much, much bigger bandwidths - * than that, actually). - * - * NOTE: Reducing link BW by 3% may not be precise - * because it may be a stream BT that increases by 3%, and so - * 1/1.03 = 0.970873 factor should have been used instead, - * but the difference is minimal and is in a safe direction, - * which all works well around potential ambiguity of DP 1.4a spec. - */ - long long fec_link_bw_kbps = link_bw_kbps * 970LL; - link_bw_kbps = (uint32_t)(div64_s64(fec_link_bw_kbps, 1000LL)); - } - return link_bw_kbps; - -#endif } const struct dc_link_settings *dc_link_get_link_cap( @@ -4953,7 +4765,6 @@ const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link) */ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) { -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_link *link; uint8_t i; uint32_t hpo_dp_recycle_map = 0; @@ -4973,7 +4784,6 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) } *map |= (hpo_dp_recycle_map << LINK_RES_HPO_DP_REC_MAP__SHIFT); } -#endif } /** @@ -4996,7 +4806,6 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) */ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) { -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_link *link; uint8_t i; unsigned int available_hpo_dp_count; @@ -5034,5 +4843,4 @@ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) } } } -#endif } diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 5122c1de4492..68f9e099beae 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -62,7 +62,6 @@ enum { POST_LT_ADJ_REQ_TIMEOUT = 200 }; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dp_lt_fallback_entry { enum dc_lane_count lane_count; enum dc_link_rate link_rate; @@ -97,7 +96,6 @@ static const struct dp_lt_fallback_entry dp_lt_fallbacks[] = { {LANE_COUNT_ONE, LINK_RATE_HIGH}, {LANE_COUNT_ONE, LINK_RATE_LOW}, }; -#endif static bool decide_fallback_link_setting( struct dc_link *link, @@ -114,7 +112,7 @@ static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, { union training_aux_rd_interval training_rd_interval; uint32_t wait_in_micro_secs = 100; -#if defined(CONFIG_DRM_AMD_DC_DCN) + memset(&training_rd_interval, 0, 
sizeof(training_rd_interval)); if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING && link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { @@ -126,15 +124,7 @@ static uint32_t get_cr_training_aux_rd_interval(struct dc_link *link, if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; } -#else - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; -#endif + return wait_in_micro_secs; } @@ -142,7 +132,6 @@ static uint32_t get_eq_training_aux_rd_interval( struct dc_link *link, const struct dc_link_settings *link_settings) { -#if defined(CONFIG_DRM_AMD_DC_DCN) union training_aux_rd_interval training_rd_interval; memset(&training_rd_interval, 0, sizeof(training_rd_interval)); @@ -171,41 +160,16 @@ static uint32_t get_eq_training_aux_rd_interval( case 6: return 64000; default: return 400; } -#else - union training_aux_rd_interval training_rd_interval; - uint32_t wait_in_micro_secs = 400; - - memset(&training_rd_interval, 0, sizeof(training_rd_interval)); - /* overwrite the delay if rev > 1.1*/ - if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_12) { - /* DP 1.2 or later - retrieve delay through - * "DPCD_ADDR_TRAINING_AUX_RD_INTERVAL" register */ - core_link_read_dpcd( - link, - DP_TRAINING_AUX_RD_INTERVAL, - (uint8_t *)&training_rd_interval, - sizeof(training_rd_interval)); - - if (training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL) - wait_in_micro_secs = training_rd_interval.bits.TRAINIG_AUX_RD_INTERVAL * 4000; - } - - return wait_in_micro_secs; -#endif } void dp_wait_for_training_aux_rd_interval( struct dc_link *link, uint32_t wait_in_micro_secs) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (wait_in_micro_secs > 1000) msleep(wait_in_micro_secs/1000); else udelay(wait_in_micro_secs); -#else - udelay(wait_in_micro_secs); -#endif DC_LOG_HW_LINK_TRAINING("%s:\n wait = %d\n", __func__, @@ -233,7 +197,6 @@ enum dpcd_training_patterns case DP_TRAINING_PATTERN_SEQUENCE_4: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_4; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_128b_132b_TPS1: dpcd_tr_pattern = DPCD_128b_132b_TPS1; break; @@ -243,7 +206,6 @@ enum dpcd_training_patterns case DP_128b_132b_TPS2_CDS: dpcd_tr_pattern = DPCD_128b_132b_TPS2_CDS; break; -#endif case DP_TRAINING_PATTERN_VIDEOIDLE: dpcd_tr_pattern = DPCD_TRAINING_PATTERN_VIDEOIDLE; break; @@ -286,10 +248,8 @@ static enum dc_dp_training_pattern decide_cr_training_pattern( case DP_8b_10b_ENCODING: default: return DP_TRAINING_PATTERN_SEQUENCE_1; -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_128b_132b_ENCODING: return DP_128b_132b_TPS1; -#endif } } @@ -297,7 +257,6 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li const struct dc_link_settings *link_settings) { struct link_encoder *link_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct encoder_feature_support *enc_caps; struct dpcd_caps *rx_caps = &link->dpcd_caps; enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; @@ -332,41 +291,8 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li break; } return pattern; -#else - enum dc_dp_training_pattern highest_tp = DP_TRAINING_PATTERN_SEQUENCE_2; - struct encoder_feature_support *features; - struct dpcd_caps *dpcd_caps = &link->dpcd_caps; - - /* Access link encoder capability based 
on whether it is statically - * or dynamically assigned to a link. - */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - link_enc = link->link_enc; - ASSERT(link_enc); - features = &link_enc->features; - - if (features->flags.bits.IS_TPS3_CAPABLE) - highest_tp = DP_TRAINING_PATTERN_SEQUENCE_3; - - if (features->flags.bits.IS_TPS4_CAPABLE) - highest_tp = DP_TRAINING_PATTERN_SEQUENCE_4; - - if (dpcd_caps->max_down_spread.bits.TPS4_SUPPORTED && - highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_4) - return DP_TRAINING_PATTERN_SEQUENCE_4; - - if (dpcd_caps->max_ln_count.bits.TPS3_SUPPORTED && - highest_tp >= DP_TRAINING_PATTERN_SEQUENCE_3) - return DP_TRAINING_PATTERN_SEQUENCE_3; - - return DP_TRAINING_PATTERN_SEQUENCE_2; -#endif } -#if defined(CONFIG_DRM_AMD_DC_DCN) static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) { uint8_t link_rate = 0; @@ -394,7 +320,6 @@ static uint8_t get_dpcd_link_rate(const struct dc_link_settings *link_settings) return link_rate; } -#endif static void vendor_specific_lttpr_wa_one_start(struct dc_link *link) { @@ -537,10 +462,8 @@ static void vendor_specific_lttpr_wa_four( const uint8_t offset = dp_convert_to_count( link->dpcd_caps.lttpr_caps.phy_repeater_cnt); uint32_t vendor_lttpr_write_address = 0xF004F; -#if defined(CONFIG_DRM_AMD_DC_DP2_0) uint8_t sink_status = 0; uint8_t i; -#endif if (offset != 0xFF) vendor_lttpr_write_address += @@ -566,7 +489,6 @@ static void vendor_specific_lttpr_wa_four( sizeof(vendor_lttpr_write_data_two)); } -#if defined(CONFIG_DRM_AMD_DC_DP2_0) /* poll for intra-hop disable */ for (i = 0; i < 10; i++) { if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && @@ -574,7 +496,6 @@ static void vendor_specific_lttpr_wa_four( break; udelay(1000); } -#endif } static void vendor_specific_lttpr_wa_five( @@ -662,11 +583,7 @@ enum dc_status dpcd_set_link_settings( status = core_link_write_dpcd(link, DP_LINK_RATE_SET, <_settings->link_settings.link_rate_set, 1); } else { -#if defined(CONFIG_DRM_AMD_DC_DCN) rate = get_dpcd_link_rate(<_settings->link_settings); -#else - rate = (uint8_t) (lt_settings->link_settings.link_rate); -#endif if (link->dc->debug.apply_vendor_specific_lttpr_wa && (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN) && link->lttpr_mode == LTTPR_MODE_TRANSPARENT) @@ -717,10 +634,8 @@ uint8_t dc_dp_initialize_scrambling_data_symbols( disable_scrabled_data_symbols = 1; break; case DP_TRAINING_PATTERN_SEQUENCE_4: -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_128b_132b_TPS1: case DP_128b_132b_TPS2: -#endif disable_scrabled_data_symbols = 0; break; default: @@ -791,7 +706,6 @@ static void dpcd_set_lt_pattern_and_lane_settings( size_in_bytes); if (is_repeater(link, offset)) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" @@ -802,7 +716,6 @@ static void dpcd_set_lt_pattern_and_lane_settings( lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) -#endif DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, @@ -813,7 +726,6 @@ static void dpcd_set_lt_pattern_and_lane_settings( lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, 
lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", @@ -822,14 +734,13 @@ static void dpcd_set_lt_pattern_and_lane_settings( lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) -#endif - DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", - __func__, - dpcd_base_lt_offset, - lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, - lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, - lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, - lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); + DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", + __func__, + dpcd_base_lt_offset, + lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET, + lt_settings->dpcd_lane_settings[0].bits.PRE_EMPHASIS_SET, + lt_settings->dpcd_lane_settings[0].bits.MAX_SWING_REACHED, + lt_settings->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } if (edp_workaround) { /* for eDP write in 2 parts because the 5-byte burst is @@ -847,7 +758,6 @@ static void dpcd_set_lt_pattern_and_lane_settings( (uint8_t *)(lt_settings->dpcd_lane_settings), size_in_bytes); -#if defined(CONFIG_DRM_AMD_DC_DCN) } else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) { core_link_write_dpcd( @@ -855,10 +765,9 @@ static void dpcd_set_lt_pattern_and_lane_settings( dpcd_base_lt_offset, dpcd_lt_buffer, sizeof(dpcd_lt_buffer)); -#endif - } else + } else /* write it all in (1 + number-of-lanes)-byte burst*/ - core_link_write_dpcd( + core_link_write_dpcd( link, dpcd_base_lt_offset, dpcd_lt_buffer, @@ -925,13 +834,11 @@ void dp_hw_to_dpcd_lane_settings( (hw_lane_settings[lane].PRE_EMPHASIS == PRE_EMPHASIS_MAX_LEVEL ? 1 : 0); } -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) { dpcd_lane_settings[lane].tx_ffe.PRESET_VALUE = hw_lane_settings[lane].FFE_PRESET.settings.level; } -#endif } } @@ -953,13 +860,11 @@ void dp_decide_lane_settings( (enum dc_pre_emphasis)(ln_adjust[lane].bits. 
PRE_EMPHASIS_LANE); } -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) { hw_lane_settings[lane].FFE_PRESET.raw = ln_adjust[lane].tx_ffe.PRESET_VALUE; } -#endif } dp_hw_to_dpcd_lane_settings(lt_settings, hw_lane_settings, dpcd_lane_settings); @@ -1010,9 +915,7 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti max_requested.VOLTAGE_SWING = lane_settings[0].VOLTAGE_SWING; max_requested.PRE_EMPHASIS = lane_settings[0].PRE_EMPHASIS; -#if defined(CONFIG_DRM_AMD_DC_DCN) max_requested.FFE_PRESET = lane_settings[0].FFE_PRESET; -#endif /* Determine what the maximum of the requested settings are*/ for (lane = 1; lane < lt_settings->link_settings.lane_count; lane++) { @@ -1021,12 +924,10 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti if (lane_settings[lane].PRE_EMPHASIS > max_requested.PRE_EMPHASIS) max_requested.PRE_EMPHASIS = lane_settings[lane].PRE_EMPHASIS; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (lane_settings[lane].FFE_PRESET.settings.level > max_requested.FFE_PRESET.settings.level) max_requested.FFE_PRESET.settings.level = lane_settings[lane].FFE_PRESET.settings.level; -#endif } /* make sure the requested settings are @@ -1036,10 +937,8 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti if (max_requested.PRE_EMPHASIS > PRE_EMPHASIS_MAX_LEVEL) max_requested.PRE_EMPHASIS = PRE_EMPHASIS_MAX_LEVEL; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (max_requested.FFE_PRESET.settings.level > DP_FFE_PRESET_MAX_LEVEL) max_requested.FFE_PRESET.settings.level = DP_FFE_PRESET_MAX_LEVEL; -#endif /* make sure the pre-emphasis matches the voltage swing*/ if (max_requested.PRE_EMPHASIS > @@ -1052,9 +951,7 @@ static void maximize_lane_settings(const struct link_training_settings *lt_setti for (lane = 0; lane < LANE_COUNT_DP_MAX; lane++) { lane_settings[lane].VOLTAGE_SWING = max_requested.VOLTAGE_SWING; lane_settings[lane].PRE_EMPHASIS = max_requested.PRE_EMPHASIS; -#if defined(CONFIG_DRM_AMD_DC_DCN) lane_settings[lane].FFE_PRESET = max_requested.FFE_PRESET; -#endif } } @@ -1065,9 +962,7 @@ static void override_lane_settings(const struct link_training_settings *lt_setti if (lt_settings->voltage_swing == NULL && lt_settings->pre_emphasis == NULL && -#if defined(CONFIG_DRM_AMD_DC_DCN) lt_settings->ffe_preset == NULL && -#endif lt_settings->post_cursor2 == NULL) return; @@ -1079,10 +974,8 @@ static void override_lane_settings(const struct link_training_settings *lt_setti lane_settings[lane].PRE_EMPHASIS = *lt_settings->pre_emphasis; if (lt_settings->post_cursor2) lane_settings[lane].POST_CURSOR2 = *lt_settings->post_cursor2; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (lt_settings->ffe_preset) lane_settings[lane].FFE_PRESET = *lt_settings->ffe_preset; -#endif } } @@ -1186,7 +1079,6 @@ enum dc_status dpcd_set_lane_settings( link_training_setting->link_settings.lane_count); if (is_repeater(link, offset)) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_128b_132b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n LTTPR Repeater ID: %d\n" @@ -1197,7 +1089,6 @@ enum dc_status dpcd_set_lane_settings( link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_8b_10b_ENCODING) -#endif DC_LOG_HW_LINK_TRAINING("%s\n LTTPR Repeater ID: %d\n" " 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", 
__func__, @@ -1209,7 +1100,6 @@ enum dc_status dpcd_set_lane_settings( link_training_setting->dpcd_lane_settings[0].bits.MAX_PRE_EMPHASIS_REACHED); } else { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_128b_132b_ENCODING) DC_LOG_HW_LINK_TRAINING("%s:\n 0x%X TX_FFE_PRESET_VALUE = %x\n", @@ -1218,7 +1108,6 @@ enum dc_status dpcd_set_lane_settings( link_training_setting->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE); else if (dp_get_link_encoding_format(&link_training_setting->link_settings) == DP_8b_10b_ENCODING) -#endif DC_LOG_HW_LINK_TRAINING("%s\n 0x%X VS set = %x PE set = %x max VS Reached = %x max PE Reached = %x\n", __func__, lane0_set_address, @@ -1354,14 +1243,12 @@ uint32_t dp_translate_training_aux_read_interval(uint32_t dpcd_aux_read_interval case 0x04: aux_rd_interval_us = 16000; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case 0x05: aux_rd_interval_us = 32000; break; case 0x06: aux_rd_interval_us = 64000; break; -#endif default: break; } @@ -1402,13 +1289,8 @@ static enum link_training_result perform_channel_equalization_sequence( /* Note: also check that TPS4 is a supported feature*/ tr_pattern = lt_settings->pattern_for_eq; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_repeater(link, offset) && dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; -#else - if (is_repeater(link, offset)) - tr_pattern = DP_TRAINING_PATTERN_SEQUENCE_4; -#endif dp_set_hw_training_pattern(link, link_res, tr_pattern, offset); @@ -1579,15 +1461,10 @@ static enum link_training_result perform_clock_recovery_sequence( return LINK_TRAINING_SUCCESS; /* 6. max VS reached*/ -#if defined(CONFIG_DRM_AMD_DC_DCN) if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_8b_10b_ENCODING) && dp_is_max_vs_reached(lt_settings)) break; -#else - if (dp_is_max_vs_reached(lt_settings)) - break; -#endif /* 7. same lane settings*/ /* Note: settings are the same for all lanes, @@ -1596,12 +1473,10 @@ static enum link_training_result perform_clock_recovery_sequence( lt_settings->dpcd_lane_settings[0].bits.VOLTAGE_SWING_SET == dpcd_lane_adjust[0].bits.VOLTAGE_SWING_LANE) retries_cr++; -#if defined(CONFIG_DRM_AMD_DC_DCN) else if ((dp_get_link_encoding_format(<_settings->link_settings) == DP_128b_132b_ENCODING) && lt_settings->dpcd_lane_settings[0].tx_ffe.PRESET_VALUE == dpcd_lane_adjust[0].tx_ffe.PRESET_VALUE) retries_cr++; -#endif else retries_cr = 0; @@ -1639,11 +1514,7 @@ static inline enum link_training_result dp_transition_to_video_idle( * TPS4 must be used instead of POST_LT_ADJ_REQ. */ if (link->dpcd_caps.max_ln_count.bits.POST_LT_ADJ_REQ_SUPPORTED != 1 || -#if defined(CONFIG_DRM_AMD_DC_DCN) lt_settings->pattern_for_eq >= DP_TRAINING_PATTERN_SEQUENCE_4) { -#else - lt_settings->pattern_for_eq == DP_TRAINING_PATTERN_SEQUENCE_4) { -#endif /* delay 5ms after Main Link output idle pattern and then check * DPCD 0202h. 
*/ @@ -1742,7 +1613,6 @@ static inline void decide_8b_10b_training_settings( dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } -#if defined(CONFIG_DRM_AMD_DC_DCN) static inline void decide_128b_132b_training_settings(struct dc_link *link, const struct dc_link_settings *link_settings, struct link_training_settings *lt_settings) @@ -1769,7 +1639,6 @@ static inline void decide_128b_132b_training_settings(struct dc_link *link, dp_hw_to_dpcd_lane_settings(lt_settings, lt_settings->hw_lane_settings, lt_settings->dpcd_lane_settings); } -#endif void dp_decide_training_settings( struct dc_link *link, @@ -1778,10 +1647,8 @@ void dp_decide_training_settings( { if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) decide_8b_10b_training_settings(link, link_settings, lt_settings); -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) decide_128b_132b_training_settings(link, link_settings, lt_settings); -#endif } static void override_training_settings( @@ -1804,10 +1671,8 @@ static void override_training_settings( lt_settings->pre_emphasis = overrides->pre_emphasis; if (overrides->post_cursor2 != NULL) lt_settings->post_cursor2 = overrides->post_cursor2; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (overrides->ffe_preset != NULL) lt_settings->ffe_preset = overrides->ffe_preset; -#endif /* Override HW lane settings with BIOS forced values if present */ if (link->chip_caps & EXT_DISPLAY_PATH_CAPS__DP_FIXED_VS_EN && link->lttpr_mode == LTTPR_MODE_TRANSPARENT) { @@ -2011,7 +1876,6 @@ static void print_status_message( case LINK_RATE_HIGH3: link_rate = "HBR3"; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case LINK_RATE_UHBR10: link_rate = "UHBR10"; break; @@ -2021,7 +1885,6 @@ static void print_status_message( case LINK_RATE_UHBR20: link_rate = "UHBR20"; break; -#endif default: break; } @@ -2051,7 +1914,6 @@ static void print_status_message( case LINK_TRAINING_LINK_LOSS: lt_result = "Link loss"; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_128b_132b_LT_FAILED: lt_result = "LT_FAILED received"; break; @@ -2064,7 +1926,6 @@ static void print_status_message( case DP_128b_132b_CDS_DONE_TIMEOUT: lt_result = "CDS timeout"; break; -#endif default: break; } @@ -2084,9 +1945,9 @@ static void print_status_message( } /* Connectivity log: link training */ -#if defined(CONFIG_DRM_AMD_DC_DCN) + /* TODO - DP2.0 Log: add connectivity log for FFE PRESET */ -#endif + CONN_MSG_LT(link, "%sx%d %s VS=%d, PE=%d, DS=%s", link_rate, lt_settings->link_settings.lane_count, @@ -2174,15 +2035,12 @@ enum dc_status dpcd_configure_lttpr_mode(struct dc_link *link, struct link_train static void dpcd_exit_training_mode(struct dc_link *link) { -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t sink_status = 0; uint8_t i; -#endif /* clear training pattern set */ dpcd_set_training_pattern(link, DP_TRAINING_PATTERN_VIDEOIDLE); -#if defined(CONFIG_DRM_AMD_DC_DCN) /* poll for intra-hop disable */ for (i = 0; i < 10; i++) { if ((core_link_read_dpcd(link, DP_SINK_STATUS, &sink_status, 1) == DC_OK) && @@ -2190,7 +2048,6 @@ static void dpcd_exit_training_mode(struct dc_link *link) break; udelay(1000); } -#endif } enum dc_status dpcd_configure_channel_coding(struct dc_link *link, @@ -2214,7 +2071,6 @@ enum dc_status dpcd_configure_channel_coding(struct dc_link *link, return status; } -#if defined(CONFIG_DRM_AMD_DC_DCN) static void dpcd_128b_132b_get_aux_rd_interval(struct dc_link *link, uint32_t *interval_in_us) { @@ -2345,7 
+2201,6 @@ static enum link_training_result dp_perform_128b_132b_cds_done_sequence( return status; } -#endif static enum link_training_result dp_perform_8b_10b_link_training( struct dc_link *link, @@ -2406,7 +2261,6 @@ static enum link_training_result dp_perform_8b_10b_link_training( return status; } -#if defined(CONFIG_DRM_AMD_DC_DCN) static enum link_training_result dp_perform_128b_132b_link_training( struct dc_link *link, const struct link_resource *link_res, @@ -2434,7 +2288,6 @@ static enum link_training_result dp_perform_128b_132b_link_training( return result; } -#endif static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequence( struct dc_link *link, @@ -2511,11 +2364,7 @@ static enum link_training_result dc_link_dp_perform_fixed_vs_pe_training_sequenc core_link_write_dpcd(link, DP_LANE_COUNT_SET, &lane_count_set.raw, 1); -#if defined(CONFIG_DRM_AMD_DC_DCN) rate = get_dpcd_link_rate(<_settings->link_settings); -#else - rate = (uint8_t) (lt_settings->link_settings.link_rate); -#endif /* Vendor specific: Toggle link rate */ toggle_rate = (rate == 0x6) ? 0xA : 0x6; @@ -2816,10 +2665,8 @@ enum link_training_result dc_link_dp_perform_link_training( status = dc_link_dp_perform_fixed_vs_pe_training_sequence(link, link_res, <_settings); else if (encoding == DP_8b_10b_ENCODING) status = dp_perform_8b_10b_link_training(link, link_res, <_settings); -#if defined(CONFIG_DRM_AMD_DC_DCN) else if (encoding == DP_128b_132b_ENCODING) status = dp_perform_128b_132b_link_training(link, link_res, <_settings); -#endif else ASSERT(0); @@ -3071,14 +2918,10 @@ enum link_training_result dc_link_dp_sync_lt_attempt( dp_cs_id, link_settings); /* Set FEC enable */ -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { -#endif fec_enable = lt_overrides->fec_enable && *lt_overrides->fec_enable; dp_set_fec_ready(link, NULL, fec_enable); -#if defined(CONFIG_DRM_AMD_DC_DCN) } -#endif if (lt_overrides->alternate_scrambler_reset) { if (*lt_overrides->alternate_scrambler_reset) @@ -3121,13 +2964,9 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) * Still shouldn't turn off dp_receiver (DPCD:600h) */ if (link_down == true) { -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_link_settings link_settings = link->cur_link_settings; -#endif dp_disable_link_phy(link, NULL, link->connector_signal); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link_settings) == DP_8b_10b_ENCODING) -#endif dp_set_fec_ready(link, NULL, false); } @@ -3135,7 +2974,6 @@ bool dc_link_dp_sync_lt_end(struct dc_link *link, bool link_down) return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN) static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) { enum dc_link_rate lttpr_max_link_rate = link->dpcd_caps.lttpr_caps.max_link_rate; @@ -3149,7 +2987,6 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) return lttpr_max_link_rate; } -#endif bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) { @@ -3555,14 +3392,12 @@ static enum dc_lane_count reduce_lane_count(enum dc_lane_count lane_count) static enum dc_link_rate reduce_link_rate(enum dc_link_rate link_rate) { switch (link_rate) { -#if defined(CONFIG_DRM_AMD_DC_DCN) case LINK_RATE_UHBR20: return LINK_RATE_UHBR13_5; case LINK_RATE_UHBR13_5: return LINK_RATE_UHBR10; case LINK_RATE_UHBR10: return LINK_RATE_HIGH3; -#endif case LINK_RATE_HIGH3: return LINK_RATE_HIGH2; case LINK_RATE_HIGH2: @@ -3597,20 +3432,17 @@ 
static enum dc_link_rate increase_link_rate(enum dc_link_rate link_rate) return LINK_RATE_HIGH2; case LINK_RATE_HIGH2: return LINK_RATE_HIGH3; -#if defined(CONFIG_DRM_AMD_DC_DCN) case LINK_RATE_HIGH3: return LINK_RATE_UHBR10; case LINK_RATE_UHBR10: return LINK_RATE_UHBR13_5; case LINK_RATE_UHBR13_5: return LINK_RATE_UHBR20; -#endif default: return LINK_RATE_UNKNOWN; } } -#if defined(CONFIG_DRM_AMD_DC_DCN) static bool decide_fallback_link_setting_max_bw_policy( const struct dc_link_settings *max, struct dc_link_settings *cur) @@ -3644,7 +3476,6 @@ static bool decide_fallback_link_setting_max_bw_policy( return found; } -#endif /* * function: set link rate and lane count fallback based @@ -3662,12 +3493,10 @@ static bool decide_fallback_link_setting( { if (!current_link_setting) return false; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&initial_link_settings) == DP_128b_132b_ENCODING || link->dc->debug.force_dp2_lt_fallback_method) return decide_fallback_link_setting_max_bw_policy(&initial_link_settings, current_link_setting); -#endif switch (training_result) { case LINK_TRAINING_CR_FAIL_LANE0: @@ -4205,15 +4034,9 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) union phy_test_pattern dpcd_test_pattern; union lane_adjust dpcd_lane_adjustment[2]; unsigned char dpcd_post_cursor_2_adjustment = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) unsigned char test_pattern_buffer[ (DP_TEST_264BIT_CUSTOM_PATTERN_263_256 - DP_TEST_264BIT_CUSTOM_PATTERN_7_0)+1] = {0}; -#else - unsigned char test_pattern_buffer[ - (DP_TEST_80BIT_CUSTOM_PATTERN_79_72 - - DP_TEST_80BIT_CUSTOM_PATTERN_7_0)+1] = {0}; -#endif unsigned int test_pattern_size = 0; enum dp_test_pattern test_pattern; union lane_adjust dpcd_lane_adjust; @@ -4284,7 +4107,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) case PHY_TEST_PATTERN_CP2520_3: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case PHY_TEST_PATTERN_128b_132b_TPS1: test_pattern = DP_TEST_PATTERN_128b_132b_TPS1; break; @@ -4312,7 +4134,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) case PHY_TEST_PATTERN_SQUARE_PULSE: test_pattern = DP_TEST_PATTERN_SQUARE_PULSE; break; -#endif default: test_pattern = DP_TEST_PATTERN_VIDEO_MODE; break; @@ -4328,7 +4149,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) test_pattern_size); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (test_pattern == DP_TEST_PATTERN_SQUARE_PULSE) { test_pattern_size = 1; // Square pattern data is 1 byte (DP spec) core_link_read_dpcd( @@ -4347,7 +4167,6 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) test_pattern_buffer, test_pattern_size); } -#endif /* prepare link training settings */ link_training_settings.link_settings = link->cur_link_settings; @@ -4368,14 +4187,11 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link) link_training_settings.hw_lane_settings[lane].POST_CURSOR2 = (enum dc_post_cursor2) ((dpcd_post_cursor_2_adjustment >> (lane * 2)) & 0x03); - } -#if defined(CONFIG_DRM_AMD_DC_DCN) - else if (dp_get_link_encoding_format(&link->cur_link_settings) == + } else if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) { link_training_settings.hw_lane_settings[lane].FFE_PRESET.raw = dpcd_lane_adjust.tx_ffe.PRESET_VALUE; } -#endif } dp_hw_to_dpcd_lane_settings(&link_training_settings, @@ -5072,7 +4888,6 @@ static void get_active_converter_info( dp_hw_fw_revision.ieee_fw_rev, sizeof(dp_hw_fw_revision.ieee_fw_rev)); } 
-#if defined(CONFIG_DRM_AMD_DC_DCN) if (link->dpcd_caps.dpcd_rev.raw >= DPCD_REV_14 && link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { union dp_dfp_cap_ext dfp_cap_ext; @@ -5108,7 +4923,6 @@ static void get_active_converter_info( DC_LOG_DP2("\tdfp_cap_ext.max_video_h_active_width = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_h_active_width); DC_LOG_DP2("\tdfp_cap_ext.max_video_v_active_height = %d", link->dpcd_caps.dongle_caps.dfp_cap_ext.max_video_v_active_height); } -#endif } static void dp_wa_power_up_0010FA(struct dc_link *link, uint8_t *dpcd_data, @@ -5168,12 +4982,8 @@ static bool dpcd_read_sink_ext_caps(struct dc_link *link) bool dp_retrieve_lttpr_cap(struct dc_link *link) { -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t lttpr_dpcd_data[8]; bool allow_lttpr_non_transparent_mode = 0; -#else - uint8_t lttpr_dpcd_data[6]; -#endif bool vbios_lttpr_enable = link->dc->caps.vbios_lttpr_enable; bool vbios_lttpr_interop = link->dc->caps.vbios_lttpr_aware; enum dc_status status = DC_ERROR_UNEXPECTED; @@ -5181,7 +4991,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) memset(lttpr_dpcd_data, '\0', sizeof(lttpr_dpcd_data)); -#if defined(CONFIG_DRM_AMD_DC_DCN) if ((link->dc->config.allow_lttpr_non_transparent_mode.bits.DP2_0 && link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED)) { allow_lttpr_non_transparent_mode = 1; @@ -5189,7 +4998,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) !link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { allow_lttpr_non_transparent_mode = 1; } -#endif /* * Logic to determine LTTPR mode @@ -5198,21 +5006,12 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) if (vbios_lttpr_enable && vbios_lttpr_interop) link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; else if (!vbios_lttpr_enable && vbios_lttpr_interop) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (allow_lttpr_non_transparent_mode) -#else - if (link->dc->config.allow_lttpr_non_transparent_mode) -#endif link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; else link->lttpr_mode = LTTPR_MODE_TRANSPARENT; } else if (!vbios_lttpr_enable && !vbios_lttpr_interop) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (!allow_lttpr_non_transparent_mode || !link->dc->caps.extended_aux_timeout_support) -#else - if (!link->dc->config.allow_lttpr_non_transparent_mode - || !link->dc->caps.extended_aux_timeout_support) -#endif link->lttpr_mode = LTTPR_MODE_NON_LTTPR; else link->lttpr_mode = LTTPR_MODE_NON_TRANSPARENT; @@ -5262,7 +5061,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) lttpr_dpcd_data[DP_PHY_REPEATER_EXTENDED_WAIT_TIMEOUT - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; -#if defined(CONFIG_DRM_AMD_DC_DCN) link->dpcd_caps.lttpr_caps.main_link_channel_coding.raw = lttpr_dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_PHY_REPEATER - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; @@ -5270,7 +5068,6 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.raw = lttpr_dpcd_data[DP_PHY_REPEATER_128b_132b_RATES - DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; -#endif /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. 
*/ is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && @@ -5528,7 +5325,6 @@ static bool retrieve_link_cap(struct dc_link *link) DP_DSC_SUPPORT, link->dpcd_caps.dsc_caps.dsc_basic_caps.raw, sizeof(link->dpcd_caps.dsc_caps.dsc_basic_caps.raw)); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (link->dpcd_caps.dongle_type != DISPLAY_DONGLE_NONE) { status = core_link_read_dpcd( link, @@ -5543,19 +5339,12 @@ static bool retrieve_link_cap(struct dc_link *link) DC_LOG_DSC("\tBRANCH_MAX_LINE_WIDTH 0x%02x", link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.fields.BRANCH_MAX_LINE_WIDTH); } -#else - status = core_link_read_dpcd( - link, - DP_DSC_BRANCH_OVERALL_THROUGHPUT_0, - link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw, - sizeof(link->dpcd_caps.dsc_caps.dsc_branch_decoder_caps.raw)); -#endif /* Apply work around to disable FEC and DSC for USB4 tunneling in TBT3 compatibility mode * only if required. */ if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && -#if defined(CONFIG_DRM_AMD_DC_DCN3_1) +#if defined(CONFIG_DRM_AMD_DC_DCN) !link->dc->debug.dpia_debug.bits.disable_force_tbt3_work_around && #endif link->dpcd_caps.is_branch_dev && @@ -5577,7 +5366,6 @@ static bool retrieve_link_cap(struct dc_link *link) if (!dpcd_read_sink_ext_caps(link)) link->dpcd_sink_ext_caps.raw = 0; -#if defined(CONFIG_DRM_AMD_DC_DCN) link->dpcd_caps.channel_coding_cap.raw = dpcd_data[DP_MAIN_LINK_CHANNEL_CODING_CAP - DP_DPCD_REV]; if (link->dpcd_caps.channel_coding_cap.bits.DP_128b_132b_SUPPORTED) { @@ -5624,7 +5412,6 @@ static bool retrieve_link_cap(struct dc_link *link) if (link->dpcd_caps.fec_cap1.bits.AGGREGATED_ERROR_COUNTERS_CAPABLE) DC_LOG_DP2("\tFEC aggregated error counters are supported"); } -#endif /* Connectivity log: detection */ CONN_DATA_DETECT(link, dpcd_data, sizeof(dpcd_data), "Rx Caps: "); @@ -6096,7 +5883,6 @@ bool dc_link_dp_set_test_pattern( case DP_TEST_PATTERN_CP2520_3: pattern = PHY_TEST_PATTERN_CP2520_3; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_TEST_PATTERN_128b_132b_TPS1: pattern = PHY_TEST_PATTERN_128b_132b_TPS1; break; @@ -6124,7 +5910,6 @@ bool dc_link_dp_set_test_pattern( case DP_TEST_PATTERN_SQUARE_PULSE: pattern = PHY_TEST_PATTERN_SQUARE_PULSE; break; -#endif default: return false; } @@ -6727,15 +6512,12 @@ enum dp_link_encoding dp_get_link_encoding_format(const struct dc_link_settings if ((link_settings->link_rate >= LINK_RATE_LOW) && (link_settings->link_rate <= LINK_RATE_HIGH3)) return DP_8b_10b_ENCODING; -#if defined(CONFIG_DRM_AMD_DC_DCN) else if ((link_settings->link_rate >= LINK_RATE_UHBR10) && (link_settings->link_rate <= LINK_RATE_UHBR20)) return DP_128b_132b_ENCODING; -#endif return DP_UNKNOWN_ENCODING; } -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link) { struct dc_link_settings link_settings = {0}; @@ -6952,7 +6734,6 @@ bool is_dp_128b_132b_signal(struct pipe_ctx *pipe_ctx) pipe_ctx->link_res.hpo_dp_link_enc && dc_is_dp_signal(pipe_ctx->stream->signal)); } -#endif void edp_panel_backlight_power_on(struct dc_link *link) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index e2409c14df34..c84822cd7e53 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -120,49 +120,32 @@ void dp_enable_link_phy( link->cur_link_settings = *link_settings; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(link_settings) 
== DP_128b_132b_ENCODING) { /* TODO - DP2.0 HW: notify link rate change here */ } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { if (dc->clk_mgr->funcs->notify_link_rate_change) dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); } -#else - if (dc->clk_mgr->funcs->notify_link_rate_change) - dc->clk_mgr->funcs->notify_link_rate_change(dc->clk_mgr, link); -#endif + if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(link_settings) == DP_128b_132b_ENCODING) { enable_dp_hpo_output(link, link_res, link_settings); } else if (dp_get_link_encoding_format(link_settings) == DP_8b_10b_ENCODING) { if (dc_is_dp_sst_signal(signal)) { link_enc->funcs->enable_dp_output( - link_enc, - link_settings, - clock_source); + link_enc, + link_settings, + clock_source); } else { link_enc->funcs->enable_dp_mst_output( - link_enc, - link_settings, - clock_source); + link_enc, + link_settings, + clock_source); } } -#else - if (dc_is_dp_sst_signal(signal)) { - link_enc->funcs->enable_dp_output( - link_enc, - link_settings, - clock_source); - } else { - link_enc->funcs->enable_dp_mst_output( - link_enc, - link_settings, - clock_source); - } -#endif + if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); @@ -242,9 +225,7 @@ void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_ { struct dc *dc = link->ctx->dc; struct dmcu *dmcu = dc->res_pool->dmcu; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_link_encoder *hpo_link_enc = link_res->hpo_dp_link_enc; -#endif struct link_encoder *link_enc; /* Link should always be assigned encoder when en-/disabling. */ @@ -260,28 +241,22 @@ void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_ if (signal == SIGNAL_TYPE_EDP) { if (link->dc->hwss.edp_backlight_control) link->dc->hwss.edp_backlight_control(link, false); -#if defined(CONFIG_DRM_AMD_DC_DCN) + if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING) disable_dp_hpo_output(link, link_res, signal); else link_enc->funcs->disable_output(link_enc, signal); -#else - link_enc->funcs->disable_output(link_enc, signal); -#endif link->dc->hwss.edp_power_control(link, false); } else { if (dmcu != NULL && dmcu->funcs->lock_phy) dmcu->funcs->lock_phy(dmcu); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dp_get_link_encoding_format(&link->cur_link_settings) == DP_128b_132b_ENCODING && hpo_link_enc) disable_dp_hpo_output(link, link_res, signal); else link_enc->funcs->disable_output(link_enc, signal); -#else - link_enc->funcs->disable_output(link_enc, signal); -#endif + if (dmcu != NULL && dmcu->funcs->unlock_phy) dmcu->funcs->unlock_phy(dmcu); } @@ -330,14 +305,12 @@ bool dp_set_hw_training_pattern( case DP_TRAINING_PATTERN_SEQUENCE_4: test_pattern = DP_TEST_PATTERN_TRAINING_PATTERN4; break; -#if defined(CONFIG_DRM_AMD_DC_DCN) case DP_128b_132b_TPS1: test_pattern = DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE; break; case DP_128b_132b_TPS2: test_pattern = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE; break; -#endif default: break; } @@ -347,10 +320,8 @@ bool dp_set_hw_training_pattern( return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN) #define DC_LOGGER \ link->ctx->logger -#endif void dp_set_hw_lane_settings( struct dc_link *link, const struct link_resource *link_res, @@ -363,7 +334,6 @@ void dp_set_hw_lane_settings( return; /* call Encoder to set lane settings */ -#if defined(CONFIG_DRM_AMD_DC_DCN) if 
(dp_get_link_encoding_format(&link_settings->link_settings) == DP_128b_132b_ENCODING) { link_res->hpo_dp_link_enc->funcs->set_ffe( @@ -374,9 +344,6 @@ void dp_set_hw_lane_settings( == DP_8b_10b_ENCODING) { encoder->funcs->dp_set_lane_settings(encoder, link_settings); } -#else - encoder->funcs->dp_set_lane_settings(encoder, link_settings); -#endif memmove(link->cur_lane_setting, link_settings->lane_settings, sizeof(link->cur_lane_setting)); @@ -391,9 +358,7 @@ void dp_set_hw_test_pattern( { struct encoder_set_dp_phy_pattern_param pattern_param = {0}; struct link_encoder *encoder; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dp_link_encoding link_encoding_format = dp_get_link_encoding_format(&link->cur_link_settings); -#endif /* Access link encoder based on whether it is statically * or dynamically assigned to a link. @@ -409,7 +374,6 @@ void dp_set_hw_test_pattern( pattern_param.custom_pattern_size = custom_pattern_size; pattern_param.dp_panel_mode = dp_get_panel_mode(link); -#if defined(CONFIG_DRM_AMD_DC_DCN) switch (link_encoding_format) { case DP_128b_132b_ENCODING: link_res->hpo_dp_link_enc->funcs->set_link_test_pattern( @@ -423,14 +387,10 @@ void dp_set_hw_test_pattern( DC_LOG_ERROR("%s: Unknown link encoding format.", __func__); break; } -#else - encoder->funcs->dp_set_phy_pattern(encoder, &pattern_param); -#endif + dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_SET_SOURCE_PATTERN); } -#if defined(CONFIG_DRM_AMD_DC_DCN) #undef DC_LOGGER -#endif void dp_retrain_link_dp_test(struct dc_link *link, struct dc_link_settings *link_setting, @@ -579,12 +539,8 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) optc_dsc_mode = dsc_optc_cfg.is_pixel_format_444 ? OPTC_DSC_ENABLED_444 : OPTC_DSC_ENABLED_NATIVE_SUBSAMPLED; /* Enable DSC in encoder */ -#if defined(CONFIG_DRM_AMD_DC_DCN) if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment) && !is_dp_128b_132b_signal(pipe_ctx)) { -#else - if (dc_is_dp_signal(stream->signal) && !IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { -#endif DC_LOG_DSC("Setting stream encoder DSC config for engine %d:", (int)pipe_ctx->stream_res.stream_enc->id); dsc_optc_config_log(dsc, &dsc_optc_cfg); pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config(pipe_ctx->stream_res.stream_enc, @@ -610,17 +566,13 @@ void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable) /* disable DSC in stream encoder */ if (dc_is_dp_signal(stream->signal)) { - -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.hpo_dp_stream_enc, false, NULL, true); - else -#endif - if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { + else if (!IS_FPGA_MAXIMUS_DC(dc->ctx->dce_environment)) { pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_config( pipe_ctx->stream_res.stream_enc, OPTC_DSC_DISABLED, 0, 0); @@ -696,7 +648,6 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u dsc->funcs->dsc_get_packed_pps(dsc, &dsc_cfg, &dsc_packed_pps[0]); if (dc_is_dp_signal(stream->signal)) { DC_LOG_DSC("Setting stream encoder DSC PPS SDP for engine %d\n", (int)pipe_ctx->stream_res.stream_enc->id); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -704,7 +655,6 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u &dsc_packed_pps[0], 
immediate_update); else -#endif pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.stream_enc, true, @@ -714,7 +664,6 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u } else { /* disable DSC PPS in stream encoder */ if (dc_is_dp_signal(stream->signal)) { -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -722,7 +671,6 @@ bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_u NULL, true); else -#endif pipe_ctx->stream_res.stream_enc->funcs->dp_set_dsc_pps_info_packet( pipe_ctx->stream_res.stream_enc, false, NULL, true); } @@ -746,7 +694,6 @@ bool dp_update_dsc_config(struct pipe_ctx *pipe_ctx) return true; } -#if defined(CONFIG_DRM_AMD_DC_DCN) #undef DC_LOGGER #define DC_LOGGER \ link->ctx->logger @@ -898,4 +845,3 @@ void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable) } #undef DC_LOGGER -#endif diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c index b34bf59cf54b..3aa6d347d73c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c @@ -356,7 +356,6 @@ bool resource_construct( } } -#if defined(CONFIG_DRM_AMD_DC_DCN) pool->hpo_dp_stream_enc_count = 0; if (create_funcs->create_hpo_dp_stream_encoder) { for (i = 0; i < caps->num_hpo_dp_stream_encoder; i++) { @@ -377,7 +376,6 @@ bool resource_construct( pool->hpo_dp_link_enc_count++; } } -#endif #if defined(CONFIG_DRM_AMD_DC_DCN) for (i = 0; i < caps->num_mpc_3dlut; i++) { @@ -1713,7 +1711,6 @@ static void update_stream_engine_usage( } } -#if defined(CONFIG_DRM_AMD_DC_DCN) static void update_hpo_dp_stream_engine_usage( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1815,7 +1812,6 @@ static void remove_hpo_dp_link_enc_from_ctx(struct resource_context *res_ctx, pipe_ctx->link_res.hpo_dp_link_enc = NULL; } } -#endif /* TODO: release audio object */ void update_audio_usage( @@ -1861,7 +1857,6 @@ static int acquire_first_free_pipe( return -1; } -#if defined(CONFIG_DRM_AMD_DC_DCN) static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for_link( struct resource_context *res_ctx, const struct resource_pool *pool, @@ -1879,7 +1874,6 @@ static struct hpo_dp_stream_encoder *find_first_free_match_hpo_dp_stream_enc_for return NULL; } -#endif static struct audio *find_first_free_audio( struct resource_context *res_ctx, @@ -1971,7 +1965,6 @@ enum dc_status dc_remove_stream_from_ctx( if (dc->res_pool->funcs->link_enc_unassign) dc->res_pool->funcs->link_enc_unassign(new_ctx, del_pipe->stream); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(del_pipe)) { update_hpo_dp_stream_engine_usage( &new_ctx->res_ctx, dc->res_pool, @@ -1979,7 +1972,6 @@ enum dc_status dc_remove_stream_from_ctx( false); remove_hpo_dp_link_enc_from_ctx(&new_ctx->res_ctx, del_pipe, del_pipe->stream); } -#endif if (del_pipe->stream_res.audio) update_audio_usage( @@ -2232,7 +2224,6 @@ enum dc_status resource_map_pool_resources( pipe_ctx->stream_res.stream_enc, true); -#if defined(CONFIG_DRM_AMD_DC_DCN) /* Allocate DP HPO Stream Encoder based on signal, hw capabilities * and link settings */ @@ -2257,7 +2248,6 @@ enum dc_status resource_map_pool_resources( return DC_NO_LINK_ENC_RESOURCE; } } -#endif /* TODO: Add check if ASIC support and EDID audio */ if 
(!stream->converter_disable_audio && @@ -2927,12 +2917,10 @@ bool pipe_need_reprogram( if (pipe_ctx_old->stream_res.dsc != pipe_ctx->stream_res.dsc) return true; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (pipe_ctx_old->stream_res.hpo_dp_stream_enc != pipe_ctx->stream_res.hpo_dp_stream_enc) return true; if (pipe_ctx_old->link_res.hpo_dp_link_enc != pipe_ctx->link_res.hpo_dp_link_enc) return true; -#endif /* DIG link encoder resource assignment for stream changed. */ if (pipe_ctx_old->stream->ctx->dc->res_pool->funcs->link_encs_assign) { @@ -3199,7 +3187,6 @@ void get_audio_check(struct audio_info *aud_modes, } } -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( const struct resource_context *res_ctx, const struct resource_pool *pool, @@ -3218,7 +3205,6 @@ struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( return hpo_dp_link_enc; } -#endif void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, struct dc_state *context) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 14abba495f4a..ff5093e52f2d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -195,10 +195,8 @@ struct dc_caps { unsigned int cursor_cache_size; struct dc_plane_cap planes[MAX_PLANES]; struct dc_color_caps color; -#if defined(CONFIG_DRM_AMD_DC_DCN) bool dp_hpo; bool hdmi_frl_pcon_support; -#endif bool edp_dsc_support; bool vbios_lttpr_aware; bool vbios_lttpr_enable; @@ -306,7 +304,6 @@ struct dc_cap_funcs { struct link_training_settings; -#if defined(CONFIG_DRM_AMD_DC_DCN) union allow_lttpr_non_transparent_mode { struct { bool DP1_4A : 1; @@ -314,7 +311,7 @@ union allow_lttpr_non_transparent_mode { } bits; unsigned char raw; }; -#endif + /* Structure to hold configuration flags set by dm at dc creation. 
*/ struct dc_config { bool gpu_vm_support; @@ -327,11 +324,7 @@ struct dc_config { bool edp_no_power_sequencing; bool force_enum_edp; bool forced_clocks; -#if defined(CONFIG_DRM_AMD_DC_DCN) union allow_lttpr_non_transparent_mode allow_lttpr_non_transparent_mode; -#else - bool allow_lttpr_non_transparent_mode; -#endif bool multi_mon_pp_mclk_switch; bool disable_dmcu; bool enable_4to1MPC; @@ -689,13 +682,11 @@ struct dc_debug_options { bool disable_dsc_edp; unsigned int force_dsc_edp_policy; bool enable_dram_clock_change_one_display_vactive; -#if defined(CONFIG_DRM_AMD_DC_DCN) /* TODO - remove once tested */ bool legacy_dp2_lt; bool set_mst_en_for_sst; bool disable_uhbr; bool force_dp2_lt_fallback_method; -#endif union mem_low_power_enable_options enable_mem_low_power; union root_clock_optimization_options root_clock_optimization; bool hpo_optimization; @@ -1239,12 +1230,10 @@ struct dpcd_caps { struct psr_caps psr_caps; struct dpcd_usb4_dp_tunneling_info usb4_dp_tun_info; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dp_128b_132b_supported_link_rates dp_128b_132b_supported_link_rates; union dp_main_line_channel_coding_cap channel_coding_cap; union dp_sink_video_fallback_formats fallback_formats; union dp_fec_capability1 fec_cap1; -#endif }; union dpcd_sink_ext_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 353dac420f34..7b1103e9f7a2 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -53,7 +53,6 @@ enum dc_link_rate { LINK_RATE_RBR2 = 0x0C, // Rate_5 (RBR2)- 3.24 Gbps/Lane LINK_RATE_RATE_6 = 0x10, // Rate_6 - 4.32 Gbps/Lane LINK_RATE_HIGH2 = 0x14, // Rate_7 (HBR2)- 5.40 Gbps/Lane -#if defined(CONFIG_DRM_AMD_DC_DCN) LINK_RATE_HIGH3 = 0x1E, // Rate_8 (HBR3)- 8.10 Gbps/Lane /* Starting from DP2.0 link rate enum directly represents actual * link rate value in unit of 10 mbps @@ -61,9 +60,6 @@ enum dc_link_rate { LINK_RATE_UHBR10 = 1000, // UHBR10 - 10.0 Gbps/Lane LINK_RATE_UHBR13_5 = 1350, // UHBR13.5 - 13.5 Gbps/Lane LINK_RATE_UHBR20 = 2000, // UHBR10 - 20.0 Gbps/Lane -#else - LINK_RATE_HIGH3 = 0x1E // Rate_8 (HBR3)- 8.10 Gbps/Lane -#endif }; enum dc_link_spread { @@ -100,7 +96,6 @@ enum dc_post_cursor2 { POST_CURSOR2_MAX_LEVEL = POST_CURSOR2_LEVEL3, }; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dc_dp_ffe_preset_level { DP_FFE_PRESET_LEVEL0 = 0, DP_FFE_PRESET_LEVEL1, @@ -120,7 +115,6 @@ enum dc_dp_ffe_preset_level { DP_FFE_PRESET_LEVEL15, DP_FFE_PRESET_MAX_LEVEL = DP_FFE_PRESET_LEVEL15, }; -#endif enum dc_dp_training_pattern { DP_TRAINING_PATTERN_SEQUENCE_1 = 0, @@ -128,19 +122,15 @@ enum dc_dp_training_pattern { DP_TRAINING_PATTERN_SEQUENCE_3, DP_TRAINING_PATTERN_SEQUENCE_4, DP_TRAINING_PATTERN_VIDEOIDLE, -#if defined(CONFIG_DRM_AMD_DC_DCN) DP_128b_132b_TPS1, DP_128b_132b_TPS2, DP_128b_132b_TPS2_CDS, -#endif }; enum dp_link_encoding { DP_UNKNOWN_ENCODING = 0, DP_8b_10b_ENCODING = 1, -#if defined(CONFIG_DRM_AMD_DC_DCN) DP_128b_132b_ENCODING = 2, -#endif }; struct dc_link_settings { @@ -152,7 +142,6 @@ struct dc_link_settings { bool dpcd_source_device_specific_field_support; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dc_dp_ffe_preset { struct { uint8_t level : 4; @@ -163,24 +152,19 @@ union dc_dp_ffe_preset { } settings; uint8_t raw; }; -#endif struct dc_lane_settings { enum dc_voltage_swing VOLTAGE_SWING; enum dc_pre_emphasis PRE_EMPHASIS; enum dc_post_cursor2 POST_CURSOR2; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dc_dp_ffe_preset FFE_PRESET; -#endif }; struct 
dc_link_training_overrides { enum dc_voltage_swing *voltage_swing; enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dc_dp_ffe_preset *ffe_preset; -#endif uint16_t *cr_pattern_time; uint16_t *eq_pattern_time; @@ -194,7 +178,6 @@ struct dc_link_training_overrides { bool *fec_enable; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) union payload_table_update_status { struct { uint8_t VC_PAYLOAD_TABLE_UPDATED:1; @@ -202,7 +185,6 @@ union payload_table_update_status { } bits; uint8_t raw; }; -#endif union dpcd_rev { struct { @@ -291,14 +273,10 @@ union lane_align_status_updated { struct { uint8_t INTERLANE_ALIGN_DONE:1; uint8_t POST_LT_ADJ_REQ_IN_PROGRESS:1; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t EQ_INTERLANE_ALIGN_DONE_128b_132b:1; uint8_t CDS_INTERLANE_ALIGN_DONE_128b_132b:1; uint8_t LT_FAILED_128b_132b:1; uint8_t RESERVED:1; -#else - uint8_t RESERVED:4; -#endif uint8_t DOWNSTREAM_PORT_STATUS_CHANGED:1; uint8_t LINK_STATUS_UPDATED:1; } bits; @@ -311,12 +289,10 @@ union lane_adjust { uint8_t PRE_EMPHASIS_LANE:2; uint8_t RESERVED:4; } bits; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct { uint8_t PRESET_VALUE :4; uint8_t RESERVED :4; } tx_ffe; -#endif uint8_t raw; }; @@ -346,12 +322,10 @@ union dpcd_training_lane { uint8_t MAX_PRE_EMPHASIS_REACHED:1; uint8_t RESERVED:2; } bits; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct { uint8_t PRESET_VALUE :4; uint8_t RESERVED :4; } tx_ffe; -#endif uint8_t raw; }; @@ -665,18 +639,9 @@ union test_response { union phy_test_pattern { struct { -#if defined(CONFIG_DRM_AMD_DC_DCN) /* This field is 7 bits for DP2.0 */ uint8_t PATTERN :7; uint8_t RESERVED :1; -#else - /* DpcdPhyTestPatterns. This field is 2 bits for DP1.1 - * and 3 bits for DP1.2. - */ - uint8_t PATTERN :3; - /* BY speci, bit7:2 is 0 for DP1.1. 
*/ - uint8_t RESERVED :5; -#endif } bits; uint8_t raw; }; @@ -754,14 +719,10 @@ union dpcd_fec_capability { uint8_t UNCORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t CORRECTED_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t BIT_ERROR_COUNT_CAPABLE:1; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint8_t PARITY_BLOCK_ERROR_COUNT_CAPABLE:1; uint8_t ARITY_BIT_ERROR_COUNT_CAPABLE:1; uint8_t FEC_RUNNING_INDICATOR_SUPPORTED:1; uint8_t FEC_ERROR_REPORTING_POLICY_SUPPORTED:1; -#else - uint8_t RESERVED:4; -#endif } bits; uint8_t raw; }; @@ -925,7 +886,6 @@ struct dpcd_usb4_dp_tunneling_info { uint8_t usb4_topology_id[DPCD_USB4_TOPOLOGY_ID_LEN]; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) #ifndef DP_MAIN_LINK_CHANNEL_CODING_CAP #define DP_MAIN_LINK_CHANNEL_CODING_CAP 0x006 #endif @@ -994,8 +954,8 @@ struct dpcd_usb4_dp_tunneling_info { #endif #ifndef DP_INTRA_HOP_AUX_REPLY_INDICATION #define DP_INTRA_HOP_AUX_REPLY_INDICATION (1 << 3) -#endif /* TODO - Use DRM header to replace above once available */ +#endif // DP_INTRA_HOP_AUX_REPLY_INDICATION union dp_main_line_channel_coding_cap { struct { @@ -1091,6 +1051,5 @@ union dp_128b_132b_training_aux_rd_interval { } bits; uint8_t raw; }; -#endif #endif /* DC_DP_TYPES_H */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 704323654468..9014c0a0a63b 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -47,10 +47,8 @@ struct dc_link_status { struct link_mst_stream_allocation { /* DIG front */ const struct stream_encoder *stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) /* HPO DP Stream Encoder */ const struct hpo_dp_stream_encoder *hpo_dp_stream_enc; -#endif /* associate DRM payload table with DC stream encoder */ uint8_t vcp_id; /* number of slots required for the DP stream in transport packet */ @@ -318,10 +316,8 @@ enum dc_detect_reason { bool dc_link_detect(struct dc_link *dc_link, enum dc_detect_reason reason); bool dc_link_get_hpd_state(struct dc_link *dc_link); enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t req_pbn); -#endif /* Notify DC about DP RX Interrupt (aka Short Pulse Interrupt). 
* Return: @@ -455,10 +451,8 @@ uint32_t dc_bandwidth_in_kbps_from_timing( bool dc_link_is_fec_supported(const struct dc_link *link); bool dc_link_should_enable_fec(const struct dc_link *link); -#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw); enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); -#endif const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link); /* take a snapshot of current link resource allocation state */ diff --git a/drivers/gpu/drm/amd/display/dc/dc_stream.h b/drivers/gpu/drm/amd/display/dc/dc_stream.h index e37c4a10bfd5..f631b61abedd 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_stream.h +++ b/drivers/gpu/drm/amd/display/dc/dc_stream.h @@ -115,12 +115,10 @@ struct periodic_interrupt_config { int lines_offset; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_mst_stream_bw_update { bool is_increase; // is bandwidth reduced or increased uint32_t mst_stream_bw; // new mst bandwidth in kbps }; -#endif union stream_update_flags { struct { @@ -132,9 +130,7 @@ union stream_update_flags { uint32_t gamut_remap:1; uint32_t wb_update:1; uint32_t dsc_changed : 1; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t mst_bw : 1; -#endif } bits; uint32_t raw; @@ -288,9 +284,7 @@ struct dc_stream_update { struct dc_writeback_update *wb_update; struct dc_dsc_config *dsc_config; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_mst_stream_bw_update *mst_bw_update; -#endif struct dc_transfer_func *func_shaper; struct dc_3dlut *lut3d_func; diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h index 0285a4b38d05..48859d5fc172 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_types.h @@ -395,14 +395,11 @@ struct dc_lttpr_caps { uint8_t max_link_rate; uint8_t phy_repeater_cnt; uint8_t max_ext_timeout; -#if defined(CONFIG_DRM_AMD_DC_DCN) union dp_main_link_channel_coding_lttpr_cap main_link_channel_coding; union dp_128b_132b_supported_lttpr_link_rates supported_128b_132b_rates; -#endif uint8_t aux_rd_interval[MAX_REPEATER_CNT - 1]; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_dongle_dfp_cap_ext { bool supported; uint16_t max_pixel_rate_in_mps; @@ -414,7 +411,6 @@ struct dc_dongle_dfp_cap_ext { struct dp_color_depth_caps ycbcr422_color_depth_caps; struct dp_color_depth_caps ycbcr420_color_depth_caps; }; -#endif struct dc_dongle_caps { /* dongle type (DP converter, CV smart dongle) */ @@ -429,10 +425,8 @@ struct dc_dongle_caps { bool is_dp_hdmi_ycbcr420_converter; uint32_t dp_hdmi_max_bpc; uint32_t dp_hdmi_max_pixel_clk_in_khz; -#if defined(CONFIG_DRM_AMD_DC_DCN) uint32_t dp_hdmi_frl_max_link_bw_in_kbps; struct dc_dongle_dfp_cap_ext dfp_cap_ext; -#endif }; /* Scaling format */ enum scaling_transformation { diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index f3ff141b706a..b83c5544247d 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -49,9 +49,7 @@ #include "link_enc_cfg.h" #include "link_hwss.h" #include "dc_link_dp.h" -#if defined(CONFIG_DRM_AMD_DC_DCN) #include "dccg.h" -#endif #include "clock_source.h" #include "clk_mgr.h" #include "abm.h" @@ -1112,17 +1110,12 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx) clk_mgr->funcs->enable_pme_wa(clk_mgr); /* un-mute audio */ /* TODO: audio should be per stream 
rather than per link */ -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.hpo_dp_stream_enc, false); else pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, false); -#else - pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( - pipe_ctx->stream_res.stream_enc, false); -#endif if (pipe_ctx->stream_res.audio) pipe_ctx->stream_res.audio->enabled = true; } @@ -1145,32 +1138,22 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx) if (pipe_ctx->stream_res.audio && pipe_ctx->stream_res.audio->enabled == false) return; -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.hpo_dp_stream_enc, true); else pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( pipe_ctx->stream_res.stream_enc, true); -#else - pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control( - pipe_ctx->stream_res.stream_enc, true); -#endif if (pipe_ctx->stream_res.audio) { pipe_ctx->stream_res.audio->enabled = false; if (dc_is_dp_signal(pipe_ctx->stream->signal)) -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.hpo_dp_stream_enc); else pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( pipe_ctx->stream_res.stream_enc); -#else - pipe_ctx->stream_res.stream_enc->funcs->dp_audio_disable( - pipe_ctx->stream_res.stream_enc); -#endif else pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_disable( pipe_ctx->stream_res.stream_enc); @@ -1204,14 +1187,10 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.hpo_dp_stream_enc); } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) -#else - if (dc_is_dp_signal(pipe_ctx->stream->signal)) -#endif pipe_ctx->stream_res.stream_enc->funcs->stop_dp_info_packets( pipe_ctx->stream_res.stream_enc); @@ -1224,7 +1203,6 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); ASSERT(link_enc); -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->disable( pipe_ctx->stream_res.hpo_dp_stream_enc); @@ -1237,13 +1215,7 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx) pipe_ctx->stream_res.stream_enc->id, false); } -#else - if (link_enc) - link_enc->funcs->connect_dig_be_to_fe( - link->link_enc, - pipe_ctx->stream_res.stream_enc->id, - false); -#endif + if (dc_is_dp_signal(pipe_ctx->stream->signal)) dp_source_sequence_trace(link, DPCD_SOURCE_SEQ_AFTER_DISCONNECT_DIG_FE_BE); @@ -1285,15 +1257,11 @@ void dce110_blank_stream(struct pipe_ctx *pipe_ctx) link->dc->hwss.set_abm_immediate_disable(pipe_ctx); } -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) { /* TODO - DP2.0 HW: Set ODM mode in dp hpo encoder here */ pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_blank( pipe_ctx->stream_res.hpo_dp_stream_enc); } else if (dc_is_dp_signal(pipe_ctx->stream->signal)) { -#else - if (dc_is_dp_signal(pipe_ctx->stream->signal)) { -#endif pipe_ctx->stream_res.stream_enc->funcs->dp_blank(link, pipe_ctx->stream_res.stream_enc); if 
(!dc_is_embedded_signal(pipe_ctx->stream->signal)) { @@ -1535,7 +1503,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( build_audio_output(context, pipe_ctx, &audio_output); if (dc_is_dp_signal(pipe_ctx->stream->signal)) -#if defined(CONFIG_DRM_AMD_DC_DCN) if (is_dp_128b_132b_signal(pipe_ctx)) pipe_ctx->stream_res.hpo_dp_stream_enc->funcs->dp_audio_setup( pipe_ctx->stream_res.hpo_dp_stream_enc, @@ -1546,12 +1513,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( pipe_ctx->stream_res.stream_enc, pipe_ctx->stream_res.audio->inst, &pipe_ctx->stream->audio_info); -#else - pipe_ctx->stream_res.stream_enc->funcs->dp_audio_setup( - pipe_ctx->stream_res.stream_enc, - pipe_ctx->stream_res.audio->inst, - &pipe_ctx->stream->audio_info); -#endif else pipe_ctx->stream_res.stream_enc->funcs->hdmi_audio_setup( pipe_ctx->stream_res.stream_enc, @@ -1570,14 +1531,12 @@ static enum dc_status apply_single_controller_ctx_to_hw( if (!pipe_ctx->stream->apply_seamless_boot_optimization && dc->config.use_pipe_ctx_sync_logic) check_syncd_pipes_for_disabled_master_pipe(dc, context, pipe_ctx->pipe_idx); -#if defined(CONFIG_DRM_AMD_DC_DCN) /* DCN3.1 FPGA Workaround * Need to enable HPO DP Stream Encoder before setting OTG master enable. * To do so, move calling function enable_stream_timing to only be done AFTER calling * function core_link_enable_stream */ if (!(hws->wa.dp_hpo_and_otg_sequence && is_dp_128b_132b_signal(pipe_ctx))) -#endif /* */ /* Do not touch stream timing on seamless boot optimization. */ if (!pipe_ctx->stream->apply_seamless_boot_optimization) @@ -1643,7 +1602,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( if (!stream->dpms_off) core_link_enable_stream(context, pipe_ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) /* DCN3.1 FPGA Workaround * Need to enable HPO DP Stream Encoder before setting OTG master enable. 
* To do so, move calling function enable_stream_timing to only be done AFTER calling @@ -1653,7 +1611,6 @@ static enum dc_status apply_single_controller_ctx_to_hw( if (!pipe_ctx->stream->apply_seamless_boot_optimization) hws->funcs.enable_stream_timing(pipe_ctx, context, dc); } -#endif pipe_ctx->plane_res.scl_data.lb_params.alpha_en = pipe_ctx->bottom_pipe != 0; @@ -2233,8 +2190,6 @@ static void dce110_setup_audio_dto( build_audio_output(context, pipe_ctx, &audio_output); -#if defined(CONFIG_DRM_AMD_DC_DCN) - /* For DCN3.1, audio to HPO FRL encoder is using audio DTBCLK DTO */ if (dc->res_pool->dccg && dc->res_pool->dccg->funcs->set_audio_dtbclk_dto) { /* disable audio DTBCLK DTO */ dc->res_pool->dccg->funcs->set_audio_dtbclk_dto( @@ -2251,13 +2206,6 @@ static void dce110_setup_audio_dto( pipe_ctx->stream->signal, &audio_output.crtc_info, &audio_output.pll_info); -#else - pipe_ctx->stream_res.audio->funcs->wall_dto_setup( - pipe_ctx->stream_res.audio, - pipe_ctx->stream->signal, - &audio_output.crtc_info, - &audio_output.pll_info); -#endif break; } } diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h index 943240e2809e..e90123b0ee0e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h +++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h @@ -249,12 +249,10 @@ struct resource_pool { /* Number of USB4 DPIA (DisplayPort Input Adapter) link objects created.*/ unsigned int usb4_dpia_count; -#if defined(CONFIG_DRM_AMD_DC_DCN) unsigned int hpo_dp_stream_enc_count; struct hpo_dp_stream_encoder *hpo_dp_stream_enc[MAX_HPO_DP2_ENCODERS]; unsigned int hpo_dp_link_enc_count; struct hpo_dp_link_encoder *hpo_dp_link_enc[MAX_HPO_DP2_LINK_ENCODERS]; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN) struct dc_3dlut *mpc_lut[MAX_PIPES]; struct dc_transfer_func *mpc_shaper[MAX_PIPES]; @@ -307,9 +305,7 @@ struct stream_resource { struct display_stream_compressor *dsc; struct timing_generator *tg; struct stream_encoder *stream_enc; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_stream_encoder *hpo_dp_stream_enc; -#endif struct audio *audio; struct pixel_clk_params pix_clk_params; @@ -334,18 +330,12 @@ struct plane_resource { struct dcn_fe_bandwidth bw; }; -#if defined(CONFIG_DRM_AMD_DC_DCN) #define LINK_RES_HPO_DP_REC_MAP__MASK 0xFFFF #define LINK_RES_HPO_DP_REC_MAP__SHIFT 0 -#endif /* all mappable hardware resources used to enable a link */ struct link_resource { -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_link_encoder *hpo_dp_link_enc; -#else - void *dummy; -#endif }; union pipe_update_flags { @@ -425,11 +415,9 @@ struct resource_context { uint8_t dp_clock_source_ref_count; bool is_dsc_acquired[MAX_PIPES]; struct link_enc_cfg_context link_enc_cfg_ctx; -#if defined(CONFIG_DRM_AMD_DC_DCN) bool is_hpo_dp_stream_enc_acquired[MAX_HPO_DP2_ENCODERS]; unsigned int hpo_dp_link_enc_to_link_idx[MAX_HPO_DP2_LINK_ENCODERS]; int hpo_dp_link_enc_ref_cnts[MAX_HPO_DP2_LINK_ENCODERS]; -#endif #if defined(CONFIG_DRM_AMD_DC_DCN) bool is_mpc_3dlut_acquired[MAX_PIPES]; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h index c940fdfda144..b2fa4de47734 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dccg.h @@ -79,7 +79,7 @@ struct dccg_funcs { void (*otg_drop_pixel)(struct dccg *dccg, uint32_t otg_inst); void (*dccg_init)(struct dccg *dccg); -#if defined(CONFIG_DRM_AMD_DC_DCN) + void (*set_dpstreamclk)( struct dccg *dccg, enum hdmistreamclk_source 
src, @@ -102,7 +102,7 @@ struct dccg_funcs { void (*disable_symclk32_le)( struct dccg *dccg, int hpo_le_inst); -#endif + void (*set_physymclk)( struct dccg *dccg, int phy_inst, diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h index 10ecbc667ffa..d89bd55f110f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/hw_shared.h @@ -38,10 +38,8 @@ #define MAX_PIPES 6 #define MAX_DIG_LINK_ENCODERS 7 #define MAX_DWB_PIPES 1 -#if defined(CONFIG_DRM_AMD_DC_DCN) #define MAX_HPO_DP2_ENCODERS 4 #define MAX_HPO_DP2_LINK_ENCODERS 2 -#endif struct gamma_curve { uint32_t offset; diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h index 2ce15cd10d80..101444c6f145 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/link_encoder.h @@ -220,7 +220,6 @@ enum link_enc_cfg_mode { LINK_ENC_CFG_TRANSIENT /* During commit state - use state to be committed. */ }; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dp2_link_mode { DP2_LINK_TRAINING_TPS1, DP2_LINK_TRAINING_TPS2, @@ -306,6 +305,5 @@ struct hpo_dp_link_encoder_funcs { const struct dc_link_settings *link_settings, uint8_t ffe_preset); }; -#endif #endif /* LINK_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index 073f8b667eff..d9a3a204cc23 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -249,7 +249,6 @@ struct stream_encoder_funcs { struct stream_encoder *enc); }; -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_stream_encoder_state { uint32_t stream_enc_enabled; uint32_t vid_stream_enabled; @@ -329,6 +328,5 @@ struct hpo_dp_stream_encoder_funcs { struct hpo_dp_stream_encoder *enc, struct hpo_dp_stream_encoder_state *state); }; -#endif #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index c29320b3855d..59a704781e34 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -100,9 +100,7 @@ enum crc_selection { enum otg_out_mux_dest { OUT_MUX_DIO = 0, -#if defined(CONFIG_DRM_AMD_DC_DCN) OUT_MUX_HPO_DP = 2, -#endif }; enum h_timing_div_mode { diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h index c2008258c50a..280c8764b636 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw_sequencer_private.h @@ -41,9 +41,7 @@ struct dce_hwseq_wa { bool DEGVIDCN10_254; bool DEGVIDCN21; bool disallow_self_refresh_during_multi_plane_transition; -#if defined(CONFIG_DRM_AMD_DC_DCN) bool dp_hpo_and_otg_sequence; -#endif }; struct hwseq_wa_state { diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h index 028180f58f71..ccb13dcb867b 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/resource.h +++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h @@ -54,10 +54,8 @@ struct resource_caps { int num_dsc; unsigned int num_dig_link_enc; // Total number of DIGs (digital encoders) in DIO (Display Input/Output). unsigned int num_usb4_dpia; // Total number of USB4 DPIA (DisplayPort Input Adapters). 
-#if defined(CONFIG_DRM_AMD_DC_DCN) int num_hpo_dp_stream_encoder; int num_hpo_dp_link_encoder; -#endif int num_mpc_3dlut; }; @@ -77,14 +75,12 @@ struct resource_create_funcs { struct stream_encoder *(*create_stream_encoder)( enum engine_id eng_id, struct dc_context *ctx); -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_stream_encoder *(*create_hpo_dp_stream_encoder)( enum engine_id eng_id, struct dc_context *ctx); struct hpo_dp_link_encoder *(*create_hpo_dp_link_encoder)( uint8_t inst, struct dc_context *ctx); -#endif struct dce_hwseq *(*create_hwseq)( struct dc_context *ctx); @@ -205,12 +201,10 @@ int get_num_mpc_splits(struct pipe_ctx *pipe); int get_num_odm_splits(struct pipe_ctx *pipe); -#if defined(CONFIG_DRM_AMD_DC_DCN) struct hpo_dp_link_encoder *resource_get_hpo_dp_link_enc_for_det_lt( const struct resource_context *res_ctx, const struct resource_pool *pool, const struct dc_link *link); -#endif void reset_syncd_pipes_from_disabled_pipes(struct dc *dc, struct dc_state *context); diff --git a/drivers/gpu/drm/amd/display/include/bios_parser_types.h b/drivers/gpu/drm/amd/display/include/bios_parser_types.h index b8ffb216ebc4..cf4027cc3f4c 100644 --- a/drivers/gpu/drm/amd/display/include/bios_parser_types.h +++ b/drivers/gpu/drm/amd/display/include/bios_parser_types.h @@ -152,10 +152,8 @@ struct bp_transmitter_control { enum signal_type signal; enum dc_color_depth color_depth; /* not used for DCE6.0 */ enum hpd_source_id hpd_sel; /* ucHPDSel, used for DCe6.0 */ -#if defined(CONFIG_DRM_AMD_DC_DCN) enum tx_ffe_id txffe_sel; /* used for DCN3 */ enum engine_id hpo_engine_id; /* used for DCN3 */ -#endif struct graphics_object_id connector_obj_id; /* symClock; in 10kHz, pixel clock, in HDMI deep color mode, it should * be pixel clock * deep_color_ratio (in KHz) diff --git a/drivers/gpu/drm/amd/display/include/dpcd_defs.h b/drivers/gpu/drm/amd/display/include/dpcd_defs.h index ffd0df1701e6..4edaa3318714 100644 --- a/drivers/gpu/drm/amd/display/include/dpcd_defs.h +++ b/drivers/gpu/drm/amd/display/include/dpcd_defs.h @@ -144,14 +144,10 @@ enum dpcd_training_patterns { DPCD_TRAINING_PATTERN_1, DPCD_TRAINING_PATTERN_2, DPCD_TRAINING_PATTERN_3, -#if defined(CONFIG_DRM_AMD_DC_DCN) DPCD_TRAINING_PATTERN_4 = 7, DPCD_128b_132b_TPS1 = 1, DPCD_128b_132b_TPS2 = 2, DPCD_128b_132b_TPS2_CDS = 3, -#else - DPCD_TRAINING_PATTERN_4 = 7 -#endif }; /* This enum is for use with PsrSinkPsrStatus.bits.sinkSelfRefreshStatus diff --git a/drivers/gpu/drm/amd/display/include/grph_object_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_defs.h index 84b299ff500a..05ba188a7c3b 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_defs.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_defs.h @@ -140,7 +140,6 @@ enum sync_source { SYNC_SOURCE_DUAL_GPU_PIN }; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum tx_ffe_id { TX_FFE0 = 0, TX_FFE1, @@ -150,7 +149,6 @@ enum tx_ffe_id { TX_FFE_PreShoot_Only, TX_FFE_No_FFE, }; -#endif /* connector sizes in millimeters - from BiosParserTypes.hpp */ #define CONNECTOR_SIZE_DVI 40 diff --git a/drivers/gpu/drm/amd/display/include/grph_object_id.h b/drivers/gpu/drm/amd/display/include/grph_object_id.h index 01775417cf4b..fed1edc038d8 100644 --- a/drivers/gpu/drm/amd/display/include/grph_object_id.h +++ b/drivers/gpu/drm/amd/display/include/grph_object_id.h @@ -184,14 +184,12 @@ enum engine_id { ENGINE_ID_DACA, ENGINE_ID_DACB, ENGINE_ID_VCE, /* wireless display pseudo-encoder */ -#if defined(CONFIG_DRM_AMD_DC_DCN) ENGINE_ID_HPO_0, ENGINE_ID_HPO_1, ENGINE_ID_HPO_DP_0, 
ENGINE_ID_HPO_DP_1, ENGINE_ID_HPO_DP_2, ENGINE_ID_HPO_DP_3, -#endif ENGINE_ID_VIRTUAL, ENGINE_ID_COUNT, diff --git a/drivers/gpu/drm/amd/display/include/link_service_types.h b/drivers/gpu/drm/amd/display/include/link_service_types.h index 424bccd36434..447a56286dd0 100644 --- a/drivers/gpu/drm/amd/display/include/link_service_types.h +++ b/drivers/gpu/drm/amd/display/include/link_service_types.h @@ -74,12 +74,10 @@ enum link_training_result { LINK_TRAINING_LINK_LOSS, /* Abort link training (because sink unplugged) */ LINK_TRAINING_ABORT, -#if defined(CONFIG_DRM_AMD_DC_DCN) DP_128b_132b_LT_FAILED, DP_128b_132b_MAX_LOOP_COUNT_REACHED, DP_128b_132b_CHANNEL_EQ_DONE_TIMEOUT, DP_128b_132b_CDS_DONE_TIMEOUT, -#endif }; enum lttpr_mode { @@ -99,23 +97,19 @@ struct link_training_settings { enum dc_pre_emphasis *pre_emphasis; enum dc_post_cursor2 *post_cursor2; bool should_set_fec_ready; -#if defined(CONFIG_DRM_AMD_DC_DCN) /* TODO - factor lane_settings out because it changes during LT */ union dc_dp_ffe_preset *ffe_preset; -#endif uint16_t cr_pattern_time; uint16_t eq_pattern_time; uint16_t cds_pattern_time; enum dc_dp_training_pattern pattern_for_cr; enum dc_dp_training_pattern pattern_for_eq; -#if defined(CONFIG_DRM_AMD_DC_DCN) enum dc_dp_training_pattern pattern_for_cds; uint32_t eq_wait_time_limit; uint8_t eq_loop_count_limit; uint32_t cds_wait_time_limit; -#endif bool enhanced_framing; enum lttpr_mode lttpr_mode; @@ -161,7 +155,6 @@ enum dp_test_pattern { DP_TEST_PATTERN_CP2520_2, DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE = DP_TEST_PATTERN_CP2520_2, DP_TEST_PATTERN_CP2520_3, -#if defined(CONFIG_DRM_AMD_DC_DCN) DP_TEST_PATTERN_128b_132b_TPS1, DP_TEST_PATTERN_128b_132b_TPS2, DP_TEST_PATTERN_PRBS9, @@ -171,20 +164,15 @@ enum dp_test_pattern { DP_TEST_PATTERN_PRBS31, DP_TEST_PATTERN_264BIT_CUSTOM, DP_TEST_PATTERN_SQUARE_PULSE, -#endif /* Link Training Patterns */ DP_TEST_PATTERN_TRAINING_PATTERN1, DP_TEST_PATTERN_TRAINING_PATTERN2, DP_TEST_PATTERN_TRAINING_PATTERN3, DP_TEST_PATTERN_TRAINING_PATTERN4, -#if defined(CONFIG_DRM_AMD_DC_DCN) DP_TEST_PATTERN_128b_132b_TPS1_TRAINING_MODE, DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE, DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_128b_132b_TPS2_TRAINING_MODE, -#else - DP_TEST_PATTERN_PHY_PATTERN_END = DP_TEST_PATTERN_TRAINING_PATTERN4, -#endif /* link test patterns*/ DP_TEST_PATTERN_COLOR_SQUARES, -- cgit From 5279e091616b74ff0e4a24e220e0552b10d88d46 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 19 Jan 2022 16:24:39 +0800 Subject: drm/amd/display: abstract encoder related hwseq across different types [why] Current we have hundreds of if/else or switch statement to check encoder type in dc_link level. The reason is because depending on the type of encoder dc_link needs to perform similar programming task but with different encoder interfaces. The story is to abstract these interfaces so dc_link can just perform the programming task without knowing the detail of which encoder it's dealing with. 
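A condensed, illustrative sketch of the pattern this patch applies, pieced together from the hunks below (the names dc_link_hwss_get, set_throttled_vcp_size and set_hblank_min_symbol_width are taken from the diff; surrounding MST payload bookkeeping is omitted):

/* Before: dc_link selects the encoder interface explicitly per encoding format. */
switch (dp_get_link_encoding_format(&link->cur_link_settings)) {
case DP_8b_10b_ENCODING:
        stream_encoder->funcs->set_throttled_vcp_size(stream_encoder,
                                                      avg_time_slots_per_mtp);
        break;
case DP_128b_132b_ENCODING:
        hpo_dp_link_encoder->funcs->set_throttled_vcp_size(hpo_dp_link_encoder,
                                                           hpo_dp_stream_encoder->inst,
                                                           avg_time_slots_per_mtp);
        break;
}

/* After: dc_link programs through the link_hwss abstraction and stays
 * encoder-agnostic; optional hooks are NULL-checked by the caller.
 */
const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res);

link_hwss->set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp);
if (link_hwss->set_hblank_min_symbol_width)
        link_hwss->set_hblank_min_symbol_width(pipe_ctx, &link->cur_link_settings,
                                               avg_time_slots_per_mtp);

Note that only mandatory hooks (here set_throttled_vcp_size) must be implemented by every link_hwss variant; set_hblank_min_symbol_width sits below the "check for NULL" marker in the vtable and is provided only by the HPO DP implementation.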
Tested-by: Daniel Wheeler Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 83 +++++++++------------ drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 85 ++++++++++++++++++++++ .../gpu/drm/amd/display/dc/inc/hw/stream_encoder.h | 4 + drivers/gpu/drm/amd/display/dc/inc/link_hwss.h | 19 +++++ 4 files changed, 143 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 84afe0d1a791..fafe05608818 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3433,6 +3433,8 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; struct link_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; + const struct dc_link_settings empty_link_settings = {0}; + const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* slot X.Y for SST payload deallocate */ @@ -3441,10 +3443,11 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - hpo_dp_link_encoder->funcs->set_throttled_vcp_size( - hpo_dp_link_encoder, - hpo_dp_stream_encoder->inst, - avg_time_slots_per_mtp); + link_hwss->set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->set_hblank_min_symbol_width) + link_hwss->set_hblank_min_symbol_width(pipe_ctx, + &empty_link_settings, + avg_time_slots_per_mtp); } /* calculate VC payload and update branch with new payload allocation table*/ @@ -3488,10 +3491,11 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, dc_log_vcp_x_y(link, avg_time_slots_per_mtp); - hpo_dp_link_encoder->funcs->set_throttled_vcp_size( - hpo_dp_link_encoder, - hpo_dp_stream_encoder->inst, - avg_time_slots_per_mtp); + link_hwss->set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->set_hblank_min_symbol_width) + link_hwss->set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, + avg_time_slots_per_mtp); } /* Always return DC_OK. @@ -3508,15 +3512,14 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct link_encoder *link_encoder = NULL; - struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; - struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; int i; enum act_return_status ret; + const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* Link encoder may have been dynamically assigned to non-physical display endpoint. 
*/ @@ -3622,22 +3625,13 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) pbn = get_pbn_from_timing(pipe_ctx); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - switch (dp_get_link_encoding_format(&link->cur_link_settings)) { - case DP_8b_10b_ENCODING: - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, - avg_time_slots_per_mtp); - break; - case DP_128b_132b_ENCODING: - hpo_dp_link_encoder->funcs->set_throttled_vcp_size( - hpo_dp_link_encoder, - hpo_dp_stream_encoder->inst, + dc_log_vcp_x_y(link, avg_time_slots_per_mtp); + + link_hwss->set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->set_hblank_min_symbol_width) + link_hwss->set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, avg_time_slots_per_mtp); - break; - case DP_UNKNOWN_ENCODING: - DC_LOG_ERROR("Failure: unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } return DC_OK; @@ -3651,10 +3645,10 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; struct link_encoder *link_encoder = link->link_enc; - struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; uint8_t i; enum act_return_status ret; + const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* decrease throttled vcp size */ @@ -3662,8 +3656,10 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw pbn = get_pbn_from_bw_in_kbps(bw_in_kbps); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, + link_hwss->set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->set_hblank_min_symbol_width) + link_hwss->set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, avg_time_slots_per_mtp); /* send ALLOCATE_PAYLOAD sideband message with updated pbn */ @@ -3731,10 +3727,10 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t struct fixed31_32 pbn; struct fixed31_32 pbn_per_slot; struct link_encoder *link_encoder = link->link_enc; - struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; uint8_t i; enum act_return_status ret; + const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* notify immediate branch device table update */ @@ -3793,8 +3789,10 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t pbn_per_slot = get_pbn_per_slot(stream); avg_time_slots_per_mtp = dc_fixpt_div(pbn, pbn_per_slot); - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, + link_hwss->set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->set_hblank_min_symbol_width) + link_hwss->set_hblank_min_symbol_width(pipe_ctx, + &link->cur_link_settings, avg_time_slots_per_mtp); return DC_OK; @@ -3805,13 +3803,13 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) struct dc_stream_state *stream = pipe_ctx->stream; struct dc_link *link = stream->link; struct link_encoder *link_encoder = NULL; - struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; - struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = 
pipe_ctx->stream_res.hpo_dp_stream_enc; struct dp_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); int i; bool mst_mode = (link->type == dc_connection_mst_branch); + const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); + const struct dc_link_settings empty_link_settings = {0}; DC_LOGGER_INIT(link->ctx->logger); /* Link encoder may have been dynamically assigned to non-physical display endpoint. */ @@ -3829,22 +3827,11 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) */ /* slot X.Y */ - switch (dp_get_link_encoding_format(&link->cur_link_settings)) { - case DP_8b_10b_ENCODING: - stream_encoder->funcs->set_throttled_vcp_size( - stream_encoder, - avg_time_slots_per_mtp); - break; - case DP_128b_132b_ENCODING: - hpo_dp_link_encoder->funcs->set_throttled_vcp_size( - hpo_dp_link_encoder, - hpo_dp_stream_encoder->inst, + link_hwss->set_throttled_vcp_size(pipe_ctx, avg_time_slots_per_mtp); + if (link_hwss->set_hblank_min_symbol_width) + link_hwss->set_hblank_min_symbol_width(pipe_ctx, + &empty_link_settings, avg_time_slots_per_mtp); - break; - case DP_UNKNOWN_ENCODING: - DC_LOG_ERROR("Failure: unknown encoding format\n"); - return DC_ERROR_UNEXPECTED; - } /* TODO: which component is responsible for remove payload table? */ if (mst_mode) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index c84822cd7e53..93392c67c909 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -844,4 +844,89 @@ void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable) } } +/******************************* dio_link_hwss ********************************/ +static void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size) +{ + struct stream_encoder *stream_encoder = pipe_ctx->stream_res.stream_enc; + + stream_encoder->funcs->set_throttled_vcp_size( + stream_encoder, + throttled_vcp_size); +} + +/***************************** hpo_dp_link_hwss *******************************/ +static void set_dp_hpo_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size) +{ + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; + struct hpo_dp_link_encoder *hpo_dp_link_encoder = pipe_ctx->link_res.hpo_dp_link_enc; + + hpo_dp_link_encoder->funcs->set_throttled_vcp_size(hpo_dp_link_encoder, + hpo_dp_stream_encoder->inst, + throttled_vcp_size); +} + +static void set_dp_hpo_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, + const struct dc_link_settings *link_settings, + struct fixed31_32 throttled_vcp_size) +{ + struct hpo_dp_stream_encoder *hpo_dp_stream_encoder = pipe_ctx->stream_res.hpo_dp_stream_enc; + struct dc_crtc_timing *timing = &pipe_ctx->stream->timing; + struct fixed31_32 h_blank_in_ms, time_slot_in_ms, mtp_cnt_per_h_blank; + uint32_t link_bw_in_kbps = dc_link_bandwidth_kbps(pipe_ctx->stream->link, link_settings); + uint16_t hblank_min_symbol_width = 0; + + if (link_bw_in_kbps > 0) { + h_blank_in_ms = dc_fixpt_div(dc_fixpt_from_int(timing->h_total-timing->h_addressable), + dc_fixpt_from_fraction(timing->pix_clk_100hz, 10)); + time_slot_in_ms = dc_fixpt_from_fraction(32 * 4, link_bw_in_kbps); + mtp_cnt_per_h_blank = dc_fixpt_div(h_blank_in_ms, dc_fixpt_mul_int(time_slot_in_ms, 64)); + hblank_min_symbol_width = dc_fixpt_floor( + 
dc_fixpt_mul(mtp_cnt_per_h_blank, throttled_vcp_size)); + } + + hpo_dp_stream_encoder->funcs->set_hblank_min_symbol_width(hpo_dp_stream_encoder, + hblank_min_symbol_width); +} + +static const struct dc_link_hwss hpo_dp_link_hwss = { + .set_throttled_vcp_size = set_dp_hpo_throttled_vcp_size, + + /* function pointers below this point require check for NULL + * ********************************************************************* + */ + .set_hblank_min_symbol_width = set_dp_hpo_hblank_min_symbol_width, +}; + +static const struct dc_link_hwss dio_link_hwss = { + .set_throttled_vcp_size = set_dio_throttled_vcp_size, +}; + +const struct dc_link_hwss *dc_link_hwss_get(const struct dc_link *link, + const struct link_resource *link_res) +{ + if (link_res->hpo_dp_link_enc) + /* TODO: some assumes that if decided link settings is 128b/132b + * channel coding format hpo_dp_link_enc should be used. + * Others believe that if hpo_dp_link_enc is available in link + * resource then hpo_dp_link_enc must be used. This bound between + * hpo_dp_link_enc != NULL and decided link settings is loosely coupled + * with a premise that both hpo_dp_link_enc pointer and decided link + * settings are determined based on single policy function like + * "decide_link_settings" from upper layer. This "convention" + * cannot be maintained and enforced at current level. + * Therefore a refactor is due so we can enforce a strong bound + * between those two parameters at this level. + * + * To put it simple, we want to make enforcement at low level so that + * we will not return link hwss if caller plans to do 8b/10b + * with an hpo encoder. Or we can return a very dummy one that doesn't + * do work for all functions + */ + return &hpo_dp_link_hwss; + else + return &dio_link_hwss; +} + #undef DC_LOGGER diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h index d9a3a204cc23..36ec56524afd 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/stream_encoder.h @@ -327,6 +327,10 @@ struct hpo_dp_stream_encoder_funcs { void (*read_state)( struct hpo_dp_stream_encoder *enc, struct hpo_dp_stream_encoder_state *state); + + void (*set_hblank_min_symbol_width)( + struct hpo_dp_stream_encoder *enc, + uint16_t width); }; #endif /* STREAM_ENCODER_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index 69d63763a10e..bd3b2b807431 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -72,4 +72,23 @@ void dp_retrain_link_dp_test(struct dc_link *link, struct dc_link_settings *link_setting, bool skip_video_pattern); +struct dc_link; +struct link_resource; +struct fixed31_32; +struct pipe_ctx; + +struct dc_link_hwss { + void (*set_throttled_vcp_size)(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size); + + /* function pointers below this point require check for NULL + * ********************************************************************* + */ + void (*set_hblank_min_symbol_width)(struct pipe_ctx *pipe_ctx, + const struct dc_link_settings *link_settings, + struct fixed31_32 throttled_vcp_size); +}; + +const struct dc_link_hwss *dc_link_hwss_get(const struct dc_link *link, const struct link_resource *link_res); + #endif /* __DC_LINK_HWSS_H__ */ -- cgit From ccba4e5bc856471ed009d92747376ee506fcc6ad Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 19 Jan 2022 16:24:40 +0800 
Subject: drm/amd/display: add more link_hwss types and method to decide which one [why] as we add more link_hwss we are making a generic way to determine which type of link_hwss we should use. Later on we may think of introduce a link policy layer. it could be a thin layer that decide the type of link_hwss we use. So instead of passing in link and link_res we can just pass in link_policy and swtich based on link_policy->get_link_hwss_type. Tested-by: Daniel Wheeler Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 53 +++++++++++++++++++--- drivers/gpu/drm/amd/display/dc/inc/link_hwss.h | 3 ++ 2 files changed, 50 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 93392c67c909..c65955eafaa2 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -844,7 +844,16 @@ void setup_dp_hpo_stream(struct pipe_ctx *pipe_ctx, bool enable) } } -/******************************* dio_link_hwss ********************************/ +static void set_dummy_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size); + +/************************* below goes to dio_link_hwss ************************/ +static bool can_use_dio_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link->link_enc != NULL; +} + static void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size) { @@ -855,7 +864,17 @@ static void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, throttled_vcp_size); } -/***************************** hpo_dp_link_hwss *******************************/ +static const struct dc_link_hwss dio_link_hwss = { + .set_throttled_vcp_size = set_dio_throttled_vcp_size, +}; + +/*********************** below goes to hpo_dp_link_hwss ***********************/ +static bool can_use_dp_hpo_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link_res->hpo_dp_link_enc != NULL; +} + static void set_dp_hpo_throttled_vcp_size(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size) { @@ -898,15 +917,33 @@ static const struct dc_link_hwss hpo_dp_link_hwss = { */ .set_hblank_min_symbol_width = set_dp_hpo_hblank_min_symbol_width, }; +/*********************** below goes to dpia_link_hwss *************************/ +static bool can_use_dpia_link_hwss(const struct dc_link *link, + const struct link_resource *link_res) +{ + return link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign; +} -static const struct dc_link_hwss dio_link_hwss = { - .set_throttled_vcp_size = set_dio_throttled_vcp_size, +static const struct dc_link_hwss dpia_link_hwss = { + .set_throttled_vcp_size = set_dummy_throttled_vcp_size, +}; + +/*********************** below goes to link_hwss ******************************/ +static void set_dummy_throttled_vcp_size(struct pipe_ctx *pipe_ctx, + struct fixed31_32 throttled_vcp_size) +{ + return; +} + +static const struct dc_link_hwss dummy_link_hwss = { + .set_throttled_vcp_size = set_dummy_throttled_vcp_size, }; const struct dc_link_hwss *dc_link_hwss_get(const struct dc_link *link, const struct link_resource *link_res) { - if (link_res->hpo_dp_link_enc) + if (can_use_dp_hpo_link_hwss(link, link_res)) /* TODO: some assumes that if decided link settings is 128b/132b * channel 
coding format hpo_dp_link_enc should be used. * Others believe that if hpo_dp_link_enc is available in link @@ -925,8 +962,12 @@ const struct dc_link_hwss *dc_link_hwss_get(const struct dc_link *link, * do work for all functions */ return &hpo_dp_link_hwss; - else + else if (can_use_dpia_link_hwss(link, link_res)) + return &dpia_link_hwss; + else if (can_use_dio_link_hwss(link, link_res)) return &dio_link_hwss; + else + return &dummy_link_hwss; } #undef DC_LOGGER diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index bd3b2b807431..8eff386da95e 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -78,6 +78,9 @@ struct fixed31_32; struct pipe_ctx; struct dc_link_hwss { + /* you must define a dummy implementation and assign the function to + * dummy_link_hwss if you don't want to check for NULL pointer + */ void (*set_throttled_vcp_size)(struct pipe_ctx *pipe_ctx, struct fixed31_32 throttled_vcp_size); -- cgit From 3ea07c89fbb777669c668452e94275a98dc8afbe Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 19 Jan 2022 16:24:41 +0800 Subject: drm/amd/display: rename dc_link_hwss struct to link_hwss [why] This is internal to dc so we don't have to add a prefix to indicate it is dc link_hwss instead someone else link_hwss. Tested-by: Daniel Wheeler Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 10 +++++----- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 10 +++++----- drivers/gpu/drm/amd/display/dc/inc/link_hwss.h | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index fafe05608818..64584a472f4a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -3434,7 +3434,7 @@ static enum dc_status dc_link_update_sst_payload(struct pipe_ctx *pipe_ctx, struct link_mst_stream_allocation_table proposed_table = {0}; struct fixed31_32 avg_time_slots_per_mtp; const struct dc_link_settings empty_link_settings = {0}; - const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); + const struct link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* slot X.Y for SST payload deallocate */ @@ -3519,7 +3519,7 @@ enum dc_status dc_link_allocate_mst_payload(struct pipe_ctx *pipe_ctx) struct fixed31_32 pbn_per_slot; int i; enum act_return_status ret; - const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); + const struct link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* Link encoder may have been dynamically assigned to non-physical display endpoint. 
*/ @@ -3648,7 +3648,7 @@ enum dc_status dc_link_reduce_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t bw struct dp_mst_stream_allocation_table proposed_table = {0}; uint8_t i; enum act_return_status ret; - const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); + const struct link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* decrease throttled vcp size */ @@ -3730,7 +3730,7 @@ enum dc_status dc_link_increase_mst_payload(struct pipe_ctx *pipe_ctx, uint32_t struct dp_mst_stream_allocation_table proposed_table = {0}; uint8_t i; enum act_return_status ret; - const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); + const struct link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); DC_LOGGER_INIT(link->ctx->logger); /* notify immediate branch device table update */ @@ -3808,7 +3808,7 @@ static enum dc_status deallocate_mst_payload(struct pipe_ctx *pipe_ctx) struct fixed31_32 avg_time_slots_per_mtp = dc_fixpt_from_int(0); int i; bool mst_mode = (link->type == dc_connection_mst_branch); - const struct dc_link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); + const struct link_hwss *link_hwss = dc_link_hwss_get(link, &pipe_ctx->link_res); const struct dc_link_settings empty_link_settings = {0}; DC_LOGGER_INIT(link->ctx->logger); diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index c65955eafaa2..01254b4e7ac6 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -864,7 +864,7 @@ static void set_dio_throttled_vcp_size(struct pipe_ctx *pipe_ctx, throttled_vcp_size); } -static const struct dc_link_hwss dio_link_hwss = { +static const struct link_hwss dio_link_hwss = { .set_throttled_vcp_size = set_dio_throttled_vcp_size, }; @@ -909,7 +909,7 @@ static void set_dp_hpo_hblank_min_symbol_width(struct pipe_ctx *pipe_ctx, hblank_min_symbol_width); } -static const struct dc_link_hwss hpo_dp_link_hwss = { +static const struct link_hwss hpo_dp_link_hwss = { .set_throttled_vcp_size = set_dp_hpo_throttled_vcp_size, /* function pointers below this point require check for NULL @@ -925,7 +925,7 @@ static bool can_use_dpia_link_hwss(const struct dc_link *link, link->dc->res_pool->funcs->link_encs_assign; } -static const struct dc_link_hwss dpia_link_hwss = { +static const struct link_hwss dpia_link_hwss = { .set_throttled_vcp_size = set_dummy_throttled_vcp_size, }; @@ -936,11 +936,11 @@ static void set_dummy_throttled_vcp_size(struct pipe_ctx *pipe_ctx, return; } -static const struct dc_link_hwss dummy_link_hwss = { +static const struct link_hwss dummy_link_hwss = { .set_throttled_vcp_size = set_dummy_throttled_vcp_size, }; -const struct dc_link_hwss *dc_link_hwss_get(const struct dc_link *link, +const struct link_hwss *dc_link_hwss_get(const struct dc_link *link, const struct link_resource *link_res) { if (can_use_dp_hpo_link_hwss(link, link_res)) diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h index 8eff386da95e..5b361924ed8f 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_hwss.h @@ -77,7 +77,7 @@ struct link_resource; struct fixed31_32; struct pipe_ctx; -struct dc_link_hwss { +struct link_hwss { /* you must define a dummy implementation and assign the function to * dummy_link_hwss if you don't want to check for NULL 
pointer */ @@ -92,6 +92,6 @@ struct dc_link_hwss { struct fixed31_32 throttled_vcp_size); }; -const struct dc_link_hwss *dc_link_hwss_get(const struct dc_link *link, const struct link_resource *link_res); +const struct link_hwss *dc_link_hwss_get(const struct dc_link *link, const struct link_resource *link_res); #endif /* __DC_LINK_HWSS_H__ */ -- cgit From 1f6c40d5c0095959a260c014a6251a0ac124c721 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: Wed, 19 Jan 2022 16:24:42 +0800 Subject: drm/amd/display: fix a coding error causing set throttled vcp size skipped for dpia [why] There is a recent refactor that contains a typo to cause set throttled vcp size skipped for dpia link. The change is to fix this typo so it won't cause black screen Tested-by: Daniel Wheeler Reviewed-by: Jun Lei Acked-by: Wayne Lin Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index 01254b4e7ac6..c5146263f93c 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -926,7 +926,7 @@ static bool can_use_dpia_link_hwss(const struct dc_link *link, } static const struct link_hwss dpia_link_hwss = { - .set_throttled_vcp_size = set_dummy_throttled_vcp_size, + .set_throttled_vcp_size = set_dio_throttled_vcp_size, }; /*********************** below goes to link_hwss ******************************/ -- cgit From 09ece5ac397f8e3539ea9ffb776e1cc6702acb9a Mon Sep 17 00:00:00 2001 From: Wayne Lin Date: Wed, 19 Jan 2022 16:24:43 +0800 Subject: drm/amd/display: Don't update drm connector when read local EDID [Why] Currently, when we're tentatively detecting link configuration under mst case, we will wrongly notify a none existing stream sink to drm layer. It's due to we'll call amdgpu_dm_update_connector_after_detect() within dm_helpers_read_local_edid() to update connector when we're still under tentative detection procedure. [How] dm_helpers_read_local_edid() was designed to do "read" only. Don't update drm connector status when we're still under detection process. Tested-by: Daniel Wheeler Reviewed-by: Rodrigo Siqueira Signed-off-by: Wayne Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 8 -------- 1 file changed, 8 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index b32a68a3586a..bfeace371ac1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -648,14 +648,6 @@ enum dc_edid_status dm_helpers_read_local_edid( /* We don't need the original edid anymore */ kfree(edid); - /* connector->display_info is parsed from EDID and saved - * into drm_connector->display_info - * - * drm_connector->display_info will be used by amdgpu_dm funcs, - * like fill_stream_properties_from_drm_display_mode - */ - amdgpu_dm_update_connector_after_detect(aconnector); - edid_status = dm_helpers_parse_edid_caps( link, &sink->dc_edid, -- cgit From d715c9a2cbd4b5dbf72bec82b033698db3c6eca5 Mon Sep 17 00:00:00 2001 From: Martin Tsai Date: Sun, 23 Jan 2022 13:19:58 -0500 Subject: drm/amd/display: add protection in link encoder matching logic [Why] Link encoders are created based on its engine ID. 
The position of a link encoder in an array could be null since it didn't be allocated. Current matching logic didn't consider this situation and could get null encoder. [How] To add null encoder check to make the matching logic can go to next to get a valid one. Tested-by: Daniel Wheeler Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Martin Tsai Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 11 +--- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 66 ++-------------------- .../gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c | 23 +++++++- drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c | 22 ++------ drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 1 - drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h | 3 + 7 files changed, 36 insertions(+), 92 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 8a46b8430f1e..c0ab23274f96 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8366,7 +8366,7 @@ void amdgpu_dm_connector_init_helper(struct amdgpu_display_manager *dm, break; case DRM_MODE_CONNECTOR_DisplayPort: aconnector->base.polled = DRM_CONNECTOR_POLL_HPD; - link->link_enc = dp_get_link_enc(link); + link->link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link->link_enc); if (link->link_enc) aconnector->base.ycbcr_420_allowed = diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 64584a472f4a..147a03a1500b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -4621,16 +4621,7 @@ bool dc_link_is_fec_supported(const struct dc_link *link) */ struct link_encoder *link_enc = NULL; - /* Links supporting dynamically assigned link encoder will be assigned next - * available encoder if one not already assigned. - */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - if (link_enc == NULL) - link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); - } else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); return (dc_is_dp_signal(link->connector_signal) && link_enc && diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 68f9e099beae..a514f19d8f8b 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -261,14 +261,7 @@ static enum dc_dp_training_pattern decide_eq_training_pattern(struct dc_link *li struct dpcd_caps *rx_caps = &link->dpcd_caps; enum dc_dp_training_pattern pattern = DP_TRAINING_PATTERN_SEQUENCE_2; - /* Access link encoder capability based on whether it is statically - * or dynamically assigned to a link. 
- */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); enc_caps = &link_enc->features; @@ -2997,16 +2990,7 @@ bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_ return false; } - /* Links supporting dynamically assigned link encoder will be assigned next - * available encoder if one not already assigned. - */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - if (link_enc == NULL) - link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); - } else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (link_enc && link_enc->funcs->get_max_link_cap) { @@ -3027,16 +3011,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) enum dc_link_rate lttpr_max_link_rate; struct link_encoder *link_enc = NULL; - /* Links supporting dynamically assigned link encoder will be assigned next - * available encoder if one not already assigned. - */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - if (link_enc == NULL) - link_enc = link_enc_cfg_get_next_avail_link_enc(link->ctx->dc); - } else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); /* get max link encoder capability */ @@ -6175,14 +6150,7 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource enum dc_status status = DC_OK; uint8_t fec_config = 0; - /* Access link encoder based on whether it is statically - * or dynamically assigned to a link. - */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (!dc_link_should_enable_fec(link)) @@ -6222,14 +6190,7 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) { struct link_encoder *link_enc = NULL; - /* Access link encoder based on whether it is statically - * or dynamically assigned to a link. 
- */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (!dc_link_should_enable_fec(link)) @@ -6255,23 +6216,6 @@ void dp_set_fec_enable(struct dc_link *link, bool enable) } } -struct link_encoder *dp_get_link_enc(struct dc_link *link) -{ - struct link_encoder *link_enc; - - link_enc = link->link_enc; - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) { - link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, - link); - if (!link->link_enc) - link_enc = link_enc_cfg_get_next_avail_link_enc( - link->ctx->dc); - } - - return link_enc; -} - void dpcd_set_source_specific_data(struct dc_link *link) { if (!link->dc->vendor_signature.is_valid) { diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index a55944da8d53..a380611b1e42 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -486,7 +486,8 @@ struct link_encoder *link_enc_cfg_get_next_avail_link_enc(struct dc *dc) } for (i = 0; i < dc->res_pool->res_cap->num_dig_link_enc; i++) { - if (encs_assigned[i] == ENGINE_ID_UNKNOWN) { + if (encs_assigned[i] == ENGINE_ID_UNKNOWN && + dc->res_pool->link_encoders[i] != NULL) { link_enc = dc->res_pool->link_encoders[i]; break; } @@ -506,6 +507,26 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( return link_enc; } +struct link_encoder *link_enc_cfg_get_link_enc( + const struct dc_link *link) +{ + struct link_encoder *link_enc = NULL; + + /* Links supporting dynamically assigned link encoder will be assigned next + * available encoder if one not already assigned. + */ + if (link->is_dig_mapping_flexible && + link->dc->res_pool->funcs->link_encs_assign) { + link_enc = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); + if (link_enc == NULL) + link_enc = link_enc_cfg_get_next_avail_link_enc( + link->ctx->dc); + } else + link_enc = link->link_enc; + + return link_enc; +} + bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link) { bool is_avail = true; diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c index c5146263f93c..5e9054fdb339 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_hwss.c @@ -86,11 +86,7 @@ void dp_enable_link_phy( link->dc->res_pool->dp_clock_source; unsigned int i; - /* Link should always be assigned encoder when en-/disabling. */ - if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link); - else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (link->connector_signal == SIGNAL_TYPE_EDP) { @@ -228,11 +224,7 @@ void dp_disable_link_phy(struct dc_link *link, const struct link_resource *link_ struct hpo_dp_link_encoder *hpo_link_enc = link_res->hpo_dp_link_enc; struct link_encoder *link_enc; - /* Link should always be assigned encoder when en-/disabling. 
*/ - if (link->is_dig_mapping_flexible && dc->res_pool->funcs->link_encs_assign) - link_enc = link_enc_cfg_get_link_enc_used_by_link(dc, link); - else - link_enc = link->link_enc; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); if (!link->wa_flags.dp_keep_receiver_powered) @@ -360,14 +352,8 @@ void dp_set_hw_test_pattern( struct link_encoder *encoder; enum dp_link_encoding link_encoding_format = dp_get_link_encoding_format(&link->cur_link_settings); - /* Access link encoder based on whether it is statically - * or dynamically assigned to a link. - */ - if (link->is_dig_mapping_flexible && - link->dc->res_pool->funcs->link_encs_assign) - encoder = link_enc_cfg_get_link_enc_used_by_link(link->ctx->dc, link); - else - encoder = link->link_enc; + encoder = link_enc_cfg_get_link_enc(link); + ASSERT(encoder); pattern_param.dp_phy_pattern = test_pattern; pattern_param.custom_pattern = custom_pattern; diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index b18c8198f66d..b7c5c42d67ed 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -171,7 +171,6 @@ uint8_t dc_dp_initialize_scrambling_data_symbols( enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource *link_res, bool ready); void dp_set_fec_enable(struct dc_link *link, bool enable); -struct link_encoder *dp_get_link_enc(struct dc_link *link); bool dp_set_dsc_enable(struct pipe_ctx *pipe_ctx, bool enable); bool dp_set_dsc_pps_sdp(struct pipe_ctx *pipe_ctx, bool enable, bool immediate_update); void dp_set_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable); diff --git a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h index a4e43b4826e0..c2f08514a1d9 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link_enc_cfg.h @@ -96,6 +96,9 @@ struct link_encoder *link_enc_cfg_get_link_enc_used_by_stream( struct dc *dc, const struct dc_stream_state *stream); +/* Return DIG link encoder. NULL if unused. */ +struct link_encoder *link_enc_cfg_get_link_enc(const struct dc_link *link); + /* Return true if encoder available to use. 
*/ bool link_enc_cfg_is_link_enc_avail(struct dc *dc, enum engine_id eng_id, struct dc_link *link); -- cgit From 9506b8d9626f4fbc230dad30cea1b3b095f1e4d4 Mon Sep 17 00:00:00 2001 From: Josip Pavic Date: Sun, 23 Jan 2022 13:19:59 -0500 Subject: drm/amd/display: do not compare integers of different widths [Why & How] Increase width of some variables to avoid comparing integers of different widths Tested-by: Daniel Wheeler Reviewed-by: Anthony Koo Acked-by: Rodrigo Siqueira Signed-off-by: Josip Pavic Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 147a03a1500b..f8f363773920 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -4744,7 +4744,7 @@ const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link) void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) { struct dc_link *link; - uint8_t i; + uint32_t i; uint32_t hpo_dp_recycle_map = 0; *map = 0; @@ -4785,7 +4785,7 @@ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map) void dc_restore_link_res_map(const struct dc *dc, uint32_t *map) { struct dc_link *link; - uint8_t i; + uint32_t i; unsigned int available_hpo_dp_count; uint32_t hpo_dp_recycle_map = (*map & LINK_RES_HPO_DP_REC_MAP__MASK) >> LINK_RES_HPO_DP_REC_MAP__SHIFT; -- cgit From 2eee829ed48747181b17f3f8d458b23981f2bb7b Mon Sep 17 00:00:00 2001 From: Alvin Lee Date: Sun, 23 Jan 2022 13:20:00 -0500 Subject: drm/amd/display: Driver support for MCLK query tool Implement handling for escape call to query the MCLK switch support for the current display config. Tested-by: Daniel Wheeler Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Alvin Lee Signed-off-by: Alex Deucher --- .../gpu/drm/amd/display/dc/dml/display_mode_vba.c | 24 ++++++++++++++++++++++ .../gpu/drm/amd/display/dc/dml/display_mode_vba.h | 1 + 2 files changed, 25 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c index 0fad15020c74..c0740dbdcc2e 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.c @@ -47,6 +47,7 @@ static void recalculate_params( unsigned int num_pipes); static unsigned int CursorBppEnumToBits(enum cursor_bpp ebpp); +static void cache_debug_params(struct display_mode_lib *mode_lib); unsigned int dml_get_voltage_level( struct display_mode_lib *mode_lib, @@ -73,6 +74,7 @@ unsigned int dml_get_voltage_level( PixelClockAdjustmentForProgressiveToInterlaceUnit(mode_lib); } mode_lib->funcs.validate(mode_lib); + cache_debug_params(mode_lib); return mode_lib->vba.VoltageLevel; } @@ -745,6 +747,28 @@ static void fetch_pipe_params(struct display_mode_lib *mode_lib) mode_lib->vba.HostVMEnable = mode_lib->vba.HostVMEnable && !!ip->hostvm_enable; } +/** + * ******************************************************************************************** + * cache_debug_params: Cache any params that needed to be maintained from the initial validation + * for debug purposes. 
+ * + * The DML getters can modify some of the VBA params that we are interested in (for example when + * calculating with dummy p-state latency), so cache any params here that we want for debugging + * + * @param [in] mode_lib: mode_lib input/output of validate call + * + * @return: void + * + * ******************************************************************************************** + */ +static void cache_debug_params(struct display_mode_lib *mode_lib) +{ + int k = 0; + + for (k = 0; k < mode_lib->vba.NumberOfActivePlanes; k++) + mode_lib->vba.CachedActiveDRAMClockChangeLatencyMargin[k] = mode_lib->vba.ActiveDRAMClockChangeLatencyMargin[k]; +} + // in wm mode we pull the parameters needed from the display_e2e_pipe_params_st structs // rather than working them out as in recalculate_ms static void recalculate_params( diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h index 8fe74a3b39a8..0603b32971a6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_vba.h @@ -195,6 +195,7 @@ struct vba_vars_st { unsigned int LBLatencyHidingSourceLinesY; unsigned int LBLatencyHidingSourceLinesC; double ActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX]; + double CachedActiveDRAMClockChangeLatencyMargin[DC__NUM_DPP__MAX]; // Cache in dml_get_voltage_level for debug purposes only double MinActiveDRAMClockChangeMargin; double InitFillLevel; double FinalFillMargin; -- cgit From e2d8ea4320c6fcf9f042e432232240f858ae9ed8 Mon Sep 17 00:00:00 2001 From: Varone Date: Sun, 23 Jan 2022 13:20:01 -0500 Subject: drm/amd/display: Change error to warning when hpd remains low for eDP [WHY?] SKUs that contain an unused eDP connector will throw an error when no display is connected. [HOW?] Change error to a warning. Tested-by: Daniel Wheeler Reviewed-by: Martin Leung Acked-by: Rodrigo Siqueira Signed-off-by: Dillon Varone Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c index b83c5544247d..72dd41e7a7d6 100644 --- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c +++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c @@ -793,7 +793,7 @@ void dce110_edp_wait_for_hpd_ready( dal_gpio_destroy_irq(&hpd); if (false == edp_hpd_high) { - DC_LOG_ERROR( + DC_LOG_WARNING( "%s: wait timed out!\n", __func__); } } -- cgit From 1210b17dd4ece454d68a9283f391e3b036aeb010 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Sun, 23 Jan 2022 13:20:02 -0500 Subject: drm/amd/display: Only set PSR version when valid [Why] DMCUB will hang if we send a PSR unsupported set version command. This can occur if we fall-through into the default case in the switch statement for PSR version. [How] Add an unsupported check after the switch statement. 
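For illustration, a minimal stand-alone sketch of the guard being added (the enum values and command struct below are simplified stand-ins rather than the real DMUB definitions; only the shape of the check mirrors the diff that follows):

#include <stdbool.h>

enum psr_version { PSR_VERSION_1, PSR_VERSION_SU_1, PSR_VERSION_UNSUPPORTED };

/* Simplified model of the set-version command payload. */
struct set_version_cmd {
	enum psr_version version;
};

static bool build_psr_set_version(int dpcd_psr_version, struct set_version_cmd *cmd)
{
	switch (dpcd_psr_version) {
	case 1:
		cmd->version = PSR_VERSION_1;
		break;
	case 2:
		cmd->version = PSR_VERSION_SU_1;
		break;
	default:
		/* Fall-through case that previously still reached the firmware. */
		cmd->version = PSR_VERSION_UNSUPPORTED;
		break;
	}

	/* The added check: never send an unsupported version to DMCUB. */
	if (cmd->version == PSR_VERSION_UNSUPPORTED)
		return false;

	return true;
}
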
Tested-by: Daniel Wheeler Reviewed-by: Anthony Koo Acked-by: Rodrigo Siqueira Signed-off-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c index 87ed48d5530d..8bd265b40847 100644 --- a/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c +++ b/drivers/gpu/drm/amd/display/dc/dce/dmub_psr.c @@ -138,6 +138,10 @@ static bool dmub_psr_set_version(struct dmub_psr *dmub, struct dc_stream_state * cmd.psr_set_version.psr_set_version_data.version = PSR_VERSION_UNSUPPORTED; break; } + + if (cmd.psr_set_version.psr_set_version_data.version == PSR_VERSION_UNSUPPORTED) + return false; + cmd.psr_set_version.psr_set_version_data.cmd_version = DMUB_CMD_PSR_CONTROL_VERSION_1; cmd.psr_set_version.psr_set_version_data.panel_inst = panel_inst; cmd.psr_set_version.header.payload_bytes = sizeof(struct dmub_cmd_psr_set_version_data); -- cgit From 87e298d6e3a2169831a2f4a29e35575ee5747036 Mon Sep 17 00:00:00 2001 From: Ian Chen Date: Sun, 23 Jan 2022 13:20:03 -0500 Subject: drm/amd/display: Change return type of dm_helpers_dp_mst_stop_top_mgr Prepare for future dm can have different implementation depends on the return value. Tested-by: Daniel Wheeler Reviewed-by: Wenjing Liu Acked-by: Rodrigo Siqueira Signed-off-by: Ian Chen Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 24 ++++------------------ drivers/gpu/drm/amd/display/dc/core/dc_link.c | 14 +++++++------ drivers/gpu/drm/amd/display/dc/dm_helpers.h | 2 +- 3 files changed, 13 insertions(+), 27 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index bfeace371ac1..575840c4db58 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -445,40 +445,24 @@ bool dm_helpers_dp_mst_start_top_mgr( return (drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, true) == 0); } -void dm_helpers_dp_mst_stop_top_mgr( +bool dm_helpers_dp_mst_stop_top_mgr( struct dc_context *ctx, struct dc_link *link) { struct amdgpu_dm_connector *aconnector = link->priv; - uint8_t i; if (!aconnector) { DRM_ERROR("Failed to find connector for link!"); - return; + return false; } DRM_INFO("DM_MST: stopping TM on aconnector: %p [id: %d]\n", aconnector, aconnector->base.base.id); - if (aconnector->mst_mgr.mst_state == true) { + if (aconnector->mst_mgr.mst_state == true) drm_dp_mst_topology_mgr_set_mst(&aconnector->mst_mgr, false); - for (i = 0; i < MAX_SINKS_PER_LINK; i++) { - if (link->remote_sinks[i] == NULL) - continue; - - if (link->remote_sinks[i]->sink_signal == - SIGNAL_TYPE_DISPLAY_PORT_MST) { - dc_link_remove_remote_sink(link, link->remote_sinks[i]); - - if (aconnector->dc_sink) { - dc_sink_release(aconnector->dc_sink); - aconnector->dc_sink = NULL; - aconnector->dc_link->cur_link_settings.lane_count = 0; - } - } - } - } + return false; } bool dm_helpers_dp_read_dpcd( diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index f8f363773920..08fbf0ab047a 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -840,20 +840,22 @@ static bool discover_dp_mst_topology(struct dc_link *link, enum dc_detect_reason return link->type == dc_connection_mst_branch; } -static void 
reset_cur_dp_mst_topology(struct dc_link *link) +static bool reset_cur_dp_mst_topology(struct dc_link *link) { + bool result = false; DC_LOGGER_INIT(link->ctx->logger); LINK_INFO("link=%d, mst branch is now Disconnected\n", link->link_index); revert_dpia_mst_dsc_always_on_wa(link); - dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); + result = dm_helpers_dp_mst_stop_top_mgr(link->ctx, link); link->mst_stream_alloc_table.stream_count = 0; memset(link->mst_stream_alloc_table.stream_allocations, 0, sizeof(link->mst_stream_alloc_table.stream_allocations)); + return result; } static bool should_prepare_phy_clocks_for_link_verification(const struct dc *dc, @@ -1306,7 +1308,7 @@ static bool detect_link_and_local_sink(struct dc_link *link, bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) { bool is_local_sink_detect_success; - bool is_remote_sink_detect_required = false; + bool is_delegated_to_mst_top_mgr = false; enum dc_connection_type pre_link_type = link->type; is_local_sink_detect_success = detect_link_and_local_sink(link, reason); @@ -1317,14 +1319,14 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason) if (is_local_sink_detect_success && link->local_sink && dc_is_dp_signal(link->local_sink->sink_signal) && link->dpcd_caps.is_mst_capable) - is_remote_sink_detect_required = discover_dp_mst_topology(link, reason); + is_delegated_to_mst_top_mgr = discover_dp_mst_topology(link, reason); if (is_local_sink_detect_success && pre_link_type == dc_connection_mst_branch && link->type != dc_connection_mst_branch) - reset_cur_dp_mst_topology(link); + is_delegated_to_mst_top_mgr = reset_cur_dp_mst_topology(link); - return is_local_sink_detect_success && !is_remote_sink_detect_required; + return is_local_sink_detect_success && !is_delegated_to_mst_top_mgr; } bool dc_link_get_hpd_state(struct dc_link *dc_link) diff --git a/drivers/gpu/drm/amd/display/dc/dm_helpers.h b/drivers/gpu/drm/amd/display/dc/dm_helpers.h index a5497ba89f15..b71b5fb894e2 100644 --- a/drivers/gpu/drm/amd/display/dc/dm_helpers.h +++ b/drivers/gpu/drm/amd/display/dc/dm_helpers.h @@ -113,7 +113,7 @@ bool dm_helpers_dp_mst_start_top_mgr( const struct dc_link *link, bool boot); -void dm_helpers_dp_mst_stop_top_mgr( +bool dm_helpers_dp_mst_stop_top_mgr( struct dc_context *ctx, struct dc_link *link); /** -- cgit From b80ddeb29d9df449f875f0b6f5de08d7537c02b8 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Sun, 23 Jan 2022 13:20:04 -0500 Subject: drm/amd/display: Use PSR version selected during set_psr_caps [Why] If the DPCD caps specifies a PSR version newer than PSR_VERSION_1 then we fallback to using PSR_VERSION_1 in amdgpu_dm_set_psr_caps. This gets overriden with the raw DPCD value in amdgpu_dm_link_setup_psr, which can result in DMCUB hanging if we pass in an unsupported PSR version number. [How] Fix the hang by using link->psr_settings.psr_version directly during amdgpu_dm_link_setup_psr. 
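As a stand-alone illustration of the idea (the struct and helper below are invented stand-ins; the real fields are link->psr_settings.psr_version and link->dpcd_caps.psr_caps.psr_version as used in the diff):

#include <stdbool.h>
#include <stdint.h>

enum psr_version_model { MODEL_PSR_UNSUPPORTED = 0, MODEL_PSR_VERSION_1 = 1 };

struct link_model {
	enum psr_version_model selected_version; /* sanitized earlier, in set_psr_caps */
	uint8_t raw_dpcd_version;                /* raw sink capability, possibly too new */
};

static bool setup_psr_config(const struct link_model *link, enum psr_version_model *out)
{
	/* Old behaviour consumed the raw DPCD value, which may name a version the
	 * firmware cannot handle; the fix consumes the sanitized selection instead. */
	if (link->selected_version == MODEL_PSR_UNSUPPORTED)
		return false;

	*out = link->selected_version;
	return true;
}
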
Tested-by: Daniel Wheeler Reviewed-by: Anthony Koo Acked-by: Rodrigo Siqueira Signed-off-by: Nicholas Kazlauskas Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c index c510638b4f99..a009fc654ac9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c @@ -149,10 +149,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream) link = stream->link; - psr_config.psr_version = link->dpcd_caps.psr_caps.psr_version; - - if (psr_config.psr_version > 0) { - psr_config.psr_exit_link_training_required = 0x1; + if (link->psr_settings.psr_version != DC_PSR_VERSION_UNSUPPORTED) { + psr_config.psr_version = link->psr_settings.psr_version; psr_config.psr_frame_capture_indication_req = 0; psr_config.psr_rfb_setup_time = 0x37; psr_config.psr_sdp_transmit_line_num_deadline = 0x20; -- cgit From 4a3ad932b2c538c62e4a60dd4b7411f802e018ec Mon Sep 17 00:00:00 2001 From: "Shen, George" Date: Sun, 23 Jan 2022 13:20:05 -0500 Subject: drm/amd/display: Add Cable ID support for native DP [Why/How] We need to handle cable capabilities for cables that support cable ID. The cable attributes are intersected with the verified link caps to determine appropriate max link rate. After determining cable attributes we update the DP receiver. Tested-by: Daniel Wheeler Reviewed-by: Wenjing Liu Acked-by: Rodrigo Siqueira Signed-off-by: George Shen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 5 ++ drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 59 ++++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/dc.h | 2 + drivers/gpu/drm/amd/display/dc/dc_dp_types.h | 16 +++++++ drivers/gpu/drm/amd/display/dc/dc_link.h | 7 +++ drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 3 ++ 6 files changed, 92 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 08fbf0ab047a..47cced994bfb 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -1291,6 +1291,8 @@ static bool detect_link_and_local_sink(struct dc_link *link, * Clear dongle_max_pix_clk on disconnect to fix this */ link->dongle_max_pix_clk = 0; + + dc_link_dp_clear_rx_status(link); } LINK_INFO("link=%d, dc_sink_in=%p is now %s prev_sink=%p edid same=%d\n", @@ -1970,6 +1972,9 @@ static enum dc_status enable_link_dp(struct dc_state *state, if (link->dpcd_sink_ext_caps.raw != 0) msleep(post_oui_delay); + // similarly, mode switch can cause loss of cable ID + dpcd_update_cable_id(link); + skip_video_pattern = true; if (link_settings.link_rate == LINK_RATE_LOW) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index a514f19d8f8b..062bdbadc781 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -2981,6 +2981,20 @@ static enum dc_link_rate get_lttpr_max_link_rate(struct dc_link *link) return lttpr_max_link_rate; } +static enum dc_link_rate get_cable_max_link_rate(struct dc_link *link) +{ + enum dc_link_rate cable_max_link_rate = LINK_RATE_HIGH3; + + if (link->dpcd_caps.cable_attributes.bits.UHBR10_20_CAPABILITY & DP_UHBR20) + cable_max_link_rate = LINK_RATE_UHBR20; + else if 
(link->dpcd_caps.cable_attributes.bits.UHBR13_5_CAPABILITY) + cable_max_link_rate = LINK_RATE_UHBR13_5; + else if (link->dpcd_caps.cable_attributes.bits.UHBR10_20_CAPABILITY & DP_UHBR10) + cable_max_link_rate = LINK_RATE_UHBR10; + + return cable_max_link_rate; +} + bool dc_link_dp_get_max_link_enc_cap(const struct dc_link *link, struct dc_link_settings *max_link_enc_cap) { struct link_encoder *link_enc = NULL; @@ -3009,8 +3023,10 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) { struct dc_link_settings max_link_cap = {0}; enum dc_link_rate lttpr_max_link_rate; + enum dc_link_rate cable_max_link_rate; struct link_encoder *link_enc = NULL; + link_enc = link_enc_cfg_get_link_enc(link); ASSERT(link_enc); @@ -3029,6 +3045,14 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link) max_link_cap.link_spread) max_link_cap.link_spread = link->reported_link_cap.link_spread; + + /* Lower link settings based on cable attributes */ + cable_max_link_rate = get_cable_max_link_rate(link); + + if (!link->dc->debug.ignore_cable_id && + cable_max_link_rate < max_link_cap.link_rate) + max_link_cap.link_rate = cable_max_link_rate; + /* * account for lttpr repeaters cap * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3). @@ -5059,6 +5083,13 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) return is_lttpr_present; } + +static bool is_usbc_connector(struct dc_link *link) +{ + return link->link_enc && + link->link_enc->features.flags.bits.DP_IS_USB_C; +} + static bool retrieve_link_cap(struct dc_link *link) { /* DP_ADAPTER_CAP - DP_DPCD_REV + 1 == 16 and also DP_DSC_BITS_PER_PIXEL_INC - DP_DSC_SUPPORT + 1 == 16, @@ -5115,6 +5146,9 @@ static bool retrieve_link_cap(struct dc_link *link) */ msleep(post_oui_delay); + /* Read cable ID and update receiver */ + dpcd_update_cable_id(link); + for (i = 0; i < read_dpcd_retry_cnt; i++) { status = core_link_read_dpcd( link, @@ -6292,6 +6326,26 @@ void dpcd_set_source_specific_data(struct dc_link *link) } } +void dpcd_update_cable_id(struct dc_link *link) +{ + if (!link->link_enc->features.flags.bits.IS_UHBR10_CAPABLE || + link->dprx_status.cable_id_updated) + return; + + /* Retrieve cable attributes */ + if (!is_usbc_connector(link)) + core_link_read_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX, + &link->dpcd_caps.cable_attributes.raw, + sizeof(uint8_t)); + + /* Update receiver with cable attributes */ + core_link_write_dpcd(link, DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX, + &link->dpcd_caps.cable_attributes.raw, + sizeof(link->dpcd_caps.cable_attributes.raw)); + + link->dprx_status.cable_id_updated = 1; +} + bool dc_link_set_backlight_level_nits(struct dc_link *link, bool isHDR, uint32_t backlight_millinits, @@ -6689,3 +6743,8 @@ void edp_panel_backlight_power_on(struct dc_link *link) if (link->dc->hwss.edp_backlight_control) link->dc->hwss.edp_backlight_control(link, true); } + +void dc_link_dp_clear_rx_status(struct dc_link *link) +{ + memset(&link->dprx_status, 0, sizeof(link->dprx_status)); +} diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index ff5093e52f2d..0dc183d6af5d 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -687,6 +687,7 @@ struct dc_debug_options { bool set_mst_en_for_sst; bool disable_uhbr; bool force_dp2_lt_fallback_method; + bool ignore_cable_id; union mem_low_power_enable_options enable_mem_low_power; union root_clock_optimization_options root_clock_optimization; bool hpo_optimization; @@ -1234,6 +1235,7 @@ struct 
dpcd_caps { union dp_main_line_channel_coding_cap channel_coding_cap; union dp_sink_video_fallback_formats fallback_formats; union dp_fec_capability1 fec_cap1; + union dp_cable_attributes cable_attributes; }; union dpcd_sink_ext_caps { diff --git a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h index 7b1103e9f7a2..772084406795 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_dp_types.h +++ b/drivers/gpu/drm/amd/display/dc/dc_dp_types.h @@ -901,6 +901,9 @@ struct dpcd_usb4_dp_tunneling_info { #ifndef DP_LINK_SQUARE_PATTERN #define DP_LINK_SQUARE_PATTERN 0x10F #endif +#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX +#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPTX 0x110 +#endif #ifndef DP_DSC_CONFIGURATION #define DP_DSC_CONFIGURATION 0x161 #endif @@ -913,6 +916,9 @@ struct dpcd_usb4_dp_tunneling_info { #ifndef DP_128b_132b_TRAINING_AUX_RD_INTERVAL #define DP_128b_132b_TRAINING_AUX_RD_INTERVAL 0x2216 #endif +#ifndef DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX +#define DP_CABLE_ATTRIBUTES_UPDATED_BY_DPRX 0x2217 +#endif #ifndef DP_TEST_264BIT_CUSTOM_PATTERN_7_0 #define DP_TEST_264BIT_CUSTOM_PATTERN_7_0 0X2230 #endif @@ -1012,6 +1018,16 @@ union dp_fec_capability1 { uint8_t raw; }; +union dp_cable_attributes { + struct { + uint8_t UHBR10_20_CAPABILITY :2; + uint8_t UHBR13_5_CAPABILITY :1; + uint8_t CABLE_TYPE :3; + uint8_t RESERVED :2; + } bits; + uint8_t raw; +}; + struct dp_color_depth_caps { uint8_t support_6bpc :1; uint8_t support_8bpc :1; diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index 9014c0a0a63b..b1c79b3f26aa 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -43,6 +43,10 @@ struct dc_link_status { struct dpcd_caps *dpcd_caps; }; +struct dp_receiver_status { + bool cable_id_updated; +}; + /* DP MST stream allocation (payload bandwidth number) */ struct link_mst_stream_allocation { /* DIG front */ @@ -201,6 +205,7 @@ struct dc_link { struct link_mst_stream_allocation_table mst_stream_alloc_table; struct dc_link_status link_status; + struct dp_receiver_status dprx_status; struct link_trace link_trace; struct gpio *hpd_gpio; @@ -459,4 +464,6 @@ const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link) void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map); /* restore link resource allocation state from a snapshot */ void dc_restore_link_res_map(const struct dc *dc, uint32_t *map); + +void dc_link_dp_clear_rx_status(struct dc_link *link); #endif /* DC_LINK_H_ */ diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h index b7c5c42d67ed..3ed2dbbf5642 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h +++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h @@ -112,6 +112,9 @@ void dp_set_panel_mode(struct dc_link *link, enum dp_panel_mode panel_mode); bool dp_overwrite_extended_receiver_cap(struct dc_link *link); void dpcd_set_source_specific_data(struct dc_link *link); + +void dpcd_update_cable_id(struct dc_link *link); + /* Write DPCD link configuration data. 
*/ enum dc_status dpcd_set_link_settings( struct dc_link *link, -- cgit From 2ca97adccdc90247c907541089e85101b2d87574 Mon Sep 17 00:00:00 2001 From: Fangzhi Zuo Date: Sun, 23 Jan 2022 13:20:06 -0500 Subject: drm/amd/display: Add Synaptics Fifo Reset Workaround Sequence to reset synaptics SDP fifo before enabling first stream Tested-by: Daniel Wheeler Reviewed-by: Hersen Wu Acked-by: Rodrigo Siqueira Signed-off-by: Fangzhi Zuo Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 133 +++++++++++++++++++++ .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 2 +- .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.h | 10 ++ 3 files changed, 144 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 575840c4db58..8554c93c9fa9 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -536,6 +536,139 @@ bool dm_helpers_submit_i2c( return result; } + +static bool execute_synatpics_rc_command(struct drm_dp_aux *aux, + bool is_write_cmd, + unsigned char cmd, + unsigned int length, + unsigned int offset, + unsigned char *data) +{ + bool success = false; + unsigned char rc_data[16] = {0}; + unsigned char rc_offset[4] = {0}; + unsigned char rc_length[2] = {0}; + unsigned char rc_cmd = 0; + unsigned char rc_result = 0xFF; + unsigned char i = 0; + uint8_t ret = 0; + + if (is_write_cmd) { + // write rc data + memmove(rc_data, data, length); + ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_DATA, rc_data, sizeof(rc_data)); + } + + // write rc offset + rc_offset[0] = (unsigned char) offset & 0xFF; + rc_offset[1] = (unsigned char) (offset >> 8) & 0xFF; + rc_offset[2] = (unsigned char) (offset >> 16) & 0xFF; + rc_offset[3] = (unsigned char) (offset >> 24) & 0xFF; + ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_OFFSET, rc_offset, sizeof(rc_offset)); + + // write rc length + rc_length[0] = (unsigned char) length & 0xFF; + rc_length[1] = (unsigned char) (length >> 8) & 0xFF; + ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_LENGTH, rc_length, sizeof(rc_length)); + + // write rc cmd + rc_cmd = cmd | 0x80; + ret = drm_dp_dpcd_write(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); + + if (ret < 0) { + DRM_ERROR(" execute_synatpics_rc_command - write cmd ..., err = %d\n", ret); + return false; + } + + // poll until active is 0 + for (i = 0; i < 10; i++) { + drm_dp_dpcd_read(aux, SYNAPTICS_RC_COMMAND, &rc_cmd, sizeof(rc_cmd)); + if (rc_cmd == cmd) + // active is 0 + break; + msleep(10); + } + + // read rc result + drm_dp_dpcd_read(aux, SYNAPTICS_RC_RESULT, &rc_result, sizeof(rc_result)); + success = (rc_result == 0); + + if (success && !is_write_cmd) { + // read rc data + drm_dp_dpcd_read(aux, SYNAPTICS_RC_DATA, data, length); + } + + DC_LOG_DC(" execute_synatpics_rc_command - success = %d\n", success); + + return success; +} + +static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) +{ + unsigned char data[16] = {0}; + + DC_LOG_DC("Start apply_synaptics_fifo_reset_wa\n"); + + // Step 2 + data[0] = 'P'; + data[1] = 'R'; + data[2] = 'I'; + data[3] = 'U'; + data[4] = 'S'; + + if (!execute_synatpics_rc_command(aux, true, 0x01, 5, 0, data)) + return; + + // Step 3 and 4 + if (!execute_synatpics_rc_command(aux, false, 0x31, 4, 0x220998, data)) + return; + + data[0] &= (~(1 << 1)); // set bit 1 to 0 + if (!execute_synatpics_rc_command(aux, true, 0x21, 4, 0x220998, data)) + return; + + if 
(!execute_synatpics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) + return; + + data[0] &= (~(1 << 1)); // set bit 1 to 0 + if (!execute_synatpics_rc_command(aux, true, 0x21, 4, 0x220D98, data)) + return; + + if (!execute_synatpics_rc_command(aux, false, 0x31, 4, 0x221198, data)) + return; + + data[0] &= (~(1 << 1)); // set bit 1 to 0 + if (!execute_synatpics_rc_command(aux, true, 0x21, 4, 0x221198, data)) + return; + + // Step 3 and 5 + if (!execute_synatpics_rc_command(aux, false, 0x31, 4, 0x220998, data)) + return; + + data[0] |= (1 << 1); // set bit 1 to 1 + if (!execute_synatpics_rc_command(aux, true, 0x21, 4, 0x220998, data)) + return; + + if (!execute_synatpics_rc_command(aux, false, 0x31, 4, 0x220D98, data)) + return; + + data[0] |= (1 << 1); // set bit 1 to 1 + return; + + if (!execute_synatpics_rc_command(aux, false, 0x31, 4, 0x221198, data)) + return; + + data[0] |= (1 << 1); // set bit 1 to 1 + if (!execute_synatpics_rc_command(aux, true, 0x21, 4, 0x221198, data)) + return; + + // Step 6 + if (!execute_synatpics_rc_command(aux, true, 0x02, 0, 0, NULL)) + return; + + DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); +} + bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index cc34a35d0bcb..23cc6a6fe70e 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -159,7 +159,7 @@ static const struct drm_connector_funcs dm_dp_mst_connector_funcs = { }; #if defined(CONFIG_DRM_AMD_DC_DCN) -static bool needs_dsc_aux_workaround(struct dc_link *link) +bool needs_dsc_aux_workaround(struct dc_link *link) { if (link->dpcd_caps.branch_dev_id == DP_BRANCH_DEVICE_ID_90CC24 && (link->dpcd_caps.dpcd_rev.raw == DPCD_REV_14 || link->dpcd_caps.dpcd_rev.raw == DPCD_REV_12) && diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h index 900d3f7a8498..5da28ca03372 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.h @@ -26,6 +26,14 @@ #ifndef __DAL_AMDGPU_DM_MST_TYPES_H__ #define __DAL_AMDGPU_DM_MST_TYPES_H__ +#define DP_BRANCH_DEVICE_ID_90CC24 0x90CC24 + +#define SYNAPTICS_RC_COMMAND 0x4B2 +#define SYNAPTICS_RC_RESULT 0x4B3 +#define SYNAPTICS_RC_LENGTH 0x4B8 +#define SYNAPTICS_RC_OFFSET 0x4BC +#define SYNAPTICS_RC_DATA 0x4C0 + struct amdgpu_display_manager; struct amdgpu_dm_connector; @@ -50,6 +58,8 @@ struct dsc_mst_fairness_vars { bool compute_mst_dsc_configs_for_state(struct drm_atomic_state *state, struct dc_state *dc_state, struct dsc_mst_fairness_vars *vars); + +bool needs_dsc_aux_workaround(struct dc_link *link); #endif #endif -- cgit From 09db246ceef70bc6bd9c3e0d02b3c855f8fc25fb Mon Sep 17 00:00:00 2001 From: Fangzhi Zuo Date: Sun, 23 Jan 2022 13:20:07 -0500 Subject: drm/amd/display: Retrieve MST Downstream Port Status Determine if DFP present and the type of downstream device based on dsc_aux Tested-by: Daniel Wheeler Reviewed-by: Hersen Wu Acked-by: Rodrigo Siqueira Signed-off-by: Fangzhi Zuo Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 1 + .../amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 23 ++++++++++++++++++++++ 2 files changed, 24 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h 
b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index b9a69b0cef23..e35977fda5c1 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -604,6 +604,7 @@ struct amdgpu_dm_connector { #endif bool force_yuv420_output; struct dsc_preferred_settings dsc_settings; + union dp_downstream_port_present mst_downstream_port_present; /* Cached display modes */ struct drm_display_mode freesync_vid_base; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c index 23cc6a6fe70e..8e97d21bdf5c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c @@ -209,6 +209,25 @@ static bool validate_dsc_caps_on_connector(struct amdgpu_dm_connector *aconnecto return true; } + +bool retrieve_downstream_port_device(struct amdgpu_dm_connector *aconnector) +{ + union dp_downstream_port_present ds_port_present; + + if (!aconnector->dsc_aux) + return false; + + if (drm_dp_dpcd_read(aconnector->dsc_aux, DP_DOWNSTREAMPORT_PRESENT, &ds_port_present, 1) < 0) { + DRM_INFO("Failed to read downstream_port_present 0x05 from DFP of branch device\n"); + return false; + } + + aconnector->mst_downstream_port_present = ds_port_present; + DRM_INFO("Downstream port present %d, type %d\n", + ds_port_present.fields.PORT_PRESENT, ds_port_present.fields.PORT_TYPE); + + return true; +} #endif static int dm_dp_mst_get_modes(struct drm_connector *connector) @@ -289,6 +308,10 @@ static int dm_dp_mst_get_modes(struct drm_connector *connector) if (!validate_dsc_caps_on_connector(aconnector)) memset(&aconnector->dc_sink->dsc_caps, 0, sizeof(aconnector->dc_sink->dsc_caps)); + + if (!retrieve_downstream_port_device(aconnector)) + memset(&aconnector->mst_downstream_port_present, + 0, sizeof(aconnector->mst_downstream_port_present)); #endif } } -- cgit From 9cc370435cde6b672b6e2221115c2f535e8bd4ec Mon Sep 17 00:00:00 2001 From: Fangzhi Zuo Date: Sun, 23 Jan 2022 13:20:08 -0500 Subject: drm/amd/display: Add DSC Enable for Synaptics Hub DSC sequence for non virtual dpcd synaptics hub Tested-by: Daniel Wheeler Reviewed-by: Hersen Wu Acked-by: Rodrigo Siqueira Signed-off-by: Fangzhi Zuo Signed-off-by: Alex Deucher --- .../drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c | 48 ++++++++++++++++++++++ .../drm/amd/display/include/ddc_service_types.h | 3 ++ 2 files changed, 51 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c index 8554c93c9fa9..75b5299b3576 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_helpers.c @@ -39,6 +39,7 @@ #include "amdgpu_dm_mst_types.h" #include "dm_helpers.h" +#include "ddc_service_types.h" struct monitor_patch_info { unsigned int manufacturer_id; @@ -537,6 +538,7 @@ bool dm_helpers_submit_i2c( return result; } +#if defined(CONFIG_DRM_AMD_DC_DCN) static bool execute_synatpics_rc_command(struct drm_dp_aux *aux, bool is_write_cmd, unsigned char cmd, @@ -669,6 +671,43 @@ static void apply_synaptics_fifo_reset_wa(struct drm_dp_aux *aux) DC_LOG_DC("Done apply_synaptics_fifo_reset_wa\n"); } +static uint8_t write_dsc_enable_synaptics_non_virtual_dpcd_mst( + struct drm_dp_aux *aux, + const struct dc_stream_state *stream, + bool enable) +{ + uint8_t ret = 0; + + DC_LOG_DC("Configure DSC to non-virtual dpcd synaptics\n"); + + if (enable) { + /* 
When DSC is enabled on previous boot and reboot with the hub, + * there is a chance that Synaptics hub gets stuck during reboot sequence. + * Applying a workaround to reset Synaptics SDP fifo before enabling the first stream + */ + if (!stream->link->link_status.link_active && + memcmp(stream->link->dpcd_caps.branch_dev_name, + (int8_t *)SYNAPTICS_DEVICE_ID, 4) == 0) + apply_synaptics_fifo_reset_wa(aux); + + ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); + DRM_INFO("Send DSC enable to synaptics\n"); + + } else { + /* Synaptics hub not support virtual dpcd, + * external monitor occur garbage while disable DSC, + * Disable DSC only when entire link status turn to false, + */ + if (!stream->link->link_status.link_active) { + ret = drm_dp_dpcd_write(aux, DP_DSC_ENABLE, &enable, 1); + DRM_INFO("Send DSC disable to synaptics\n"); + } + } + + return ret; +} +#endif + bool dm_helpers_dp_write_dsc_enable( struct dc_context *ctx, const struct dc_stream_state *stream, @@ -687,7 +726,16 @@ bool dm_helpers_dp_write_dsc_enable( if (!aconnector->dsc_aux) return false; +#if defined(CONFIG_DRM_AMD_DC_DCN) + // apply w/a to synaptics + if (needs_dsc_aux_workaround(aconnector->dc_link) && + (aconnector->mst_downstream_port_present.byte & 0x7) != 0x3) + return write_dsc_enable_synaptics_non_virtual_dpcd_mst( + aconnector->dsc_aux, stream, enable_dsc); +#endif + ret = drm_dp_dpcd_write(aconnector->dsc_aux, DP_DSC_ENABLE, &enable_dsc, 1); + DC_LOG_DC("Send DSC %s to MST RX\n", enable_dsc ? "enable" : "disable"); } if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT || stream->signal == SIGNAL_TYPE_EDP) { diff --git a/drivers/gpu/drm/amd/display/include/ddc_service_types.h b/drivers/gpu/drm/amd/display/include/ddc_service_types.h index 57f92bd2814f..fb289a5c873a 100644 --- a/drivers/gpu/drm/amd/display/include/ddc_service_types.h +++ b/drivers/gpu/drm/amd/display/include/ddc_service_types.h @@ -123,4 +123,7 @@ struct av_sync_data { static const uint8_t DP_SINK_DEVICE_STR_ID_1[] = {7, 1, 8, 7, 3, 0}; static const uint8_t DP_SINK_DEVICE_STR_ID_2[] = {7, 1, 8, 7, 5, 0}; +/*MST Dock*/ +static const uint8_t SYNAPTICS_DEVICE_ID[] = "SYNA"; + #endif /* __DAL_DDC_SERVICE_TYPES_H__ */ -- cgit From 5f2c1192eba42f6926253a1f2c9b18da05b3c31f Mon Sep 17 00:00:00 2001 From: Roy Chan Date: Sun, 23 Jan 2022 13:20:09 -0500 Subject: drm/amd/display: Support synchronized indirect reg access [Why] indirect register index/data pair may be used by multi-threads. when it happens, it would cause register access issue that is hard to trace. [How] Using cgs service, which provide a sync indirect reg access api. Tested-by: Daniel Wheeler Reviewed-by: Aric Cyr Acked-by: Rodrigo Siqueira Signed-off-by: Roy Chan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc_helper.c | 60 +++++++++++++++++++++++++ drivers/gpu/drm/amd/display/dc/inc/reg_helper.h | 34 ++++++++++++++ 2 files changed, 94 insertions(+) diff --git a/drivers/gpu/drm/amd/display/dc/dc_helper.c b/drivers/gpu/drm/amd/display/dc/dc_helper.c index ab6bc5d79012..f43cce16bb6c 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_helper.c +++ b/drivers/gpu/drm/amd/display/dc/dc_helper.c @@ -588,6 +588,66 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, return reg_val; } + +uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx, + uint32_t index, uint32_t reg_val, int n, + uint8_t shift1, uint32_t mask1, uint32_t field_value1, + ...) 
+{ + uint32_t shift, mask, field_value; + int i = 1; + + va_list ap; + + va_start(ap, field_value1); + + reg_val = set_reg_field_value_ex(reg_val, field_value1, mask1, shift1); + + while (i < n) { + shift = va_arg(ap, uint32_t); + mask = va_arg(ap, uint32_t); + field_value = va_arg(ap, uint32_t); + + reg_val = set_reg_field_value_ex(reg_val, field_value, mask, shift); + i++; + } + + dm_write_index_reg(ctx, CGS_IND_REG__PCIE, index, reg_val); + va_end(ap); + + return reg_val; +} + +uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx, + uint32_t index, int n, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + ...) +{ + uint32_t shift, mask, *field_value; + uint32_t value = 0; + int i = 1; + + va_list ap; + + va_start(ap, field_value1); + + value = dm_read_index_reg(ctx, CGS_IND_REG__PCIE, index); + *field_value1 = get_reg_field_value_ex(value, mask1, shift1); + + while (i < n) { + shift = va_arg(ap, uint32_t); + mask = va_arg(ap, uint32_t); + field_value = va_arg(ap, uint32_t *); + + *field_value = get_reg_field_value_ex(value, mask, shift); + i++; + } + + va_end(ap); + + return value; +} + void reg_sequence_start_gather(const struct dc_context *ctx) { /* if reg sequence is supported and enabled, set flag to diff --git a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h index 2470405e996b..a402df225a76 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h +++ b/drivers/gpu/drm/amd/display/dc/inc/reg_helper.h @@ -498,6 +498,40 @@ uint32_t generic_indirect_reg_update_ex(const struct dc_context *ctx, uint8_t shift1, uint32_t mask1, uint32_t field_value1, ...); +/* indirect register access + * underlying implementation determines which index/data pair to be used + * in a synchronous way + */ +#define IX_REG_SET_N_SYNC(index, n, initial_val, ...) \ + generic_indirect_reg_update_ex_sync(CTX, \ + IND_REG(index), \ + initial_val, \ + n, __VA_ARGS__) + +#define IX_REG_SET_2_SYNC(index, init_value, f1, v1, f2, v2) \ + IX_REG_SET_N_SYNC(index, 2, init_value, \ + FN(reg, f1), v1,\ + FN(reg, f2), v2) + +#define IX_REG_GET_N_SYNC(index, n, ...) \ + generic_indirect_reg_get_sync(CTX, \ + IND_REG(index), \ + n, __VA_ARGS__) + +#define IX_REG_GET_SYNC(index, field, val) \ + IX_REG_GET_N_SYNC(index, 1, \ + FN(data_reg_name, field), val) + +uint32_t generic_indirect_reg_get_sync(const struct dc_context *ctx, + uint32_t index, int n, + uint8_t shift1, uint32_t mask1, uint32_t *field_value1, + ...); + +uint32_t generic_indirect_reg_update_ex_sync(const struct dc_context *ctx, + uint32_t index, uint32_t reg_val, int n, + uint8_t shift1, uint32_t mask1, uint32_t field_value1, + ...); + /* register offload macros * * instead of MMIO to register directly, in some cases we want -- cgit From 72b90723b3760c69721b04f59436f38cd215e2df Mon Sep 17 00:00:00 2001 From: Sung Joon Kim Date: Sun, 23 Jan 2022 13:20:11 -0500 Subject: drm/amd/display: remove PHY repeater count check for LTTPR mode [why] Due to bad hardware, the PHY repeater count in LTTPR cap is read as 0xFF in some monitors while the LTTPR is actually present. [how] Remove PHY repeater counter check when configuring LTTPR mode. 
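Condensed restatement of the revised presence check, for reference (field names follow the diff below; the struct here is only a stand-in for the relevant part of link->dpcd_caps.lttpr_caps, and the dropped dp_convert_to_count() step is intentionally absent since removing it is the point of this change):

#include <stdbool.h>
#include <stdint.h>

struct lttpr_caps_model {
	uint8_t phy_repeater_cnt; /* raw DPCD repeater count field */
	uint8_t max_lane_count;
	uint8_t revision_raw;
};

static bool lttpr_present(const struct lttpr_caps_model *caps)
{
	return caps->max_lane_count > 0 &&
	       caps->phy_repeater_cnt < 0xff &&
	       caps->max_lane_count <= 4 &&
	       caps->revision_raw >= 0x14;
}
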
Tested-by: Daniel Wheeler Reviewed-by: Jun Lei Acked-by: Rodrigo Siqueira Signed-off-by: Sung Joon Kim Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 062bdbadc781..538ace042961 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -5069,9 +5069,8 @@ bool dp_retrieve_lttpr_cap(struct dc_link *link) DP_LT_TUNABLE_PHY_REPEATER_FIELD_DATA_STRUCTURE_REV]; /* Attempt to train in LTTPR transparent mode if repeater count exceeds 8. */ - is_lttpr_present = (dp_convert_to_count(link->dpcd_caps.lttpr_caps.phy_repeater_cnt) != 0 && + is_lttpr_present = (link->dpcd_caps.lttpr_caps.max_lane_count > 0 && link->dpcd_caps.lttpr_caps.phy_repeater_cnt < 0xff && - link->dpcd_caps.lttpr_caps.max_lane_count > 0 && link->dpcd_caps.lttpr_caps.max_lane_count <= 4 && link->dpcd_caps.lttpr_caps.revision.raw >= 0x14); if (is_lttpr_present) { -- cgit From 0c5a0bbb7379625eb2a5b6a931415c008b7a1a16 Mon Sep 17 00:00:00 2001 From: Reza Amini Date: Sun, 23 Jan 2022 13:20:12 -0500 Subject: drm/amd/display: Update VSC HDR infoPacket on TF change [why] OnSetSourceContentAttribute it does not trigger an update for the VSC with TF change. [how] In this call, create a new VSC infoPacket based on the new config, and allow DisplayTarget decide if an update and pursuant passive flip is necessary Tested-by: Daniel Wheeler Reviewed-by: Krunoslav Kovac Acked-by: Rodrigo Siqueira Signed-off-by: Reza Amini Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 2 +- .../drm/amd/display/modules/inc/mod_info_packet.h | 3 ++- .../amd/display/modules/info_packet/info_packet.c | 25 +++++++++++----------- 3 files changed, 16 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index c0ab23274f96..44f4d862a022 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -6494,7 +6494,7 @@ create_stream_for_sink(struct amdgpu_dm_connector *aconnector, if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED) stream->use_vsc_sdp_for_colorimetry = true; } - mod_build_vsc_infopacket(stream, &stream->vsc_infopacket); + mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space); aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY; } diff --git a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h index 1ab813b4fd14..1d8b746b02f2 100644 --- a/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h +++ b/drivers/gpu/drm/amd/display/modules/inc/mod_info_packet.h @@ -34,7 +34,8 @@ struct dc_info_packet; struct mod_vrr_params; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet); + struct dc_info_packet *info_packet, + enum dc_color_space cs); void mod_build_hf_vsif_infopacket(const struct dc_stream_state *stream, struct dc_info_packet *info_packet); diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 57f198de5e2c..b691aa45e84f 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ 
b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -130,7 +130,8 @@ enum ColorimetryYCCDP { }; void mod_build_vsc_infopacket(const struct dc_stream_state *stream, - struct dc_info_packet *info_packet) + struct dc_info_packet *info_packet, + enum dc_color_space cs) { unsigned int vsc_packet_revision = vsc_packet_undefined; unsigned int i; @@ -331,13 +332,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, /* Set Colorimetry format based on pixel encoding */ switch (stream->timing.pixel_encoding) { case PIXEL_ENCODING_RGB: - if ((stream->output_color_space == COLOR_SPACE_SRGB) || - (stream->output_color_space == COLOR_SPACE_SRGB_LIMITED)) + if ((cs == COLOR_SPACE_SRGB) || + (cs == COLOR_SPACE_SRGB_LIMITED)) colorimetryFormat = ColorimetryRGB_DP_sRGB; - else if (stream->output_color_space == COLOR_SPACE_ADOBERGB) + else if (cs == COLOR_SPACE_ADOBERGB) colorimetryFormat = ColorimetryRGB_DP_AdobeRGB; - else if ((stream->output_color_space == COLOR_SPACE_2020_RGB_FULLRANGE) || - (stream->output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE)) + else if ((cs == COLOR_SPACE_2020_RGB_FULLRANGE) || + (cs == COLOR_SPACE_2020_RGB_LIMITEDRANGE)) colorimetryFormat = ColorimetryRGB_DP_ITU_R_BT2020RGB; break; @@ -347,13 +348,13 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, /* Note: xvYCC probably not supported correctly here on DP since colorspace translation * loses distinction between BT601 vs xvYCC601 in translation */ - if (stream->output_color_space == COLOR_SPACE_YCBCR601) + if (cs == COLOR_SPACE_YCBCR601) colorimetryFormat = ColorimetryYCC_DP_ITU601; - else if (stream->output_color_space == COLOR_SPACE_YCBCR709) + else if (cs == COLOR_SPACE_YCBCR709) colorimetryFormat = ColorimetryYCC_DP_ITU709; - else if (stream->output_color_space == COLOR_SPACE_ADOBERGB) + else if (cs == COLOR_SPACE_ADOBERGB) colorimetryFormat = ColorimetryYCC_DP_AdobeYCC; - else if (stream->output_color_space == COLOR_SPACE_2020_YCBCR) + else if (cs == COLOR_SPACE_2020_YCBCR) colorimetryFormat = ColorimetryYCC_DP_ITU2020YCbCr; break; @@ -391,8 +392,8 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, } /* all YCbCr are always limited range */ - if ((stream->output_color_space == COLOR_SPACE_SRGB_LIMITED) || - (stream->output_color_space == COLOR_SPACE_2020_RGB_LIMITEDRANGE) || + if ((cs == COLOR_SPACE_SRGB_LIMITED) || + (cs == COLOR_SPACE_2020_RGB_LIMITEDRANGE) || (pixelEncoding != 0x0)) { info_packet->sb[17] |= 0x80; /* DB17 bit 7 set to 1 for CEA timing. */ } -- cgit From 5cef7e8e2fcc6f9e8d2134668848a31b15c523b8 Mon Sep 17 00:00:00 2001 From: Anthony Koo Date: Sun, 23 Jan 2022 13:20:13 -0500 Subject: drm/amd/display: [FW Promotion] Release 0.0.101.0 - Add Scr8 for GPINT messaging between driver and fw Tested-by: Daniel Wheeler Acked-by: Rodrigo Siqueira Signed-off-by: Anthony Koo Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index e13d50abc281..3ef6a7a0c7b6 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -46,10 +46,10 @@ /* Firmware versioning. 
*/ #ifdef DMUB_EXPOSE_VERSION -#define DMUB_FW_VERSION_GIT_HASH 0x56a29f36 +#define DMUB_FW_VERSION_GIT_HASH 0x1288a7b7 #define DMUB_FW_VERSION_MAJOR 0 #define DMUB_FW_VERSION_MINOR 0 -#define DMUB_FW_VERSION_REVISION 100 +#define DMUB_FW_VERSION_REVISION 101 #define DMUB_FW_VERSION_TEST 0 #define DMUB_FW_VERSION_VBIOS 0 #define DMUB_FW_VERSION_HOTFIX 0 @@ -367,8 +367,9 @@ union dmub_fw_boot_options { /**< 1 if all root clock gating is enabled and low power memory is enabled*/ uint32_t power_optimization: 1; uint32_t diag_env: 1; /* 1 if diagnostic environment */ + uint32_t gpint_scratch8: 1; /* 1 if GPINT is in scratch8*/ - uint32_t reserved : 19; /**< reserved */ + uint32_t reserved : 18; /**< reserved */ } bits; /**< boot bits */ uint32_t all; /**< 32-bit access to bits */ }; @@ -644,6 +645,7 @@ enum dmub_cmd_type { * Command type used for OUTBOX1 notification enable */ DMUB_CMD__OUTBOX1_ENABLE = 71, + /** * Command type used for all idle optimization commands. */ @@ -656,6 +658,7 @@ enum dmub_cmd_type { * Command type used for all panel control commands. */ DMUB_CMD__PANEL_CNTL = 74, + /** * Command type used for interfacing with DPIA. */ -- cgit From d52e77a3ffcf2da2be1a7892bc8fa67b0e907058 Mon Sep 17 00:00:00 2001 From: Wayne Lin Date: Sun, 23 Jan 2022 13:20:14 -0500 Subject: drm/amd/display: Reset preferred training settings immediately [Why & How] In order to easily test ilr by immediately reset the preferred training settings, fix the code to disable skip_immediate_retrain. Tested-by: Daniel Wheeler Reviewed-by: Solomon Chiu Acked-by: Rodrigo Siqueira Signed-off-by: Wayne Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index 12d437d9a0e4..aca725874bcf 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -2887,7 +2887,7 @@ static ssize_t edp_ilr_write(struct file *f, const char __user *buf, kfree(wr_buf); DRM_DEBUG_DRIVER("Invalid Input value. 
No HW will be programmed\n"); prefer_link_settings.use_link_rate_set = false; - dc_link_set_preferred_training_settings(dc, NULL, NULL, link, true); + dc_link_set_preferred_training_settings(dc, NULL, NULL, link, false); return size; } -- cgit From 53a35edfd0a767dbd04537596d95f56e94582f50 Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 23 Jan 2022 13:20:15 -0500 Subject: drm/amd/display: 3.2.170 This version brings along the following fixes: - Z9 improvements - Clocks management adjustments - Code cleanup - Improve DSC and MST code Tested-by: Daniel Wheeler Acked-by: Rodrigo Siqueira Signed-off-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 0dc183d6af5d..69cf78fe78cf 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -47,7 +47,7 @@ struct aux_payload; struct set_config_cmd_payload; struct dmub_notification; -#define DC_VER "3.2.169" +#define DC_VER "3.2.170" #define MAX_SURFACES 3 #define MAX_PLANES 6 -- cgit From d063e70c6acad14987242f266e9448669db4624e Mon Sep 17 00:00:00 2001 From: Aric Cyr Date: Sun, 23 Jan 2022 13:20:16 -0500 Subject: drm/amd/display: Remove unnecessary function definition Tested-by: Daniel Wheeler Acked-by: Rodrigo Siqueira Signed-off-by: Aric Cyr Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c | 5 ----- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h | 3 --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c | 2 +- 3 files changed, 1 insertion(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c index dec8604a0612..ed0a0e5fd805 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.c @@ -355,11 +355,6 @@ void dcn30_prepare_bandwidth(struct dc *dc, dcn20_prepare_bandwidth(dc, context); } -void dcn30_optimize_bandwidth(struct dc *dc, struct dc_state *context) -{ - dcn20_optimize_bandwidth(dc, context); -} - void dcn30_disable_writeback( struct dc *dc, unsigned int dwb_pipe_inst) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h index 357ee14711ad..73e7b690e82c 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_hwseq.h @@ -50,9 +50,6 @@ void dcn30_disable_writeback( void dcn30_prepare_bandwidth(struct dc *dc, struct dc_state *context); -void dcn30_optimize_bandwidth(struct dc *dc, - struct dc_state *context); - bool dcn30_mmhubbub_warmup( struct dc *dc, unsigned int num_dwb, diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c index 73a416cba563..bb347319de83 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_init.c @@ -60,7 +60,7 @@ static const struct hw_sequencer_funcs dcn30_funcs = { .interdependent_update_lock = dcn10_lock_all_pipes, .cursor_lock = dcn10_cursor_lock, .prepare_bandwidth = dcn20_prepare_bandwidth, - .optimize_bandwidth = dcn30_optimize_bandwidth, + .optimize_bandwidth = dcn20_optimize_bandwidth, .update_bandwidth = dcn20_update_bandwidth, .set_drr = dcn10_set_drr, .get_position = dcn10_get_position, -- cgit From f6a3795d35c69bd34a556e1d93000057aed78599 Mon Sep 17 00:00:00 2001 From: Wenjing Liu Date: 
Sun, 23 Jan 2022 13:20:17 -0500 Subject: drm/amd/display: allow set dp drive setting when stream is not present [why] There is a change previously to disallow DM to set dp drive setings when stream is not present. The logic might not work well with DP PHY complaince scenario with a PHY test fixture attachment. We need to make the method allow DP link drive settings changes even without stream attached to it. [how] revert back to previous code in set drive setting function then add an empty link_resource structure, then assign link resource based on current link resource if link resource is allocated to the current pipe. Tested-by: Daniel Wheeler Reviewed-by: Wayne Lin Acked-by: Rodrigo Siqueira Signed-off-by: Wenjing Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link.c | 30 ++++++++++++--------------- drivers/gpu/drm/amd/display/dc/dc_link.h | 3 ++- 2 files changed, 15 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c index 47cced994bfb..1e596f1ea494 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c @@ -4455,22 +4455,17 @@ void dc_link_set_drive_settings(struct dc *dc, { int i; - struct pipe_ctx *pipe = NULL; - const struct link_resource *link_res; + struct link_resource link_res; - link_res = dc_link_get_cur_link_res(link); + for (i = 0; i < dc->link_count; i++) + if (dc->links[i] == link) + break; - for (i = 0; i < MAX_PIPES; i++) { - pipe = &dc->current_state->res_ctx.pipe_ctx[i]; - if (pipe->stream && pipe->stream->link) { - if (pipe->stream->link == link) - break; - } - } - if (pipe && link_res) - dc_link_dp_set_drive_settings(pipe->stream->link, link_res, lt_settings); - else + if (i >= dc->link_count) ASSERT_CRITICAL(false); + + dc_link_get_cur_link_res(link, &link_res); + dc_link_dp_set_drive_settings(dc->links[i], &link_res, lt_settings); } void dc_link_set_preferred_link_settings(struct dc *dc, @@ -4712,23 +4707,24 @@ uint32_t dc_bandwidth_in_kbps_from_timing( } -const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link) +void dc_link_get_cur_link_res(const struct dc_link *link, + struct link_resource *link_res) { int i; struct pipe_ctx *pipe = NULL; - const struct link_resource *link_res = NULL; + + memset(link_res, 0, sizeof(*link_res)); for (i = 0; i < MAX_PIPES; i++) { pipe = &link->dc->current_state->res_ctx.pipe_ctx[i]; if (pipe->stream && pipe->stream->link && pipe->top_pipe == NULL) { if (pipe->stream->link == link) { - link_res = &pipe->link_res; + *link_res = pipe->link_res; break; } } } - return link_res; } /** diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h index b1c79b3f26aa..6c02244e7fe7 100644 --- a/drivers/gpu/drm/amd/display/dc/dc_link.h +++ b/drivers/gpu/drm/amd/display/dc/dc_link.h @@ -459,7 +459,8 @@ bool dc_link_should_enable_fec(const struct dc_link *link); uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw); enum dp_link_encoding dc_link_dp_mst_decide_link_encoding_format(const struct dc_link *link); -const struct link_resource *dc_link_get_cur_link_res(const struct dc_link *link); +void dc_link_get_cur_link_res(const struct dc_link *link, + struct link_resource *link_res); /* take a snapshot of current link resource allocation state */ void dc_get_cur_link_res_map(const struct dc *dc, uint32_t *map); /* restore link resource allocation state from a snapshot */ -- cgit From 
0015cce5cf04d3bd7b2ae4f62d5cea5d35383e8c Mon Sep 17 00:00:00 2001 From: David Galiffi Date: Sun, 23 Jan 2022 13:20:18 -0500 Subject: drm/amd/display: Fix disabling dccg clocks [How & Why] Updated procedure to match hardware programming guide. Tested-by: Daniel Wheeler Reviewed-by: Eric Yang Acked-by: Rodrigo Siqueira Signed-off-by: David Galiffi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 7 ++++ drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c | 44 +++++++++++++++-------- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h | 1 + 3 files changed, 37 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index f98aba308028..493c47a3d06e 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -183,8 +183,14 @@ type SYMCLK32_ROOT_SE1_GATE_DISABLE;\ type SYMCLK32_ROOT_SE2_GATE_DISABLE;\ type SYMCLK32_ROOT_SE3_GATE_DISABLE;\ + type SYMCLK32_SE0_GATE_DISABLE;\ + type SYMCLK32_SE1_GATE_DISABLE;\ + type SYMCLK32_SE2_GATE_DISABLE;\ + type SYMCLK32_SE3_GATE_DISABLE;\ type SYMCLK32_ROOT_LE0_GATE_DISABLE;\ type SYMCLK32_ROOT_LE1_GATE_DISABLE;\ + type SYMCLK32_LE0_GATE_DISABLE;\ + type SYMCLK32_LE1_GATE_DISABLE;\ type DPSTREAMCLK_ROOT_GATE_DISABLE;\ type DPSTREAMCLK_GATE_DISABLE;\ type HDMISTREAMCLK0_DTO_PHASE;\ @@ -233,6 +239,7 @@ struct dccg_registers { uint32_t DSCCLK2_DTO_PARAM; uint32_t DPSTREAMCLK_ROOT_GATE_DISABLE; uint32_t DPSTREAMCLK_GATE_DISABLE; + uint32_t DCCG_GATE_DISABLE_CNTL2; uint32_t DCCG_GATE_DISABLE_CNTL3; uint32_t HDMISTREAMCLK0_DTO_PARAM; uint32_t DCCG_GATE_DISABLE_CNTL4; diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index ea4f8e06b07c..720bd35582b0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -121,7 +121,8 @@ static void dccg31_enable_dpstreamclk(struct dccg *dccg, int otg_inst) return; } if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + DPSTREAMCLK_GATE_DISABLE, 1, DPSTREAMCLK_ROOT_GATE_DISABLE, 1); } @@ -130,8 +131,9 @@ static void dccg31_disable_dpstreamclk(struct dccg *dccg, int otg_inst) struct dcn_dccg *dccg_dcn = TO_DCN_DCCG(dccg); if (dccg->ctx->dc->debug.root_clock_optimization.bits.dpstream) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, - DPSTREAMCLK_ROOT_GATE_DISABLE, 0); + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + DPSTREAMCLK_ROOT_GATE_DISABLE, 0, + DPSTREAMCLK_GATE_DISABLE, 0); switch (otg_inst) { case 0: @@ -180,7 +182,8 @@ void dccg31_enable_symclk32_se( switch (hpo_se_inst) { case 0: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, 1, SYMCLK32_ROOT_SE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE0_SRC_SEL, phyd32clk, @@ -188,7 +191,8 @@ void dccg31_enable_symclk32_se( break; case 1: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, 1, SYMCLK32_ROOT_SE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE1_SRC_SEL, phyd32clk, @@ -196,7 +200,8 @@ void dccg31_enable_symclk32_se( break; case 2: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - 
REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, 1, SYMCLK32_ROOT_SE2_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE2_SRC_SEL, phyd32clk, @@ -204,7 +209,8 @@ void dccg31_enable_symclk32_se( break; case 3: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, 1, SYMCLK32_ROOT_SE3_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_SE_CNTL, SYMCLK32_SE3_SRC_SEL, phyd32clk, @@ -229,7 +235,8 @@ void dccg31_disable_symclk32_se( SYMCLK32_SE0_SRC_SEL, 0, SYMCLK32_SE0_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE0_GATE_DISABLE, 0, SYMCLK32_ROOT_SE0_GATE_DISABLE, 0); break; case 1: @@ -237,7 +244,8 @@ void dccg31_disable_symclk32_se( SYMCLK32_SE1_SRC_SEL, 0, SYMCLK32_SE1_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE1_GATE_DISABLE, 0, SYMCLK32_ROOT_SE1_GATE_DISABLE, 0); break; case 2: @@ -245,7 +253,8 @@ void dccg31_disable_symclk32_se( SYMCLK32_SE2_SRC_SEL, 0, SYMCLK32_SE2_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE2_GATE_DISABLE, 0, SYMCLK32_ROOT_SE2_GATE_DISABLE, 0); break; case 3: @@ -253,7 +262,8 @@ void dccg31_disable_symclk32_se( SYMCLK32_SE3_SRC_SEL, 0, SYMCLK32_SE3_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_se) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_SE3_GATE_DISABLE, 0, SYMCLK32_ROOT_SE3_GATE_DISABLE, 0); break; default: @@ -275,7 +285,8 @@ void dccg31_enable_symclk32_le( switch (hpo_le_inst) { case 0: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE0_GATE_DISABLE, 1, SYMCLK32_ROOT_LE0_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE0_SRC_SEL, phyd32clk, @@ -283,7 +294,8 @@ void dccg31_enable_symclk32_le( break; case 1: if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE1_GATE_DISABLE, 1, SYMCLK32_ROOT_LE1_GATE_DISABLE, 1); REG_UPDATE_2(SYMCLK32_LE_CNTL, SYMCLK32_LE1_SRC_SEL, phyd32clk, @@ -308,7 +320,8 @@ void dccg31_disable_symclk32_le( SYMCLK32_LE0_SRC_SEL, 0, SYMCLK32_LE0_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE0_GATE_DISABLE, 0, SYMCLK32_ROOT_LE0_GATE_DISABLE, 0); break; case 1: @@ -316,7 +329,8 @@ void dccg31_disable_symclk32_le( SYMCLK32_LE1_SRC_SEL, 0, SYMCLK32_LE1_EN, 0); if (dccg->ctx->dc->debug.root_clock_optimization.bits.symclk32_le) - REG_UPDATE(DCCG_GATE_DISABLE_CNTL3, + REG_UPDATE_2(DCCG_GATE_DISABLE_CNTL3, + SYMCLK32_LE1_GATE_DISABLE, 0, SYMCLK32_ROOT_LE1_GATE_DISABLE, 0); break; default: diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index a013a32bbaf7..4039273872be 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -66,6 +66,7 @@ SR(DSCCLK1_DTO_PARAM),\ SR(DSCCLK2_DTO_PARAM),\ 
SR(DSCCLK_DTO_CTRL),\ + SR(DCCG_GATE_DISABLE_CNTL2),\ SR(DCCG_GATE_DISABLE_CNTL3),\ SR(HDMISTREAMCLK0_DTO_PARAM) -- cgit From 05d6aea36a69e65b071e6ba897bf83a4aebaeab2 Mon Sep 17 00:00:00 2001 From: David Galiffi Date: Sun, 23 Jan 2022 13:20:19 -0500 Subject: drm/amd/display: Disable physym clock [How & Why] Disable physym clock when it's not in use. Tested-by: Daniel Wheeler Reviewed-by: Eric Yang Acked-by: Rodrigo Siqueira Signed-off-by: David Galiffi Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h | 8 ++- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c | 62 +++++++++++++++++++---- drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h | 5 ++ 3 files changed, 63 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h index 493c47a3d06e..b3c9a9724efd 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_dccg.h @@ -196,8 +196,12 @@ type HDMISTREAMCLK0_DTO_PHASE;\ type HDMISTREAMCLK0_DTO_MODULO;\ type HDMICHARCLK0_GATE_DISABLE;\ - type HDMICHARCLK0_ROOT_GATE_DISABLE; - + type HDMICHARCLK0_ROOT_GATE_DISABLE; \ + type PHYASYMCLK_GATE_DISABLE; \ + type PHYBSYMCLK_GATE_DISABLE; \ + type PHYCSYMCLK_GATE_DISABLE; \ + type PHYDSYMCLK_GATE_DISABLE; \ + type PHYESYMCLK_GATE_DISABLE; struct dccg_shift { DCCG_REG_FIELD_LIST(uint8_t) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c index 720bd35582b0..287a1066b547 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.c @@ -420,54 +420,89 @@ void dccg31_set_physymclk( /* Force PHYSYMCLK on and Select phyd32clk as the source of clock which is output to PHY through DCIO */ switch (phy_inst) { case 0: - if (force_enable) + if (force_enable) { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, 1, PHYASYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYASYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYASYMCLK_CLOCK_CNTL, PHYASYMCLK_FORCE_EN, 0, PHYASYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYASYMCLK_GATE_DISABLE, 0); + } break; case 1: - if (force_enable) + if (force_enable) { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, 1, PHYBSYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYBSYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYBSYMCLK_CLOCK_CNTL, PHYBSYMCLK_FORCE_EN, 0, PHYBSYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYBSYMCLK_GATE_DISABLE, 0); + } break; case 2: - if (force_enable) + if (force_enable) { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, 1, PHYCSYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYCSYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYCSYMCLK_CLOCK_CNTL, PHYCSYMCLK_FORCE_EN, 0, PHYCSYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYCSYMCLK_GATE_DISABLE, 0); + } break; case 3: - if (force_enable) + if (force_enable) { 
REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, 1, PHYDSYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYDSYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYDSYMCLK_CLOCK_CNTL, PHYDSYMCLK_FORCE_EN, 0, PHYDSYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYDSYMCLK_GATE_DISABLE, 0); + } break; case 4: - if (force_enable) + if (force_enable) { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, 1, PHYESYMCLK_FORCE_SRC_SEL, clk_src); - else + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYESYMCLK_GATE_DISABLE, 1); + } else { REG_UPDATE_2(PHYESYMCLK_CLOCK_CNTL, PHYESYMCLK_FORCE_EN, 0, PHYESYMCLK_FORCE_SRC_SEL, 0); + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) + REG_UPDATE(DCCG_GATE_DISABLE_CNTL2, + PHYESYMCLK_GATE_DISABLE, 0); + } break; default: BREAK_TO_DEBUGGER(); @@ -629,6 +664,13 @@ void dccg31_init(struct dccg *dccg) dccg31_disable_dpstreamclk(dccg, 3); } + if (dccg->ctx->dc->debug.root_clock_optimization.bits.physymclk) { + dccg31_set_physymclk(dccg, 0, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg31_set_physymclk(dccg, 1, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg31_set_physymclk(dccg, 2, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg31_set_physymclk(dccg, 3, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + dccg31_set_physymclk(dccg, 4, PHYSYMCLK_FORCE_SRC_SYMCLK, false); + } } static const struct dccg_funcs dccg31_funcs = { diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h index 4039273872be..269cabbea72a 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_dccg.h @@ -136,6 +136,11 @@ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK0_DTO_ENABLE, mask_sh),\ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK1_DTO_ENABLE, mask_sh),\ DCCG_SF(DSCCLK_DTO_CTRL, DSCCLK2_DTO_ENABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYASYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYBSYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYCSYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYDSYMCLK_GATE_DISABLE, mask_sh),\ + DCCG_SF(DCCG_GATE_DISABLE_CNTL2, PHYESYMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_ROOT_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, DPSTREAMCLK_GATE_DISABLE, mask_sh),\ DCCG_SF(DCCG_GATE_DISABLE_CNTL3, SYMCLK32_ROOT_SE0_GATE_DISABLE, mask_sh),\ -- cgit From 85b8f62b564120943cc0db1e754d6649037e6c7a Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Sun, 23 Jan 2022 13:20:20 -0500 Subject: drm/amd/display: fix zstate allow interface to PMFW [Why] psr_feature_enabled flag is dynamically updated, and sometimes when zstate allow status is determined the flag has not been set to true yet even on PSR enabled config, lid off/on is such a case, which will result in zstate disabled even though PSR is supported. [How] Check the supported PSR version and the PSR disable status instead. 
Tested-by: Daniel Wheeler Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Eric Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c index d0a5c7afa265..fcf388b509db 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_resource.c @@ -3099,7 +3099,7 @@ static enum dcn_zstate_support_state decide_zstate_support(struct dc *dc, struc if (context->bw_ctx.dml.vba.StutterPeriod > 5000.0) return DCN_ZSTATE_SUPPORT_ALLOW; - else if (link->psr_settings.psr_feature_enabled) + else if (link->psr_settings.psr_version == DC_PSR_VERSION_1 && !dc->debug.disable_psr) return DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY; else return DCN_ZSTATE_SUPPORT_DISALLOW; -- cgit From b5d9a483685c37a480420cfe2d41a03d529bc0a5 Mon Sep 17 00:00:00 2001 From: Eric Yang Date: Sun, 23 Jan 2022 13:20:21 -0500 Subject: drm/amd/display: add debug option for z9 disable interface [Why] To help triage issues and coordinate driver/bios release dependency [How] Only enable the new Z9 interface when debug option is set, otherwise treat Z10 only support case as Zstate disallowed. Tested-by: Daniel Wheeler Reviewed-by: Nicholas Kazlauskas Acked-by: Rodrigo Siqueira Signed-off-by: Eric Yang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 6 +++++- drivers/gpu/drm/amd/display/dc/dc.h | 1 + drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c | 1 + 3 files changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 1c0415366216..9b4836350547 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -308,12 +308,16 @@ void dcn31_smu_transfer_wm_table_dram_2_smu(struct clk_mgr_internal *clk_mgr) void dcn31_smu_set_zstate_support(struct clk_mgr_internal *clk_mgr, enum dcn_zstate_support_state support) { - //TODO: Work with smu team to define optimization options. 
unsigned int msg_id, param; if (!clk_mgr->smu_present) return; + if (!clk_mgr->base.ctx->dc->debug.enable_z9_disable_interface && + (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY)) + support = DCN_ZSTATE_SUPPORT_DISALLOW; + + if (support == DCN_ZSTATE_SUPPORT_ALLOW_Z10_ONLY) param = 1; else diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h index 69cf78fe78cf..4f9dacd09856 100644 --- a/drivers/gpu/drm/amd/display/dc/dc.h +++ b/drivers/gpu/drm/amd/display/dc/dc.h @@ -703,6 +703,7 @@ struct dc_debug_options { int crb_alloc_policy_min_disp_count; #if defined(CONFIG_DRM_AMD_DC_DCN) bool disable_z10; + bool enable_z9_disable_interface; bool enable_sw_cntl_psr; union dpia_debug_options dpia_debug; #endif diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c index 8d64187478e4..f9e312b20598 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_resource.c @@ -1033,6 +1033,7 @@ static const struct dc_debug_options debug_defaults_drv = { .optimize_edp_link_rate = true, .enable_sw_cntl_psr = true, .apply_vendor_specific_lttpr_wa = true, + .enable_z9_disable_interface = false }; static const struct dc_debug_options debug_defaults_diags = { -- cgit From f2bde8349c35d01d7c50456ea06a5c7d5e0e5ed0 Mon Sep 17 00:00:00 2001 From: Nicholas Kazlauskas Date: Mon, 24 Jan 2022 13:50:49 -0500 Subject: drm/amd/display: Call dc_stream_release for remove link enc assignment [Why] A porting error resulted in the stream assignment for the link being retained without being released - a memory leak. [How] Fix the porting error by adding back the dc_stream_release() intended as part of the original patch. Fixes: 0bb245558584 ("drm/amd/display: retain/release at proper places in link_enc assignment") Signed-off-by: Nicholas Kazlauskas Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c index a380611b1e42..047c626a4a34 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_enc_cfg.c @@ -122,6 +122,7 @@ static void remove_link_enc_assignment( stream->link_enc = NULL; state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].eng_id = ENGINE_ID_UNKNOWN; state->res_ctx.link_enc_cfg_ctx.link_enc_assignments[i].stream = NULL; + dc_stream_release(stream); break; } } -- cgit From b9610edcfec216d7a2a5ea2c942734b3e5e26ffc Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 21 Jan 2022 09:23:47 -0500 Subject: drm/amdgpu/pm/smu7: drop message about VI performance levels Earlier chips only had two performance levels, but newer ones potentially had more. The message is harmless. Drop the message to avoid spamming the log. 
Bug: https://gitlab.freedesktop.org/drm/amd/-/issues/1874 Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c index cd99db0dc2be..a1e11037831a 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c @@ -3295,10 +3295,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr, request_ps->classification.ui_label); data->mclk_ignore_signal = false; - PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2, - "VI should always have 2 performance levels", - ); - max_limits = adev->pm.ac_power ? &(hwmgr->dyn_state.max_clock_voltage_on_ac) : &(hwmgr->dyn_state.max_clock_voltage_on_dc); -- cgit From 212021297eafe23b79ac117db9b5159d1df2ff30 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 21 Jan 2022 09:31:38 -0500 Subject: drm/amdgpu: set APU flag based on IP discovery table Use the IP versions to set the APU flag when necessary. Reviewed-by: Aaron Liu Acked-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c index 8dd15a7b8dcc..eb4b7059633d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_discovery.c @@ -1259,6 +1259,19 @@ int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev) return -EINVAL; } + switch (adev->ip_versions[GC_HWIP][0]) { + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 2): + case IP_VERSION(9, 3, 0): + case IP_VERSION(10, 1, 3): + case IP_VERSION(10, 3, 1): + case IP_VERSION(10, 3, 3): + adev->flags |= AMD_IS_APU; + break; + default: + break; + } + if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0)) adev->gmc.xgmi.supported = true; -- cgit From 901e2be20dc55079997ea1885ea77fc72e6826e7 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Mon, 24 Jan 2022 13:42:30 -0500 Subject: drm/amdgpu: move PX checking into amdgpu_device_ip_early_init We need to set the APU flag from IP discovery before we evaluate this code. Acked-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 11 ----------- 2 files changed, 13 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 5159c6dedc04..7cec3a0f61b9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -2073,6 +2074,8 @@ out: */ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) { + struct drm_device *dev = adev_to_drm(adev); + struct pci_dev *parent; int i, r; amdgpu_device_enable_virtual_display(adev); @@ -2137,6 +2140,16 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev) break; } + if (amdgpu_has_atpx() && + (amdgpu_is_atpx_hybrid() || + amdgpu_has_atpx_dgpu_power_cntl()) && + ((adev->flags & AMD_IS_APU) == 0) && + !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) + adev->flags |= AMD_IS_PX; + + parent = pci_upstream_bridge(adev->pdev); + adev->has_pr3 = parent ? 
pci_pr3_present(parent) : false; + amdgpu_amdkfd_device_probe(adev); adev->pm.pp_feature = amdgpu_pp_feature_mask; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index e5e69fcc3af3..36dedceb6ded 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -152,21 +152,10 @@ static void amdgpu_get_audio_func(struct amdgpu_device *adev) int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) { struct drm_device *dev; - struct pci_dev *parent; int r, acpi_status; dev = adev_to_drm(adev); - if (amdgpu_has_atpx() && - (amdgpu_is_atpx_hybrid() || - amdgpu_has_atpx_dgpu_power_cntl()) && - ((flags & AMD_IS_APU) == 0) && - !pci_is_thunderbolt_attached(to_pci_dev(dev->dev))) - flags |= AMD_IS_PX; - - parent = pci_upstream_bridge(adev->pdev); - adev->has_pr3 = parent ? pci_pr3_present(parent) : false; - /* amdgpu_device_init should report only fatal error * like memory allocation failure or iomapping failure, * or memory manager initialization failure, it must -- cgit From d0d66b8c66d04363eff3a5b09da4074fc1dfc758 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 25 Jan 2022 14:58:31 -0500 Subject: drm/amdgpu: move runtime pm init after drm and fbdev init Seems more logical to enable runtime pm at the end of the init sequence so we don't end up entering runtime suspend before init is finished. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 65 +++++++++++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 69 +-------------------------------- 2 files changed, 66 insertions(+), 68 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 1a691a801928..4a5da4913d4e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1946,6 +1946,22 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static const struct drm_driver amdgpu_kms_driver; +static void amdgpu_get_audio_func(struct amdgpu_device *adev) +{ + struct pci_dev *p = NULL; + + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), + adev->pdev->bus->number, 1); + if (p) { + pm_runtime_get_sync(&p->dev); + + pm_runtime_mark_last_busy(&p->dev); + pm_runtime_put_autosuspend(&p->dev); + + pci_dev_put(p); + } +} + static int amdgpu_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { @@ -2071,6 +2087,48 @@ retry_init: if (ret) DRM_ERROR("Creating debugfs files failed (%d).\n", ret); + if (adev->runpm) { + /* only need to skip on ATPX */ + if (amdgpu_device_supports_px(ddev)) + dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); + /* we want direct complete for BOCO */ + if (amdgpu_device_supports_boco(ddev)) + dev_pm_set_driver_flags(ddev->dev, DPM_FLAG_SMART_PREPARE | + DPM_FLAG_SMART_SUSPEND | + DPM_FLAG_MAY_SKIP_RESUME); + pm_runtime_use_autosuspend(ddev->dev); + pm_runtime_set_autosuspend_delay(ddev->dev, 5000); + + pm_runtime_allow(ddev->dev); + + pm_runtime_mark_last_busy(ddev->dev); + pm_runtime_put_autosuspend(ddev->dev); + + /* + * For runpm implemented via BACO, PMFW will handle the + * timing for BACO in and out: + * - put ASIC into BACO state only when both video and + * audio functions are in D3 state. + * - pull ASIC out of BACO state when either video or + * audio function is in D0 state. + * Also, at startup, PMFW assumes both functions are in + * D0 state. 
+ * + * So if snd driver was loaded prior to amdgpu driver + * and audio function was put into D3 state, there will + * be no PMFW-aware D-state transition(D0->D3) on runpm + * suspend. Thus the BACO will be not correctly kicked in. + * + * Via amdgpu_get_audio_func(), the audio dev is put + * into D0 state. Then there will be a PMFW-aware D-state + * transition(D0->D3) on runpm suspend. + */ + if (amdgpu_device_supports_baco(ddev) && + !(adev->flags & AMD_IS_APU) && + (adev->asic_type >= CHIP_NAVI10)) + amdgpu_get_audio_func(adev); + } + return 0; err_pci: @@ -2082,8 +2140,15 @@ static void amdgpu_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); + struct amdgpu_device *adev = drm_to_adev(dev); drm_dev_unplug(dev); + + if (adev->runpm) { + pm_runtime_get_sync(dev->dev); + pm_runtime_forbid(dev->dev); + } + amdgpu_driver_unload_kms(dev); /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c index 36dedceb6ded..c2cb345b1421 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c @@ -87,11 +87,6 @@ void amdgpu_driver_unload_kms(struct drm_device *dev) if (adev->rmmio == NULL) return; - if (adev->runpm) { - pm_runtime_get_sync(dev->dev); - pm_runtime_forbid(dev->dev); - } - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_UNLOAD)) DRM_WARN("smart shift update failed\n"); @@ -124,22 +119,6 @@ void amdgpu_register_gpu_instance(struct amdgpu_device *adev) mutex_unlock(&mgpu_info.mutex); } -static void amdgpu_get_audio_func(struct amdgpu_device *adev) -{ - struct pci_dev *p = NULL; - - p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), - adev->pdev->bus->number, 1); - if (p) { - pm_runtime_get_sync(&p->dev); - - pm_runtime_mark_last_busy(&p->dev); - pm_runtime_put_autosuspend(&p->dev); - - pci_dev_put(p); - } -} - /** * amdgpu_driver_load_kms - Main load function for KMS. * @@ -207,58 +186,12 @@ int amdgpu_driver_load_kms(struct amdgpu_device *adev, unsigned long flags) if (acpi_status) dev_dbg(dev->dev, "Error during ACPI methods call\n"); - if (adev->runpm) { - /* only need to skip on ATPX */ - if (amdgpu_device_supports_px(dev)) - dev_pm_set_driver_flags(dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); - /* we want direct complete for BOCO */ - if (amdgpu_device_supports_boco(dev)) - dev_pm_set_driver_flags(dev->dev, DPM_FLAG_SMART_PREPARE | - DPM_FLAG_SMART_SUSPEND | - DPM_FLAG_MAY_SKIP_RESUME); - pm_runtime_use_autosuspend(dev->dev); - pm_runtime_set_autosuspend_delay(dev->dev, 5000); - - pm_runtime_allow(dev->dev); - - pm_runtime_mark_last_busy(dev->dev); - pm_runtime_put_autosuspend(dev->dev); - - /* - * For runpm implemented via BACO, PMFW will handle the - * timing for BACO in and out: - * - put ASIC into BACO state only when both video and - * audio functions are in D3 state. - * - pull ASIC out of BACO state when either video or - * audio function is in D0 state. - * Also, at startup, PMFW assumes both functions are in - * D0 state. - * - * So if snd driver was loaded prior to amdgpu driver - * and audio function was put into D3 state, there will - * be no PMFW-aware D-state transition(D0->D3) on runpm - * suspend. Thus the BACO will be not correctly kicked in. - * - * Via amdgpu_get_audio_func(), the audio dev is put - * into D0 state. Then there will be a PMFW-aware D-state - * transition(D0->D3) on runpm suspend. 
- */ - if (amdgpu_device_supports_baco(dev) && - !(adev->flags & AMD_IS_APU) && - (adev->asic_type >= CHIP_NAVI10)) - amdgpu_get_audio_func(adev); - } - if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DRV_LOAD)) DRM_WARN("smart shift update failed\n"); out: - if (r) { - /* balance pm_runtime_get_sync in amdgpu_driver_unload_kms */ - if (adev->rmmio && adev->runpm) - pm_runtime_put_noidle(dev->dev); + if (r) amdgpu_driver_unload_kms(dev); - } return r; } -- cgit From 243c719e872a1322b22efccff80776353357b296 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 25 Jan 2022 15:00:14 -0500 Subject: drm/amdgpu: handle BACO synchronization with secondary funcs Extend secondary function handling for runtime pm beyond audio to USB and UCSI. Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 30 ++++++++++++++++++------------ 1 file changed, 18 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 4a5da4913d4e..9c799645f2e7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1946,19 +1946,25 @@ MODULE_DEVICE_TABLE(pci, pciidlist); static const struct drm_driver amdgpu_kms_driver; -static void amdgpu_get_audio_func(struct amdgpu_device *adev) +static void amdgpu_get_secondary_funcs(struct amdgpu_device *adev) { struct pci_dev *p = NULL; + int i; - p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), - adev->pdev->bus->number, 1); - if (p) { - pm_runtime_get_sync(&p->dev); - - pm_runtime_mark_last_busy(&p->dev); - pm_runtime_put_autosuspend(&p->dev); - - pci_dev_put(p); + /* 0 - GPU + * 1 - audio + * 2 - USB + * 3 - UCSI + */ + for (i = 1; i < 4; i++) { + p = pci_get_domain_bus_and_slot(pci_domain_nr(adev->pdev->bus), + adev->pdev->bus->number, i); + if (p) { + pm_runtime_get_sync(&p->dev); + pm_runtime_mark_last_busy(&p->dev); + pm_runtime_put_autosuspend(&p->dev); + pci_dev_put(p); + } } } @@ -2119,14 +2125,14 @@ retry_init: * be no PMFW-aware D-state transition(D0->D3) on runpm * suspend. Thus the BACO will be not correctly kicked in. * - * Via amdgpu_get_audio_func(), the audio dev is put + * Via amdgpu_get_secondary_funcs(), the audio dev is put * into D0 state. Then there will be a PMFW-aware D-state * transition(D0->D3) on runpm suspend. */ if (amdgpu_device_supports_baco(ddev) && !(adev->flags & AMD_IS_APU) && (adev->asic_type >= CHIP_NAVI10)) - amdgpu_get_audio_func(adev); + amdgpu_get_secondary_funcs(adev); } return 0; -- cgit From 82c3a7a5edbf5f6feae9602d19567d2b5b55121f Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 21 Jan 2022 11:18:44 -0500 Subject: drm/amdgpu: convert amdgpu_display_supported_domains() to IP versions Check IP versions rather than asic types. 
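For illustration only, the shape of such a conversion is sketched below with a made-up helper name; it simply keys the decision off the discovered IP version instead of the asic_type enum, mirroring the hunk that follows.

    /* Sketch only: is_sg_display_candidate() is a made-up helper that
     * illustrates keying logic off the discovered IP version rather
     * than the asic_type enum.  The cases mirror the hunk below.
     */
    static bool is_sg_display_candidate(struct amdgpu_device *adev)
    {
            switch (adev->ip_versions[DCE_HWIP][0]) {
            case IP_VERSION(2, 1, 0):
            case IP_VERSION(3, 0, 1):
            case IP_VERSION(3, 1, 2):
            case IP_VERSION(3, 1, 3):
                    /* the DCN generations that previously matched
                     * CHIP_RENOIR/CHIP_VANGOGH/CHIP_YELLOW_CARP
                     */
                    return true;
            default:
                    return false;
            }
    }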
Acked-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 29 +++++++++++++++++------------ 1 file changed, 17 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 82011e75ed85..6cad39c31c58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -510,19 +510,24 @@ uint32_t amdgpu_display_supported_domains(struct amdgpu_device *adev, case CHIP_STONEY: domain |= AMDGPU_GEM_DOMAIN_GTT; break; - case CHIP_RAVEN: - /* enable S/G on PCO and RV2 */ - if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || - (adev->apu_flags & AMD_APU_IS_PICASSO)) - domain |= AMDGPU_GEM_DOMAIN_GTT; - break; - case CHIP_RENOIR: - case CHIP_VANGOGH: - case CHIP_YELLOW_CARP: - domain |= AMDGPU_GEM_DOMAIN_GTT; - break; - default: + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(1, 0, 0): + case IP_VERSION(1, 0, 1): + /* enable S/G on PCO and RV2 */ + if ((adev->apu_flags & AMD_APU_IS_RAVEN2) || + (adev->apu_flags & AMD_APU_IS_PICASSO)) + domain |= AMDGPU_GEM_DOMAIN_GTT; + break; + case IP_VERSION(2, 1, 0): + case IP_VERSION(3, 0, 1): + case IP_VERSION(3, 1, 2): + case IP_VERSION(3, 1, 3): + domain |= AMDGPU_GEM_DOMAIN_GTT; + break; + default: + break; + } break; } } -- cgit From 153a9529d7f372ce7ceb5eae7e2c312c0cd64d41 Mon Sep 17 00:00:00 2001 From: Zhou Qingyang Date: Tue, 25 Jan 2022 00:55:51 +0800 Subject: drm/amd/display/dc/calcs/dce_calcs: Fix a memleak in calculate_bandwidth() In calculate_bandwidth(), the tag free_sclk and free_yclk are reversed, which could lead to a memory leak of yclk. Fix this bug by changing the location of free_sclk and free_yclk. This bug was found by a static analyzer. Builds with 'make allyesconfig' show no new warnings, and our static analyzer no longer warns about this code. Fixes: 2be8989d0fc2 ("drm/amd/display/dc/calcs/dce_calcs: Move some large variables from the stack to the heap") Signed-off-by: Zhou Qingyang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c index 8f136db39f3e..0100a6053ab6 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dce_calcs.c @@ -2033,10 +2033,10 @@ static void calculate_bandwidth( kfree(surface_type); free_tiling_mode: kfree(tiling_mode); -free_yclk: - kfree(yclk); free_sclk: kfree(sclk); +free_yclk: + kfree(yclk); } /******************************************************************************* -- cgit From 588a70177df3b1777484267584ef38ab2ca899a2 Mon Sep 17 00:00:00 2001 From: Zhou Qingyang Date: Tue, 25 Jan 2022 00:57:29 +0800 Subject: drm/amd/display: Fix a NULL pointer dereference in amdgpu_dm_connector_add_common_modes() In amdgpu_dm_connector_add_common_modes(), amdgpu_dm_create_common_mode() is assigned to mode and is passed to drm_mode_probed_add() directly after that. drm_mode_probed_add() passes &mode->head to list_add_tail(), and there is a dereference of it in list_add_tail() without recoveries, which could lead to NULL pointer dereference on failure of amdgpu_dm_create_common_mode(). Fix this by adding a NULL check of mode. This bug was found by a static analyzer. Builds with 'make allyesconfig' show no new warnings, and our static analyzer no longer warns about this code. 
Fixes: e7b07ceef2a6 ("drm/amd/display: Merge amdgpu_dm_types and amdgpu_dm") Signed-off-by: Zhou Qingyang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 44f4d862a022..e6ff40fdf18b 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -8138,6 +8138,9 @@ static void amdgpu_dm_connector_add_common_modes(struct drm_encoder *encoder, mode = amdgpu_dm_create_common_mode(encoder, common_modes[i].name, common_modes[i].w, common_modes[i].h); + if (!mode) + continue; + drm_mode_probed_add(connector, mode); amdgpu_dm_connector->num_modes++; } -- cgit From 94ca070cd3737dd5c3b98f615bd4cc950f82a597 Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Mon, 24 Jan 2022 01:23:35 +0100 Subject: drm/amd/display: Fix FP start/end for dcn30_internal_validate_bw. It calls populate_dml_pipes which uses doubles to initialize the scale_ratio_depth params. Mirrors the dcn20 logic. Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c index 602ec9a08549..8ca26383b568 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_resource.c @@ -1878,7 +1878,6 @@ noinline bool dcn30_internal_validate_bw( dc->res_pool->funcs->update_soc_for_wm_a(dc, context); pipe_cnt = dc->res_pool->funcs->populate_dml_pipes(dc, context, pipes, fast_validate); - DC_FP_START(); if (!pipe_cnt) { out = true; goto validate_out; @@ -2104,7 +2103,6 @@ validate_fail: out = false; validate_out: - DC_FP_END(); return out; } @@ -2306,7 +2304,9 @@ bool dcn30_validate_bandwidth(struct dc *dc, BW_VAL_TRACE_COUNT(); + DC_FP_START(); out = dcn30_internal_validate_bw(dc, context, pipes, &pipe_cnt, &vlevel, fast_validate); + DC_FP_END(); if (pipe_cnt == 0) goto validate_out; -- cgit From 5e6d72c643e1262ff02e057620b9f87d7d81141a Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Mon, 24 Jan 2022 01:23:36 +0100 Subject: drm/amd/display: Wrap dcn301_calculate_wm_and_dlg for FPU. Mirrors the logic for dcn30. Cue lots of WARNs and some kernel panics without this fix. 
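As a rule of thumb, the double-precision DML math lives in the FPU-compiled dml/ files and every caller brackets the call with DC_FP_START()/DC_FP_END(). The sketch below uses made-up dcn3x_* names purely to illustrate that convention; it is not the in-tree code.

    /* Sketch only: the dcn3x_* names are placeholders, not in-tree symbols. */
    void dcn3x_calculate_wm_fp(struct dc *dc, struct dc_state *context,
                               display_e2e_pipe_params_st *pipes,
                               int pipe_cnt, int vlevel); /* double math, built in dml/ */

    static void dcn3x_calculate_wm(struct dc *dc, struct dc_state *context,
                                   display_e2e_pipe_params_st *pipes,
                                   int pipe_cnt, int vlevel)
    {
            DC_FP_START();  /* on x86 this boils down to kernel_fpu_begin() */
            dcn3x_calculate_wm_fp(dc, context, pipes, pipe_cnt, vlevel);
            DC_FP_END();    /* kernel_fpu_end() */
    }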
Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c | 11 +++++++++++ drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c | 2 +- drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h | 2 +- 3 files changed, 13 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c index b4001233867c..5d9637b07429 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c +++ b/drivers/gpu/drm/amd/display/dc/dcn301/dcn301_resource.c @@ -1380,6 +1380,17 @@ static void set_wm_ranges( pp_smu->nv_funcs.set_wm_ranges(&pp_smu->nv_funcs.pp_smu, &ranges); } +static void dcn301_calculate_wm_and_dlg( + struct dc *dc, struct dc_state *context, + display_e2e_pipe_params_st *pipes, + int pipe_cnt, + int vlevel) +{ + DC_FP_START(); + dcn301_calculate_wm_and_dlg_fp(dc, context, pipes, pipe_cnt, vlevel); + DC_FP_END(); +} + static struct resource_funcs dcn301_res_pool_funcs = { .destroy = dcn301_destroy_resource_pool, .link_enc_create = dcn301_link_encoder_create, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c index 94c32832a0e7..0a7a33864973 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.c @@ -327,7 +327,7 @@ void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info) dcn3_01_soc.sr_exit_time_us = bb_info.dram_sr_exit_latency_100ns * 10; } -void dcn301_calculate_wm_and_dlg(struct dc *dc, +void dcn301_calculate_wm_and_dlg_fp(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h index fc7065d17842..774b0fdfc80b 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn301/dcn301_fpu.h @@ -34,7 +34,7 @@ void dcn301_fpu_set_wm_ranges(int i, void dcn301_fpu_init_soc_bounding_box(struct bp_soc_bb_info bb_info); -void dcn301_calculate_wm_and_dlg(struct dc *dc, +void dcn301_calculate_wm_and_dlg_fp(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes, int pipe_cnt, -- cgit From 430e6a0212b2a0eb1de5e9d47a016fa79edf3978 Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Mon, 24 Jan 2022 12:18:12 -0800 Subject: drm/amd/pm: return -ENOTSUPP if there is no get_dpm_ultimate_freq function clang static analysis reports this represenative problem amdgpu_smu.c:144:18: warning: The left operand of '*' is a garbage value return clk_freq * 100; ~~~~~~~~ ^ If there is no get_dpm_ultimate_freq function, smu_get_dpm_freq_range returns success without setting the output min,max parameters. So return an -ENOTSUPP error. 
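A simplified sketch of the call-site pattern behind the warning follows; the function name report_mclk_in_10khz() is made up and the SMU_UCLK argument is illustrative, so this is not the exact amdgpu_smu.c code.

    /* Simplified sketch, not the exact in-tree caller: before the fix,
     * a missing get_dpm_ultimate_freq callback left ret at 0 while
     * max_freq was never written.
     */
    static uint32_t report_mclk_in_10khz(struct smu_context *smu)
    {
            uint32_t min_freq, max_freq;
            int ret;

            ret = smu_get_dpm_freq_range(smu, SMU_UCLK, &min_freq, &max_freq);
            if (ret)
                    return 0;

            return max_freq * 100;  /* read garbage on the false "success" path */
    }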
Fixes: e5ef784b1e17 ("drm/amd/powerplay: revise calling chain on retrieving frequency range") Signed-off-by: Tom Rix Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 5ace30434e60..264eb09ccfd5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -116,7 +116,7 @@ int smu_get_dpm_freq_range(struct smu_context *smu, uint32_t *min, uint32_t *max) { - int ret = 0; + int ret = -ENOTSUPP; if (!min && !max) return -EINVAL; -- cgit From ee2016b4b5bb70483e1c06218e7b6288704284cb Mon Sep 17 00:00:00 2001 From: Colin Ian King Date: Wed, 19 Jan 2022 22:46:28 +0000 Subject: drm/radeon: remove redundant assignment to reg The pointer reg is being assigned a value that is not read, the exit path via label 'out' never accesses it. The assignment is redundant and can be removed. Cleans up clang scan build warning: drivers/gpu/drm/radeon/radeon_object.c:570:3: warning: Value stored to 'reg' is never read [deadcode.DeadStores] Reviewed-by: Nick Desaulniers Signed-off-by: Colin Ian King Signed-off-by: Alex Deucher --- drivers/gpu/drm/radeon/radeon_object.c | 1 - 1 file changed, 1 deletion(-) diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 56ede9d63b12..87536d205593 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -567,7 +567,6 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) return 0; if (bo->surface_reg >= 0) { - reg = &rdev->surface_regs[bo->surface_reg]; i = bo->surface_reg; goto out; } -- cgit From e6f62afe9b2731b7a94b31fe59ef2c5134ad426b Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Fri, 21 Jan 2022 17:23:43 -0800 Subject: drm/amd/display: don't use /** for non-kernel-doc comments Change a static function's comment from "/**" (indicating kernel-doc notation) to "/*" (indicating a regular C language comment). This prevents multiple kernel-doc warnings: drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c:4343: warning: Function parameter or member 'max_supported_frl_bw_in_kbps' not described in 'intersect_frl_link_bw_support' drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c:4343: warning: Function parameter or member 'hdmi_encoded_link_bw' not described in 'intersect_frl_link_bw_support' drivers/gpu/drm/amd/amdgpu/../display/dc/core/dc_link_dp.c:4343: warning: expecting prototype for Return PCON's post FRL link training supported BW if its non(). 
Prototype was for intersect_frl_link_bw_support() instead Fixes: c022375ae095 ("drm/amd/display: Add DP-HDMI FRL PCON Support in DC") Reviewed-by: Harry Wentland Signed-off-by: Randy Dunlap Reported-by: kernel test robot Cc: Fangzhi Zuo Cc: Alex Deucher Cc: Nicholas Kazlauskas Cc: Harry Wentland Cc: Leo Li Cc: Rodrigo Siqueira Cc: amd-gfx@lists.freedesktop.org Cc: dri-devel@lists.freedesktop.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c index 538ace042961..8cfc9a8197df 100644 --- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c +++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c @@ -4698,7 +4698,7 @@ uint32_t dc_link_bw_kbps_from_raw_frl_link_rate_data(uint8_t bw) return 0; } -/** +/* * Return PCON's post FRL link training supported BW if its non-zero, otherwise return max_supported_frl_bw. */ static uint32_t intersect_frl_link_bw_support( -- cgit From d726d43c20e744bab1e346c1f77b7a71eff0c40c Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Tue, 25 Jan 2022 11:12:07 +0800 Subject: drm/amdgpu: convert to NBIO IP version checking Use IP versions rather than asic_type to differentiate IP version specific features. Signed-off-by: Tim Huang Reviewed-by: Aaron Liu Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c | 44 ++++++++++++++++++++++++---------- 1 file changed, 31 insertions(+), 13 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c index 3444332ea110..44f17bbfeb6a 100644 --- a/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c +++ b/drivers/gpu/drm/amd/amdgpu/nbio_v7_2.c @@ -59,10 +59,15 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev) { u32 tmp; - if (adev->asic_type == CHIP_YELLOW_CARP) + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 2, 1): + case IP_VERSION(7, 5, 0): tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0_YC); - else + break; + default: tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); + break; + } tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; @@ -72,20 +77,25 @@ static u32 nbio_v7_2_get_rev_id(struct amdgpu_device *adev) static void nbio_v7_2_mc_access_enable(struct amdgpu_device *adev, bool enable) { - if (enable) - if (adev->asic_type == CHIP_YELLOW_CARP) + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 2, 1): + case IP_VERSION(7, 5, 0): + if (enable) WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK); else + WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0); + break; + default: + if (enable) WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK); - else - if (adev->asic_type == CHIP_YELLOW_CARP) - WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN_YC, 0); else WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0); + break; + } } static u32 nbio_v7_2_get_memsize(struct amdgpu_device *adev) @@ -250,7 +260,9 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev { uint32_t def, data; - if (adev->asic_type == CHIP_YELLOW_CARP) { + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 2, 1): + case IP_VERSION(7, 5, 0): def = data = 
RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2)); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK; @@ -260,8 +272,8 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data); - data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1)); - def = data; + def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, + regBIF1_PCIE_TX_POWER_CTRL_1)); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) data |= (BIF1_PCIE_TX_POWER_CTRL_1__MST_MEM_LS_EN_MASK | BIF1_PCIE_TX_POWER_CTRL_1__REPLAY_MEM_LS_EN_MASK); @@ -272,7 +284,8 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_TX_POWER_CTRL_1), data); - } else { + break; + default: def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2)); if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK | @@ -285,6 +298,7 @@ static void nbio_v7_2_update_medium_grain_light_sleep(struct amdgpu_device *adev if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CNTL2), data); + break; } } @@ -352,7 +366,9 @@ const struct nbio_hdp_flush_reg nbio_v7_2_hdp_flush_reg = { static void nbio_v7_2_init_registers(struct amdgpu_device *adev) { uint32_t def, data; - if (adev->asic_type == CHIP_YELLOW_CARP) { + switch (adev->ip_versions[NBIO_HWIP][0]) { + case IP_VERSION(7, 2, 1): + case IP_VERSION(7, 5, 0): def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3)); data = REG_SET_FIELD(data, BIF1_PCIE_MST_CTRL_3, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); @@ -361,7 +377,8 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regBIF1_PCIE_MST_CTRL_3), data); - } else { + break; + default: def = data = RREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL)); data = REG_SET_FIELD(data, PCIE_CONFIG_CNTL, CI_SWUS_MAX_READ_REQUEST_SIZE_MODE, 1); @@ -370,6 +387,7 @@ static void nbio_v7_2_init_registers(struct amdgpu_device *adev) if (def != data) WREG32_PCIE_PORT(SOC15_REG_OFFSET(NBIO, 0, regPCIE_CONFIG_CNTL), data); + break; } if (amdgpu_sriov_vf(adev)) -- cgit From 37d6b1506b80a5cf76238b6b00926070ab544058 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Tue, 25 Jan 2022 11:13:39 +0800 Subject: drm/amdgpu: convert to UVD IP version checking Use IP versions rather than asic_type to differentiate IP version specific features. 
Signed-off-by: Tim Huang Reviewed-by: Aaron Liu Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c index 01c242c5abc3..41a00851b6c5 100644 --- a/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/jpeg_v3_0.c @@ -50,11 +50,16 @@ static int jpeg_v3_0_early_init(void *handle) { struct amdgpu_device *adev = (struct amdgpu_device *)handle; - if (adev->asic_type != CHIP_YELLOW_CARP) { - u32 harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); + u32 harvest; + switch (adev->ip_versions[UVD_HWIP][0]) { + case IP_VERSION(3, 1, 1): + break; + default: + harvest = RREG32_SOC15(JPEG, 0, mmCC_UVD_HARVESTING); if (harvest & CC_UVD_HARVESTING__UVD_DISABLE_MASK) return -ENOENT; + break; } adev->jpeg.num_jpeg_inst = 1; -- cgit From 3b36f50d3a69ed720e4c464bc9c5fa2c3fadc750 Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Tue, 25 Jan 2022 11:14:46 +0800 Subject: drm/amd/display: convert to DCE IP version checking Use IP versions rather than asic_type to differentiate IP version specific features. Signed-off-by: Tim Huang Reviewed-by: Aaron Liu Acked-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index e6ff40fdf18b..84342f27ee64 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1119,14 +1119,12 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) for (i = 0; i < fb_info->num_fb; ++i) hw_params.fb[i] = &fb_info->fb[i]; - switch (adev->asic_type) { - case CHIP_YELLOW_CARP: - if (dc->ctx->asic_id.hw_internal_rev != YELLOW_CARP_A0) { - hw_params.dpia_supported = true; + switch (adev->ip_versions[DCE_HWIP][0]) { + case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */ + hw_params.dpia_supported = true; #if defined(CONFIG_DRM_AMD_DC_DCN) - hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia; + hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia; #endif - } break; default: break; -- cgit From 0e3135d3bfa5dfb658145238d2bc723a8e30c3a3 Mon Sep 17 00:00:00 2001 From: He Fengqing Date: Sat, 22 Jan 2022 10:29:36 +0000 Subject: bpf: Fix possible race in inc_misses_counter It seems inc_misses_counter() suffers from same issue fixed in the commit d979617aa84d ("bpf: Fixes possible race in update_prog_stats() for 32bit arches"): As it can run while interrupts are enabled, it could be re-entered and the u64_stats syncp could be mangled. 
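For context, a 32-bit reader of these counters relies entirely on the syncp sequence count, as in the sketch below (made-up read_misses() helper, not the in-tree code); a writer re-entered from IRQ context can corrupt that count, leaving the reader retrying or accepting a torn 64-bit value.

    /* Sketch only, not the in-tree code: the reader trusts the sequence
     * count in stats->syncp, which nested begin/end pairs can corrupt.
     */
    static u64 read_misses(const struct bpf_prog_stats *stats)
    {
            unsigned int start;
            u64 misses;

            do {
                    start = u64_stats_fetch_begin_irq(&stats->syncp);
                    misses = u64_stats_read(&stats->misses);
            } while (u64_stats_fetch_retry_irq(&stats->syncp, start));

            return misses;
    }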
Fixes: 9ed9e9ba2337 ("bpf: Count the number of times recursion was prevented") Signed-off-by: He Fengqing Acked-by: John Fastabend Link: https://lore.kernel.org/r/20220122102936.1219518-1-hefengqing@huawei.com Signed-off-by: Alexei Starovoitov --- kernel/bpf/trampoline.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/kernel/bpf/trampoline.c b/kernel/bpf/trampoline.c index 4b6974a195c1..5e7edf913060 100644 --- a/kernel/bpf/trampoline.c +++ b/kernel/bpf/trampoline.c @@ -550,11 +550,12 @@ static __always_inline u64 notrace bpf_prog_start_time(void) static void notrace inc_misses_counter(struct bpf_prog *prog) { struct bpf_prog_stats *stats; + unsigned int flags; stats = this_cpu_ptr(prog->stats); - u64_stats_update_begin(&stats->syncp); + flags = u64_stats_update_begin_irqsave(&stats->syncp); u64_stats_inc(&stats->misses); - u64_stats_update_end(&stats->syncp); + u64_stats_update_end_irqrestore(&stats->syncp, flags); } /* The logic is similar to bpf_prog_run(), but with an explicit -- cgit From e2bcbd7769ee8f05e1b3d10848aace98973844e4 Mon Sep 17 00:00:00 2001 From: Sean Young Date: Mon, 24 Jan 2022 15:30:28 +0000 Subject: tools headers UAPI: remove stale lirc.h The lirc.h file is an old copy of lirc.h from the kernel sources. It is out of date, and the bpf lirc tests don't need a new copy anyway. As long as /usr/include/linux/lirc.h is from kernel v5.2 or newer, the tests will compile fine. Signed-off-by: Sean Young Reviewed-by: Shuah Khan Link: https://lore.kernel.org/r/20220124153028.394409-1-sean@mess.org Signed-off-by: Alexei Starovoitov --- tools/include/uapi/linux/lirc.h | 229 --------------------- tools/testing/selftests/bpf/test_lirc_mode2_user.c | 1 - 2 files changed, 230 deletions(-) delete mode 100644 tools/include/uapi/linux/lirc.h diff --git a/tools/include/uapi/linux/lirc.h b/tools/include/uapi/linux/lirc.h deleted file mode 100644 index 45fcbf99d72e..000000000000 --- a/tools/include/uapi/linux/lirc.h +++ /dev/null @@ -1,229 +0,0 @@ -/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ -/* - * lirc.h - linux infrared remote control header file - * last modified 2010/07/13 by Jarod Wilson - */ - -#ifndef _LINUX_LIRC_H -#define _LINUX_LIRC_H - -#include -#include - -#define PULSE_BIT 0x01000000 -#define PULSE_MASK 0x00FFFFFF - -#define LIRC_MODE2_SPACE 0x00000000 -#define LIRC_MODE2_PULSE 0x01000000 -#define LIRC_MODE2_FREQUENCY 0x02000000 -#define LIRC_MODE2_TIMEOUT 0x03000000 - -#define LIRC_VALUE_MASK 0x00FFFFFF -#define LIRC_MODE2_MASK 0xFF000000 - -#define LIRC_SPACE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_SPACE) -#define LIRC_PULSE(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_PULSE) -#define LIRC_FREQUENCY(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_FREQUENCY) -#define LIRC_TIMEOUT(val) (((val)&LIRC_VALUE_MASK) | LIRC_MODE2_TIMEOUT) - -#define LIRC_VALUE(val) ((val)&LIRC_VALUE_MASK) -#define LIRC_MODE2(val) ((val)&LIRC_MODE2_MASK) - -#define LIRC_IS_SPACE(val) (LIRC_MODE2(val) == LIRC_MODE2_SPACE) -#define LIRC_IS_PULSE(val) (LIRC_MODE2(val) == LIRC_MODE2_PULSE) -#define LIRC_IS_FREQUENCY(val) (LIRC_MODE2(val) == LIRC_MODE2_FREQUENCY) -#define LIRC_IS_TIMEOUT(val) (LIRC_MODE2(val) == LIRC_MODE2_TIMEOUT) - -/* used heavily by lirc userspace */ -#define lirc_t int - -/*** lirc compatible hardware features ***/ - -#define LIRC_MODE2SEND(x) (x) -#define LIRC_SEND2MODE(x) (x) -#define LIRC_MODE2REC(x) ((x) << 16) -#define LIRC_REC2MODE(x) ((x) >> 16) - -#define LIRC_MODE_RAW 0x00000001 -#define LIRC_MODE_PULSE 0x00000002 -#define LIRC_MODE_MODE2 
0x00000004 -#define LIRC_MODE_SCANCODE 0x00000008 -#define LIRC_MODE_LIRCCODE 0x00000010 - - -#define LIRC_CAN_SEND_RAW LIRC_MODE2SEND(LIRC_MODE_RAW) -#define LIRC_CAN_SEND_PULSE LIRC_MODE2SEND(LIRC_MODE_PULSE) -#define LIRC_CAN_SEND_MODE2 LIRC_MODE2SEND(LIRC_MODE_MODE2) -#define LIRC_CAN_SEND_LIRCCODE LIRC_MODE2SEND(LIRC_MODE_LIRCCODE) - -#define LIRC_CAN_SEND_MASK 0x0000003f - -#define LIRC_CAN_SET_SEND_CARRIER 0x00000100 -#define LIRC_CAN_SET_SEND_DUTY_CYCLE 0x00000200 -#define LIRC_CAN_SET_TRANSMITTER_MASK 0x00000400 - -#define LIRC_CAN_REC_RAW LIRC_MODE2REC(LIRC_MODE_RAW) -#define LIRC_CAN_REC_PULSE LIRC_MODE2REC(LIRC_MODE_PULSE) -#define LIRC_CAN_REC_MODE2 LIRC_MODE2REC(LIRC_MODE_MODE2) -#define LIRC_CAN_REC_SCANCODE LIRC_MODE2REC(LIRC_MODE_SCANCODE) -#define LIRC_CAN_REC_LIRCCODE LIRC_MODE2REC(LIRC_MODE_LIRCCODE) - -#define LIRC_CAN_REC_MASK LIRC_MODE2REC(LIRC_CAN_SEND_MASK) - -#define LIRC_CAN_SET_REC_CARRIER (LIRC_CAN_SET_SEND_CARRIER << 16) -#define LIRC_CAN_SET_REC_DUTY_CYCLE (LIRC_CAN_SET_SEND_DUTY_CYCLE << 16) - -#define LIRC_CAN_SET_REC_DUTY_CYCLE_RANGE 0x40000000 -#define LIRC_CAN_SET_REC_CARRIER_RANGE 0x80000000 -#define LIRC_CAN_GET_REC_RESOLUTION 0x20000000 -#define LIRC_CAN_SET_REC_TIMEOUT 0x10000000 -#define LIRC_CAN_SET_REC_FILTER 0x08000000 - -#define LIRC_CAN_MEASURE_CARRIER 0x02000000 -#define LIRC_CAN_USE_WIDEBAND_RECEIVER 0x04000000 - -#define LIRC_CAN_SEND(x) ((x)&LIRC_CAN_SEND_MASK) -#define LIRC_CAN_REC(x) ((x)&LIRC_CAN_REC_MASK) - -#define LIRC_CAN_NOTIFY_DECODE 0x01000000 - -/*** IOCTL commands for lirc driver ***/ - -#define LIRC_GET_FEATURES _IOR('i', 0x00000000, __u32) - -#define LIRC_GET_SEND_MODE _IOR('i', 0x00000001, __u32) -#define LIRC_GET_REC_MODE _IOR('i', 0x00000002, __u32) -#define LIRC_GET_REC_RESOLUTION _IOR('i', 0x00000007, __u32) - -#define LIRC_GET_MIN_TIMEOUT _IOR('i', 0x00000008, __u32) -#define LIRC_GET_MAX_TIMEOUT _IOR('i', 0x00000009, __u32) - -/* code length in bits, currently only for LIRC_MODE_LIRCCODE */ -#define LIRC_GET_LENGTH _IOR('i', 0x0000000f, __u32) - -#define LIRC_SET_SEND_MODE _IOW('i', 0x00000011, __u32) -#define LIRC_SET_REC_MODE _IOW('i', 0x00000012, __u32) -/* Note: these can reset the according pulse_width */ -#define LIRC_SET_SEND_CARRIER _IOW('i', 0x00000013, __u32) -#define LIRC_SET_REC_CARRIER _IOW('i', 0x00000014, __u32) -#define LIRC_SET_SEND_DUTY_CYCLE _IOW('i', 0x00000015, __u32) -#define LIRC_SET_TRANSMITTER_MASK _IOW('i', 0x00000017, __u32) - -/* - * when a timeout != 0 is set the driver will send a - * LIRC_MODE2_TIMEOUT data packet, otherwise LIRC_MODE2_TIMEOUT is - * never sent, timeout is disabled by default - */ -#define LIRC_SET_REC_TIMEOUT _IOW('i', 0x00000018, __u32) - -/* 1 enables, 0 disables timeout reports in MODE2 */ -#define LIRC_SET_REC_TIMEOUT_REPORTS _IOW('i', 0x00000019, __u32) - -/* - * if enabled from the next key press on the driver will send - * LIRC_MODE2_FREQUENCY packets - */ -#define LIRC_SET_MEASURE_CARRIER_MODE _IOW('i', 0x0000001d, __u32) - -/* - * to set a range use LIRC_SET_REC_CARRIER_RANGE with the - * lower bound first and later LIRC_SET_REC_CARRIER with the upper bound - */ -#define LIRC_SET_REC_CARRIER_RANGE _IOW('i', 0x0000001f, __u32) - -#define LIRC_SET_WIDEBAND_RECEIVER _IOW('i', 0x00000023, __u32) - -/* - * Return the recording timeout, which is either set by - * the ioctl LIRC_SET_REC_TIMEOUT or by the kernel after setting the protocols. 
- */ -#define LIRC_GET_REC_TIMEOUT _IOR('i', 0x00000024, __u32) - -/* - * struct lirc_scancode - decoded scancode with protocol for use with - * LIRC_MODE_SCANCODE - * - * @timestamp: Timestamp in nanoseconds using CLOCK_MONOTONIC when IR - * was decoded. - * @flags: should be 0 for transmit. When receiving scancodes, - * LIRC_SCANCODE_FLAG_TOGGLE or LIRC_SCANCODE_FLAG_REPEAT can be set - * depending on the protocol - * @rc_proto: see enum rc_proto - * @keycode: the translated keycode. Set to 0 for transmit. - * @scancode: the scancode received or to be sent - */ -struct lirc_scancode { - __u64 timestamp; - __u16 flags; - __u16 rc_proto; - __u32 keycode; - __u64 scancode; -}; - -/* Set if the toggle bit of rc-5 or rc-6 is enabled */ -#define LIRC_SCANCODE_FLAG_TOGGLE 1 -/* Set if this is a nec or sanyo repeat */ -#define LIRC_SCANCODE_FLAG_REPEAT 2 - -/** - * enum rc_proto - the Remote Controller protocol - * - * @RC_PROTO_UNKNOWN: Protocol not known - * @RC_PROTO_OTHER: Protocol known but proprietary - * @RC_PROTO_RC5: Philips RC5 protocol - * @RC_PROTO_RC5X_20: Philips RC5x 20 bit protocol - * @RC_PROTO_RC5_SZ: StreamZap variant of RC5 - * @RC_PROTO_JVC: JVC protocol - * @RC_PROTO_SONY12: Sony 12 bit protocol - * @RC_PROTO_SONY15: Sony 15 bit protocol - * @RC_PROTO_SONY20: Sony 20 bit protocol - * @RC_PROTO_NEC: NEC protocol - * @RC_PROTO_NECX: Extended NEC protocol - * @RC_PROTO_NEC32: NEC 32 bit protocol - * @RC_PROTO_SANYO: Sanyo protocol - * @RC_PROTO_MCIR2_KBD: RC6-ish MCE keyboard - * @RC_PROTO_MCIR2_MSE: RC6-ish MCE mouse - * @RC_PROTO_RC6_0: Philips RC6-0-16 protocol - * @RC_PROTO_RC6_6A_20: Philips RC6-6A-20 protocol - * @RC_PROTO_RC6_6A_24: Philips RC6-6A-24 protocol - * @RC_PROTO_RC6_6A_32: Philips RC6-6A-32 protocol - * @RC_PROTO_RC6_MCE: MCE (Philips RC6-6A-32 subtype) protocol - * @RC_PROTO_SHARP: Sharp protocol - * @RC_PROTO_XMP: XMP protocol - * @RC_PROTO_CEC: CEC protocol - * @RC_PROTO_IMON: iMon Pad protocol - * @RC_PROTO_RCMM12: RC-MM protocol 12 bits - * @RC_PROTO_RCMM24: RC-MM protocol 24 bits - * @RC_PROTO_RCMM32: RC-MM protocol 32 bits - */ -enum rc_proto { - RC_PROTO_UNKNOWN = 0, - RC_PROTO_OTHER = 1, - RC_PROTO_RC5 = 2, - RC_PROTO_RC5X_20 = 3, - RC_PROTO_RC5_SZ = 4, - RC_PROTO_JVC = 5, - RC_PROTO_SONY12 = 6, - RC_PROTO_SONY15 = 7, - RC_PROTO_SONY20 = 8, - RC_PROTO_NEC = 9, - RC_PROTO_NECX = 10, - RC_PROTO_NEC32 = 11, - RC_PROTO_SANYO = 12, - RC_PROTO_MCIR2_KBD = 13, - RC_PROTO_MCIR2_MSE = 14, - RC_PROTO_RC6_0 = 15, - RC_PROTO_RC6_6A_20 = 16, - RC_PROTO_RC6_6A_24 = 17, - RC_PROTO_RC6_6A_32 = 18, - RC_PROTO_RC6_MCE = 19, - RC_PROTO_SHARP = 20, - RC_PROTO_XMP = 21, - RC_PROTO_CEC = 22, - RC_PROTO_IMON = 23, - RC_PROTO_RCMM12 = 24, - RC_PROTO_RCMM24 = 25, - RC_PROTO_RCMM32 = 26, -}; - -#endif diff --git a/tools/testing/selftests/bpf/test_lirc_mode2_user.c b/tools/testing/selftests/bpf/test_lirc_mode2_user.c index ebf68dce5504..2893e9f2f1e0 100644 --- a/tools/testing/selftests/bpf/test_lirc_mode2_user.c +++ b/tools/testing/selftests/bpf/test_lirc_mode2_user.c @@ -28,7 +28,6 @@ // 5. We can read keycode from same /dev/lirc device #include -#include #include #include #include -- cgit From d8f7f8831bce9ef6a1f562037e137f57b5951501 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 24 Jan 2022 21:31:36 +0200 Subject: drm/i915: Introduce ilk_pch_pre_enable() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Complete the ilk pch modeset abstraction by adding ilk_pch_pre_enable(). I did the disable vs. 
post_disable split already for the disable sequence, but the enable sequence was still left with the naked ilk_fdi_pll_enable() call for some reason. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124193136.2397-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 5 +---- drivers/gpu/drm/i915/display/intel_pch_display.c | 14 ++++++++++++++ drivers/gpu/drm/i915/display/intel_pch_display.h | 2 ++ 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8537d2373bb6..f3f8704378f8 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1858,10 +1858,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_encoders_pre_enable(state, crtc); if (new_crtc_state->has_pch_encoder) { - /* Note: FDI PLL enabling _must_ be done before we enable the - * cpu pipes, hence this is separate from all the other fdi/pch - * enabling. */ - ilk_fdi_pll_enable(new_crtc_state); + ilk_pch_pre_enable(state, crtc); } else { assert_fdi_tx_disabled(dev_priv, pipe); assert_fdi_rx_disabled(dev_priv, pipe); diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index a55c4bfacd0d..0c528c612cb2 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -211,6 +211,20 @@ static void ilk_disable_pch_transcoder(struct intel_crtc *crtc) } } +void ilk_pch_pre_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc) +{ + const struct intel_crtc_state *crtc_state = + intel_atomic_get_new_crtc_state(state, crtc); + + /* + * Note: FDI PLL enabling _must_ be done before we enable the + * cpu pipes, hence this is separate from all the other fdi/pch + * enabling. + */ + ilk_fdi_pll_enable(crtc_state); +} + /* * Enable PCH resources required for PCH ports: * - PCH PLLs diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h index 2c387fe3a467..f915fa4241d7 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.h +++ b/drivers/gpu/drm/i915/display/intel_pch_display.h @@ -10,6 +10,8 @@ struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +void ilk_pch_pre_enable(struct intel_atomic_state *state, + struct intel_crtc *crtc); void ilk_pch_enable(struct intel_atomic_state *state, struct intel_crtc *crtc); void ilk_pch_disable(struct intel_atomic_state *state, -- cgit From 41e096da18b357ff1d2108c514b9634d67364c41 Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Mon, 24 Jan 2022 11:06:50 +0200 Subject: drm/i915: Pass plane to watermark calculation functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sometimes we might need to change the way we calculate watermarks, based on which particular plane it is calculated for. Thus it would be convenient to pass plane struct to those functions. 
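The motivation, in code terms: with only an enum plane_id the watermark helpers can index the state arrays but cannot ask anything about the plane itself, whereas the full struct intel_plane keeps plane->id available and also exposes per-plane capabilities that later patches in this series rely on (for example plane->async_flip). A condensed sketch of the signature change, as it appears in the hunks below:

  /* before: just an index into the per-crtc state arrays */
  static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
                                       const struct intel_plane_state *plane_state,
                                       enum plane_id plane_id, int color_plane);

  /* after: the plane itself; plane->id recovers the old index */
  static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state,
                                       const struct intel_plane_state *plane_state,
                                       struct intel_plane *plane, int color_plane);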
v2: Pass plane instead of plane_id v3: Do not pass plane to skl_cursor_allocation(Ville Syrjälä) v4: - Make intel_crtc_get_plane static again(Ville Syrjälä) - s/cursor_plane/plane(Ville Syrjälä) - Pass plane to skl_compute_wm_* instead of plane_id(Ville Syrjälä) Signed-off-by: Stanislav Lisovskiy Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124090653.14547-2-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/display/intel_atomic_plane.h | 1 + drivers/gpu/drm/i915/intel_pm.c | 37 +++++++++++++---------- 2 files changed, 22 insertions(+), 16 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.h b/drivers/gpu/drm/i915/display/intel_atomic_plane.h index 7907f601598e..ead789709477 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.h +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.h @@ -16,6 +16,7 @@ struct intel_crtc; struct intel_crtc_state; struct intel_plane; struct intel_plane_state; +enum plane_id; unsigned int intel_adjusted_rate(const struct drm_rect *src, const struct drm_rect *dst, diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 488a1adc540f..f7cd936e7be0 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4240,7 +4240,9 @@ static int skl_compute_wm_params(const struct intel_crtc_state *crtc_state, u64 modifier, unsigned int rotation, u32 plane_pixel_rate, struct skl_wm_params *wp, int color_plane); + static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, int level, unsigned int latency, const struct skl_wm_params *wp, @@ -4251,6 +4253,7 @@ static unsigned int skl_cursor_allocation(const struct intel_crtc_state *crtc_state, int num_active) { + struct intel_plane *plane = to_intel_plane(crtc_state->uapi.crtc->cursor); struct drm_i915_private *dev_priv = to_i915(crtc_state->uapi.crtc->dev); int level, max_level = ilk_wm_max_level(dev_priv); struct skl_wm_level wm = {}; @@ -4267,7 +4270,7 @@ skl_cursor_allocation(const struct intel_crtc_state *crtc_state, for (level = 0; level <= max_level; level++) { unsigned int latency = dev_priv->wm.skl_latency[level]; - skl_compute_plane_wm(crtc_state, level, latency, &wp, &wm, &wm); + skl_compute_plane_wm(crtc_state, plane, level, latency, &wp, &wm, &wm); if (wm.min_ddb_alloc == U16_MAX) break; @@ -5495,6 +5498,7 @@ static int skl_wm_max_lines(struct drm_i915_private *dev_priv) } static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, int level, unsigned int latency, const struct skl_wm_params *wp, @@ -5622,6 +5626,7 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, static void skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, const struct skl_wm_params *wm_params, struct skl_wm_level *levels) { @@ -5633,7 +5638,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, struct skl_wm_level *result = &levels[level]; unsigned int latency = dev_priv->wm.skl_latency[level]; - skl_compute_plane_wm(crtc_state, level, latency, + skl_compute_plane_wm(crtc_state, plane, level, latency, wm_params, result_prev, result); result_prev = result; @@ -5641,6 +5646,7 @@ skl_compute_wm_levels(const struct intel_crtc_state *crtc_state, } static void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane, const struct skl_wm_params *wm_params, struct skl_plane_wm *plane_wm) { @@ -5649,7 +5655,7 @@ static 
void tgl_compute_sagv_wm(const struct intel_crtc_state *crtc_state, struct skl_wm_level *levels = plane_wm->wm; unsigned int latency = dev_priv->wm.skl_latency[0] + dev_priv->sagv_block_time_us; - skl_compute_plane_wm(crtc_state, 0, latency, + skl_compute_plane_wm(crtc_state, plane, 0, latency, wm_params, &levels[0], sagv_wm); } @@ -5719,11 +5725,11 @@ static void skl_compute_transition_wm(struct drm_i915_private *dev_priv, static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, - enum plane_id plane_id, int color_plane) + struct intel_plane *plane, int color_plane) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; struct skl_wm_params wm_params; int ret; @@ -5732,13 +5738,13 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, if (ret) return ret; - skl_compute_wm_levels(crtc_state, &wm_params, wm->wm); + skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->wm); skl_compute_transition_wm(dev_priv, &wm->trans_wm, &wm->wm[0], &wm_params); if (DISPLAY_VER(dev_priv) >= 12) { - tgl_compute_sagv_wm(crtc_state, &wm_params, wm); + tgl_compute_sagv_wm(crtc_state, plane, &wm_params, wm); skl_compute_transition_wm(dev_priv, &wm->sagv.trans_wm, &wm->sagv.wm0, &wm_params); @@ -5749,9 +5755,9 @@ static int skl_build_plane_wm_single(struct intel_crtc_state *crtc_state, static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, - enum plane_id plane_id) + struct intel_plane *plane) { - struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane_id]; + struct skl_plane_wm *wm = &crtc_state->wm.skl.raw.planes[plane->id]; struct skl_wm_params wm_params; int ret; @@ -5763,7 +5769,7 @@ static int skl_build_plane_wm_uv(struct intel_crtc_state *crtc_state, if (ret) return ret; - skl_compute_wm_levels(crtc_state, &wm_params, wm->uv_wm); + skl_compute_wm_levels(crtc_state, plane, &wm_params, wm->uv_wm); return 0; } @@ -5783,13 +5789,13 @@ static int skl_build_plane_wm(struct intel_crtc_state *crtc_state, return 0; ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane_id, 0); + plane, 0); if (ret) return ret; if (fb->format->is_yuv && fb->format->num_planes > 1) { ret = skl_build_plane_wm_uv(crtc_state, plane_state, - plane_id); + plane); if (ret) return ret; } @@ -5814,7 +5820,6 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, if (plane_state->planar_linked_plane) { const struct drm_framebuffer *fb = plane_state->hw.fb; - enum plane_id y_plane_id = plane_state->planar_linked_plane->id; drm_WARN_ON(&dev_priv->drm, !intel_wm_plane_visible(crtc_state, plane_state)); @@ -5822,17 +5827,17 @@ static int icl_build_plane_wm(struct intel_crtc_state *crtc_state, fb->format->num_planes == 1); ret = skl_build_plane_wm_single(crtc_state, plane_state, - y_plane_id, 0); + plane_state->planar_linked_plane, 0); if (ret) return ret; ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane_id, 1); + plane, 1); if (ret) return ret; } else if (intel_wm_plane_visible(crtc_state, plane_state)) { ret = skl_build_plane_wm_single(crtc_state, plane_state, - plane_id, 0); + plane, 0); if (ret) return ret; } -- cgit From 20f6ac2d5e00d8ff154d9617a5c0b52ff12f3320 Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Mon, 24 Jan 2022 
11:06:51 +0200 Subject: drm/i915: Introduce do_async_flip flag to intel_plane_state MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There might be various logical contructs when we might want to enable async flip, so lets calculate those and set this flag, so that there is no need in long conditions in other places. v2: - Set do_async_flip flag to False, if no async flip needed. Lets not rely that it will be 0-initialized, but set explicitly, so that the logic is clear as well. v3: - Clear do_async_flip in intel_plane_duplicate_state(Ville Syrjälä) - Check with do_async_flip also when calling intel_crtc_{enable,disable}_flip_done(Ville Syrjälä) Reviewed-by: Ville Syrjälä Signed-off-by: Stanislav Lisovskiy Link: https://patchwork.freedesktop.org/patch/msgid/20220124090653.14547-3-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/display/intel_atomic_plane.c | 3 ++- drivers/gpu/drm/i915/display/intel_display.c | 9 +++++++-- drivers/gpu/drm/i915/display/intel_display_types.h | 3 +++ 3 files changed, 12 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_atomic_plane.c b/drivers/gpu/drm/i915/display/intel_atomic_plane.c index c8bbbc7f8c66..314c64e662dc 100644 --- a/drivers/gpu/drm/i915/display/intel_atomic_plane.c +++ b/drivers/gpu/drm/i915/display/intel_atomic_plane.c @@ -109,6 +109,7 @@ intel_plane_duplicate_state(struct drm_plane *plane) intel_state->ggtt_vma = NULL; intel_state->dpt_vma = NULL; intel_state->flags = 0; + intel_state->do_async_flip = false; /* add reference to fb */ if (intel_state->hw.fb) @@ -491,7 +492,7 @@ void intel_plane_update_arm(struct intel_plane *plane, trace_intel_plane_update_arm(&plane->base, crtc); - if (crtc_state->uapi.async_flip && plane->async_flip) + if (plane_state->do_async_flip) plane->async_flip(plane, crtc_state, plane_state, true); else plane->update_arm(plane, crtc_state, plane_state); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index f3f8704378f8..a78b16fe20fd 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1369,7 +1369,8 @@ static void intel_crtc_enable_flip_done(struct intel_atomic_state *state, for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->enable_flip_done && plane->pipe == crtc->pipe && - update_planes & BIT(plane->id)) + update_planes & BIT(plane->id) && + plane_state->do_async_flip) plane->enable_flip_done(plane); } } @@ -1387,7 +1388,8 @@ static void intel_crtc_disable_flip_done(struct intel_atomic_state *state, for_each_new_intel_plane_in_state(state, plane, plane_state, i) { if (plane->disable_flip_done && plane->pipe == crtc->pipe && - update_planes & BIT(plane->id)) + update_planes & BIT(plane->id) && + plane_state->do_async_flip) plane->disable_flip_done(plane); } } @@ -5024,6 +5026,9 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat needs_scaling(new_plane_state)))) new_crtc_state->disable_lp_wm = true; + if (new_crtc_state->uapi.async_flip && plane->async_flip) + new_plane_state->do_async_flip = true; + return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index c9c6efadf8b4..e83cb799427b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -634,6 +634,9 @@ struct intel_plane_state { struct intel_fb_view view; + /* Indicates if 
async flip is required */ + bool do_async_flip; + /* Plane pxp decryption state */ bool decrypt; -- cgit From c3639f3be480529ac82b592e627fa7dd712de83e Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Mon, 24 Jan 2022 11:49:29 +0200 Subject: drm/i915: Use wm0 only during async flips for DG2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This optimization allows to achieve higher perfomance during async flips. For the first async flip we have to still temporarily switch to sync flip, in order to reprogram plane watermarks, so this requires taking into account old plane state's do_async_flip flag. v2: - Removed redundant new_plane_state->do_async_flip check from needs_async_flip_wm_override condition (Ville Syrjälä) - Extract dg2_async_flip_optimization to separate function(Ville Syrjälä) - Check for plane->async_flip instead of plane_id (Ville Syrjälä) v3: - Rename "needs_async_flip_wm_override" to "intel_plane_do_async_flip" and move all the required checks there (Ville Syrjälä) - Rename "dg2_async_flip_optimization" to "use_minimal_wm0_only" (Ville Syrjälä) v4: - Swap new/old_crtc_state in intel_plane_do_async_flip argument list(Ville Syrjälä) - Use plane->base.dev to grab i915 pointer in intel_plane_do_async_flip(Ville Syrjälä) - Remove const modifier from plane parameter in use_minimal_wm0_only(Ville Syrjälä) - Swap also new/old_crtc_state at intel_plane_do_async_flip call site(Ville Syrjälä) Reviewed-by: Ville Syrjälä Signed-off-by: Stanislav Lisovskiy Link: https://patchwork.freedesktop.org/patch/msgid/20220124094929.31722-1-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 24 +++++++++++++++++++++++- drivers/gpu/drm/i915/intel_pm.c | 14 +++++++++++++- 2 files changed, 36 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index a78b16fe20fd..2bf0c3cd23dd 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4907,6 +4907,28 @@ static bool needs_scaling(const struct intel_plane_state *state) return (src_w != dst_w || src_h != dst_h); } +static bool intel_plane_do_async_flip(struct intel_plane *plane, + const struct intel_crtc_state *old_crtc_state, + const struct intel_crtc_state *new_crtc_state) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + + if (!plane->async_flip) + return false; + + if (!new_crtc_state->uapi.async_flip) + return false; + + /* + * In platforms after DISPLAY13, we might need to override + * first async flip in order to change watermark levels + * as part of optimization. + * So for those, we are checking if this is a first async flip. + * For platforms earlier than DISPLAY13 we always do async flip. 
+ */ + return DISPLAY_VER(i915) < 13 || old_crtc_state->uapi.async_flip; +} + int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state, const struct intel_plane_state *old_plane_state, @@ -5026,7 +5048,7 @@ int intel_plane_atomic_calc_changes(const struct intel_crtc_state *old_crtc_stat needs_scaling(new_plane_state)))) new_crtc_state->disable_lp_wm = true; - if (new_crtc_state->uapi.async_flip && plane->async_flip) + if (intel_plane_do_async_flip(plane, old_crtc_state, new_crtc_state)) new_plane_state->do_async_flip = true; return 0; diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index f7cd936e7be0..467e89dafe37 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4906,6 +4906,17 @@ static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes) return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0; } +static bool +use_minimal_wm0_only(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + + return DISPLAY_VER(i915) >= 13 && + crtc_state->uapi.async_flip && + plane->async_flip; +} + static u64 skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state, @@ -5510,7 +5521,8 @@ static void skl_compute_plane_wm(const struct intel_crtc_state *crtc_state, uint_fixed_16_16_t selected_result; u32 blocks, lines, min_ddb_alloc = 0; - if (latency == 0) { + if (latency == 0 || + (use_minimal_wm0_only(crtc_state, plane) && level > 0)) { /* reject it */ result->min_ddb_alloc = U16_MAX; return; -- cgit From 6a4d8cc6bbbfea4469a063bff0ff0114507df524 Mon Sep 17 00:00:00 2001 From: Stanislav Lisovskiy Date: Mon, 24 Jan 2022 15:52:34 +0200 Subject: drm/i915: Don't allocate extra ddb during async flip for DG2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In terms of async flip optimization we don't to allocate extra ddb space, so lets skip it. v2: - Extracted min ddb async flip check to separate function (Ville Syrjälä) - Used this function to prevent false positive WARN to be triggered(Ville Syrjälä) v3: - Renamed dg2_need_min_ddb to need_min_ddb thus making it more universal. 
- Also used DISPLAY_VER instead of IS_DG2(Ville Syrjälä) - Use rate = 0 instead of just setting extra = 0, thus letting other planes to use extra ddb and avoiding WARN (Ville Syrjälä) v4: - Renamed needs_min_ddb as s/needs/use/ to match the wm0 counterpart(Ville Syrjälä) - Added plane->async_flip check to use_min_ddb(now passing plane as a parameter to do that)(Ville Syrjälä) - Account for use_min_ddb also when calculating total data rate (Ville Syrjälä) v5: - Use for_each_intel_plane_on_crtc instead of for_each_intel_plane_id to get plane->async_flip check and account for all planes(Ville Syrjälä) - Fix line wrapping(Ville Syrjälä) - Set plane data rate conditionally, avoiding on redundant assignment (Ville Syrjälä) - Removed redundant whitespace(Ville Syrjälä) - Handle use_min_ddb case in skl_plane_relative_data_rate instead of icl_get_total_relative_data_rate(Ville Syrjälä) Signed-off-by: Stanislav Lisovskiy Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124090653.14547-2-stanislav.lisovskiy@intel.com --- drivers/gpu/drm/i915/intel_pm.c | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 467e89dafe37..23d3342081b8 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -4906,6 +4906,17 @@ static u8 skl_compute_dbuf_slices(struct intel_crtc *crtc, u8 active_pipes) return active_pipes & BIT(pipe) ? BIT(DBUF_S1) : 0; } +static bool +use_min_ddb(const struct intel_crtc_state *crtc_state, + struct intel_plane *plane) +{ + struct drm_i915_private *i915 = to_i915(plane->base.dev); + + return DISPLAY_VER(i915) >= 13 && + crtc_state->uapi.async_flip && + plane->async_flip; +} + static bool use_minimal_wm0_only(const struct intel_crtc_state *crtc_state, struct intel_plane *plane) @@ -4935,6 +4946,14 @@ skl_plane_relative_data_rate(const struct intel_crtc_state *crtc_state, if (plane->id == PLANE_CURSOR) return 0; + /* + * We calculate extra ddb based on ratio plane rate/total data rate + * in case, in some cases we should not allocate extra ddb for the plane, + * so do not count its data rate, if this is the case. + */ + if (use_min_ddb(crtc_state, plane)) + return 0; + if (color_plane == 1 && !intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier)) return 0; -- cgit From 9f807822abf5e210d8656fb5304f662bee64ca80 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 24 Jan 2022 21:26:34 +0200 Subject: drm/i915: Skip dsc readout if the transcoder is disabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Trying to do readout when we don't even have a cpu transcoder is not a great idea. Don't do it. 
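The reasoning behind the reordering: pipe_config->cpu_transcoder is only established once readout has found something actually driving the pipe, and the DSC/joiner readout presumably needs that transcoder to be valid, so the early-out has to come first. Condensed from the hunk below, with the intent spelled out as comments:

  if (!active)
          goto out;               /* no transcoder found, nothing to read out */

  /* only past this point is transcoder-scoped readout meaningful */
  intel_dsc_get_config(pipe_config);
  if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable)
          intel_uncompressed_joiner_get_config(pipe_config);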
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124192638.26262-1-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 2bf0c3cd23dd..a10f693c3b6e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -4379,13 +4379,13 @@ static bool hsw_get_pipe_config(struct intel_crtc *crtc, active = true; } + if (!active) + goto out; + intel_dsc_get_config(pipe_config); if (DISPLAY_VER(dev_priv) >= 13 && !pipe_config->dsc.compression_enable) intel_uncompressed_joiner_get_config(pipe_config); - if (!active) - goto out; - if (!transcoder_is_dsi(pipe_config->cpu_transcoder) || DISPLAY_VER(dev_priv) >= 11) intel_get_transcoder_timings(crtc, pipe_config); -- cgit From df216b37333cf2ddb0db86b966e86a53d239a013 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 24 Jan 2022 21:26:35 +0200 Subject: drm/i915: Simplify intel_dsc_source_support() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can simplify the icl check in intel_dsc_source_support() by noting that the only case when DSC is not supported is when using transcoder A. Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124192638.26262-2-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_vdsc.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_vdsc.c b/drivers/gpu/drm/i915/display/intel_vdsc.c index 9b05f93ed8bc..3faea903b9ae 100644 --- a/drivers/gpu/drm/i915/display/intel_vdsc.c +++ b/drivers/gpu/drm/i915/display/intel_vdsc.c @@ -341,19 +341,14 @@ bool intel_dsc_source_support(const struct intel_crtc_state *crtc_state) const struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *i915 = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; - enum pipe pipe = crtc->pipe; if (!INTEL_INFO(i915)->display.has_dsc) return false; - /* On TGL, DSC is supported on all Pipes */ if (DISPLAY_VER(i915) >= 12) return true; - if (DISPLAY_VER(i915) >= 11 && - (pipe != PIPE_A || cpu_transcoder == TRANSCODER_EDP || - cpu_transcoder == TRANSCODER_DSI_0 || - cpu_transcoder == TRANSCODER_DSI_1)) + if (DISPLAY_VER(i915) >= 11 && cpu_transcoder != TRANSCODER_A) return true; return false; -- cgit From c20b5d41e9de40a51b7e5516198c08a906fb7770 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 24 Jan 2022 21:26:36 +0200 Subject: drm/i915: Use per-device debugs for bigjoiner stuff MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Specify which device we're talking about when spewing bigjoiner debugs. 
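The practical difference between the two logging flavours: DRM_DEBUG_KMS() carries no device context, while drm_dbg_kms() takes a struct drm_device pointer and tags each message with the device it came from, which is what you want on multi-GPU machines. The substitution pattern used throughout the hunk, reduced to a single call:

  /* old: indistinguishable between devices */
  DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n",
                slave_crtc->base.base.id, slave_crtc->base.name);

  /* new: the i915 instance is named in the log line */
  drm_dbg_kms(&i915->drm,
              "[CRTC:%d:%s] Used as slave for big joiner\n",
              slave_crtc->base.base.id, slave_crtc->base.name);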
Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124192638.26262-3-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index a10f693c3b6e..250561eab1e5 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -7624,6 +7624,7 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, struct intel_crtc_state *old_crtc_state, struct intel_crtc_state *new_crtc_state) { + struct drm_i915_private *i915 = to_i915(state->base.dev); struct intel_crtc_state *slave_crtc_state, *master_crtc_state; struct intel_crtc *slave_crtc, *master_crtc; @@ -7641,9 +7642,10 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, slave_crtc = intel_dsc_get_bigjoiner_secondary(crtc); if (!slave_crtc) { - DRM_DEBUG_KMS("[CRTC:%d:%s] Big joiner configuration requires " - "CRTC + 1 to be used, doesn't exist\n", - crtc->base.base.id, crtc->base.name); + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] Big joiner configuration requires " + "CRTC + 1 to be used, doesn't exist\n", + crtc->base.base.id, crtc->base.name); return -EINVAL; } @@ -7657,16 +7659,18 @@ static int intel_atomic_check_bigjoiner(struct intel_atomic_state *state, if (slave_crtc_state->uapi.enable) goto claimed; - DRM_DEBUG_KMS("[CRTC:%d:%s] Used as slave for big joiner\n", - slave_crtc->base.base.id, slave_crtc->base.name); + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] Used as slave for big joiner\n", + slave_crtc->base.base.id, slave_crtc->base.name); return copy_bigjoiner_crtc_state(slave_crtc_state, new_crtc_state); claimed: - DRM_DEBUG_KMS("[CRTC:%d:%s] Slave is enabled as normal CRTC, but " - "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", - slave_crtc->base.base.id, slave_crtc->base.name, - master_crtc->base.base.id, master_crtc->base.name); + drm_dbg_kms(&i915->drm, + "[CRTC:%d:%s] Slave is enabled as normal CRTC, but " + "[CRTC:%d:%s] claiming this CRTC for bigjoiner.\n", + slave_crtc->base.base.id, slave_crtc->base.name, + master_crtc->base.base.id, master_crtc->base.name); return -EINVAL; } -- cgit From 1d894ce88eca35ef8627901c47c3881cb1f3e74a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 24 Jan 2022 21:26:37 +0200 Subject: drm/i915: Extract hsw_configure_cpu_transcoder() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pull the transcoder specific modeset steps into a single place. With bigoiner we need to keep in mind wheher we're dealing with the transcoder or the pipe, and a slightly higher level split makes that easier. 
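What the split buys at the call site: everything inside hsw_configure_cpu_transcoder() programs the (possibly shared) transcoder and is skipped for bigjoiner slaves and DSI, while what follows it in hsw_crtc_enable() is pipe-scoped and runs for every pipe. Roughly, per the hunk below:

  if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder))
          hsw_configure_cpu_transcoder(new_crtc_state);   /* transcoder-scoped */

  intel_set_pipe_src_size(new_crtc_state);                /* pipe-scoped */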
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124192638.26262-4-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 38 +++++++++++++++++----------- 1 file changed, 23 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 250561eab1e5..98fad1bae6ff 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2011,6 +2011,27 @@ static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, intel_uncompressed_joiner_enable(crtc_state); } +static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + intel_set_transcoder_timings(crtc_state); + + if (cpu_transcoder != TRANSCODER_EDP) + intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), + crtc_state->pixel_multiplier - 1); + + if (crtc_state->has_pch_encoder) + intel_cpu_transcoder_set_m_n(crtc_state, + &crtc_state->fdi_m_n, NULL); + + hsw_set_frame_start_delay(crtc_state); + + hsw_set_transconf(crtc_state); +} + static void hsw_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { @@ -2039,21 +2060,8 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) bdw_set_pipemisc(new_crtc_state); - if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) { - intel_set_transcoder_timings(new_crtc_state); - - if (cpu_transcoder != TRANSCODER_EDP) - intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), - new_crtc_state->pixel_multiplier - 1); - - if (new_crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m_n(new_crtc_state, - &new_crtc_state->fdi_m_n, NULL); - - hsw_set_frame_start_delay(new_crtc_state); - - hsw_set_transconf(new_crtc_state); - } + if (!new_crtc_state->bigjoiner_slave && !transcoder_is_dsi(cpu_transcoder)) + hsw_configure_cpu_transcoder(new_crtc_state); crtc->active = true; -- cgit From bc1ce503769c51c1c06f5ed126b07a545996d697 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 25 Jan 2022 08:39:37 +0200 Subject: drm/i915: Move dsc/joiner enable into hsw_crtc_enable() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Lift the dsc/joiner enable up from the wonky places where it currently sits (ddi .pre_enable() or icl_ddi_bigjoiner_pre_enable()) into hsw_crtc_enable() where we write the other per-pipe stuff as well. Makes the transcoder vs. pipe split less confusing. For DSI this results in slight reordering between the dsc/joiner enable vs. transcoder timings setup, but I can't really think why that should cause any issues since the transcoder isn't yet enabled at that point. 
v2: Take care of dsi (Jani) Cc: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220125063937.7003-1-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/icl_dsi.c | 2 -- drivers/gpu/drm/i915/display/intel_ddi.c | 6 ------ drivers/gpu/drm/i915/display/intel_display.c | 12 +++++------- 3 files changed, 5 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 95f49535fa6e..16a611f7d659 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1233,8 +1233,6 @@ static void gen11_dsi_pre_enable(struct intel_atomic_state *state, intel_dsc_dsi_pps_write(encoder, pipe_config); - intel_dsc_enable(pipe_config); - /* step6c: configure transcoder timings */ gen11_dsi_set_transcoder_timings(encoder, pipe_config); } diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 2f20abc5122d..5d1f7d6218c5 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2425,9 +2425,6 @@ static void tgl_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_fec(encoder, crtc_state); intel_dsc_dp_pps_write(encoder, crtc_state); - - if (!crtc_state->bigjoiner) - intel_dsc_enable(crtc_state); } static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, @@ -2493,9 +2490,6 @@ static void hsw_ddi_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_enable_pipe_clock(encoder, crtc_state); intel_dsc_dp_pps_write(encoder, crtc_state); - - if (!crtc_state->bigjoiner) - intel_dsc_enable(crtc_state); } static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 98fad1bae6ff..69244ad19eec 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1973,7 +1973,6 @@ static void hsw_set_frame_start_delay(const struct intel_crtc_state *crtc_state) static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, const struct intel_crtc_state *crtc_state) { - struct drm_i915_private *dev_priv = to_i915(state->base.dev); struct intel_crtc_state *master_crtc_state; struct intel_crtc *master_crtc; struct drm_connector_state *conn_state; @@ -2003,12 +2002,6 @@ static void icl_ddi_bigjoiner_pre_enable(struct intel_atomic_state *state, if (crtc_state->bigjoiner_slave) intel_encoders_pre_enable(state, master_crtc); - - /* need to enable VDSC, which we skipped in pre-enable */ - intel_dsc_enable(crtc_state); - - if (DISPLAY_VER(dev_priv) >= 13) - intel_uncompressed_joiner_enable(crtc_state); } static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) @@ -2056,6 +2049,11 @@ static void hsw_crtc_enable(struct intel_atomic_state *state, icl_ddi_bigjoiner_pre_enable(state, new_crtc_state); } + intel_dsc_enable(new_crtc_state); + + if (DISPLAY_VER(dev_priv) >= 13) + intel_uncompressed_joiner_enable(new_crtc_state); + intel_set_pipe_src_size(new_crtc_state); if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) bdw_set_pipemisc(new_crtc_state); -- cgit From 0c566618e27f17b5807086dba8c222ca8ca3dc1e Mon Sep 17 00:00:00 2001 From: Tim Harvey Date: Thu, 16 Dec 2021 08:12:27 -0800 Subject: arm64: dts: imx8mn-venice-gw7902: disable gpu Since commit 99aa29932271 ("arm64: dts: imx8mn: Enable GPU") imx8mn-venice-gw7902 will hang 
during kernel init because it uses a MIMX8MN5CVTI which does not have a GPU. Disable pgc_gpumix to work around this. We also disable the GPU devices that depend on the gpumix power domain and pgc_gpu to avoid them staying in a probe deferred state forever. Cc: Adam Ford Cc: Lucas Stach Signed-off-by: Tim Harvey Fixes: 99aa29932271 ("arm64: dts: imx8mn: Enable GPU") Reviewed-by: Adam Ford Signed-off-by: Shawn Guo --- arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts index 236f425e1570..2d58005d20e4 100644 --- a/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts +++ b/arch/arm64/boot/dts/freescale/imx8mn-venice-gw7902.dts @@ -220,6 +220,10 @@ }; }; +&disp_blk_ctrl { + status = "disabled"; +}; + /* off-board header */ &ecspi2 { pinctrl-names = "default"; @@ -251,6 +255,10 @@ }; }; +&gpu { + status = "disabled"; +}; + &i2c1 { clock-frequency = <100000>; pinctrl-names = "default"; @@ -546,6 +554,10 @@ status = "okay"; }; +&pgc_gpumix { + status = "disabled"; +}; + /* off-board header */ &sai3 { pinctrl-names = "default"; -- cgit From 90cafce461de108bfb07c06148395dc86c3fcd23 Mon Sep 17 00:00:00 2001 From: Dongliang Mu Date: Mon, 6 Dec 2021 18:19:31 +0800 Subject: spi: change clk_disable_unprepare to clk_unprepare The corresponding API for clk_prepare is clk_unprepare, other than clk_disable_unprepare. Fix this by changing clk_disable_unprepare to clk_unprepare. Fixes: 5762ab71eb24 ("spi: Add support for Armada 3700 SPI Controller") Signed-off-by: Dongliang Mu Link: https://lore.kernel.org/r/20211206101931.2816597-1-mudongliangabcd@gmail.com Signed-off-by: Mark Brown --- drivers/spi/spi-armada-3700.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/spi/spi-armada-3700.c b/drivers/spi/spi-armada-3700.c index 46feafe4e201..d8cc4b270644 100644 --- a/drivers/spi/spi-armada-3700.c +++ b/drivers/spi/spi-armada-3700.c @@ -901,7 +901,7 @@ static int a3700_spi_probe(struct platform_device *pdev) return 0; error_clk: - clk_disable_unprepare(spi->clk); + clk_unprepare(spi->clk); error: spi_master_put(master); out: -- cgit From 23e3404de1aecc62c14ac96d4b63403c3e0f52d5 Mon Sep 17 00:00:00 2001 From: Kunihiko Hayashi Date: Wed, 22 Dec 2021 13:48:12 +0900 Subject: spi: uniphier: Fix a bug that doesn't point to private data correctly In uniphier_spi_remove(), there is a wrong code to get private data from the platform device, so the driver can't be removed properly. The driver should get spi_master from the platform device and retrieve the private data from it. 
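The contract being restored: remove() must read back exactly what probe() stored via platform_set_drvdata(), and for this driver that is the spi_master (the private data hangs off the master), so the private data has to be unwrapped in two steps. A minimal sketch, assuming the probe path stores the master as most SPI controller drivers do:

  /* probe side (not part of this hunk) */
  platform_set_drvdata(pdev, master);

  /* remove side: fetch the same object back, then its driver data */
  struct spi_master *master = platform_get_drvdata(pdev);
  struct uniphier_spi_priv *priv = spi_master_get_devdata(master);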
Cc: Fixes: 5ba155a4d4cc ("spi: add SPI controller driver for UniPhier SoC") Signed-off-by: Kunihiko Hayashi Link: https://lore.kernel.org/r/1640148492-32178-1-git-send-email-hayashi.kunihiko@socionext.com Signed-off-by: Mark Brown --- drivers/spi/spi-uniphier.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 8900e51e1a1c..342ee8d2c476 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -767,12 +767,13 @@ out_master_put: static int uniphier_spi_remove(struct platform_device *pdev) { - struct uniphier_spi_priv *priv = platform_get_drvdata(pdev); + struct spi_master *master = platform_get_drvdata(pdev); + struct uniphier_spi_priv *priv = spi_master_get_devdata(master); - if (priv->master->dma_tx) - dma_release_channel(priv->master->dma_tx); - if (priv->master->dma_rx) - dma_release_channel(priv->master->dma_rx); + if (master->dma_tx) + dma_release_channel(master->dma_tx); + if (master->dma_rx) + dma_release_channel(master->dma_rx); clk_disable_unprepare(priv->clk); -- cgit From 993d66140f8d1c1853a3b58b77b43b681eb64dee Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Sun, 19 Dec 2021 19:42:15 -0300 Subject: ARM: dts: imx6qdl-udoo: Properly describe the SD card detect GPIO7_IO00 is used as SD card detect. Properly describe this in the devicetree. Fixes: 40cdaa542cf0 ("ARM: dts: imx6q-udoo: Add initial board support") Signed-off-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx6qdl-udoo.dtsi | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/imx6qdl-udoo.dtsi b/arch/arm/boot/dts/imx6qdl-udoo.dtsi index d07d8f83456d..ccfa8e320be6 100644 --- a/arch/arm/boot/dts/imx6qdl-udoo.dtsi +++ b/arch/arm/boot/dts/imx6qdl-udoo.dtsi @@ -5,6 +5,8 @@ * Author: Fabio Estevam */ +#include + / { aliases { backlight = &backlight; @@ -226,6 +228,7 @@ MX6QDL_PAD_SD3_DAT1__SD3_DATA1 0x17059 MX6QDL_PAD_SD3_DAT2__SD3_DATA2 0x17059 MX6QDL_PAD_SD3_DAT3__SD3_DATA3 0x17059 + MX6QDL_PAD_SD3_DAT5__GPIO7_IO00 0x1b0b0 >; }; @@ -304,7 +307,7 @@ &usdhc3 { pinctrl-names = "default"; pinctrl-0 = <&pinctrl_usdhc3>; - non-removable; + cd-gpios = <&gpio7 0 GPIO_ACTIVE_LOW>; status = "okay"; }; -- cgit From c5c1546a654f613e291a7c5d6f3660fc1eb6d0c7 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Wed, 26 Jan 2022 11:35:46 +0000 Subject: ASoC: codecs: wcd938x: fix incorrect used of portid Mixer controls have the channel id in mixer->reg, which is not same as port id. port id should be derived from chan_info array. So fix this. Without this, its possible that we could corrupt struct wcd938x_sdw_priv by accessing port_map array out of range with channel id instead of port id. 
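Put differently: mixer->reg is a channel index into ch_info[], and only ch_info[ch].port_num is an actual soundwire port number; port_config[] is indexed by port (1-based in this driver, hence the -1 in the patch). The lookup the fix introduces, reduced to its core:

  int ch_idx  = mc->reg;                        /* channel index, not a port */
  int portidx = wcd->ch_info[ch_idx].port_num;  /* 1-based port number */

  wcd938x_sdw_connect_port(&wcd->ch_info[ch_idx],
                           &wcd->port_config[portidx - 1], enable);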
Fixes: e8ba1e05bdc0 ("ASoC: codecs: wcd938x: add basic controls") Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20220126113549.8853-2-srinivas.kandagatla@linaro.org Signed-off-by: Mark Brown --- sound/soc/codecs/wcd938x.c | 25 ++++++++++++++----------- 1 file changed, 14 insertions(+), 11 deletions(-) diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c index eff200a07d9f..5994644c8702 100644 --- a/sound/soc/codecs/wcd938x.c +++ b/sound/soc/codecs/wcd938x.c @@ -1432,14 +1432,10 @@ static int wcd938x_sdw_connect_port(struct wcd938x_sdw_ch_info *ch_info, return 0; } -static int wcd938x_connect_port(struct wcd938x_sdw_priv *wcd, u8 ch_id, u8 enable) +static int wcd938x_connect_port(struct wcd938x_sdw_priv *wcd, u8 port_num, u8 ch_id, u8 enable) { - u8 port_num; - - port_num = wcd->ch_info[ch_id].port_num; - return wcd938x_sdw_connect_port(&wcd->ch_info[ch_id], - &wcd->port_config[port_num], + &wcd->port_config[port_num - 1], enable); } @@ -2593,6 +2589,7 @@ static int wcd938x_set_compander(struct snd_kcontrol *kcontrol, struct wcd938x_priv *wcd938x = snd_soc_component_get_drvdata(component); struct wcd938x_sdw_priv *wcd; int value = ucontrol->value.integer.value[0]; + int portidx; struct soc_mixer_control *mc; bool hphr; @@ -2606,10 +2603,12 @@ static int wcd938x_set_compander(struct snd_kcontrol *kcontrol, else wcd938x->comp1_enable = value; + portidx = wcd->ch_info[mc->reg].port_num; + if (value) - wcd938x_connect_port(wcd, mc->reg, true); + wcd938x_connect_port(wcd, portidx, mc->reg, true); else - wcd938x_connect_port(wcd, mc->reg, false); + wcd938x_connect_port(wcd, portidx, mc->reg, false); return 0; } @@ -2882,9 +2881,11 @@ static int wcd938x_get_swr_port(struct snd_kcontrol *kcontrol, struct wcd938x_sdw_priv *wcd; struct soc_mixer_control *mixer = (struct soc_mixer_control *)kcontrol->private_value; int dai_id = mixer->shift; - int portidx = mixer->reg; + int portidx, ch_idx = mixer->reg; + wcd = wcd938x->sdw_priv[dai_id]; + portidx = wcd->ch_info[ch_idx].port_num; ucontrol->value.integer.value[0] = wcd->port_enable[portidx]; @@ -2899,12 +2900,14 @@ static int wcd938x_set_swr_port(struct snd_kcontrol *kcontrol, struct wcd938x_sdw_priv *wcd; struct soc_mixer_control *mixer = (struct soc_mixer_control *)kcontrol->private_value; - int portidx = mixer->reg; + int ch_idx = mixer->reg; + int portidx; int dai_id = mixer->shift; bool enable; wcd = wcd938x->sdw_priv[dai_id]; + portidx = wcd->ch_info[ch_idx].port_num; if (ucontrol->value.integer.value[0]) enable = true; else @@ -2912,7 +2915,7 @@ static int wcd938x_set_swr_port(struct snd_kcontrol *kcontrol, wcd->port_enable[portidx] = enable; - wcd938x_connect_port(wcd, portidx, enable); + wcd938x_connect_port(wcd, portidx, ch_idx, enable); return 0; -- cgit From fca041a3ab70a099a6d5519ecb689b6279bd04f3 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Wed, 26 Jan 2022 11:35:47 +0000 Subject: ASoC: codecs: lpass-rx-macro: fix sidetone register offsets For some reason we ended up with incorrect register offfset calcuations for sidetone. regmap clearly throw errors when accessing these incorrect registers as these do not belong to any read/write ranges. so fix them to point to correct register offsets. 
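A quick worked example of the arithmetic, assuming the IIR0/IIR1 register blocks sit 0x80 apart as the new code implies: for iir_idx = 1 the old stride landed only 0x10 past the IIR0 base, an offset with no defined register behind it (hence the regmap range errors), while the 0x80 stride lands on IIR1's own coefficient control register.

  /* iir_idx = 1 */
  reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * 1;    /* old: unmapped offset */
  reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 0x80 * 1;  /* new: IIR1 B1_CTL */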
Fixes: f3ce6f3c9a99 ("ASoC: codecs: lpass-rx-macro: add iir widgets") Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20220126113549.8853-3-srinivas.kandagatla@linaro.org Signed-off-by: Mark Brown --- sound/soc/codecs/lpass-rx-macro.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/sound/soc/codecs/lpass-rx-macro.c b/sound/soc/codecs/lpass-rx-macro.c index aec5127260fd..6ffe88345de5 100644 --- a/sound/soc/codecs/lpass-rx-macro.c +++ b/sound/soc/codecs/lpass-rx-macro.c @@ -2688,8 +2688,8 @@ static uint32_t get_iir_band_coeff(struct snd_soc_component *component, int reg, b2_reg; /* Address does not automatically update if reading */ - reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx; - b2_reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx; + reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 0x80 * iir_idx; + b2_reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 0x80 * iir_idx; snd_soc_component_write(component, reg, ((band_idx * BAND_MAX + coeff_idx) * @@ -2718,7 +2718,7 @@ static uint32_t get_iir_band_coeff(struct snd_soc_component *component, static void set_iir_band_coeff(struct snd_soc_component *component, int iir_idx, int band_idx, uint32_t value) { - int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 16 * iir_idx; + int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B2_CTL + 0x80 * iir_idx; snd_soc_component_write(component, reg, (value & 0xFF)); snd_soc_component_write(component, reg, (value >> 8) & 0xFF); @@ -2739,7 +2739,7 @@ static int rx_macro_put_iir_band_audio_mixer( int iir_idx = ctl->iir_idx; int band_idx = ctl->band_idx; u32 coeff[BAND_MAX]; - int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 16 * iir_idx; + int reg = CDC_RX_SIDETONE_IIR0_IIR_COEF_B1_CTL + 0x80 * iir_idx; memcpy(&coeff[0], ucontrol->value.bytes.data, params->max); -- cgit From bd2347fd67d8da0fa76296507cc556da0a233bcb Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Wed, 26 Jan 2022 11:35:48 +0000 Subject: ASoC: codecs: wcd938x: fix return value of mixer put function wcd938x_ear_pa_put_gain, wcd938x_set_swr_port and wcd938x_set_compander currently returns zero eventhough it changes the value. Fix this, so that change notifications are sent correctly. 
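For context, the ALSA kcontrol convention these callbacks follow: a .put handler returns 1 when it actually changed the value (the core then emits a value-change notification to userspace), 0 when nothing changed, and a negative errno on error, so returning 0 after updating the hardware silently drops the notification. A minimal sketch of the convention with placeholder names (example_ctl_put and cached_val are not from the driver):

  static int example_ctl_put(struct snd_kcontrol *kcontrol,
                             struct snd_ctl_elem_value *ucontrol)
  {
          long val = ucontrol->value.integer.value[0];

          if (val == cached_val)
                  return 0;       /* unchanged: no event sent */

          cached_val = val;       /* apply the new value to the hardware here */
          return 1;               /* changed: core notifies userspace */
  }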
Fixes: e8ba1e05bdc01 ("ASoC: codecs: wcd938x: add basic controls") Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20220126113549.8853-4-srinivas.kandagatla@linaro.org Signed-off-by: Mark Brown --- sound/soc/codecs/wcd938x.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sound/soc/codecs/wcd938x.c b/sound/soc/codecs/wcd938x.c index 5994644c8702..36cbc66914f9 100644 --- a/sound/soc/codecs/wcd938x.c +++ b/sound/soc/codecs/wcd938x.c @@ -2559,7 +2559,7 @@ static int wcd938x_ear_pa_put_gain(struct snd_kcontrol *kcontrol, WCD938X_EAR_GAIN_MASK, ucontrol->value.integer.value[0]); - return 0; + return 1; } static int wcd938x_get_compander(struct snd_kcontrol *kcontrol, @@ -2610,7 +2610,7 @@ static int wcd938x_set_compander(struct snd_kcontrol *kcontrol, else wcd938x_connect_port(wcd, portidx, mc->reg, false); - return 0; + return 1; } static int wcd938x_ldoh_get(struct snd_kcontrol *kcontrol, @@ -2917,7 +2917,7 @@ static int wcd938x_set_swr_port(struct snd_kcontrol *kcontrol, wcd938x_connect_port(wcd, portidx, ch_idx, enable); - return 0; + return 1; } -- cgit From 8f2e5c65ec7534cce6d315fccf2c3aef023f68f0 Mon Sep 17 00:00:00 2001 From: Srinivas Kandagatla Date: Wed, 26 Jan 2022 11:35:49 +0000 Subject: ASoC: qdsp6: q6apm-dai: only stop graphs that are started Its possible that the sound card is just opened and closed without actually playing stream, ex: if the audio file itself is missing. Even in such cases we do call stop on graphs that are not yet started. DSP can throw errors in such cases, so add a check to see if the graph was started before stopping it. Fixes: 9b4fe0f1cd79 ("ASoC: qdsp6: audioreach: add q6apm-dai support") Signed-off-by: Srinivas Kandagatla Link: https://lore.kernel.org/r/20220126113549.8853-5-srinivas.kandagatla@linaro.org Signed-off-by: Mark Brown --- sound/soc/qcom/qdsp6/q6apm-dai.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/sound/soc/qcom/qdsp6/q6apm-dai.c b/sound/soc/qcom/qdsp6/q6apm-dai.c index eb1c3aec479b..19c4a90ec1ea 100644 --- a/sound/soc/qcom/qdsp6/q6apm-dai.c +++ b/sound/soc/qcom/qdsp6/q6apm-dai.c @@ -308,8 +308,11 @@ static int q6apm_dai_close(struct snd_soc_component *component, struct snd_pcm_runtime *runtime = substream->runtime; struct q6apm_dai_rtd *prtd = runtime->private_data; - q6apm_graph_stop(prtd->graph); - q6apm_unmap_memory_regions(prtd->graph, substream->stream); + if (prtd->state) { /* only stop graph that is started */ + q6apm_graph_stop(prtd->graph); + q6apm_unmap_memory_regions(prtd->graph, substream->stream); + } + q6apm_graph_close(prtd->graph); prtd->graph = NULL; kfree(prtd); -- cgit From e937440f7fc444a3e3f1fb75ea65292d6f433a44 Mon Sep 17 00:00:00 2001 From: Miaoqian Lin Date: Wed, 26 Jan 2022 11:04:47 +0000 Subject: spi: meson-spicc: add IRQ check in meson_spicc_probe This check misses checking for platform_get_irq()'s call and may passes the negative error codes to devm_request_irq(), which takes unsigned IRQ #, causing it to fail with -EINVAL, overriding an original error code. Stop calling devm_request_irq() with invalid IRQ #s. 
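The general pattern being enforced: platform_get_irq() returns a negative errno on failure (including -EPROBE_DEFER), and devm_request_irq() takes an unsigned irq number, so the error has to be caught and propagated before the request is made. The patch does exactly this, except it jumps to its existing unwind label instead of returning directly; handler and priv below stand in for the driver's own names:

  irq = platform_get_irq(pdev, 0);
  if (irq < 0)
          return irq;     /* preserve the real error, e.g. -EPROBE_DEFER */

  ret = devm_request_irq(&pdev->dev, irq, handler, 0, NULL, priv);
  if (ret)
          return ret;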
Fixes: 454fa271bc4e ("spi: Add Meson SPICC driver") Signed-off-by: Miaoqian Lin Link: https://lore.kernel.org/r/20220126110447.24549-1-linmq006@gmail.com Signed-off-by: Mark Brown --- drivers/spi/spi-meson-spicc.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/spi/spi-meson-spicc.c b/drivers/spi/spi-meson-spicc.c index c208efeadd18..0bc7daa7afc8 100644 --- a/drivers/spi/spi-meson-spicc.c +++ b/drivers/spi/spi-meson-spicc.c @@ -693,6 +693,11 @@ static int meson_spicc_probe(struct platform_device *pdev) writel_relaxed(0, spicc->base + SPICC_INTREG); irq = platform_get_irq(pdev, 0); + if (irq < 0) { + ret = irq; + goto out_master; + } + ret = devm_request_irq(&pdev->dev, irq, meson_spicc_irq, 0, NULL, spicc); if (ret) { -- cgit From 549f8ffc7b2f7561bea7f90930b6c5104318e87b Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 26 Jan 2022 15:50:11 +0100 Subject: ALSA: hda: Fix UAF of leds class devs at unbinding The LED class devices that are created by HD-audio codec drivers are registered via devm_led_classdev_register() and associated with the HD-audio codec device. Unfortunately, it turned out that the devres release doesn't work for this case; namely, since the codec resource release happens before the devm call chain, it triggers a NULL dereference or a UAF for a stale set_brightness_delay callback. For fixing the bug, this patch changes the LED class device register and unregister in a manual manner without devres, keeping the instances in hda_gen_spec. Reported-by: Alexander Sergeyev Cc: Link: https://lore.kernel.org/r/20220111195229.a77wrpjclqwrx4bx@localhost.localdomain Link: https://lore.kernel.org/r/20220126145011.16728-1-tiwai@suse.de Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_generic.c | 17 +++++++++++++++-- sound/pci/hda/hda_generic.h | 3 +++ 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c index 3bf5e3410703..fc114e522480 100644 --- a/sound/pci/hda/hda_generic.c +++ b/sound/pci/hda/hda_generic.c @@ -91,6 +91,12 @@ static void snd_hda_gen_spec_free(struct hda_gen_spec *spec) free_kctls(spec); snd_array_free(&spec->paths); snd_array_free(&spec->loopback_list); +#ifdef CONFIG_SND_HDA_GENERIC_LEDS + if (spec->led_cdevs[LED_AUDIO_MUTE]) + led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MUTE]); + if (spec->led_cdevs[LED_AUDIO_MICMUTE]) + led_classdev_unregister(spec->led_cdevs[LED_AUDIO_MICMUTE]); +#endif } /* @@ -3922,7 +3928,10 @@ static int create_mute_led_cdev(struct hda_codec *codec, enum led_brightness), bool micmute) { + struct hda_gen_spec *spec = codec->spec; struct led_classdev *cdev; + int idx = micmute ? LED_AUDIO_MICMUTE : LED_AUDIO_MUTE; + int err; cdev = devm_kzalloc(&codec->core.dev, sizeof(*cdev), GFP_KERNEL); if (!cdev) @@ -3932,10 +3941,14 @@ static int create_mute_led_cdev(struct hda_codec *codec, cdev->max_brightness = 1; cdev->default_trigger = micmute ? "audio-micmute" : "audio-mute"; cdev->brightness_set_blocking = callback; - cdev->brightness = ledtrig_audio_get(micmute ? 
LED_AUDIO_MICMUTE : LED_AUDIO_MUTE); + cdev->brightness = ledtrig_audio_get(idx); cdev->flags = LED_CORE_SUSPENDRESUME; - return devm_led_classdev_register(&codec->core.dev, cdev); + err = led_classdev_register(&codec->core.dev, cdev); + if (err < 0) + return err; + spec->led_cdevs[idx] = cdev; + return 0; } /** diff --git a/sound/pci/hda/hda_generic.h b/sound/pci/hda/hda_generic.h index 8e1bc8ea74fc..34eba40cc6e6 100644 --- a/sound/pci/hda/hda_generic.h +++ b/sound/pci/hda/hda_generic.h @@ -294,6 +294,9 @@ struct hda_gen_spec { struct hda_jack_callback *cb); void (*mic_autoswitch_hook)(struct hda_codec *codec, struct hda_jack_callback *cb); + + /* leds */ + struct led_classdev *led_cdevs[NUM_AUDIO_LEDS]; }; /* values for add_stereo_mix_input flag */ -- cgit From 37c2c83ca4f1ef4b6908181ac98e18360af89b42 Mon Sep 17 00:00:00 2001 From: Xin Xiong Date: Tue, 25 Jan 2022 18:12:15 +0800 Subject: spi: uniphier: fix reference count leak in uniphier_spi_probe() The issue happens in several error paths in uniphier_spi_probe(). When either dma_get_slave_caps() or devm_spi_register_master() returns an error code, the function forgets to decrease the refcount of both `dma_rx` and `dma_tx` objects, which may lead to refcount leaks. Fix it by decrementing the reference count of specific objects in those error paths. Signed-off-by: Xin Xiong Signed-off-by: Xiyu Yang Signed-off-by: Xin Tan Reviewed-by: Kunihiko Hayashi Fixes: 28d1dddc59f6 ("spi: uniphier: Add DMA transfer mode support") Link: https://lore.kernel.org/r/20220125101214.35677-1-xiongx18@fudan.edu.cn Signed-off-by: Mark Brown --- drivers/spi/spi-uniphier.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/spi/spi-uniphier.c b/drivers/spi/spi-uniphier.c index 342ee8d2c476..cc0da4822231 100644 --- a/drivers/spi/spi-uniphier.c +++ b/drivers/spi/spi-uniphier.c @@ -726,7 +726,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to get TX DMA capacities: %d\n", ret); - goto out_disable_clk; + goto out_release_dma; } dma_tx_burst = caps.max_burst; } @@ -735,7 +735,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (IS_ERR_OR_NULL(master->dma_rx)) { if (PTR_ERR(master->dma_rx) == -EPROBE_DEFER) { ret = -EPROBE_DEFER; - goto out_disable_clk; + goto out_release_dma; } master->dma_rx = NULL; dma_rx_burst = INT_MAX; @@ -744,7 +744,7 @@ static int uniphier_spi_probe(struct platform_device *pdev) if (ret) { dev_err(&pdev->dev, "failed to get RX DMA capacities: %d\n", ret); - goto out_disable_clk; + goto out_release_dma; } dma_rx_burst = caps.max_burst; } @@ -753,10 +753,20 @@ static int uniphier_spi_probe(struct platform_device *pdev) ret = devm_spi_register_master(&pdev->dev, master); if (ret) - goto out_disable_clk; + goto out_release_dma; return 0; +out_release_dma: + if (!IS_ERR_OR_NULL(master->dma_rx)) { + dma_release_channel(master->dma_rx); + master->dma_rx = NULL; + } + if (!IS_ERR_OR_NULL(master->dma_tx)) { + dma_release_channel(master->dma_tx); + master->dma_tx = NULL; + } + out_disable_clk: clk_disable_unprepare(priv->clk); -- cgit From c80d401c52a2d1baf2a5afeb06f0ffe678e56d23 Mon Sep 17 00:00:00 2001 From: Tianchen Ding Date: Tue, 18 Jan 2022 18:05:18 +0800 Subject: cpuset: Fix the bug that subpart_cpus updated wrongly in update_cpumask() subparts_cpus should be limited as a subset of cpus_allowed, but it is updated wrongly by using cpumask_andnot(). Use cpumask_and() instead to fix it. 
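The two helpers differ in exactly the wrong way here: cpumask_and(dst, a, b) computes a & b, i.e. restricts a to the subset b, while cpumask_andnot(dst, a, b) computes a & ~b, i.e. removes b from a. A worked example with small masks: with subparts_cpus = {2,3} and cpus_allowed = {1,2}, the intended result is {2}, but the old code kept {3} instead, precisely the CPU that is no longer allowed.

  /* subparts_cpus = 0b1100 ({2,3}), cpus_allowed = 0b0110 ({1,2}) */
  cpumask_and(subparts, subparts, allowed);    /* 0b0100 = {2}: the intended subset   */
  cpumask_andnot(subparts, subparts, allowed); /* 0b1000 = {3}: keeps the disallowed CPU */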
Fixes: ee8dde0cd2ce ("cpuset: Add new v2 cpuset.sched.partition flag") Signed-off-by: Tianchen Ding Reviewed-by: Waiman Long Signed-off-by: Tejun Heo --- kernel/cgroup/cpuset.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/kernel/cgroup/cpuset.c b/kernel/cgroup/cpuset.c index bb3531e7fda7..804ff5738c5f 100644 --- a/kernel/cgroup/cpuset.c +++ b/kernel/cgroup/cpuset.c @@ -1635,8 +1635,7 @@ static int update_cpumask(struct cpuset *cs, struct cpuset *trialcs, * Make sure that subparts_cpus is a subset of cpus_allowed. */ if (cs->nr_subparts_cpus) { - cpumask_andnot(cs->subparts_cpus, cs->subparts_cpus, - cs->cpus_allowed); + cpumask_and(cs->subparts_cpus, cs->subparts_cpus, cs->cpus_allowed); cs->nr_subparts_cpus = cpumask_weight(cs->subparts_cpus); } spin_unlock_irq(&callback_lock); -- cgit From ebb7fb1557b1d03b906b668aa2164b51e6b7d19a Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Wed, 26 Jan 2022 09:19:20 -0800 Subject: xfs, iomap: limit individual ioend chain lengths in writeback Trond Myklebust reported soft lockups in XFS IO completion such as this: watchdog: BUG: soft lockup - CPU#12 stuck for 23s! [kworker/12:1:3106] CPU: 12 PID: 3106 Comm: kworker/12:1 Not tainted 4.18.0-305.10.2.el8_4.x86_64 #1 Workqueue: xfs-conv/md127 xfs_end_io [xfs] RIP: 0010:_raw_spin_unlock_irqrestore+0x11/0x20 Call Trace: wake_up_page_bit+0x8a/0x110 iomap_finish_ioend+0xd7/0x1c0 iomap_finish_ioends+0x7f/0xb0 xfs_end_ioend+0x6b/0x100 [xfs] xfs_end_io+0xb9/0xe0 [xfs] process_one_work+0x1a7/0x360 worker_thread+0x1fa/0x390 kthread+0x116/0x130 ret_from_fork+0x35/0x40 Ioends are processed as an atomic completion unit when all the chained bios in the ioend have completed their IO. Logically contiguous ioends can also be merged and completed as a single, larger unit. Both of these things can be problematic as both the bio chains per ioend and the size of the merged ioends processed as a single completion are both unbound. If we have a large sequential dirty region in the page cache, write_cache_pages() will keep feeding us sequential pages and we will keep mapping them into ioends and bios until we get a dirty page at a non-sequential file offset. These large sequential runs can will result in bio and ioend chaining to optimise the io patterns. The pages iunder writeback are pinned within these chains until the submission chaining is broken, allowing the entire chain to be completed. This can result in huge chains being processed in IO completion context. We get deep bio chaining if we have large contiguous physical extents. We will keep adding pages to the current bio until it is full, then we'll chain a new bio to keep adding pages for writeback. Hence we can build bio chains that map millions of pages and tens of gigabytes of RAM if the page cache contains big enough contiguous dirty file regions. This long bio chain pins those pages until the final bio in the chain completes and the ioend can iterate all the chained bios and complete them. OTOH, if we have a physically fragmented file, we end up submitting one ioend per physical fragment that each have a small bio or bio chain attached to them. We do not chain these at IO submission time, but instead we chain them at completion time based on file offset via iomap_ioend_try_merge(). Hence we can end up with unbound ioend chains being built via completion merging. 
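Schematically (a self-contained sketch in plain C, not the iomap code itself), the completion side ends up walking a chain whose length nothing bounds, doing per-folio work for every entry before anything can reschedule:

    struct ex_ioend {
        struct ex_ioend *next;
        unsigned int nr_folios;
    };

    /* Stub standing in for the real per-folio completion work. */
    static void ex_end_folio_writeback(void) { }

    static unsigned long ex_finish_chain(struct ex_ioend *chain)
    {
        unsigned long done = 0;

        /* With no bound on chain length or on nr_folios, this loop can
         * run for a very long time without yielding the CPU, which is
         * what the IOEND_BATCH_SIZE limit plus cond_resched() in the
         * patch below address. */
        for (struct ex_ioend *io = chain; io; io = io->next) {
            for (unsigned int i = 0; i < io->nr_folios; i++)
                ex_end_folio_writeback();
            done += io->nr_folios;
        }
        return done;
    }
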
XFS can then do COW remapping or unwritten extent conversion on that merged chain, which involves walking an extent fragment at a time and running a transaction to modify the physical extent information. IOWs, we merge all the discontiguous ioends together into a contiguous file range, only to then process them individually as discontiguous extents. This extent manipulation is computationally expensive and can run in a tight loop, so merging logically contiguous but physically discontigous ioends gains us nothing except for hiding the fact the fact we broke the ioends up into individual physical extents at submission and then need to loop over those individual physical extents at completion. Hence we need to have mechanisms to limit ioend sizes and to break up completion processing of large merged ioend chains: 1. bio chains per ioend need to be bound in length. Pure overwrites go straight to iomap_finish_ioend() in softirq context with the exact bio chain attached to the ioend by submission. Hence the only way to prevent long holdoffs here is to bound ioend submission sizes because we can't reschedule in softirq context. 2. iomap_finish_ioends() has to handle unbound merged ioend chains correctly. This relies on any one call to iomap_finish_ioend() being bound in runtime so that cond_resched() can be issued regularly as the long ioend chain is processed. i.e. this relies on mechanism #1 to limit individual ioend sizes to work correctly. 3. filesystems have to loop over the merged ioends to process physical extent manipulations. This means they can loop internally, and so we break merging at physical extent boundaries so the filesystem can easily insert reschedule points between individual extent manipulations. Signed-off-by: Dave Chinner Reported-and-tested-by: Trond Myklebust Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/iomap/buffered-io.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++---- fs/xfs/xfs_aops.c | 16 +++++++++++++++- include/linux/iomap.h | 2 ++ 3 files changed, 65 insertions(+), 5 deletions(-) diff --git a/fs/iomap/buffered-io.c b/fs/iomap/buffered-io.c index c938bbad075e..6c51a75d0be6 100644 --- a/fs/iomap/buffered-io.c +++ b/fs/iomap/buffered-io.c @@ -21,6 +21,8 @@ #include "../internal.h" +#define IOEND_BATCH_SIZE 4096 + /* * Structure allocated for each folio when block size < folio size * to track sub-folio uptodate status and I/O completions. @@ -1039,7 +1041,7 @@ static void iomap_finish_folio_write(struct inode *inode, struct folio *folio, * state, release holds on bios, and finally free up memory. Do not use the * ioend after this. 
*/ -static void +static u32 iomap_finish_ioend(struct iomap_ioend *ioend, int error) { struct inode *inode = ioend->io_inode; @@ -1048,6 +1050,7 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error) u64 start = bio->bi_iter.bi_sector; loff_t offset = ioend->io_offset; bool quiet = bio_flagged(bio, BIO_QUIET); + u32 folio_count = 0; for (bio = &ioend->io_inline_bio; bio; bio = next) { struct folio_iter fi; @@ -1062,9 +1065,11 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error) next = bio->bi_private; /* walk all folios in bio, ending page IO on them */ - bio_for_each_folio_all(fi, bio) + bio_for_each_folio_all(fi, bio) { iomap_finish_folio_write(inode, fi.folio, fi.length, error); + folio_count++; + } bio_put(bio); } /* The ioend has been freed by bio_put() */ @@ -1074,20 +1079,36 @@ iomap_finish_ioend(struct iomap_ioend *ioend, int error) "%s: writeback error on inode %lu, offset %lld, sector %llu", inode->i_sb->s_id, inode->i_ino, offset, start); } + return folio_count; } +/* + * Ioend completion routine for merged bios. This can only be called from task + * contexts as merged ioends can be of unbound length. Hence we have to break up + * the writeback completions into manageable chunks to avoid long scheduler + * holdoffs. We aim to keep scheduler holdoffs down below 10ms so that we get + * good batch processing throughput without creating adverse scheduler latency + * conditions. + */ void iomap_finish_ioends(struct iomap_ioend *ioend, int error) { struct list_head tmp; + u32 completions; + + might_sleep(); list_replace_init(&ioend->io_list, &tmp); - iomap_finish_ioend(ioend, error); + completions = iomap_finish_ioend(ioend, error); while (!list_empty(&tmp)) { + if (completions > IOEND_BATCH_SIZE * 8) { + cond_resched(); + completions = 0; + } ioend = list_first_entry(&tmp, struct iomap_ioend, io_list); list_del_init(&ioend->io_list); - iomap_finish_ioend(ioend, error); + completions += iomap_finish_ioend(ioend, error); } } EXPORT_SYMBOL_GPL(iomap_finish_ioends); @@ -1108,6 +1129,18 @@ iomap_ioend_can_merge(struct iomap_ioend *ioend, struct iomap_ioend *next) return false; if (ioend->io_offset + ioend->io_size != next->io_offset) return false; + /* + * Do not merge physically discontiguous ioends. The filesystem + * completion functions will have to iterate the physical + * discontiguities even if we merge the ioends at a logical level, so + * we don't gain anything by merging physical discontiguities here. + * + * We cannot use bio->bi_iter.bi_sector here as it is modified during + * submission so does not point to the start sector of the bio at + * completion. + */ + if (ioend->io_sector + (ioend->io_size >> 9) != next->io_sector) + return false; return true; } @@ -1209,8 +1242,10 @@ iomap_alloc_ioend(struct inode *inode, struct iomap_writepage_ctx *wpc, ioend->io_flags = wpc->iomap.flags; ioend->io_inode = inode; ioend->io_size = 0; + ioend->io_folios = 0; ioend->io_offset = offset; ioend->io_bio = bio; + ioend->io_sector = sector; return ioend; } @@ -1251,6 +1286,13 @@ iomap_can_add_to_ioend(struct iomap_writepage_ctx *wpc, loff_t offset, return false; if (sector != bio_end_sector(wpc->ioend->io_bio)) return false; + /* + * Limit ioend bio chain lengths to minimise IO completion latency. This + * also prevents long tight loops ending page writeback on all the + * folios in the ioend. 
+ */ + if (wpc->ioend->io_folios >= IOEND_BATCH_SIZE) + return false; return true; } @@ -1335,6 +1377,8 @@ iomap_writepage_map(struct iomap_writepage_ctx *wpc, &submit_list); count++; } + if (count) + wpc->ioend->io_folios++; WARN_ON_ONCE(!wpc->ioend && !list_empty(&submit_list)); WARN_ON_ONCE(!folio_test_locked(folio)); diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c index 2705f91bdd0d..9d6a67c7d227 100644 --- a/fs/xfs/xfs_aops.c +++ b/fs/xfs/xfs_aops.c @@ -136,7 +136,20 @@ done: memalloc_nofs_restore(nofs_flag); } -/* Finish all pending io completions. */ +/* + * Finish all pending IO completions that require transactional modifications. + * + * We try to merge physical and logically contiguous ioends before completion to + * minimise the number of transactions we need to perform during IO completion. + * Both unwritten extent conversion and COW remapping need to iterate and modify + * one physical extent at a time, so we gain nothing by merging physically + * discontiguous extents here. + * + * The ioend chain length that we can be processing here is largely unbound in + * length and we may have to perform significant amounts of work on each ioend + * to complete it. Hence we have to be careful about holding the CPU for too + * long in this loop. + */ void xfs_end_io( struct work_struct *work) @@ -157,6 +170,7 @@ xfs_end_io( list_del_init(&ioend->io_list); iomap_ioend_try_merge(ioend, &tmp); xfs_end_ioend(ioend); + cond_resched(); } } diff --git a/include/linux/iomap.h b/include/linux/iomap.h index b55bd49e55f5..97a3a2edb585 100644 --- a/include/linux/iomap.h +++ b/include/linux/iomap.h @@ -263,9 +263,11 @@ struct iomap_ioend { struct list_head io_list; /* next ioend in chain */ u16 io_type; u16 io_flags; /* IOMAP_F_* */ + u32 io_folios; /* folios added to ioend */ struct inode *io_inode; /* file being written to */ size_t io_size; /* size of the extent */ loff_t io_offset; /* offset in the file */ + sector_t io_sector; /* start sector of ioend */ struct bio *io_bio; /* bio being built */ struct bio io_inline_bio; /* MUST BE LAST! */ }; -- cgit From 4be990af2f7ed8bf209cce3b86e1abac33742763 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 9 Dec 2021 16:43:09 +0200 Subject: drm/i915: Fix up pixel_rate vs. clock confusion in wm calculations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use pixel_rate rather than crtc_clock in the watermark calculations. These are actually identical on gmch platforms for now since we don't adjust the pixel rate based on pfit downscaling. But pixel_rate is the thing we are actually interested here so use the proper name for it. 
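As a rough illustration of why the distinction will matter (simplified arithmetic, not the driver's actual downscaling math): once panel fitter downscaling is folded in, the rate at which pixels must be fetched can exceed the raw mode clock, so the watermark math should consume the adjusted pixel rate rather than crtc_clock.

    /* Illustrative only: effective fetch rate when the panel fitter
     * downscales from src_w to dst_w pixels per line (hypothetical
     * helper, single-dimension approximation). */
    static unsigned int ex_adjusted_pixel_rate(unsigned int crtc_clock_khz,
                                               unsigned int src_w,
                                               unsigned int dst_w)
    {
        if (src_w <= dst_w)
            return crtc_clock_khz;  /* no downscaling: rates match */

        return (unsigned int)((unsigned long long)crtc_clock_khz * src_w / dst_w);
    }
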
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211209144311.3221-1-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_pm.c | 52 +++++++++++++++++------------------------ 1 file changed, 22 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 23d3342081b8..29d4fe8cde7e 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -905,15 +905,13 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) crtc = single_enabled_crtc(dev_priv); if (crtc) { - const struct drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; + int pixel_rate = crtc->config->pixel_rate; int cpp = fb->format->cpp[0]; - int clock = pipe_mode->crtc_clock; /* Display SR */ - wm = intel_calculate_wm(clock, &pnv_display_wm, + wm = intel_calculate_wm(pixel_rate, &pnv_display_wm, pnv_display_wm.fifo_size, cpp, latency->display_sr); reg = intel_uncore_read(&dev_priv->uncore, DSPFW1); @@ -923,7 +921,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "DSPFW1 register is %x\n", reg); /* cursor SR */ - wm = intel_calculate_wm(clock, &pnv_cursor_wm, + wm = intel_calculate_wm(pixel_rate, &pnv_cursor_wm, pnv_display_wm.fifo_size, 4, latency->cursor_sr); reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); @@ -932,7 +930,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); /* Display HPLL off SR */ - wm = intel_calculate_wm(clock, &pnv_display_hplloff_wm, + wm = intel_calculate_wm(pixel_rate, &pnv_display_hplloff_wm, pnv_display_hplloff_wm.fifo_size, cpp, latency->display_hpll_disable); reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); @@ -941,7 +939,7 @@ static void pnv_update_wm(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, DSPFW3, reg); /* cursor HPLL off SR */ - wm = intel_calculate_wm(clock, &pnv_cursor_hplloff_wm, + wm = intel_calculate_wm(pixel_rate, &pnv_cursor_hplloff_wm, pnv_display_hplloff_wm.fifo_size, 4, latency->cursor_hpll_disable); reg = intel_uncore_read(&dev_priv->uncore, DSPFW3); @@ -1144,7 +1142,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; unsigned int latency = dev_priv->wm.pri_latency[level] * 10; - unsigned int clock, htotal, cpp, width, wm; + unsigned int pixel_rate, htotal, cpp, width, wm; if (latency == 0) return USHRT_MAX; @@ -1165,21 +1163,21 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, level != G4X_WM_LEVEL_NORMAL) cpp = max(cpp, 4u); - clock = pipe_mode->crtc_clock; + pixel_rate = crtc_state->pixel_rate; htotal = pipe_mode->crtc_htotal; width = drm_rect_width(&plane_state->uapi.dst); if (plane->id == PLANE_CURSOR) { - wm = intel_wm_method2(clock, htotal, width, cpp, latency); + wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); } else if (plane->id == PLANE_PRIMARY && level == G4X_WM_LEVEL_NORMAL) { - wm = intel_wm_method1(clock, cpp, latency); + wm = intel_wm_method1(pixel_rate, cpp, latency); } else { unsigned int small, large; - small = intel_wm_method1(clock, cpp, latency); - large = intel_wm_method2(clock, htotal, width, cpp, latency); + small = intel_wm_method1(pixel_rate, cpp, latency); + large = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); wm = min(small, large); } @@ -1664,7 
+1662,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, struct drm_i915_private *dev_priv = to_i915(plane->base.dev); const struct drm_display_mode *pipe_mode = &crtc_state->hw.pipe_mode; - unsigned int clock, htotal, cpp, width, wm; + unsigned int pixel_rate, htotal, cpp, width, wm; if (dev_priv->wm.pri_latency[level] == 0) return USHRT_MAX; @@ -1673,7 +1671,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, return 0; cpp = plane_state->hw.fb->format->cpp[0]; - clock = pipe_mode->crtc_clock; + pixel_rate = crtc_state->pixel_rate; htotal = pipe_mode->crtc_htotal; width = crtc_state->pipe_src_w; @@ -1686,7 +1684,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, */ wm = 63; } else { - wm = vlv_wm_method2(clock, htotal, width, cpp, + wm = vlv_wm_method2(pixel_rate, htotal, width, cpp, dev_priv->wm.pri_latency[level] * 10); } @@ -2267,13 +2265,13 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; - int clock = pipe_mode->crtc_clock; + int pixel_rate = crtc->config->pixel_rate; int htotal = pipe_mode->crtc_htotal; int hdisplay = crtc->config->pipe_src_w; int cpp = fb->format->cpp[0]; int entries; - entries = intel_wm_method2(clock, htotal, + entries = intel_wm_method2(pixel_rate, htotal, hdisplay, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); srwm = I965_FIFO_SIZE - entries; @@ -2284,7 +2282,7 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) "self-refresh entries: %d, wm: %d\n", entries, srwm); - entries = intel_wm_method2(clock, htotal, + entries = intel_wm_method2(pixel_rate, htotal, crtc->base.cursor->state->crtc_w, 4, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, @@ -2363,8 +2361,6 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_A); crtc = intel_crtc_for_plane(dev_priv, PLANE_A); if (intel_crtc_active(crtc)) { - const struct drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; @@ -2374,7 +2370,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) else cpp = fb->format->cpp[0]; - planea_wm = intel_calculate_wm(pipe_mode->crtc_clock, + planea_wm = intel_calculate_wm(crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); enabled = crtc; @@ -2393,8 +2389,6 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) fifo_size = i9xx_get_fifo_size(dev_priv, PLANE_B); crtc = intel_crtc_for_plane(dev_priv, PLANE_B); if (intel_crtc_active(crtc)) { - const struct drm_display_mode *pipe_mode = - &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = crtc->base.primary->state->fb; int cpp; @@ -2404,7 +2398,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) else cpp = fb->format->cpp[0]; - planeb_wm = intel_calculate_wm(pipe_mode->crtc_clock, + planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); if (enabled == NULL) @@ -2446,7 +2440,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) &enabled->config->hw.pipe_mode; const struct drm_framebuffer *fb = enabled->base.primary->state->fb; - int clock = pipe_mode->crtc_clock; + int pixel_rate = enabled->config->pixel_rate; int htotal = pipe_mode->crtc_htotal; int hdisplay = enabled->config->pipe_src_w; int cpp; @@ -2457,7 +2451,7 @@ 
static void i9xx_update_wm(struct drm_i915_private *dev_priv) else cpp = fb->format->cpp[0]; - entries = intel_wm_method2(clock, htotal, hdisplay, cpp, + entries = intel_wm_method2(pixel_rate, htotal, hdisplay, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); drm_dbg_kms(&dev_priv->drm, @@ -2494,7 +2488,6 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) static void i845_update_wm(struct drm_i915_private *dev_priv) { struct intel_crtc *crtc; - const struct drm_display_mode *pipe_mode; u32 fwater_lo; int planea_wm; @@ -2502,8 +2495,7 @@ static void i845_update_wm(struct drm_i915_private *dev_priv) if (crtc == NULL) return; - pipe_mode = &crtc->config->hw.pipe_mode; - planea_wm = intel_calculate_wm(pipe_mode->crtc_clock, + planea_wm = intel_calculate_wm(crtc->config->pixel_rate, &i845_wm_info, i845_get_fifo_size(dev_priv, PLANE_A), 4, pessimal_latency_ns); -- cgit From d05824796d9cf6c0e59a0aa86333584bde8b51c6 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 9 Dec 2021 16:43:10 +0200 Subject: drm/i915: Use the correct plane source width in watermark calculations MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we sometimes use the plane destination width, or just the pipe src width as the plane source width in the watermark calculatons. Use the correct thing everywhere. v2: convert ilk cur/fbc cases too Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211209144311.3221-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_pm.c | 21 ++++++++++----------- 1 file changed, 10 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 29d4fe8cde7e..486251382fa7 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -1165,8 +1165,7 @@ static u16 g4x_compute_wm(const struct intel_crtc_state *crtc_state, pixel_rate = crtc_state->pixel_rate; htotal = pipe_mode->crtc_htotal; - - width = drm_rect_width(&plane_state->uapi.dst); + width = drm_rect_width(&plane_state->uapi.src) >> 16; if (plane->id == PLANE_CURSOR) { wm = intel_wm_method2(pixel_rate, htotal, width, cpp, latency); @@ -1673,7 +1672,7 @@ static u16 vlv_compute_wm_level(const struct intel_crtc_state *crtc_state, cpp = plane_state->hw.fb->format->cpp[0]; pixel_rate = crtc_state->pixel_rate; htotal = pipe_mode->crtc_htotal; - width = crtc_state->pipe_src_w; + width = drm_rect_width(&plane_state->uapi.src) >> 16; if (plane->id == PLANE_CURSOR) { /* @@ -2267,12 +2266,12 @@ static void i965_update_wm(struct drm_i915_private *dev_priv) crtc->base.primary->state->fb; int pixel_rate = crtc->config->pixel_rate; int htotal = pipe_mode->crtc_htotal; - int hdisplay = crtc->config->pipe_src_w; + int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; int cpp = fb->format->cpp[0]; int entries; entries = intel_wm_method2(pixel_rate, htotal, - hdisplay, cpp, sr_latency_ns / 100); + width, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, I915_FIFO_LINE_SIZE); srwm = I965_FIFO_SIZE - entries; if (srwm < 0) @@ -2442,7 +2441,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) enabled->base.primary->state->fb; int pixel_rate = enabled->config->pixel_rate; int htotal = pipe_mode->crtc_htotal; - int hdisplay = enabled->config->pipe_src_w; + int width = drm_rect_width(&enabled->base.primary->state->src) >> 16; int cpp; int entries; @@ -2451,7 +2450,7 @@ static void 
i9xx_update_wm(struct drm_i915_private *dev_priv) else cpp = fb->format->cpp[0]; - entries = intel_wm_method2(pixel_rate, htotal, hdisplay, cpp, + entries = intel_wm_method2(pixel_rate, htotal, width, cpp, sr_latency_ns / 100); entries = DIV_ROUND_UP(entries, wm_info->cacheline_size); drm_dbg_kms(&dev_priv->drm, @@ -2586,7 +2585,7 @@ static u32 ilk_compute_pri_wm(const struct intel_crtc_state *crtc_state, method2 = ilk_wm_method2(crtc_state->pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.dst), + drm_rect_width(&plane_state->uapi.src) >> 16, cpp, mem_value); return min(method1, method2); @@ -2614,7 +2613,7 @@ static u32 ilk_compute_spr_wm(const struct intel_crtc_state *crtc_state, method1 = ilk_wm_method1(crtc_state->pixel_rate, cpp, mem_value); method2 = ilk_wm_method2(crtc_state->pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.dst), + drm_rect_width(&plane_state->uapi.src) >> 16, cpp, mem_value); return min(method1, method2); } @@ -2639,7 +2638,7 @@ static u32 ilk_compute_cur_wm(const struct intel_crtc_state *crtc_state, return ilk_wm_method2(crtc_state->pixel_rate, crtc_state->hw.pipe_mode.crtc_htotal, - drm_rect_width(&plane_state->uapi.dst), + drm_rect_width(&plane_state->uapi.src) >> 16, cpp, mem_value); } @@ -2655,7 +2654,7 @@ static u32 ilk_compute_fbc_wm(const struct intel_crtc_state *crtc_state, cpp = plane_state->hw.fb->format->cpp[0]; - return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.dst), + return ilk_wm_fbc(pri_val, drm_rect_width(&plane_state->uapi.src) >> 16, cpp); } -- cgit From bf172a01ea485e630f28b6ad525fb277d73d3e3d Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 9 Dec 2021 16:43:11 +0200 Subject: drm/i915: Use single_enabled_crtc() in i9xx_update_wm() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Replace the ad-hoc single_enabled_crtc() thing in i9xx_update_wm() with the real thing, just like we do in the other legacy wm functions. We can also nuke the extra 'enabled' variable. 
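The helper's contract is easy to sketch in generic terms (this is not the driver function, just the pattern it follows): return the one active entry if exactly one is active, otherwise NULL, so the caller no longer needs its own 'enabled' bookkeeping.

    #include <stddef.h>

    struct ex_crtc {
        int active;
    };

    static struct ex_crtc *ex_single_enabled(struct ex_crtc *crtcs, int n)
    {
        struct ex_crtc *found = NULL;

        for (int i = 0; i < n; i++) {
            if (!crtcs[i].active)
                continue;
            if (found)
                return NULL;    /* more than one enabled */
            found = &crtcs[i];
        }

        return found;           /* NULL when none are enabled */
    }
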
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211209144311.3221-3-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/intel_pm.c | 26 +++++++++++--------------- 1 file changed, 11 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c index 486251382fa7..9e4c4240c448 100644 --- a/drivers/gpu/drm/i915/intel_pm.c +++ b/drivers/gpu/drm/i915/intel_pm.c @@ -2345,7 +2345,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) int cwm, srwm = 1; int fifo_size; int planea_wm, planeb_wm; - struct intel_crtc *crtc, *enabled = NULL; + struct intel_crtc *crtc; if (IS_I945GM(dev_priv)) wm_info = &i945_wm_info; @@ -2372,7 +2372,6 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) planea_wm = intel_calculate_wm(crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); - enabled = crtc; } else { planea_wm = fifo_size - wm_info->guard_size; if (planea_wm > (long)wm_info->max_wm) @@ -2400,10 +2399,6 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) planeb_wm = intel_calculate_wm(crtc->config->pixel_rate, wm_info, fifo_size, cpp, pessimal_latency_ns); - if (enabled == NULL) - enabled = crtc; - else - enabled = NULL; } else { planeb_wm = fifo_size - wm_info->guard_size; if (planeb_wm > (long)wm_info->max_wm) @@ -2413,14 +2408,15 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) drm_dbg_kms(&dev_priv->drm, "FIFO watermarks - A: %d, B: %d\n", planea_wm, planeb_wm); - if (IS_I915GM(dev_priv) && enabled) { + crtc = single_enabled_crtc(dev_priv); + if (IS_I915GM(dev_priv) && crtc) { struct drm_i915_gem_object *obj; - obj = intel_fb_obj(enabled->base.primary->state->fb); + obj = intel_fb_obj(crtc->base.primary->state->fb); /* self-refresh seems busted with untiled */ if (!i915_gem_object_is_tiled(obj)) - enabled = NULL; + crtc = NULL; } /* @@ -2432,16 +2428,16 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) intel_set_memory_cxsr(dev_priv, false); /* Calc sr entries for one plane configs */ - if (HAS_FW_BLC(dev_priv) && enabled) { + if (HAS_FW_BLC(dev_priv) && crtc) { /* self-refresh has much higher latency */ static const int sr_latency_ns = 6000; const struct drm_display_mode *pipe_mode = - &enabled->config->hw.pipe_mode; + &crtc->config->hw.pipe_mode; const struct drm_framebuffer *fb = - enabled->base.primary->state->fb; - int pixel_rate = enabled->config->pixel_rate; + crtc->base.primary->state->fb; + int pixel_rate = crtc->config->pixel_rate; int htotal = pipe_mode->crtc_htotal; - int width = drm_rect_width(&enabled->base.primary->state->src) >> 16; + int width = drm_rect_width(&crtc->base.primary->state->src) >> 16; int cpp; int entries; @@ -2480,7 +2476,7 @@ static void i9xx_update_wm(struct drm_i915_private *dev_priv) intel_uncore_write(&dev_priv->uncore, FW_BLC, fwater_lo); intel_uncore_write(&dev_priv->uncore, FW_BLC2, fwater_hi); - if (enabled) + if (crtc) intel_set_memory_cxsr(dev_priv, true); } -- cgit From 96e4c3c0ed4eb85e02bfa438c6b4ef7cea78bd8a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 12 Nov 2021 21:38:05 +0200 Subject: drm/i915: Bump DSL linemask to 20 bits MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Since tgl PIPE_DSL has 20 bits for the scanline. Let's bump our definition to match. And while at it let's also add the define for the current field readback. We can also get rid of the gen2 vs. 
gen3+ nonsense since none of the extra bits ever did anything and just always read as zero. And now we extend all platforms to use the tgl+ 20 bits deinition, but again that is fine since all the bits used to be mbz and always read as zero on all the platforms. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211112193813.8224-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 10 ++-------- drivers/gpu/drm/i915/i915_irq.c | 7 ++----- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- 3 files changed, 6 insertions(+), 15 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 69244ad19eec..311390af1300 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -353,16 +353,10 @@ static bool pipe_scanline_is_moving(struct drm_i915_private *dev_priv, { i915_reg_t reg = PIPEDSL(pipe); u32 line1, line2; - u32 line_mask; - if (DISPLAY_VER(dev_priv) == 2) - line_mask = DSL_LINEMASK_GEN2; - else - line_mask = DSL_LINEMASK_GEN3; - - line1 = intel_de_read(dev_priv, reg) & line_mask; + line1 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; msleep(5); - line2 = intel_de_read(dev_priv, reg) & line_mask; + line2 = intel_de_read(dev_priv, reg) & PIPEDSL_LINE_MASK; return line1 != line2; } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 71171338f2df..14ae4f9b3fa6 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -836,10 +836,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) if (mode->flags & DRM_MODE_FLAG_INTERLACE) vtotal /= 2; - if (DISPLAY_VER(dev_priv) == 2) - position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN2; - else - position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; + position = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK; /* * On HSW, the DSL reg (0x70000) appears to return 0 if we @@ -858,7 +855,7 @@ static int __intel_get_crtc_scanline(struct intel_crtc *crtc) for (i = 0; i < 100; i++) { udelay(1); - temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & DSL_LINEMASK_GEN3; + temp = intel_de_read_fw(dev_priv, PIPEDSL(pipe)) & PIPEDSL_LINE_MASK; if (temp != position) { position = temp; break; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index cf168c3e0471..507cc3d54fe8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5165,8 +5165,8 @@ enum { /* Pipe A */ #define _PIPEADSL 0x70000 -#define DSL_LINEMASK_GEN2 0x00000fff -#define DSL_LINEMASK_GEN3 0x00001fff +#define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */ +#define PIPEDSL_LINE_MASK REG_GENMASK(19, 0) #define _PIPEACONF 0x70008 #define PIPECONF_ENABLE (1 << 31) #define PIPECONF_DISABLE 0 -- cgit From d083c232fe2dc4720d8f0e337613f88909ff9d2a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 12 Nov 2021 21:38:06 +0200 Subject: drm/i915: Clean up PIPEMISC register defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_BIT() & co. for PIPEMISC* bits, and while at it fill in the missing dithering bits since we already had some of them defined. 
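For readers less familiar with the helpers being adopted in this series, simplified stand-ins behave roughly as follows (the real REG_* macros live in the i915 headers and add compile-time checking that this sketch omits):

    /* Simplified, unchecked equivalents of the bitfield helpers used in
     * the defines below; illustrative only. */
    #define EX_BIT(n)                (1u << (n))
    #define EX_GENMASK(h, l)         ((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))
    #define EX_FIELD_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))
    #define EX_FIELD_GET(mask, reg)  (((reg) & (mask)) >> __builtin_ctz(mask))

So a 3-bit field at bits 7:5 holding the value 2 expands to 0x40, the same constant the old open-coded (2 << 5) produced; only the notation changes, not the register layout.
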
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211112193813.8224-3-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 18 +++++++------- drivers/gpu/drm/i915/i915_reg.h | 35 +++++++++++++++------------- 2 files changed, 28 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 311390af1300..5d4f189cb084 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3764,18 +3764,18 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) switch (crtc_state->pipe_bpp) { case 18: - val |= PIPEMISC_6_BPC; + val |= PIPEMISC_BPC_6; break; case 24: - val |= PIPEMISC_8_BPC; + val |= PIPEMISC_BPC_8; break; case 30: - val |= PIPEMISC_10_BPC; + val |= PIPEMISC_BPC_10; break; case 36: /* Port output 12BPC defined for ADLP+ */ if (DISPLAY_VER(dev_priv) > 12) - val |= PIPEMISC_12_BPC_ADLP; + val |= PIPEMISC_BPC_12_ADLP; break; default: MISSING_CASE(crtc_state->pipe_bpp); @@ -3811,7 +3811,7 @@ static void bdw_set_pipemisc(const struct intel_crtc_state *crtc_state) } intel_de_rmw(dev_priv, PIPE_MISC2(crtc->pipe), - PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK, + PIPE_MISC2_BUBBLE_COUNTER_MASK, scaler_in_use ? PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN : PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS); } @@ -3827,11 +3827,11 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) tmp = intel_de_read(dev_priv, PIPEMISC(crtc->pipe)); switch (tmp & PIPEMISC_BPC_MASK) { - case PIPEMISC_6_BPC: + case PIPEMISC_BPC_6: return 18; - case PIPEMISC_8_BPC: + case PIPEMISC_BPC_8: return 24; - case PIPEMISC_10_BPC: + case PIPEMISC_BPC_10: return 30; /* * PORT OUTPUT 12 BPC defined for ADLP+. @@ -3843,7 +3843,7 @@ int bdw_get_pipemisc_bpp(struct intel_crtc *crtc) * on older platforms, need to find a workaround for 12 BPC * MIPI DSI HW readout. */ - case PIPEMISC_12_BPC_ADLP: + case PIPEMISC_BPC_12_ADLP: if (DISPLAY_VER(dev_priv) > 12) return 36; fallthrough; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 507cc3d54fe8..2d17112aabc8 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5308,32 +5308,35 @@ enum { #define _PIPE_MISC_A 0x70030 #define _PIPE_MISC_B 0x71030 -#define PIPEMISC_YUV420_ENABLE (1 << 27) /* glk+ */ -#define PIPEMISC_YUV420_MODE_FULL_BLEND (1 << 26) /* glk+ */ -#define PIPEMISC_HDR_MODE_PRECISION (1 << 23) /* icl+ */ -#define PIPEMISC_OUTPUT_COLORSPACE_YUV (1 << 11) -#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ +#define PIPEMISC_YUV420_ENABLE REG_BIT(27) /* glk+ */ +#define PIPEMISC_YUV420_MODE_FULL_BLEND REG_BIT(26) /* glk+ */ +#define PIPEMISC_HDR_MODE_PRECISION REG_BIT(23) /* icl+ */ +#define PIPEMISC_OUTPUT_COLORSPACE_YUV REG_BIT(11) +#define PIPEMISC_PIXEL_ROUNDING_TRUNC REG_BIT(8) /* tgl+ */ /* * For Display < 13, Bits 5-7 of PIPE MISC represent DITHER BPC with * valid values of: 6, 8, 10 BPC. * ADLP+, the bits 5-7 represent PORT OUTPUT BPC with valid values of: * 6, 8, 10, 12 BPC. 
*/ -#define PIPEMISC_BPC_MASK (7 << 5) -#define PIPEMISC_8_BPC (0 << 5) -#define PIPEMISC_10_BPC (1 << 5) -#define PIPEMISC_6_BPC (2 << 5) -#define PIPEMISC_12_BPC_ADLP (4 << 5) /* adlp+ */ -#define PIPEMISC_DITHER_ENABLE (1 << 4) -#define PIPEMISC_DITHER_TYPE_MASK (3 << 2) -#define PIPEMISC_DITHER_TYPE_SP (0 << 2) +#define PIPEMISC_BPC_MASK REG_GENMASK(7, 5) +#define PIPEMISC_BPC_8 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 0) +#define PIPEMISC_BPC_10 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 1) +#define PIPEMISC_BPC_6 REG_FIELD_PREP(PIPEMISC_BPC_MASK, 2) +#define PIPEMISC_BPC_12_ADLP REG_FIELD_PREP(PIPEMISC_BPC_MASK, 4) /* adlp+ */ +#define PIPEMISC_DITHER_ENABLE REG_BIT(4) +#define PIPEMISC_DITHER_TYPE_MASK REG_GENMASK(3, 2) +#define PIPEMISC_DITHER_TYPE_SP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 0) +#define PIPEMISC_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 1) +#define PIPEMISC_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 2) +#define PIPEMISC_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPEMISC_DITHER_TYPE_MASK, 3) #define PIPEMISC(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC_A) #define _PIPE_MISC2_A 0x7002C #define _PIPE_MISC2_B 0x7102C -#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN (0x50 << 24) -#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS (0x14 << 24) -#define PIPE_MISC2_UNDERRUN_BUBBLE_COUNTER_MASK (0xff << 24) +#define PIPE_MISC2_BUBBLE_COUNTER_MASK REG_GENMASK(31, 24) +#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_EN REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 80) +#define PIPE_MISC2_BUBBLE_COUNTER_SCALER_DIS REG_FIELD_PREP(PIPE_MISC2_BUBBLE_COUNTER_MASK, 20) #define PIPE_MISC2(pipe) _MMIO_PIPE2(pipe, _PIPE_MISC2_A) /* Skylake+ pipe bottom (background) color */ -- cgit From 7e31ce581bf034cdcb1a94f84ffcf3cc983988e9 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 12 Nov 2021 21:38:07 +0200 Subject: drm/i915: Clean up SKL_BOTTOM_COLOR defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_BIT() for SKL_BOTTOM_COLOR. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211112193813.8224-4-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/i915_reg.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 2d17112aabc8..7ce837e3d9e3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5341,8 +5341,8 @@ enum { /* Skylake+ pipe bottom (background) color */ #define _SKL_BOTTOM_COLOR_A 0x70034 -#define SKL_BOTTOM_COLOR_GAMMA_ENABLE (1 << 31) -#define SKL_BOTTOM_COLOR_CSC_ENABLE (1 << 30) +#define SKL_BOTTOM_COLOR_GAMMA_ENABLE REG_BIT(31) +#define SKL_BOTTOM_COLOR_CSC_ENABLE REG_BIT(30) #define SKL_BOTTOM_COLOR(pipe) _MMIO_PIPE2(pipe, _SKL_BOTTOM_COLOR_A) #define _ICL_PIPE_A_STATUS 0x70058 -- cgit From 6a6d914de30f15472b2dc36e8ac6bf016cfbaed5 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 12 Nov 2021 21:38:08 +0200 Subject: drm/i915: Clean up PIPECONF bit defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_BIT() & co. for PIPECONF bits, and adjust the naming of various bits to be more consistent. 
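A small usage sketch of the mask/value style the new defines follow, with hypothetical EX_* names mirroring the PIPECONF_BPC_* pair (again simplified, not the driver code):

    #define EX_GENMASK(h, l)         ((~0u >> (31 - (h))) & ~((1u << (l)) - 1u))
    #define EX_FIELD_PREP(mask, val) (((val) << __builtin_ctz(mask)) & (mask))

    #define EX_BPC_MASK  EX_GENMASK(7, 5)
    #define EX_BPC_8     EX_FIELD_PREP(EX_BPC_MASK, 0)
    #define EX_BPC_10    EX_FIELD_PREP(EX_BPC_MASK, 1)
    #define EX_BPC_6     EX_FIELD_PREP(EX_BPC_MASK, 2)

    /* Readback: compare the masked register value against the encoded
     * field values, as the pipe config readout does. */
    static int ex_bpc_to_pipe_bpp(unsigned int reg)
    {
        unsigned int bpc = reg & EX_BPC_MASK;

        if (bpc == EX_BPC_6)
            return 18;
        if (bpc == EX_BPC_8)
            return 24;
        if (bpc == EX_BPC_10)
            return 30;
        return 0;   /* unhandled encoding */
    }
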
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211112193813.8224-5-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/icl_dsi.c | 4 +- drivers/gpu/drm/i915/display/intel_display.c | 60 ++++++------- drivers/gpu/drm/i915/display/intel_pch_display.c | 7 +- drivers/gpu/drm/i915/gvt/display.c | 4 +- drivers/gpu/drm/i915/gvt/handlers.c | 4 +- drivers/gpu/drm/i915/i915_reg.h | 108 +++++++++++------------ 6 files changed, 89 insertions(+), 98 deletions(-) diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 16a611f7d659..2d5bb9195b20 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -1051,7 +1051,7 @@ static void gen11_dsi_enable_transcoder(struct intel_encoder *encoder) /* wait for transcoder to be enabled */ if (intel_de_wait_for_set(dev_priv, PIPECONF(dsi_trans), - I965_PIPECONF_ACTIVE, 10)) + PIPECONF_STATE_ENABLE, 10)) drm_err(&dev_priv->drm, "DSI transcoder not enabled\n"); } @@ -1319,7 +1319,7 @@ static void gen11_dsi_disable_transcoder(struct intel_encoder *encoder) /* wait for transcoder to be disabled */ if (intel_de_wait_for_clear(dev_priv, PIPECONF(dsi_trans), - I965_PIPECONF_ACTIVE, 50)) + PIPECONF_STATE_ENABLE, 50)) drm_err(&dev_priv->drm, "DSI trancoder not disabled\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 5d4f189cb084..e485dd048bce 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -391,13 +391,11 @@ intel_wait_for_pipe_off(const struct intel_crtc_state *old_crtc_state) if (DISPLAY_VER(dev_priv) >= 4) { enum transcoder cpu_transcoder = old_crtc_state->cpu_transcoder; - i915_reg_t reg = PIPECONF(cpu_transcoder); /* Wait for the Pipe State to go off */ - if (intel_de_wait_for_clear(dev_priv, reg, - I965_PIPECONF_ACTIVE, 100)) - drm_WARN(&dev_priv->drm, 1, - "pipe_off wait timed out\n"); + if (intel_de_wait_for_clear(dev_priv, PIPECONF(cpu_transcoder), + PIPECONF_STATE_ENABLE, 100)) + drm_WARN(&dev_priv->drm, 1, "pipe_off wait timed out\n"); } else { intel_wait_for_pipe_scanline_stopped(crtc); } @@ -3378,13 +3376,13 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) switch (crtc_state->pipe_bpp) { case 18: - pipeconf |= PIPECONF_6BPC; + pipeconf |= PIPECONF_BPC_6; break; case 24: - pipeconf |= PIPECONF_8BPC; + pipeconf |= PIPECONF_BPC_8; break; case 30: - pipeconf |= PIPECONF_10BPC; + pipeconf |= PIPECONF_BPC_10; break; default: /* Case prevented by intel_choose_pipe_bpp_dither. 
*/ @@ -3399,7 +3397,7 @@ static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) else pipeconf |= PIPECONF_INTERLACE_W_SYNC_SHIFT; } else { - pipeconf |= PIPECONF_PROGRESSIVE; + pipeconf |= PIPECONF_INTERLACE_PROGRESSIVE; } if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) && @@ -3577,16 +3575,17 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, if (IS_G4X(dev_priv) || IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) { switch (tmp & PIPECONF_BPC_MASK) { - case PIPECONF_6BPC: + case PIPECONF_BPC_6: pipe_config->pipe_bpp = 18; break; - case PIPECONF_8BPC: + case PIPECONF_BPC_8: pipe_config->pipe_bpp = 24; break; - case PIPECONF_10BPC: + case PIPECONF_BPC_10: pipe_config->pipe_bpp = 30; break; default: + MISSING_CASE(tmp); break; } } @@ -3595,8 +3594,7 @@ static bool i9xx_get_pipe_config(struct intel_crtc *crtc, (tmp & PIPECONF_COLOR_RANGE_SELECT)) pipe_config->limited_color_range = true; - pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_I9XX) >> - PIPECONF_GAMMA_MODE_SHIFT; + pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_I9XX, tmp); if (IS_CHERRYVIEW(dev_priv)) pipe_config->cgm_mode = intel_de_read(dev_priv, @@ -3683,16 +3681,16 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) switch (crtc_state->pipe_bpp) { case 18: - val |= PIPECONF_6BPC; + val |= PIPECONF_BPC_6; break; case 24: - val |= PIPECONF_8BPC; + val |= PIPECONF_BPC_8; break; case 30: - val |= PIPECONF_10BPC; + val |= PIPECONF_BPC_10; break; case 36: - val |= PIPECONF_12BPC; + val |= PIPECONF_BPC_12; break; default: /* Case prevented by intel_choose_pipe_bpp_dither. */ @@ -3700,12 +3698,12 @@ static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state) } if (crtc_state->dither) - val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); + val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACED_ILK; + val |= PIPECONF_INTERLACE_IF_ID_ILK; else - val |= PIPECONF_PROGRESSIVE; + val |= PIPECONF_INTERLACE_PF_PD_ILK; /* * This would end up with an odd purple hue over @@ -3737,12 +3735,12 @@ static void hsw_set_transconf(const struct intel_crtc_state *crtc_state) u32 val = 0; if (IS_HASWELL(dev_priv) && crtc_state->dither) - val |= (PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP); + val |= PIPECONF_DITHER_EN | PIPECONF_DITHER_TYPE_SP; if (crtc_state->hw.adjusted_mode.flags & DRM_MODE_FLAG_INTERLACE) - val |= PIPECONF_INTERLACED_ILK; + val |= PIPECONF_INTERLACE_IF_ID_ILK; else - val |= PIPECONF_PROGRESSIVE; + val |= PIPECONF_INTERLACE_PF_PD_ILK; if (IS_HASWELL(dev_priv) && crtc_state->output_format != INTEL_OUTPUT_FORMAT_RGB) @@ -4036,16 +4034,16 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, goto out; switch (tmp & PIPECONF_BPC_MASK) { - case PIPECONF_6BPC: + case PIPECONF_BPC_6: pipe_config->pipe_bpp = 18; break; - case PIPECONF_8BPC: + case PIPECONF_BPC_8: pipe_config->pipe_bpp = 24; break; - case PIPECONF_10BPC: + case PIPECONF_BPC_10: pipe_config->pipe_bpp = 30; break; - case PIPECONF_12BPC: + case PIPECONF_BPC_12: pipe_config->pipe_bpp = 36; break; default: @@ -4065,8 +4063,7 @@ static bool ilk_get_pipe_config(struct intel_crtc *crtc, break; } - pipe_config->gamma_mode = (tmp & PIPECONF_GAMMA_MODE_MASK_ILK) >> - PIPECONF_GAMMA_MODE_SHIFT; + pipe_config->gamma_mode = REG_FIELD_GET(PIPECONF_GAMMA_MODE_MASK_ILK, tmp); pipe_config->csc_mode = intel_de_read(dev_priv, PIPE_CSC_MODE(crtc->pipe)); @@ -10008,8 +10005,7 @@ void 
i830_enable_pipe(struct drm_i915_private *dev_priv, enum pipe pipe) udelay(150); /* wait for warmup */ } - intel_de_write(dev_priv, PIPECONF(pipe), - PIPECONF_ENABLE | PIPECONF_PROGRESSIVE); + intel_de_write(dev_priv, PIPECONF(pipe), PIPECONF_ENABLE); intel_de_posting_read(dev_priv, PIPECONF(pipe)); intel_wait_for_pipe_scanline_moving(crtc); diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index 0c528c612cb2..657e904061d7 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -157,13 +157,13 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) */ val &= ~PIPECONF_BPC_MASK; if (intel_crtc_has_type(crtc_state, INTEL_OUTPUT_HDMI)) - val |= PIPECONF_8BPC; + val |= PIPECONF_BPC_8; else val |= pipeconf_val & PIPECONF_BPC_MASK; } val &= ~TRANS_INTERLACE_MASK; - if ((pipeconf_val & PIPECONF_INTERLACE_MASK) == PIPECONF_INTERLACED_ILK) { + if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) { if (HAS_PCH_IBX(dev_priv) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) val |= TRANS_LEGACY_INTERLACED_ILK; @@ -436,8 +436,7 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, val = TRANS_ENABLE; pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); - if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == - PIPECONF_INTERLACED_ILK) + if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK) val |= TRANS_INTERLACED; else val |= TRANS_PROGRESSIVE; diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 4d66fb5fb29f..7198d02edc74 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -184,7 +184,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) for_each_pipe(dev_priv, pipe) { vgpu_vreg_t(vgpu, PIPECONF(pipe)) &= - ~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE); + ~(PIPECONF_ENABLE | PIPECONF_STATE_ENABLE); vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISP_ENABLE; vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE; vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE_MASK; @@ -245,7 +245,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * setup_virtual_dp_monitor. 
*/ vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE; - vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE; + vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_STATE_ENABLE; /* * Golden M/N are calculated based on: diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c index 9f8ae6776e98..a1f9ab4a4e63 100644 --- a/drivers/gpu/drm/i915/gvt/handlers.c +++ b/drivers/gpu/drm/i915/gvt/handlers.c @@ -702,11 +702,11 @@ static int pipeconf_mmio_write(struct intel_vgpu *vgpu, unsigned int offset, data = vgpu_vreg(vgpu, offset); if (data & PIPECONF_ENABLE) { - vgpu_vreg(vgpu, offset) |= I965_PIPECONF_ACTIVE; + vgpu_vreg(vgpu, offset) |= PIPECONF_STATE_ENABLE; vgpu_update_refresh_rate(vgpu); vgpu_update_vblank_emulation(vgpu, true); } else { - vgpu_vreg(vgpu, offset) &= ~I965_PIPECONF_ACTIVE; + vgpu_vreg(vgpu, offset) &= ~PIPECONF_STATE_ENABLE; vgpu_update_vblank_emulation(vgpu, false); } return 0; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 7ce837e3d9e3..c1b9c45d96ab 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5168,62 +5168,58 @@ enum { #define PIPEDSL_CURR_FIELD REG_BIT(31) /* ctg+ */ #define PIPEDSL_LINE_MASK REG_GENMASK(19, 0) #define _PIPEACONF 0x70008 -#define PIPECONF_ENABLE (1 << 31) -#define PIPECONF_DISABLE 0 -#define PIPECONF_DOUBLE_WIDE (1 << 30) -#define I965_PIPECONF_ACTIVE (1 << 30) -#define PIPECONF_DSI_PLL_LOCKED (1 << 29) /* vlv & pipe A only */ -#define PIPECONF_FRAME_START_DELAY_MASK (3 << 27) /* pre-hsw */ -#define PIPECONF_FRAME_START_DELAY(x) ((x) << 27) /* pre-hsw: 0-3 */ -#define PIPECONF_SINGLE_WIDE 0 -#define PIPECONF_PIPE_UNLOCKED 0 -#define PIPECONF_PIPE_LOCKED (1 << 25) -#define PIPECONF_FORCE_BORDER (1 << 25) -#define PIPECONF_GAMMA_MODE_MASK_I9XX (1 << 24) /* gmch */ -#define PIPECONF_GAMMA_MODE_MASK_ILK (3 << 24) /* ilk-ivb */ -#define PIPECONF_GAMMA_MODE_8BIT (0 << 24) /* gmch,ilk-ivb */ -#define PIPECONF_GAMMA_MODE_10BIT (1 << 24) /* gmch,ilk-ivb */ -#define PIPECONF_GAMMA_MODE_12BIT (2 << 24) /* ilk-ivb */ -#define PIPECONF_GAMMA_MODE_SPLIT (3 << 24) /* ivb */ -#define PIPECONF_GAMMA_MODE(x) ((x) << 24) /* pass in GAMMA_MODE_MODE_* */ -#define PIPECONF_GAMMA_MODE_SHIFT 24 -#define PIPECONF_INTERLACE_MASK (7 << 21) -#define PIPECONF_INTERLACE_MASK_HSW (3 << 21) -/* Note that pre-gen3 does not support interlaced display directly. Panel - * fitting must be disabled on pre-ilk for interlaced. */ -#define PIPECONF_PROGRESSIVE (0 << 21) -#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL (4 << 21) /* gen4 only */ -#define PIPECONF_INTERLACE_W_SYNC_SHIFT (5 << 21) /* gen4 only */ -#define PIPECONF_INTERLACE_W_FIELD_INDICATION (6 << 21) -#define PIPECONF_INTERLACE_FIELD_0_ONLY (7 << 21) /* gen3 only */ -/* Ironlake and later have a complete new set of values for interlaced. PFIT - * means panel fitter required, PF means progressive fetch, DBL means power - * saving pixel doubling. 
*/ -#define PIPECONF_PFIT_PF_INTERLACED_ILK (1 << 21) -#define PIPECONF_INTERLACED_ILK (3 << 21) -#define PIPECONF_INTERLACED_DBL_ILK (4 << 21) /* ilk/snb only */ -#define PIPECONF_PFIT_PF_INTERLACED_DBL_ILK (5 << 21) /* ilk/snb only */ -#define PIPECONF_INTERLACE_MODE_MASK (7 << 21) -#define PIPECONF_EDP_RR_MODE_SWITCH (1 << 20) -#define PIPECONF_CXSR_DOWNCLOCK (1 << 16) -#define PIPECONF_EDP_RR_MODE_SWITCH_VLV (1 << 14) -#define PIPECONF_COLOR_RANGE_SELECT (1 << 13) -#define PIPECONF_OUTPUT_COLORSPACE_MASK (3 << 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_RGB (0 << 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV601 (1 << 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV709 (2 << 11) /* ilk-ivb */ -#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW (1 << 11) /* hsw only */ -#define PIPECONF_BPC_MASK (0x7 << 5) -#define PIPECONF_8BPC (0 << 5) -#define PIPECONF_10BPC (1 << 5) -#define PIPECONF_6BPC (2 << 5) -#define PIPECONF_12BPC (3 << 5) -#define PIPECONF_DITHER_EN (1 << 4) -#define PIPECONF_DITHER_TYPE_MASK (0x0000000c) -#define PIPECONF_DITHER_TYPE_SP (0 << 2) -#define PIPECONF_DITHER_TYPE_ST1 (1 << 2) -#define PIPECONF_DITHER_TYPE_ST2 (2 << 2) -#define PIPECONF_DITHER_TYPE_TEMP (3 << 2) +#define PIPECONF_ENABLE REG_BIT(31) +#define PIPECONF_DOUBLE_WIDE REG_BIT(30) /* pre-i965 */ +#define PIPECONF_STATE_ENABLE REG_BIT(30) /* i965+ */ +#define PIPECONF_DSI_PLL_LOCKED REG_BIT(29) /* vlv & pipe A only */ +#define PIPECONF_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* pre-hsw */ +#define PIPECONF_FRAME_START_DELAY(x) REG_FIELD_PREP(PIPECONF_FRAME_START_DELAY_MASK, (x)) /* pre-hsw: 0-3 */ +#define PIPECONF_PIPE_LOCKED REG_BIT(25) +#define PIPECONF_FORCE_BORDER REG_BIT(25) +#define PIPECONF_GAMMA_MODE_MASK_I9XX REG_BIT(24) /* gmch */ +#define PIPECONF_GAMMA_MODE_MASK_ILK REG_GENMASK(25, 24) /* ilk-ivb */ +#define PIPECONF_GAMMA_MODE_8BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 0) +#define PIPECONF_GAMMA_MODE_10BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK, 1) +#define PIPECONF_GAMMA_MODE_12BIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 2) /* ilk-ivb */ +#define PIPECONF_GAMMA_MODE_SPLIT REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, 3) /* ivb */ +#define PIPECONF_GAMMA_MODE(x) REG_FIELD_PREP(PIPECONF_GAMMA_MODE_MASK_ILK, (x)) /* pass in GAMMA_MODE_MODE_* */ +#define PIPECONF_INTERLACE_MASK REG_GENMASK(23, 21) /* gen3+ */ +#define PIPECONF_INTERLACE_PROGRESSIVE REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 0) +#define PIPECONF_INTERLACE_W_SYNC_SHIFT_PANEL REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 4) /* gen4 only */ +#define PIPECONF_INTERLACE_W_SYNC_SHIFT REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 5) /* gen4 only */ +#define PIPECONF_INTERLACE_W_FIELD_INDICATION REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 6) +#define PIPECONF_INTERLACE_FIELD_0_ONLY REG_FIELD_PREP(PIPECONF_INTERLACE_MASK, 7) /* gen3 only */ +/* + * ilk+: PF/D=progressive fetch/display, IF/D=interlaced fetch/display, + * DBL=power saving pixel doubling, PF-ID* requires panel fitter + */ +#define PIPECONF_INTERLACE_MASK_ILK REG_GENMASK(23, 21) /* ilk+ */ +#define PIPECONF_INTERLACE_MASK_HSW REG_GENMASK(22, 21) /* hsw+ */ +#define PIPECONF_INTERLACE_PF_PD_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 0) +#define PIPECONF_INTERLACE_PF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 1) +#define PIPECONF_INTERLACE_IF_ID_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 3) +#define PIPECONF_INTERLACE_IF_ID_DBL_ILK REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 4) /* ilk/snb only */ +#define PIPECONF_INTERLACE_PF_ID_DBL_ILK 
REG_FIELD_PREP(PIPECONF_INTERLACE_MASK_ILK, 5) /* ilk/snb only */ +#define PIPECONF_EDP_RR_MODE_SWITCH REG_BIT(20) +#define PIPECONF_CXSR_DOWNCLOCK REG_BIT(16) +#define PIPECONF_EDP_RR_MODE_SWITCH_VLV REG_BIT(14) +#define PIPECONF_COLOR_RANGE_SELECT REG_BIT(13) +#define PIPECONF_OUTPUT_COLORSPACE_MASK REG_GENMASK(12, 11) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_RGB REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 0) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV601 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 1) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV709 REG_FIELD_PREP(PIPECONF_OUTPUT_COLORSPACE_MASK, 2) /* ilk-ivb */ +#define PIPECONF_OUTPUT_COLORSPACE_YUV_HSW REG_BIT(11) /* hsw only */ +#define PIPECONF_BPC_MASK REG_GENMASK(7, 5) /* ctg-ivb */ +#define PIPECONF_BPC_8 REG_FIELD_PREP(PIPECONF_BPC_MASK, 0) +#define PIPECONF_BPC_10 REG_FIELD_PREP(PIPECONF_BPC_MASK, 1) +#define PIPECONF_BPC_6 REG_FIELD_PREP(PIPECONF_BPC_MASK, 2) +#define PIPECONF_BPC_12 REG_FIELD_PREP(PIPECONF_BPC_MASK, 3) +#define PIPECONF_DITHER_EN REG_BIT(4) +#define PIPECONF_DITHER_TYPE_MASK REG_GENMASK(3, 2) +#define PIPECONF_DITHER_TYPE_SP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 0) +#define PIPECONF_DITHER_TYPE_ST1 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 1) +#define PIPECONF_DITHER_TYPE_ST2 REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 2) +#define PIPECONF_DITHER_TYPE_TEMP REG_FIELD_PREP(PIPECONF_DITHER_TYPE_MASK, 3) #define _PIPEASTAT 0x70024 #define PIPE_FIFO_UNDERRUN_STATUS (1UL << 31) #define SPRITE1_FLIP_DONE_INT_EN_VLV (1UL << 30) -- cgit From e93a590c79faa4aaa4d7eadacdef9240e1e823a1 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 12 Nov 2021 21:38:09 +0200 Subject: drm/i915: Clean up PCH_TRANSCONF/TRANS_DP_CTL bit defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_BIT & co. for PCH_TRANSCONF/TRANS_DP_CTL bits, and adjust the naming a some bits to be more consistent. 
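With the combined TRANS_DP_SYNC_MASK gone, the polarity update in the diff below becomes the usual read-modify-write over the two explicit bits; schematically (generic C with hypothetical names, bit positions taken from the new defines):

    #define EX_VSYNC_ACTIVE_HIGH  (1u << 4)
    #define EX_HSYNC_ACTIVE_HIGH  (1u << 3)

    /* Clear both polarity bits, then set only the requested ones; the
     * rest of the register value is left untouched. */
    static unsigned int ex_set_sync_polarity(unsigned int reg,
                                             int vsync_high, int hsync_high)
    {
        reg &= ~(EX_VSYNC_ACTIVE_HIGH | EX_HSYNC_ACTIVE_HIGH);
        if (vsync_high)
            reg |= EX_VSYNC_ACTIVE_HIGH;
        if (hsync_high)
            reg |= EX_HSYNC_ACTIVE_HIGH;
        return reg;
    }
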
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211112193813.8224-6-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_pch_display.c | 13 +++--- drivers/gpu/drm/i915/i915_reg.h | 58 +++++++++++------------- 2 files changed, 33 insertions(+), 38 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index 657e904061d7..7ef2d40997b2 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -166,11 +166,11 @@ static void ilk_enable_pch_transcoder(const struct intel_crtc_state *crtc_state) if ((pipeconf_val & PIPECONF_INTERLACE_MASK_ILK) == PIPECONF_INTERLACE_IF_ID_ILK) { if (HAS_PCH_IBX(dev_priv) && intel_crtc_has_type(crtc_state, INTEL_OUTPUT_SDVO)) - val |= TRANS_LEGACY_INTERLACED_ILK; + val |= TRANS_INTERLACE_LEGACY_VSYNC_IBX; else - val |= TRANS_INTERLACED; + val |= TRANS_INTERLACE_INTERLACED; } else { - val |= TRANS_PROGRESSIVE; + val |= TRANS_INTERLACE_PROGRESSIVE; } intel_de_write(dev_priv, reg, val | TRANS_ENABLE); @@ -293,7 +293,8 @@ void ilk_pch_enable(struct intel_atomic_state *state, temp = intel_de_read(dev_priv, reg); temp &= ~(TRANS_DP_PORT_SEL_MASK | - TRANS_DP_SYNC_MASK | + TRANS_DP_VSYNC_ACTIVE_HIGH | + TRANS_DP_HSYNC_ACTIVE_HIGH | TRANS_DP_BPC_MASK); temp |= TRANS_DP_OUTPUT_ENABLE; temp |= bpc << 9; /* same format but at 11:9 */ @@ -437,9 +438,9 @@ static void lpt_enable_pch_transcoder(struct drm_i915_private *dev_priv, pipeconf_val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); if ((pipeconf_val & PIPECONF_INTERLACE_MASK_HSW) == PIPECONF_INTERLACE_IF_ID_ILK) - val |= TRANS_INTERLACED; + val |= TRANS_INTERLACE_INTERLACED; else - val |= TRANS_PROGRESSIVE; + val |= TRANS_INTERLACE_PROGRESSIVE; intel_de_write(dev_priv, LPT_TRANSCONF, val); if (intel_de_wait_for_set(dev_priv, LPT_TRANSCONF, diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c1b9c45d96ab..8494fd466ca3 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -8081,22 +8081,19 @@ enum { #define _PCH_TRANSBCONF 0xf1008 #define PCH_TRANSCONF(pipe) _MMIO_PIPE(pipe, _PCH_TRANSACONF, _PCH_TRANSBCONF) #define LPT_TRANSCONF PCH_TRANSCONF(PIPE_A) /* lpt has only one transcoder */ -#define TRANS_DISABLE (0 << 31) -#define TRANS_ENABLE (1 << 31) -#define TRANS_STATE_MASK (1 << 30) -#define TRANS_STATE_DISABLE (0 << 30) -#define TRANS_STATE_ENABLE (1 << 30) -#define TRANS_FRAME_START_DELAY_MASK (3 << 27) /* ibx */ -#define TRANS_FRAME_START_DELAY(x) ((x) << 27) /* ibx: 0-3 */ -#define TRANS_INTERLACE_MASK (7 << 21) -#define TRANS_PROGRESSIVE (0 << 21) -#define TRANS_INTERLACED (3 << 21) -#define TRANS_LEGACY_INTERLACED_ILK (2 << 21) -#define TRANS_8BPC (0 << 5) -#define TRANS_10BPC (1 << 5) -#define TRANS_6BPC (2 << 5) -#define TRANS_12BPC (3 << 5) - +#define TRANS_ENABLE REG_BIT(31) +#define TRANS_STATE_ENABLE REG_BIT(30) +#define TRANS_FRAME_START_DELAY_MASK REG_GENMASK(28, 27) /* ibx */ +#define TRANS_FRAME_START_DELAY(x) REG_FIELD_PREP(TRANS_FRAME_START_DELAY_MASK, (x)) /* ibx: 0-3 */ +#define TRANS_INTERLACE_MASK REG_GENMASK(23, 21) +#define TRANS_INTERLACE_PROGRESSIVE REG_FIELD_PREP(TRANS_INTERLACE_MASK, 0) +#define TRANS_INTERLACE_LEGACY_VSYNC_IBX REG_FIELD_PREP(TRANS_INTERLACE_MASK, 2) /* ibx */ +#define TRANS_INTERLACE_INTERLACED REG_FIELD_PREP(TRANS_INTERLACE_MASK, 3) +#define TRANS_BPC_MASK REG_GENMASK(7, 5) /* ibx */ +#define TRANS_BPC_8 
REG_FIELD_PREP(TRANS_BPC_MASK, 0) +#define TRANS_BPC_10 REG_FIELD_PREP(TRANS_BPC_MASK, 1) +#define TRANS_BPC_6 REG_FIELD_PREP(TRANS_BPC_MASK, 2) +#define TRANS_BPC_12 REG_FIELD_PREP(TRANS_BPC_MASK, 3) #define _TRANSA_CHICKEN1 0xf0060 #define _TRANSB_CHICKEN1 0xf1060 #define TRANS_CHICKEN1(pipe) _MMIO_PIPE(pipe, _TRANSA_CHICKEN1, _TRANSB_CHICKEN1) @@ -8306,22 +8303,19 @@ enum { #define _TRANS_DP_CTL_B 0xe1300 #define _TRANS_DP_CTL_C 0xe2300 #define TRANS_DP_CTL(pipe) _MMIO_PIPE(pipe, _TRANS_DP_CTL_A, _TRANS_DP_CTL_B) -#define TRANS_DP_OUTPUT_ENABLE (1 << 31) -#define TRANS_DP_PORT_SEL_MASK (3 << 29) -#define TRANS_DP_PORT_SEL_NONE (3 << 29) -#define TRANS_DP_PORT_SEL(port) (((port) - PORT_B) << 29) -#define TRANS_DP_AUDIO_ONLY (1 << 26) -#define TRANS_DP_ENH_FRAMING (1 << 18) -#define TRANS_DP_8BPC (0 << 9) -#define TRANS_DP_10BPC (1 << 9) -#define TRANS_DP_6BPC (2 << 9) -#define TRANS_DP_12BPC (3 << 9) -#define TRANS_DP_BPC_MASK (3 << 9) -#define TRANS_DP_VSYNC_ACTIVE_HIGH (1 << 4) -#define TRANS_DP_VSYNC_ACTIVE_LOW 0 -#define TRANS_DP_HSYNC_ACTIVE_HIGH (1 << 3) -#define TRANS_DP_HSYNC_ACTIVE_LOW 0 -#define TRANS_DP_SYNC_MASK (3 << 3) +#define TRANS_DP_OUTPUT_ENABLE REG_BIT(31) +#define TRANS_DP_PORT_SEL_MASK REG_GENMASK(30, 29) +#define TRANS_DP_PORT_SEL_NONE REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, 3) +#define TRANS_DP_PORT_SEL(port) REG_FIELD_PREP(TRANS_DP_PORT_SEL_MASK, (port) - PORT_B) +#define TRANS_DP_AUDIO_ONLY REG_BIT(26) +#define TRANS_DP_ENH_FRAMING REG_BIT(18) +#define TRANS_DP_BPC_MASK REG_GENMASK(10, 9) +#define TRANS_DP_BPC_8 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 0) +#define TRANS_DP_BPC_10 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 1) +#define TRANS_DP_BPC_6 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 2) +#define TRANS_DP_BPC_12 REG_FIELD_PREP(TRANS_DP_BPC_MASK, 3) +#define TRANS_DP_VSYNC_ACTIVE_HIGH REG_BIT(4) +#define TRANS_DP_HSYNC_ACTIVE_HIGH REG_BIT(3) #define _TRANS_DP2_CTL_A 0x600a0 #define _TRANS_DP2_CTL_B 0x610a0 -- cgit From 62236df23d018fc977d2871744440efe6a08a6cc Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 12 Nov 2021 21:38:10 +0200 Subject: drm/i915: Clean up PIPESRC defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_GENMASK() & co. when dealing with PIPESRC. Note that i9xx_get_initial_plane_config() will now use the full 16 bit mask whereas previously it used 12 bits only. But intel_get_pipe_src_size() already used the full 16 bits on all platforms anyway, so at least we're consistent now. The high bits beyond the max supported pipe source size should not be set in any case so this seems fine. 
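For reference, a minimal userspace sketch of what the new PIPESRC field helpers express, assuming the layout described above (width in bits 31:16, height in bits 15:0, both stored as size minus one). It mirrors the semantics of REG_FIELD_PREP()/REG_FIELD_GET() with plain shifts and masks; it is not the i915 macro implementation itself.

/*
 * Illustrative only: pack/unpack of the PIPESRC layout with plain
 * bit operations, mimicking REG_FIELD_PREP()/REG_FIELD_GET().
 */
#include <stdint.h>
#include <stdio.h>

#define PIPESRC_WIDTH_MASK	(0xffffu << 16)
#define PIPESRC_HEIGHT_MASK	(0xffffu << 0)

static uint32_t pipesrc_pack(unsigned int w, unsigned int h)
{
	/* hardware stores "size - 1" in each field */
	return (((w - 1) << 16) & PIPESRC_WIDTH_MASK) |
	       (((h - 1) << 0) & PIPESRC_HEIGHT_MASK);
}

static void pipesrc_unpack(uint32_t val, unsigned int *w, unsigned int *h)
{
	*w = ((val & PIPESRC_WIDTH_MASK) >> 16) + 1;
	*h = ((val & PIPESRC_HEIGHT_MASK) >> 0) + 1;
}

int main(void)
{
	unsigned int w, h;

	pipesrc_unpack(pipesrc_pack(1920, 1080), &w, &h);
	printf("%ux%u\n", w, h);	/* prints 1920x1080 */
	return 0;
}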
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20211112193813.8224-7-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/i9xx_plane.c | 4 ++-- drivers/gpu/drm/i915/display/intel_display.c | 7 ++++--- drivers/gpu/drm/i915/i915_reg.h | 4 ++++ 3 files changed, 10 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index 54f8776ca6b3..a87b65cd41fd 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -1054,8 +1054,8 @@ i9xx_get_initial_plane_config(struct intel_crtc *crtc, plane_config->base = base; val = intel_de_read(dev_priv, PIPESRC(pipe)); - fb->width = ((val >> 16) & 0xfff) + 1; - fb->height = ((val >> 0) & 0xfff) + 1; + fb->width = REG_FIELD_GET(PIPESRC_WIDTH_MASK, val) + 1; + fb->height = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, val) + 1; val = intel_de_read(dev_priv, DSPSTRIDE(i9xx_plane)); fb->pitches[0] = val & 0xffffffc0; diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e485dd048bce..2f2113b930be 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3276,7 +3276,8 @@ static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state) * always be the user's requested size. */ intel_de_write(dev_priv, PIPESRC(pipe), - ((crtc_state->pipe_src_w - 1) << 16) | (crtc_state->pipe_src_h - 1)); + PIPESRC_WIDTH(crtc_state->pipe_src_w - 1) | + PIPESRC_HEIGHT(crtc_state->pipe_src_h - 1)); } static bool intel_pipe_is_interlaced(const struct intel_crtc_state *crtc_state) @@ -3347,8 +3348,8 @@ static void intel_get_pipe_src_size(struct intel_crtc *crtc, u32 tmp; tmp = intel_de_read(dev_priv, PIPESRC(crtc->pipe)); - pipe_config->pipe_src_h = (tmp & 0xffff) + 1; - pipe_config->pipe_src_w = ((tmp >> 16) & 0xffff) + 1; + pipe_config->pipe_src_w = REG_FIELD_GET(PIPESRC_WIDTH_MASK, tmp) + 1; + pipe_config->pipe_src_h = REG_FIELD_GET(PIPESRC_HEIGHT_MASK, tmp) + 1; } static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 8494fd466ca3..d0286cb55d83 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3476,6 +3476,10 @@ enum { #define _VSYNC_A 0x60014 #define _EXITLINE_A 0x60018 #define _PIPEASRC 0x6001c +#define PIPESRC_WIDTH_MASK REG_GENMASK(31, 16) +#define PIPESRC_WIDTH(w) REG_FIELD_PREP(PIPESRC_WIDTH_MASK, (w)) +#define PIPESRC_HEIGHT_MASK REG_GENMASK(15, 0) +#define PIPESRC_HEIGHT(h) REG_FIELD_PREP(PIPESRC_HEIGHT_MASK, (h)) #define _BCLRPAT_A 0x60020 #define _VSYNCSHIFT_A 0x60028 #define _PIPE_MULT_A 0x6002c -- cgit From 7355bfe0e0cc27597d530f78e259a985cb85af40 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Sun, 23 Jan 2022 13:57:17 +0100 Subject: netfilter: Remove flowtable relics NF_FLOW_TABLE_IPV4 and NF_FLOW_TABLE_IPV6 are invisble, selected by nothing (so they can no longer be enabled), and their last real users have been removed (nf_flow_table_ipv6.c is empty). Clean up the leftovers. 
Fixes: c42ba4290b2147aa ("netfilter: flowtable: remove ipv4/ipv6 modules") Signed-off-by: Geert Uytterhoeven Signed-off-by: Pablo Neira Ayuso --- net/ipv4/netfilter/Kconfig | 4 ---- net/ipv6/netfilter/Kconfig | 4 ---- net/ipv6/netfilter/Makefile | 3 --- net/ipv6/netfilter/nf_flow_table_ipv6.c | 0 4 files changed, 11 deletions(-) delete mode 100644 net/ipv6/netfilter/nf_flow_table_ipv6.c diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig index 67087f95579f..aab384126f61 100644 --- a/net/ipv4/netfilter/Kconfig +++ b/net/ipv4/netfilter/Kconfig @@ -58,10 +58,6 @@ config NF_TABLES_ARP endif # NF_TABLES -config NF_FLOW_TABLE_IPV4 - tristate - select NF_FLOW_TABLE_INET - config NF_DUP_IPV4 tristate "Netfilter IPv4 packet duplication to alternate destination" depends on !NF_CONNTRACK || NF_CONNTRACK diff --git a/net/ipv6/netfilter/Kconfig b/net/ipv6/netfilter/Kconfig index 97d3d1b36dbc..0ba62f4868f9 100644 --- a/net/ipv6/netfilter/Kconfig +++ b/net/ipv6/netfilter/Kconfig @@ -47,10 +47,6 @@ config NFT_FIB_IPV6 endif # NF_TABLES_IPV6 endif # NF_TABLES -config NF_FLOW_TABLE_IPV6 - tristate - select NF_FLOW_TABLE_INET - config NF_DUP_IPV6 tristate "Netfilter IPv6 packet duplication to alternate destination" depends on !NF_CONNTRACK || NF_CONNTRACK diff --git a/net/ipv6/netfilter/Makefile b/net/ipv6/netfilter/Makefile index b85383606df7..b8d6dc9aeeb6 100644 --- a/net/ipv6/netfilter/Makefile +++ b/net/ipv6/netfilter/Makefile @@ -28,9 +28,6 @@ obj-$(CONFIG_NFT_REJECT_IPV6) += nft_reject_ipv6.o obj-$(CONFIG_NFT_DUP_IPV6) += nft_dup_ipv6.o obj-$(CONFIG_NFT_FIB_IPV6) += nft_fib_ipv6.o -# flow table support -obj-$(CONFIG_NF_FLOW_TABLE_IPV6) += nf_flow_table_ipv6.o - # matches obj-$(CONFIG_IP6_NF_MATCH_AH) += ip6t_ah.o obj-$(CONFIG_IP6_NF_MATCH_EUI64) += ip6t_eui64.o diff --git a/net/ipv6/netfilter/nf_flow_table_ipv6.c b/net/ipv6/netfilter/nf_flow_table_ipv6.c deleted file mode 100644 index e69de29bb2d1..000000000000 -- cgit From 34243b9ec856309339172b1507379074156947e8 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 23 Jan 2022 15:24:00 +0100 Subject: netfilter: nft_ct: fix use after free when attaching zone template The conversion erroneously removed the refcount increment. In case we can use the percpu template, we need to increment the refcount, else it will be released when the skb gets freed. In case the slowpath is taken, the new template already has a refcount of 1. Fixes: 719774377622 ("netfilter: conntrack: convert to refcount_t api") Reported-by: kernel test robot Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_ct.c | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/net/netfilter/nft_ct.c b/net/netfilter/nft_ct.c index 518d96c8c247..5adf8bb628a8 100644 --- a/net/netfilter/nft_ct.c +++ b/net/netfilter/nft_ct.c @@ -260,9 +260,12 @@ static void nft_ct_set_zone_eval(const struct nft_expr *expr, ct = this_cpu_read(nft_ct_pcpu_template); if (likely(refcount_read(&ct->ct_general.use) == 1)) { + refcount_inc(&ct->ct_general.use); nf_ct_zone_add(ct, &zone); } else { - /* previous skb got queued to userspace */ + /* previous skb got queued to userspace, allocate temporary + * one until percpu template can be reused. 
+ */ ct = nf_ct_tmpl_alloc(nft_net(pkt), &zone, GFP_ATOMIC); if (!ct) { regs->verdict.code = NF_DROP; -- cgit From c858620d2ae3489409af593f005a48a8a324da3d Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Sun, 23 Jan 2022 15:45:54 +0100 Subject: selftests: netfilter: reduce zone stress test running time This selftests needs almost 3 minutes to complete, reduce the insertes zones to 1000. Test now completes in about 20 seconds. Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- tools/testing/selftests/netfilter/nft_zones_many.sh | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/netfilter/nft_zones_many.sh b/tools/testing/selftests/netfilter/nft_zones_many.sh index 04633119b29a..5a8db0b48928 100755 --- a/tools/testing/selftests/netfilter/nft_zones_many.sh +++ b/tools/testing/selftests/netfilter/nft_zones_many.sh @@ -9,7 +9,7 @@ ns="ns-$sfx" # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 -zones=20000 +zones=2000 have_ct_tool=0 ret=0 @@ -75,10 +75,10 @@ EOF while [ $i -lt $max_zones ]; do local start=$(date +%s%3N) - i=$((i + 10000)) + i=$((i + 1000)) j=$((j + 1)) # nft rule in output places each packet in a different zone. - dd if=/dev/zero of=/dev/stdout bs=8k count=10000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345 + dd if=/dev/zero of=/dev/stdout bs=8k count=1000 2>/dev/null | ip netns exec "$ns" socat STDIN UDP:127.0.0.1:12345,sourceport=12345 if [ $? -ne 0 ] ;then ret=1 break @@ -86,7 +86,7 @@ EOF stop=$(date +%s%3N) local duration=$((stop-start)) - echo "PASS: added 10000 entries in $duration ms (now $i total, loop $j)" + echo "PASS: added 1000 entries in $duration ms (now $i total, loop $j)" done if [ $have_ct_tool -eq 1 ]; then @@ -128,11 +128,11 @@ test_conntrack_tool() { break fi - if [ $((i%10000)) -eq 0 ];then + if [ $((i%1000)) -eq 0 ];then stop=$(date +%s%3N) local duration=$((stop-start)) - echo "PASS: added 10000 entries in $duration ms (now $i total)" + echo "PASS: added 1000 entries in $duration ms (now $i total)" start=$stop fi done -- cgit From aad51ca71ad83273e8826d6cfdcf53c98748d1fa Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Mon, 24 Jan 2022 22:09:15 +0100 Subject: selftests: netfilter: check stateless nat udp checksum fixup Add a test that sends large udp packet (which is fragmented) via a stateless nft nat rule, i.e. 'ip saddr set 10.2.3.4' and check that the datagram is received by peer. 
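The rule being verified can be sketched in a few lines of userspace C (illustrative names, assuming RFC 1624 incremental updates; this is not the nft_payload code). Rewriting the IPv4 source address always requires fixing the IP header checksum, but the UDP checksum, which covers the pseudo-header, may only be adjusted on an unfragmented packet: later fragments carry no UDP header at all.

#include <stdint.h>
#include <arpa/inet.h>
#include <netinet/ip.h>
#include <netinet/udp.h>

#define IP_FRAG_MASK	0x3fff	/* MF flag + 13-bit fragment offset */

/* RFC 1624 incremental checksum update for one replaced 32-bit word. */
static uint16_t csum_replace4(uint16_t check, uint32_t from, uint32_t to)
{
	uint32_t sum = (uint16_t)~check;

	sum += (uint16_t)~(from >> 16) + (uint16_t)~(from & 0xffff);
	sum += (to >> 16) + (to & 0xffff);
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

/* Stateless source-address rewrite on a parsed IPv4/UDP packet. */
static void rewrite_saddr(struct iphdr *iph, struct udphdr *uh, uint32_t new_saddr)
{
	uint32_t old_saddr = iph->saddr;

	iph->saddr = new_saddr;
	iph->check = csum_replace4(iph->check, old_saddr, new_saddr);

	/* Skip the L4 fixup for fragments: only the unfragmented case has
	 * a UDP header here, and its checksum spans all fragments' data.
	 */
	if ((ntohs(iph->frag_off) & IP_FRAG_MASK) == 0 && uh && uh->check)
		uh->check = csum_replace4(uh->check, old_saddr, new_saddr);
}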
On kernels without commit 4e1860a38637 ("netfilter: nft_payload: do not update layer 4 checksum when mangling fragments")', this will fail with: cmp: EOF on /tmp/tmp.V1q0iXJyQF which is empty -rw------- 1 root root 4096 Jan 24 22:03 /tmp/tmp.Aaqnq4rBKS -rw------- 1 root root 0 Jan 24 22:03 /tmp/tmp.V1q0iXJyQF ERROR: in and output file mismatch when checking udp with stateless nat FAIL: nftables v1.0.0 (Fearless Fosdick #2) On patched kernels, this will show: PASS: IP statless for ns2-PFp89amx Signed-off-by: Florian Westphal Signed-off-by: Pablo Neira Ayuso --- tools/testing/selftests/netfilter/nft_nat.sh | 152 +++++++++++++++++++++++++++ 1 file changed, 152 insertions(+) diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh index 349a319a9e51..79fe627b9e81 100755 --- a/tools/testing/selftests/netfilter/nft_nat.sh +++ b/tools/testing/selftests/netfilter/nft_nat.sh @@ -899,6 +899,144 @@ EOF ip netns exec "$ns0" nft delete table $family nat } +test_stateless_nat_ip() +{ + local lret=0 + + ip netns exec "$ns0" sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null + ip netns exec "$ns0" sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null + + ip netns exec "$ns2" ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: cannot ping $ns1 from $ns2 before loading stateless rules" + return 1 + fi + +ip netns exec "$ns0" nft -f /dev/stdin < /dev/null # ping ns2->ns1 + if [ $? -ne 0 ] ; then + echo "ERROR: cannot ping $ns1 from $ns2 with stateless rules" + lret=1 + fi + + # ns1 should have seen packets from .2.2, due to stateless rewrite. + expect="packets 1 bytes 84" + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0insl | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter "$ns1" ns0insl "$expect" "test_stateless 1" + lret=1 + fi + + for dir in "in" "out" ; do + cnt=$(ip netns exec "$ns2" nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter "$ns2" ns1$dir "$expect" "test_stateless 2" + lret=1 + fi + done + + # ns1 should not have seen packets from ns2, due to masquerade + expect="packets 0 bytes 0" + for dir in "in" "out" ; do + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns2${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter "$ns1" ns0$dir "$expect" "test_stateless 3" + lret=1 + fi + + cnt=$(ip netns exec "$ns0" nft list counter inet filter ns1${dir} | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter "$ns0" ns1$dir "$expect" "test_stateless 4" + lret=1 + fi + done + + reset_counters + + socat -h > /dev/null 2>&1 + if [ $? -ne 0 ];then + echo "SKIP: Could not run stateless nat frag test without socat tool" + if [ $lret -eq 0 ]; then + return $ksft_skip + fi + + ip netns exec "$ns0" nft delete table ip stateless + return $lret + fi + + local tmpfile=$(mktemp) + dd if=/dev/urandom of=$tmpfile bs=4096 count=1 2>/dev/null + + local outfile=$(mktemp) + ip netns exec "$ns1" timeout 3 socat -u UDP4-RECV:4233 OPEN:$outfile < /dev/null & + sc_r=$! + + sleep 1 + # re-do with large ping -> ip fragmentation + ip netns exec "$ns2" timeout 3 socat - UDP4-SENDTO:"10.0.1.99:4233" < "$tmpfile" > /dev/null + if [ $? -ne 0 ] ; then + echo "ERROR: failed to test udp $ns1 to $ns2 with stateless ip nat" 1>&2 + lret=1 + fi + + wait + + cmp "$tmpfile" "$outfile" + if [ $? 
-ne 0 ]; then + ls -l "$tmpfile" "$outfile" + echo "ERROR: in and output file mismatch when checking udp with stateless nat" 1>&2 + lret=1 + fi + + rm -f "$tmpfile" "$outfile" + + # ns1 should have seen packets from 2.2, due to stateless rewrite. + expect="packets 3 bytes 4164" + cnt=$(ip netns exec "$ns1" nft list counter inet filter ns0insl | grep -q "$expect") + if [ $? -ne 0 ]; then + bad_counter "$ns1" ns0insl "$expect" "test_stateless 5" + lret=1 + fi + + ip netns exec "$ns0" nft delete table ip stateless + if [ $? -ne 0 ]; then + echo "ERROR: Could not delete table ip stateless" 1>&2 + lret=1 + fi + + test $lret -eq 0 && echo "PASS: IP statless for $ns2" + + return $lret +} + # ip netns exec "$ns0" ping -c 1 -q 10.0.$i.99 for i in 0 1 2; do ip netns exec ns$i-$sfx nft -f /dev/stdin < Date: Tue, 25 Jan 2022 20:06:03 +0100 Subject: netfilter: nft_reject_bridge: Fix for missing reply from prerouting Prior to commit fa538f7cf05aa ("netfilter: nf_reject: add reject skbuff creation helpers"), nft_reject_bridge did not assign to nskb->dev before passing nskb on to br_forward(). The shared skbuff creation helpers introduced in above commit do which seems to confuse br_forward() as reject statements in prerouting hook won't emit a packet anymore. Fix this by simply passing NULL instead of 'dev' to the helpers - they use the pointer for just that assignment, nothing else. Fixes: fa538f7cf05aa ("netfilter: nf_reject: add reject skbuff creation helpers") Signed-off-by: Phil Sutter Signed-off-by: Pablo Neira Ayuso --- net/bridge/netfilter/nft_reject_bridge.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/bridge/netfilter/nft_reject_bridge.c b/net/bridge/netfilter/nft_reject_bridge.c index eba0efe64d05..fbf858ddec35 100644 --- a/net/bridge/netfilter/nft_reject_bridge.c +++ b/net/bridge/netfilter/nft_reject_bridge.c @@ -49,7 +49,7 @@ static void nft_reject_br_send_v4_tcp_reset(struct net *net, { struct sk_buff *nskb; - nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, dev, hook); + nskb = nf_reject_skb_v4_tcp_reset(net, oldskb, NULL, hook); if (!nskb) return; @@ -65,7 +65,7 @@ static void nft_reject_br_send_v4_unreach(struct net *net, { struct sk_buff *nskb; - nskb = nf_reject_skb_v4_unreach(net, oldskb, dev, hook, code); + nskb = nf_reject_skb_v4_unreach(net, oldskb, NULL, hook, code); if (!nskb) return; @@ -81,7 +81,7 @@ static void nft_reject_br_send_v6_tcp_reset(struct net *net, { struct sk_buff *nskb; - nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, dev, hook); + nskb = nf_reject_skb_v6_tcp_reset(net, oldskb, NULL, hook); if (!nskb) return; @@ -98,7 +98,7 @@ static void nft_reject_br_send_v6_unreach(struct net *net, { struct sk_buff *nskb; - nskb = nf_reject_skb_v6_unreach(net, oldskb, dev, hook, code); + nskb = nf_reject_skb_v6_unreach(net, oldskb, NULL, hook, code); if (!nskb) return; -- cgit From f459bfd4b9793f25e0fcf19878edd87d8dc569d9 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Wed, 26 Jan 2022 01:46:58 +0100 Subject: netfilter: nft_byteorder: track register operations Cancel tracking for byteorder operation, otherwise selector + byteorder operation is incorrectly reduced if source and destination registers are the same. 
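To see why cancelling the tracking matters, here is a toy userspace model of the idea, with made-up names; it is not the nf_tables tracking code. A later load of the same selector into the same register can only be elided if the register still holds the untransformed value, and a byteorder operation in between invalidates that assumption, so the second load has to stay.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct track { const char *selector; };	/* what the register currently holds */

static struct track regs[16];

static bool load_payload(int reg, const char *selector)
{
	if (regs[reg].selector && !strcmp(regs[reg].selector, selector)) {
		printf("reg%d: load of %s elided\n", reg, selector);
		return false;			/* reduced away */
	}
	regs[reg].selector = selector;
	printf("reg%d: load %s\n", reg, selector);
	return true;
}

static void byteorder(int reg)
{
	regs[reg].selector = NULL;		/* the fix: cancel tracking */
	printf("reg%d: byteorder, tracking cancelled\n", reg);
}

int main(void)
{
	load_payload(1, "ip saddr");
	byteorder(1);
	load_payload(1, "ip saddr");		/* must NOT be elided */
	return 0;
}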
Reported-by: kernel test robot Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nft_byteorder.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/net/netfilter/nft_byteorder.c b/net/netfilter/nft_byteorder.c index 9d5947ab8d4e..e646e9ee4a98 100644 --- a/net/netfilter/nft_byteorder.c +++ b/net/netfilter/nft_byteorder.c @@ -167,12 +167,24 @@ nla_put_failure: return -1; } +static bool nft_byteorder_reduce(struct nft_regs_track *track, + const struct nft_expr *expr) +{ + struct nft_byteorder *priv = nft_expr_priv(expr); + + track->regs[priv->dreg].selector = NULL; + track->regs[priv->dreg].bitwise = NULL; + + return false; +} + static const struct nft_expr_ops nft_byteorder_ops = { .type = &nft_byteorder_type, .size = NFT_EXPR_SIZE(sizeof(struct nft_byteorder)), .eval = nft_byteorder_eval, .init = nft_byteorder_init, .dump = nft_byteorder_dump, + .reduce = nft_byteorder_reduce, }; struct nft_expr_type nft_byteorder_type __read_mostly = { -- cgit From eda0cf1202acf1ef47f93d8f92d4839213431424 Mon Sep 17 00:00:00 2001 From: Florian Westphal Date: Wed, 26 Jan 2022 12:54:54 +0100 Subject: selftests: nft_concat_range: add test for reload with no element add/del Add a specific test for the reload issue fixed with commit 23c54263efd7cb ("netfilter: nft_set_pipapo: allocate pcpu scratch maps on clone"). Add to set, then flush set content + restore without other add/remove in the transaction. On kernels before the fix, this test case fails: net,mac with reload [FAIL] Signed-off-by: Florian Westphal Reviewed-by: Stefano Brivio Signed-off-by: Pablo Neira Ayuso --- .../selftests/netfilter/nft_concat_range.sh | 72 +++++++++++++++++++++- 1 file changed, 71 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/netfilter/nft_concat_range.sh b/tools/testing/selftests/netfilter/nft_concat_range.sh index ed61f6cab60f..df322e47a54f 100755 --- a/tools/testing/selftests/netfilter/nft_concat_range.sh +++ b/tools/testing/selftests/netfilter/nft_concat_range.sh @@ -27,7 +27,7 @@ TYPES="net_port port_net net6_port port_proto net6_port_mac net6_port_mac_proto net6_port_net6_port net_port_mac_proto_net" # Reported bugs, also described by TYPE_ variables below -BUGS="flush_remove_add" +BUGS="flush_remove_add reload" # List of possible paths to pktgen script from kernel tree for performance tests PKTGEN_SCRIPT_PATHS=" @@ -354,6 +354,23 @@ TYPE_flush_remove_add=" display Add two elements, flush, re-add " +TYPE_reload=" +display net,mac with reload +type_spec ipv4_addr . ether_addr +chain_spec ip daddr . 
ether saddr +dst addr4 +src mac +start 1 +count 1 +src_delta 2000 +tools sendip nc bash +proto udp + +race_repeat 0 + +perf_duration 0 +" + # Set template for all tests, types and rules are filled in depending on test set_template=' flush ruleset @@ -1473,6 +1490,59 @@ test_bug_flush_remove_add() { nft flush ruleset } +# - add ranged element, check that packets match it +# - reload the set, check packets still match +test_bug_reload() { + setup veth send_"${proto}" set || return ${KSELFTEST_SKIP} + rstart=${start} + + range_size=1 + for i in $(seq "${start}" $((start + count))); do + end=$((start + range_size)) + + # Avoid negative or zero-sized port ranges + if [ $((end / 65534)) -gt $((start / 65534)) ]; then + start=${end} + end=$((end + 1)) + fi + srcstart=$((start + src_delta)) + srcend=$((end + src_delta)) + + add "$(format)" || return 1 + range_size=$((range_size + 1)) + start=$((end + range_size)) + done + + # check kernel does allocate pcpu sctrach map + # for reload with no elemet add/delete + ( echo flush set inet filter test ; + nft list set inet filter test ) | nft -f - + + start=${rstart} + range_size=1 + + for i in $(seq "${start}" $((start + count))); do + end=$((start + range_size)) + + # Avoid negative or zero-sized port ranges + if [ $((end / 65534)) -gt $((start / 65534)) ]; then + start=${end} + end=$((end + 1)) + fi + srcstart=$((start + src_delta)) + srcend=$((end + src_delta)) + + for j in $(seq ${start} $((range_size / 2 + 1)) ${end}); do + send_match "${j}" $((j + src_delta)) || return 1 + done + + range_size=$((range_size + 1)) + start=$((end + range_size)) + done + + nft flush ruleset +} + test_reported_issues() { eval test_bug_"${subtest}" } -- cgit From 37291f60d0822f191748c2a54ce63b0bc669020f Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Tue, 25 Jan 2022 18:16:00 -0600 Subject: phy: xilinx: zynqmp: Fix bus width setting for SGMII TX_PROT_BUS_WIDTH and RX_PROT_BUS_WIDTH are single registers with separate bit fields for each lane. The code in xpsgtr_phy_init_sgmii was not preserving the existing register value for other lanes, so enabling the PHY in SGMII mode on one lane zeroed out the settings for all other lanes, causing other PS-GTR peripherals such as USB3 to malfunction. Use xpsgtr_clr_set to only manipulate the desired bits in the register. 
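The essence of the fix is switching from a blind register write to a read-modify-write that only touches the configured lane's 2-bit field. A small illustrative helper follows, assuming the per-lane layout described above; this is not the driver's xpsgtr_clr_set(), just the pattern it follows, so settings of the other lanes survive.

#include <stdint.h>

#define PROT_BUS_WIDTH_10	0x0
#define LANE_WIDTH_SHIFT(n)	((n) * 2)
#define LANE_WIDTH_MASK(n)	(0x3u << LANE_WIDTH_SHIFT(n))

static inline uint32_t reg_clr_set(uint32_t reg, uint32_t clr, uint32_t set)
{
	/* read-modify-write: clear only the requested bits, then set */
	return (reg & ~clr) | set;
}

/* e.g. lane 2 of a register that already holds settings for lanes 0-3 */
static uint32_t set_lane_width_10(uint32_t reg, unsigned int lane)
{
	return reg_clr_set(reg, LANE_WIDTH_MASK(lane),
			   PROT_BUS_WIDTH_10 << LANE_WIDTH_SHIFT(lane));
}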
Fixes: 4a33bea00314 ("phy: zynqmp: Add PHY driver for the Xilinx ZynqMP Gigabit Transceiver") Signed-off-by: Robert Hancock Acked-by: Michal Simek Reviewed-by: Laurent Pinchart Link: https://lore.kernel.org/r/20220126001600.1592218-1-robert.hancock@calian.com Signed-off-by: Vinod Koul --- drivers/phy/xilinx/phy-zynqmp.c | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/drivers/phy/xilinx/phy-zynqmp.c b/drivers/phy/xilinx/phy-zynqmp.c index f478d8a17115..9be9535ad7ab 100644 --- a/drivers/phy/xilinx/phy-zynqmp.c +++ b/drivers/phy/xilinx/phy-zynqmp.c @@ -134,7 +134,8 @@ #define PROT_BUS_WIDTH_10 0x0 #define PROT_BUS_WIDTH_20 0x1 #define PROT_BUS_WIDTH_40 0x2 -#define PROT_BUS_WIDTH_SHIFT 2 +#define PROT_BUS_WIDTH_SHIFT(n) ((n) * 2) +#define PROT_BUS_WIDTH_MASK(n) GENMASK((n) * 2 + 1, (n) * 2) /* Number of GT lanes */ #define NUM_LANES 4 @@ -445,12 +446,12 @@ static void xpsgtr_phy_init_sata(struct xpsgtr_phy *gtr_phy) static void xpsgtr_phy_init_sgmii(struct xpsgtr_phy *gtr_phy) { struct xpsgtr_dev *gtr_dev = gtr_phy->dev; + u32 mask = PROT_BUS_WIDTH_MASK(gtr_phy->lane); + u32 val = PROT_BUS_WIDTH_10 << PROT_BUS_WIDTH_SHIFT(gtr_phy->lane); /* Set SGMII protocol TX and RX bus width to 10 bits. */ - xpsgtr_write(gtr_dev, TX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); - xpsgtr_write(gtr_dev, RX_PROT_BUS_WIDTH, - PROT_BUS_WIDTH_10 << (gtr_phy->lane * PROT_BUS_WIDTH_SHIFT)); + xpsgtr_clr_set(gtr_dev, TX_PROT_BUS_WIDTH, mask, val); + xpsgtr_clr_set(gtr_dev, RX_PROT_BUS_WIDTH, mask, val); xpsgtr_bypass_scrambler_8b10b(gtr_phy); } -- cgit From cfc826c88a79e22ba5d8001556eb2c7efd8a01b6 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Wed, 12 Jan 2022 14:17:24 +0300 Subject: phy: stm32: fix a refcount leak in stm32_usbphyc_pll_enable() This error path needs to decrement "usbphyc->n_pll_cons.counter" before returning. Fixes: 5b1af71280ab ("phy: stm32: rework PLL Lock detection") Signed-off-by: Dan Carpenter Link: https://lore.kernel.org/r/20220112111724.GB3019@kili Signed-off-by: Vinod Koul --- drivers/phy/st/phy-stm32-usbphyc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/phy/st/phy-stm32-usbphyc.c b/drivers/phy/st/phy-stm32-usbphyc.c index 2ce9bfd783d4..007a23c78d56 100644 --- a/drivers/phy/st/phy-stm32-usbphyc.c +++ b/drivers/phy/st/phy-stm32-usbphyc.c @@ -304,7 +304,7 @@ static int stm32_usbphyc_pll_enable(struct stm32_usbphyc *usbphyc) ret = __stm32_usbphyc_pll_disable(usbphyc); if (ret) - return ret; + goto dec_n_pll_cons; } ret = stm32_usbphyc_regulators_enable(usbphyc); -- cgit From 46e994717807f4b935c44d81dde9dd8bcd9a4f5d Mon Sep 17 00:00:00 2001 From: Wan Jiabing Date: Fri, 7 Jan 2022 10:50:50 +0800 Subject: phy: phy-mtk-tphy: Fix duplicated argument in phy-mtk-tphy Fix following coccicheck warning: ./drivers/phy/mediatek/phy-mtk-tphy.c:994:6-29: duplicated argument to && or || The efuse_rx_imp is duplicate. Here should be efuse_tx_imp. 
Signed-off-by: Wan Jiabing Acked-by: Chunfeng Yun Link: https://lore.kernel.org/r/20220107025050.787720-1-wanjiabing@vivo.com Signed-off-by: Vinod Koul --- drivers/phy/mediatek/phy-mtk-tphy.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/phy/mediatek/phy-mtk-tphy.c b/drivers/phy/mediatek/phy-mtk-tphy.c index 6d307102f4f6..8ee7682b8e93 100644 --- a/drivers/phy/mediatek/phy-mtk-tphy.c +++ b/drivers/phy/mediatek/phy-mtk-tphy.c @@ -992,7 +992,7 @@ static int phy_efuse_get(struct mtk_tphy *tphy, struct mtk_phy_instance *instanc /* no efuse, ignore it */ if (!instance->efuse_intr && !instance->efuse_rx_imp && - !instance->efuse_rx_imp) { + !instance->efuse_tx_imp) { dev_warn(dev, "no u3 intr efuse, but dts enable it\n"); instance->efuse_sw_en = 0; break; -- cgit From 1293fccc9e892712d910ec96079d3717307f1d2d Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 25 Jan 2022 13:14:21 +0100 Subject: net: ieee802154: hwsim: Ensure proper channel selection at probe time Drivers are expected to set the PHY current_channel and current_page according to their default state. The hwsim driver is advertising being configured on channel 13 by default but that is not reflected in its own internal pib structure. In order to ensure that this driver consider the current channel as being 13 internally, we at least need to set the pib->channel field to 13. Fixes: f25da51fdc38 ("ieee802154: hwsim: add replacement for fakelb") Signed-off-by: Miquel Raynal [stefan@datenfreihafen.org: fixed assigment from page to channel] Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220125121426.848337-2-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/mac802154_hwsim.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ieee802154/mac802154_hwsim.c b/drivers/net/ieee802154/mac802154_hwsim.c index 8caa61ec718f..36f1c5aa98fc 100644 --- a/drivers/net/ieee802154/mac802154_hwsim.c +++ b/drivers/net/ieee802154/mac802154_hwsim.c @@ -786,6 +786,7 @@ static int hwsim_add_one(struct genl_info *info, struct device *dev, goto err_pib; } + pib->channel = 13; rcu_assign_pointer(phy->pib, pib); phy->idx = idx; INIT_LIST_HEAD(&phy->edges); -- cgit From d753c4004820a888ec007dd88b271fa9c3172c5c Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 25 Jan 2022 13:14:22 +0100 Subject: net: ieee802154: mcr20a: Fix lifs/sifs periods These periods are expressed in time units (microseconds) while 40 and 12 are the number of symbol durations these periods will last. We need to multiply them both with phy->symbol_duration in order to get these values in microseconds. 
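As a worked example, with the 16 microsecond symbol duration the driver programs for the 2.4 GHz O-QPSK PHY (phy->symbol_duration = 16 in the hunk that follows), the corrected periods come out to 640 us and 192 us; the snippet below only illustrates that arithmetic.

#include <stdio.h>

int main(void)
{
	unsigned int symbol_duration_us = 16;	/* 802.15.4 O-QPSK, 2.4 GHz */
	unsigned int lifs_symbols = 40, sifs_symbols = 12;

	printf("LIFS = %u us\n", lifs_symbols * symbol_duration_us);	/* 640 us */
	printf("SIFS = %u us\n", sifs_symbols * symbol_duration_us);	/* 192 us */
	return 0;
}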
Fixes: 8c6ad9cc5157 ("ieee802154: Add NXP MCR20A IEEE 802.15.4 transceiver driver") Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220125121426.848337-3-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/mcr20a.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ieee802154/mcr20a.c b/drivers/net/ieee802154/mcr20a.c index 8dc04e2590b1..383231b85464 100644 --- a/drivers/net/ieee802154/mcr20a.c +++ b/drivers/net/ieee802154/mcr20a.c @@ -976,8 +976,8 @@ static void mcr20a_hw_setup(struct mcr20a_local *lp) dev_dbg(printdev(lp), "%s\n", __func__); phy->symbol_duration = 16; - phy->lifs_period = 40; - phy->sifs_period = 12; + phy->lifs_period = 40 * phy->symbol_duration; + phy->sifs_period = 12 * phy->symbol_duration; hw->flags = IEEE802154_HW_TX_OMIT_CKSUM | IEEE802154_HW_AFILT | -- cgit From e5ce576d45bf72fd0e3dc37eff897bfcc488f6a9 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 25 Jan 2022 13:14:23 +0100 Subject: net: ieee802154: at86rf230: Stop leaking skb's Upon error the ieee802154_xmit_complete() helper is not called. Only ieee802154_wake_queue() is called manually. In the Tx case we then leak the skb structure. Free the skb structure upon error before returning when appropriate. As the 'is_tx = 0' cannot be moved in the complete handler because of a possible race between the delay in switching to STATE_RX_AACK_ON and a new interrupt, we introduce an intermediate 'was_tx' boolean just for this purpose. There is no Fixes tag applying here, many changes have been made on this area and the issue kind of always existed. Suggested-by: Alexander Aring Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220125121426.848337-4-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/at86rf230.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/ieee802154/at86rf230.c b/drivers/net/ieee802154/at86rf230.c index 7d67f41387f5..4f5ef8a9a9a8 100644 --- a/drivers/net/ieee802154/at86rf230.c +++ b/drivers/net/ieee802154/at86rf230.c @@ -100,6 +100,7 @@ struct at86rf230_local { unsigned long cal_timeout; bool is_tx; bool is_tx_from_off; + bool was_tx; u8 tx_retry; struct sk_buff *tx_skb; struct at86rf230_state_change tx; @@ -343,7 +344,11 @@ at86rf230_async_error_recover_complete(void *context) if (ctx->free) kfree(ctx); - ieee802154_wake_queue(lp->hw); + if (lp->was_tx) { + lp->was_tx = 0; + dev_kfree_skb_any(lp->tx_skb); + ieee802154_wake_queue(lp->hw); + } } static void @@ -352,7 +357,11 @@ at86rf230_async_error_recover(void *context) struct at86rf230_state_change *ctx = context; struct at86rf230_local *lp = ctx->lp; - lp->is_tx = 0; + if (lp->is_tx) { + lp->was_tx = 1; + lp->is_tx = 0; + } + at86rf230_async_state_change(lp, ctx, STATE_RX_AACK_ON, at86rf230_async_error_recover_complete); } -- cgit From 621b24b09eb61c63f262da0c9c5f0e93348897e5 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 25 Jan 2022 13:14:24 +0100 Subject: net: ieee802154: ca8210: Stop leaking skb's Upon error the ieee802154_xmit_complete() helper is not called. Only ieee802154_wake_queue() is called manually. We then leak the skb structure. Free the skb structure upon error before returning. 
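Both this fix and the at86rf230 one above follow the same rule, sketched here for a hypothetical driver (the function is illustrative and taken from neither driver): the success path hands the skb to ieee802154_xmit_complete(), which consumes it, while the error path only wakes the queue, so the driver must free the skb it still owns.

#include <linux/skbuff.h>
#include <net/mac802154.h>

/* Illustrative TX completion handler for a hypothetical driver. */
static void example_xmit_done(struct ieee802154_hw *hw, struct sk_buff *skb,
			      int status)
{
	if (!status) {
		/* success: mac802154 consumes the skb */
		ieee802154_xmit_complete(hw, skb, false);
	} else {
		/* error path: the driver still owns the skb, free it */
		dev_kfree_skb_any(skb);
		ieee802154_wake_queue(hw);
	}
}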
Fixes: ded845a781a5 ("ieee802154: Add CA8210 IEEE 802.15.4 device driver") Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220125121426.848337-5-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- drivers/net/ieee802154/ca8210.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index ece6ff6049f6..f3438d3e104a 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -1771,6 +1771,7 @@ static int ca8210_async_xmit_complete( status ); if (status != MAC_TRANSACTION_OVERFLOW) { + dev_kfree_skb_any(priv->tx_skb); ieee802154_wake_queue(priv->hw); return 0; } -- cgit From 79c37ca73a6e9a33f7b2b7783ba6af07a448c8a9 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 25 Jan 2022 13:14:25 +0100 Subject: net: ieee802154: Return meaningful error codes from the netlink helpers Returning -1 does not indicate anything useful. Use a standard and meaningful error code instead. Fixes: a26c5fd7622d ("nl802154: add support for security layer") Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220125121426.848337-6-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- net/ieee802154/nl802154.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/net/ieee802154/nl802154.c b/net/ieee802154/nl802154.c index 277124f206e0..e0b072aecf0f 100644 --- a/net/ieee802154/nl802154.c +++ b/net/ieee802154/nl802154.c @@ -1441,7 +1441,7 @@ static int nl802154_send_key(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1634,7 +1634,7 @@ static int nl802154_send_device(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1812,7 +1812,7 @@ static int nl802154_send_devkey(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; @@ -1988,7 +1988,7 @@ static int nl802154_send_seclevel(struct sk_buff *msg, u32 cmd, u32 portid, hdr = nl802154hdr_put(msg, portid, seq, flags, cmd); if (!hdr) - return -1; + return -ENOBUFS; if (nla_put_u32(msg, NL802154_ATTR_IFINDEX, dev->ifindex)) goto nla_put_failure; -- cgit From 5d8a8b324ff48c9d9fe4f1634e33dc647d2481b4 Mon Sep 17 00:00:00 2001 From: Miquel Raynal Date: Tue, 25 Jan 2022 13:14:26 +0100 Subject: MAINTAINERS: Remove Harry Morris bouncing address Harry's e-mail address from Cascoda bounces, I have not found any contributions from him since 2018 so let's drop the Maintainer entry from the CA8210 driver and mark it Orphan. 
Signed-off-by: Miquel Raynal Acked-by: Alexander Aring Link: https://lore.kernel.org/r/20220125121426.848337-7-miquel.raynal@bootlin.com Signed-off-by: Stefan Schmidt --- MAINTAINERS | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/MAINTAINERS b/MAINTAINERS index 0d7883977e9b..ee938e96b101 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -4157,9 +4157,8 @@ N: csky K: csky CA8210 IEEE-802.15.4 RADIO DRIVER -M: Harry Morris L: linux-wpan@vger.kernel.org -S: Maintained +S: Orphan W: https://github.com/Cascoda/ca8210-linux.git F: Documentation/devicetree/bindings/net/ieee802154/ca8210.txt F: drivers/net/ieee802154/ca8210.c -- cgit From 516b33460c5bee78b2055637b0547bdb0e6af754 Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 26 Jan 2022 12:43:56 +0200 Subject: drm/i915/adlp: Fix TypeC PHY-ready status readout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The TCSS_DDI_STATUS register is indexed by tc_port not by the FIA port index, fix this up. This only caused an issue on TC#3/4 ports in legacy mode, as in all other cases the two indices either match (on TC#1/2) or the TCSS_DDI_STATUS_READY flag is set regardless of something being connected or not (on TC#1/2/3/4 in dp-alt and tbt-alt modes). Reported-and-tested-by: Chia-Lin Kao (AceLan) Fixes: 55ce306c2aa1 ("drm/i915/adl_p: Implement TC sequences") Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/4698 Cc: José Roberto de Souza Cc: # v5.14+ Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20220126104356.2022975-1-imre.deak@intel.com --- drivers/gpu/drm/i915/display/intel_tc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 4eefe7b0bb26..3291124a99e5 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -346,10 +346,11 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; - val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx)); + val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, assuming not complete\n", -- cgit From a0f25a6bb319aa05e04dcf51707c97c2881b4f47 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Thu, 16 Dec 2021 22:09:36 +0100 Subject: drm/hisilicon/hibmc: Allow to be built if COMPILE_TEST is enabled The commit feeb07d0ca5a ("drm/hisilicon/hibmc: Make CONFIG_DRM_HISI_HIBMC depend on ARM64") made the driver Kconfig symbol to depend on ARM64 since it only supports that architecture and loading the module on others would lead to incorrect video modes being used. But it also prevented the driver to be built on other architectures which is useful to have compile test coverage when doing subsystem wide changes. Make the dependency instead to be (ARM64 || COMPILE_TEST), so the driver is buildable when the CONFIG_COMPILE_TEST option is enabled. 
Signed-off-by: Javier Martinez Canillas Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20211216210936.3329977-1-javierm@redhat.com --- drivers/gpu/drm/hisilicon/hibmc/Kconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/hisilicon/hibmc/Kconfig b/drivers/gpu/drm/hisilicon/hibmc/Kconfig index 43943e980203..073adfe438dd 100644 --- a/drivers/gpu/drm/hisilicon/hibmc/Kconfig +++ b/drivers/gpu/drm/hisilicon/hibmc/Kconfig @@ -1,7 +1,7 @@ # SPDX-License-Identifier: GPL-2.0-only config DRM_HISI_HIBMC tristate "DRM Support for Hisilicon Hibmc" - depends on DRM && PCI && ARM64 + depends on DRM && PCI && (ARM64 || COMPILE_TEST) select DRM_KMS_HELPER select DRM_VRAM_HELPER select DRM_TTM -- cgit From 3da4b7403db87d39bc2613cfd790de1de99a70ab Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Wed, 26 Jan 2022 10:21:42 -0800 Subject: ALSA: usb-audio: initialize variables that could ignore errors clang static analysis reports this representative issue mixer.c:1548:35: warning: Assigned value is garbage or undefined ucontrol->value.integer.value[0] = val; ^ ~~~ The filter_error() macro allows errors to be ignored. If errors can be ignored, initialize variables so garbage will not be used. Fixes: 48cc42973509 ("ALSA: usb-audio: Filter error from connector kctl ops, too") Signed-off-by: Tom Rix Link: https://lore.kernel.org/r/20220126182142.1184819-1-trix@redhat.com Signed-off-by: Takashi Iwai --- sound/usb/mixer.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sound/usb/mixer.c b/sound/usb/mixer.c index e8f3f8d622ec..630766ba259f 100644 --- a/sound/usb/mixer.c +++ b/sound/usb/mixer.c @@ -1527,6 +1527,10 @@ error: usb_audio_err(chip, "cannot get connectors status: req = %#x, wValue = %#x, wIndex = %#x, type = %d\n", UAC_GET_CUR, validx, idx, cval->val_type); + + if (val) + *val = 0; + return filter_error(cval, ret); } -- cgit From d19a7af73b5ecaac8168712d18be72b9db166768 Mon Sep 17 00:00:00 2001 From: "J. Bruce Fields" Date: Tue, 18 Jan 2022 17:00:51 -0500 Subject: lockd: fix failure to cleanup client locks In my testing, we're sometimes hitting the request->fl_flags & FL_EXISTS case in posix_lock_inode, presumably just by random luck since we're not actually initializing fl_flags here. This probably didn't matter before commit 7f024fcd5c97 ("Keep read and write fds with each nlm_file") since we wouldn't previously unlock unless we knew there were locks. But now it causes lockd to give up on removing more locks. We could just initialize fl_flags, but really it seems dubious to be calling vfs_lock_file with random values in some of the fields. Fixes: 7f024fcd5c97 ("Keep read and write fds with each nlm_file") Signed-off-by: J. 
Bruce Fields [ cel: fixed checkpatch.pl nit ] Signed-off-by: Chuck Lever --- fs/lockd/svcsubs.c | 1 + 1 file changed, 1 insertion(+) diff --git a/fs/lockd/svcsubs.c b/fs/lockd/svcsubs.c index 54c2e42130ca..0a22a2faf552 100644 --- a/fs/lockd/svcsubs.c +++ b/fs/lockd/svcsubs.c @@ -180,6 +180,7 @@ static int nlm_unlock_files(struct nlm_file *file) { struct file_lock lock; + locks_init_lock(&lock); lock.fl_type = F_UNLCK; lock.fl_start = 0; lock.fl_end = OFFSET_MAX; -- cgit From b07f413732549e5a96e891411fbb5980f2d8e5a1 Mon Sep 17 00:00:00 2001 From: Pablo Neira Ayuso Date: Thu, 27 Jan 2022 13:40:38 +0100 Subject: netfilter: nf_tables: remove assignment with no effect in chain blob builder cppcheck possible warnings: >> net/netfilter/nf_tables_api.c:2014:2: warning: Assignment of function parameter has no effect outside the function. Did you forget dereferencing it? [uselessAssignmentPtrArg] ptr += offsetof(struct nft_rule_dp, data); ^ Reported-by: kernel test robot Signed-off-by: Pablo Neira Ayuso --- net/netfilter/nf_tables_api.c | 1 - 1 file changed, 1 deletion(-) diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index cf454f8ca2b0..5fa16990da95 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -2011,7 +2011,6 @@ static void nft_last_rule(struct nft_rule_blob *blob, const void *ptr) prule = (struct nft_rule_dp *)ptr; prule->is_last = 1; - ptr += offsetof(struct nft_rule_dp, data); /* blob size does not include the trailer rule */ } -- cgit From dede34b2c1a88e26f8353b433e381ea355f7258d Mon Sep 17 00:00:00 2001 From: Shuah Khan Date: Wed, 26 Jan 2022 13:13:41 -0700 Subject: docs/kselftest: clarify running mainline tests on stables Update the document to clarifiy support for running mainline kselftest on stable releases and the reasons for not removing test code that can test older kernels. Signed-off-by: Shuah Khan --- Documentation/dev-tools/kselftest.rst | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/Documentation/dev-tools/kselftest.rst b/Documentation/dev-tools/kselftest.rst index dcefee707ccd..a833ecf12fbc 100644 --- a/Documentation/dev-tools/kselftest.rst +++ b/Documentation/dev-tools/kselftest.rst @@ -7,6 +7,14 @@ directory. These are intended to be small tests to exercise individual code paths in the kernel. Tests are intended to be run after building, installing and booting a kernel. +Kselftest from mainline can be run on older stable kernels. Running tests +from mainline offers the best coverage. Several test rings run mainline +kselftest suite on stable releases. The reason is that when a new test +gets added to test existing code to regression test a bug, we should be +able to run that test on an older kernel. Hence, it is important to keep +code that can still test an older kernel and make sure it skips the test +gracefully on newer releases. + You can find additional information on Kselftest framework, how to write new tests using the framework on Kselftest wiki: -- cgit From fc4eb486a59d70bd35cf1209f0e68c2d8b979193 Mon Sep 17 00:00:00 2001 From: Yang Xu Date: Thu, 27 Jan 2022 17:11:35 +0800 Subject: selftests/zram: Skip max_comp_streams interface on newer kernel Since commit 43209ea2d17a ("zram: remove max_comp_streams internals"), zram has switched to per-cpu streams. Even kernel still keep this interface for some reasons, but writing to max_comp_stream doesn't take any effect. So skip it on newer kernel ie 4.7. The code that comparing kernel version is from xfstests testsuite ext4/053. 
Signed-off-by: Yang Xu Signed-off-by: Shuah Khan --- tools/testing/selftests/zram/zram_lib.sh | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh index 6f872f266fd1..f47fc0f27e99 100755 --- a/tools/testing/selftests/zram/zram_lib.sh +++ b/tools/testing/selftests/zram/zram_lib.sh @@ -11,6 +11,9 @@ dev_mounted=-1 # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 +kernel_version=`uname -r | cut -d'.' -f1,2` +kernel_major=${kernel_version%.*} +kernel_minor=${kernel_version#*.} trap INT @@ -25,6 +28,20 @@ check_prereqs() fi } +kernel_gte() +{ + major=${1%.*} + minor=${1#*.} + + if [ $kernel_major -gt $major ]; then + return 0 + elif [[ $kernel_major -eq $major && $kernel_minor -ge $minor ]]; then + return 0 + fi + + return 1 +} + zram_cleanup() { echo "zram cleanup" @@ -86,6 +103,13 @@ zram_max_streams() { echo "set max_comp_streams to zram device(s)" + kernel_gte 4.7 + if [ $? -eq 0 ]; then + echo "The device attribute max_comp_streams was"\ + "deprecated in 4.7" + return 0 + fi + local i=0 for max_s in $zram_max_streams; do local sys_path="/sys/block/zram${i}/max_comp_streams" -- cgit From d18da7ec3719559d6e74937266d0416e6c7e0b31 Mon Sep 17 00:00:00 2001 From: Yang Xu Date: Thu, 27 Jan 2022 17:11:36 +0800 Subject: selftests/zram01.sh: Fix compression ratio calculation zram01 uses `free -m` to measure zram memory usage. The results are no sense because they are polluted by all running processes on the system. We Should only calculate the free memory delta for the current process. So use the third field of /sys/block/zram/mm_stat to measure memory usage instead. The file is available since kernel 4.1. orig_data_size(first): uncompressed size of data stored in this disk. compr_data_size(second): compressed size of data stored in this disk mem_used_total(third): the amount of memory allocated for this disk Also remove useless zram cleanup call in zram_fill_fs and so we don't need to cleanup zram twice if fails. Signed-off-by: Yang Xu Signed-off-by: Shuah Khan --- tools/testing/selftests/zram/zram01.sh | 30 ++++++++---------------------- 1 file changed, 8 insertions(+), 22 deletions(-) diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh index 114863d9fb87..e9e9eb777e2c 100755 --- a/tools/testing/selftests/zram/zram01.sh +++ b/tools/testing/selftests/zram/zram01.sh @@ -33,8 +33,6 @@ zram_algs="lzo" zram_fill_fs() { - local mem_free0=$(free -m | awk 'NR==2 {print $4}') - for i in $(seq 0 $(($dev_num - 1))); do echo "fill zram$i..." 
local b=0 @@ -45,29 +43,17 @@ zram_fill_fs() b=$(($b + 1)) done echo "zram$i can be filled with '$b' KB" - done - local mem_free1=$(free -m | awk 'NR==2 {print $4}') - local used_mem=$(($mem_free0 - $mem_free1)) + local mem_used_total=`awk '{print $3}' "/sys/block/zram$i/mm_stat"` + local v=$((100 * 1024 * $b / $mem_used_total)) + if [ "$v" -lt 100 ]; then + echo "FAIL compression ratio: 0.$v:1" + ERR_CODE=-1 + return + fi - local total_size=0 - for sm in $zram_sizes; do - local s=$(echo $sm | sed 's/M//') - total_size=$(($total_size + $s)) + echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK" done - - echo "zram used ${used_mem}M, zram disk sizes ${total_size}M" - - local v=$((100 * $total_size / $used_mem)) - - if [ "$v" -lt 100 ]; then - echo "FAIL compression ratio: 0.$v:1" - ERR_CODE=-1 - zram_cleanup - return - fi - - echo "zram compression ratio: $(echo "scale=2; $v / 100 " | bc):1: OK" } check_prereqs -- cgit From 01dabed20573804750af5c7bf8d1598a6bf7bf6e Mon Sep 17 00:00:00 2001 From: Yang Xu Date: Thu, 27 Jan 2022 17:11:37 +0800 Subject: selftests/zram: Adapt the situation that /dev/zram0 is being used If zram-generator package is installed and works, then we can not remove zram module because zram swap is being used. This case needs a clean zram environment, change this test by using hot_add/hot_remove interface. So even zram device is being used, we still can add zram device and remove them in cleanup. The two interface was introduced since kernel commit 6566d1a32bf7("zram: add dynamic device add/remove functionality") in v4.2-rc1. If kernel supports these two interface, we use hot_add/hot_remove to slove this problem, if not, just check whether zram is being used or built in, then skip it on old kernel. Signed-off-by: Yang Xu Signed-off-by: Shuah Khan --- tools/testing/selftests/zram/zram.sh | 15 +---- tools/testing/selftests/zram/zram01.sh | 3 +- tools/testing/selftests/zram/zram02.sh | 1 - tools/testing/selftests/zram/zram_lib.sh | 110 ++++++++++++++++++------------- 4 files changed, 66 insertions(+), 63 deletions(-) diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh index 232e958ec454..b0b91d9b0dc2 100755 --- a/tools/testing/selftests/zram/zram.sh +++ b/tools/testing/selftests/zram/zram.sh @@ -2,9 +2,6 @@ # SPDX-License-Identifier: GPL-2.0 TCID="zram.sh" -# Kselftest framework requirement - SKIP code is 4. -ksft_skip=4 - . ./zram_lib.sh run_zram () { @@ -18,14 +15,4 @@ echo "" check_prereqs -# check zram module exists -MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko -if [ -f $MODULE_PATH ]; then - run_zram -elif [ -b /dev/zram0 ]; then - run_zram -else - echo "$TCID : No zram.ko module or /dev/zram0 device file not found" - echo "$TCID : CONFIG_ZRAM is not set" - exit $ksft_skip -fi +run_zram diff --git a/tools/testing/selftests/zram/zram01.sh b/tools/testing/selftests/zram/zram01.sh index e9e9eb777e2c..8f4affe34f3e 100755 --- a/tools/testing/selftests/zram/zram01.sh +++ b/tools/testing/selftests/zram/zram01.sh @@ -33,7 +33,7 @@ zram_algs="lzo" zram_fill_fs() { - for i in $(seq 0 $(($dev_num - 1))); do + for i in $(seq $dev_start $dev_end); do echo "fill zram$i..." 
local b=0 while [ true ]; do @@ -67,7 +67,6 @@ zram_mount zram_fill_fs zram_cleanup -zram_unload if [ $ERR_CODE -ne 0 ]; then echo "$TCID : [FAIL]" diff --git a/tools/testing/selftests/zram/zram02.sh b/tools/testing/selftests/zram/zram02.sh index e83b404807c0..2418b0c4ed13 100755 --- a/tools/testing/selftests/zram/zram02.sh +++ b/tools/testing/selftests/zram/zram02.sh @@ -36,7 +36,6 @@ zram_set_memlimit zram_makeswap zram_swapoff zram_cleanup -zram_unload if [ $ERR_CODE -ne 0 ]; then echo "$TCID : [FAIL]" diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh index f47fc0f27e99..21ec1966de76 100755 --- a/tools/testing/selftests/zram/zram_lib.sh +++ b/tools/testing/selftests/zram/zram_lib.sh @@ -5,10 +5,12 @@ # Author: Alexey Kodanev # Modified: Naresh Kamboju -MODULE=0 dev_makeswap=-1 dev_mounted=-1 - +dev_start=0 +dev_end=-1 +module_load=-1 +sys_control=-1 # Kselftest framework requirement - SKIP code is 4. ksft_skip=4 kernel_version=`uname -r | cut -d'.' -f1,2` @@ -46,57 +48,72 @@ zram_cleanup() { echo "zram cleanup" local i= - for i in $(seq 0 $dev_makeswap); do + for i in $(seq $dev_start $dev_makeswap); do swapoff /dev/zram$i done - for i in $(seq 0 $dev_mounted); do + for i in $(seq $dev_start $dev_mounted); do umount /dev/zram$i done - for i in $(seq 0 $(($dev_num - 1))); do + for i in $(seq $dev_start $dev_end); do echo 1 > /sys/block/zram${i}/reset rm -rf zram$i done -} + if [ $sys_control -eq 1 ]; then + for i in $(seq $dev_start $dev_end); do + echo $i > /sys/class/zram-control/hot_remove + done + fi -zram_unload() -{ - if [ $MODULE -ne 0 ] ; then - echo "zram rmmod zram" + if [ $module_load -eq 1 ]; then rmmod zram > /dev/null 2>&1 fi } zram_load() { - # check zram module exists - MODULE_PATH=/lib/modules/`uname -r`/kernel/drivers/block/zram/zram.ko - if [ -f $MODULE_PATH ]; then - MODULE=1 - echo "create '$dev_num' zram device(s)" - modprobe zram num_devices=$dev_num - if [ $? -ne 0 ]; then - echo "failed to insert zram module" - exit 1 - fi - - dev_num_created=$(ls /dev/zram* | wc -w) + echo "create '$dev_num' zram device(s)" + + # zram module loaded, new kernel + if [ -d "/sys/class/zram-control" ]; then + echo "zram modules already loaded, kernel supports" \ + "zram-control interface" + dev_start=$(ls /dev/zram* | wc -w) + dev_end=$(($dev_start + $dev_num - 1)) + sys_control=1 + + for i in $(seq $dev_start $dev_end); do + cat /sys/class/zram-control/hot_add > /dev/null + done + + echo "all zram devices (/dev/zram$dev_start~$dev_end" \ + "successfully created" + return 0 + fi - if [ "$dev_num_created" -ne "$dev_num" ]; then - echo "unexpected num of devices: $dev_num_created" - ERR_CODE=-1 + # detect old kernel or built-in + modprobe zram num_devices=$dev_num + if [ ! -d "/sys/class/zram-control" ]; then + if grep -q '^zram' /proc/modules; then + rmmod zram > /dev/null 2>&1 + if [ $? 
-ne 0 ]; then + echo "zram module is being used on old kernel" \ + "without zram-control interface" + exit $ksft_skip + fi else - echo "zram load module successful" + echo "test needs CONFIG_ZRAM=m on old kernel without" \ + "zram-control interface" + exit $ksft_skip fi - elif [ -b /dev/zram0 ]; then - echo "/dev/zram0 device file found: OK" - else - echo "ERROR: No zram.ko module or no /dev/zram0 device found" - echo "$TCID : CONFIG_ZRAM is not set" - exit 1 + modprobe zram num_devices=$dev_num fi + + module_load=1 + dev_end=$(($dev_num - 1)) + echo "all zram devices (/dev/zram0~$dev_end) successfully created" } zram_max_streams() @@ -110,7 +127,7 @@ zram_max_streams() return 0 fi - local i=0 + local i=$dev_start for max_s in $zram_max_streams; do local sys_path="/sys/block/zram${i}/max_comp_streams" echo $max_s > $sys_path || \ @@ -122,7 +139,7 @@ zram_max_streams() echo "FAIL can't set max_streams '$max_s', get $max_stream" i=$(($i + 1)) - echo "$sys_path = '$max_streams' ($i/$dev_num)" + echo "$sys_path = '$max_streams'" done echo "zram max streams: OK" @@ -132,15 +149,16 @@ zram_compress_alg() { echo "test that we can set compression algorithm" - local algs=$(cat /sys/block/zram0/comp_algorithm) + local i=$dev_start + local algs=$(cat /sys/block/zram${i}/comp_algorithm) echo "supported algs: $algs" - local i=0 + for alg in $zram_algs; do local sys_path="/sys/block/zram${i}/comp_algorithm" echo "$alg" > $sys_path || \ echo "FAIL can't set '$alg' to $sys_path" i=$(($i + 1)) - echo "$sys_path = '$alg' ($i/$dev_num)" + echo "$sys_path = '$alg'" done echo "zram set compression algorithm: OK" @@ -149,14 +167,14 @@ zram_compress_alg() zram_set_disksizes() { echo "set disk size to zram device(s)" - local i=0 + local i=$dev_start for ds in $zram_sizes; do local sys_path="/sys/block/zram${i}/disksize" echo "$ds" > $sys_path || \ echo "FAIL can't set '$ds' to $sys_path" i=$(($i + 1)) - echo "$sys_path = '$ds' ($i/$dev_num)" + echo "$sys_path = '$ds'" done echo "zram set disksizes: OK" @@ -166,14 +184,14 @@ zram_set_memlimit() { echo "set memory limit to zram device(s)" - local i=0 + local i=$dev_start for ds in $zram_mem_limits; do local sys_path="/sys/block/zram${i}/mem_limit" echo "$ds" > $sys_path || \ echo "FAIL can't set '$ds' to $sys_path" i=$(($i + 1)) - echo "$sys_path = '$ds' ($i/$dev_num)" + echo "$sys_path = '$ds'" done echo "zram set memory limit: OK" @@ -182,8 +200,8 @@ zram_set_memlimit() zram_makeswap() { echo "make swap with zram device(s)" - local i=0 - for i in $(seq 0 $(($dev_num - 1))); do + local i=$dev_start + for i in $(seq $dev_start $dev_end); do mkswap /dev/zram$i > err.log 2>&1 if [ $? -ne 0 ]; then cat err.log @@ -206,7 +224,7 @@ zram_makeswap() zram_swapoff() { local i= - for i in $(seq 0 $dev_makeswap); do + for i in $(seq $dev_start $dev_end); do swapoff /dev/zram$i > err.log 2>&1 if [ $? 
-ne 0 ]; then cat err.log @@ -220,7 +238,7 @@ zram_swapoff() zram_makefs() { - local i=0 + local i=$dev_start for fs in $zram_filesystems; do # if requested fs not supported default it to ext2 which mkfs.$fs > /dev/null 2>&1 || fs=ext2 @@ -239,7 +257,7 @@ zram_makefs() zram_mount() { local i=0 - for i in $(seq 0 $(($dev_num - 1))); do + for i in $(seq $dev_start $dev_end); do echo "mount /dev/zram$i" mkdir zram$i mount /dev/zram$i zram$i > /dev/null || \ -- cgit From 908a26e139e8cf21093acc56d8e90ddad2ad1eff Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 27 Jan 2022 21:33:45 +0500 Subject: selftests/exec: Remove pipe from TEST_GEN_FILES pipe named FIFO special file is being created in execveat.c to perform some tests. Makefile doesn't need to do anything with the pipe. When it isn't found, Makefile generates the following build error: make: *** No rule to make target '../tools/testing/selftests/exec/pipe', needed by 'all'. Stop. pipe is created and removed during test run-time. Amended change log to add pipe remove info: Shuah Khan Fixes: 61016db15b8e ("selftests/exec: Verify execve of non-regular files fail") Signed-off-by: Muhammad Usama Anjum Reviewed-by: Shuah Khan Signed-off-by: Shuah Khan --- tools/testing/selftests/exec/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/testing/selftests/exec/Makefile b/tools/testing/selftests/exec/Makefile index dd61118df66e..12c5e27d32c1 100644 --- a/tools/testing/selftests/exec/Makefile +++ b/tools/testing/selftests/exec/Makefile @@ -5,7 +5,7 @@ CFLAGS += -D_GNU_SOURCE TEST_PROGS := binfmt_script non-regular TEST_GEN_PROGS := execveat load_address_4096 load_address_2097152 load_address_16777216 -TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir pipe +TEST_GEN_FILES := execveat.symlink execveat.denatured script subdir # Makefile is a run-time dependency, since it's accessed by the execveat test TEST_FILES := Makefile -- cgit From a7b23fd90c804e79d1edb478b42935848bcd7e36 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 17 Dec 2021 15:46:11 +0100 Subject: drm/mgag200: Replace module-init boiler-plate code with DRM helpers Remove custom mgag200_init() and mgag200_exit() functions and initialize the module with DRM_module helpers. 
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217144615.32733-7-tzimmermann@suse.de --- drivers/gpu/drm/mgag200/mgag200_drv.c | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c index 740108a006ba..217844d71ab5 100644 --- a/drivers/gpu/drm/mgag200/mgag200_drv.c +++ b/drivers/gpu/drm/mgag200/mgag200_drv.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include "mgag200_drv.h" @@ -375,24 +376,7 @@ static struct pci_driver mgag200_pci_driver = { .remove = mgag200_pci_remove, }; -static int __init mgag200_init(void) -{ - if (drm_firmware_drivers_only() && mgag200_modeset == -1) - return -EINVAL; - - if (mgag200_modeset == 0) - return -EINVAL; - - return pci_register_driver(&mgag200_pci_driver); -} - -static void __exit mgag200_exit(void) -{ - pci_unregister_driver(&mgag200_pci_driver); -} - -module_init(mgag200_init); -module_exit(mgag200_exit); +drm_module_pci_driver_if_modeset(mgag200_pci_driver, mgag200_modeset); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); -- cgit From 66755b4871782cb95e3584c9e88b6ed6c52c9022 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 17 Dec 2021 15:46:12 +0100 Subject: drm/qxl: Move ioctl array next to its only user Move the array qxl_ioctl to qxl_drv.c and initialize the num_ioctls field of struct drm_driver at runtime. Replaces the current fragile ioctl setup and allows for generating the module init/exit code. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217144615.32733-8-tzimmermann@suse.de --- drivers/gpu/drm/qxl/qxl_drv.c | 12 +++++++++++- drivers/gpu/drm/qxl/qxl_drv.h | 13 +++++++++---- drivers/gpu/drm/qxl/qxl_ioctl.c | 41 +++++++---------------------------------- 3 files changed, 27 insertions(+), 39 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index e4b16421500b..323671e9cfc8 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -269,6 +269,16 @@ static struct pci_driver qxl_pci_driver = { .driver.pm = &qxl_pm_ops, }; +static const struct drm_ioctl_desc qxl_ioctls[] = { + DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, DRM_AUTH), + DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, DRM_AUTH), +}; + static struct drm_driver qxl_driver = { .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, @@ -282,6 +292,7 @@ static struct drm_driver qxl_driver = { .gem_prime_import_sg_table = qxl_gem_prime_import_sg_table, .fops = &qxl_fops, .ioctls = qxl_ioctls, + .num_ioctls = ARRAY_SIZE(qxl_ioctls), .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, @@ -299,7 +310,6 @@ static int __init qxl_init(void) if (qxl_modeset == 0) return -EINVAL; - qxl_driver.num_ioctls = qxl_max_ioctls; return pci_register_driver(&qxl_pci_driver); } diff --git a/drivers/gpu/drm/qxl/qxl_drv.h b/drivers/gpu/drm/qxl/qxl_drv.h index 
359266d9e860..29641ceaab7d 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.h +++ b/drivers/gpu/drm/qxl/qxl_drv.h @@ -65,7 +65,6 @@ struct dma_buf_map; #define QXL_DEBUGFS_MAX_COMPONENTS 32 extern int qxl_num_crtc; -extern int qxl_max_ioctls; #define QXL_INTERRUPT_MASK (\ QXL_INTERRUPT_DISPLAY |\ @@ -261,9 +260,6 @@ struct qxl_device { int qxl_debugfs_fence_init(struct qxl_device *rdev); -extern const struct drm_ioctl_desc qxl_ioctls[]; -extern int qxl_max_ioctl; - int qxl_device_init(struct qxl_device *qdev, struct pci_dev *pdev); void qxl_device_fini(struct qxl_device *qdev); @@ -457,4 +453,13 @@ struct qxl_drv_surface * qxl_surface_lookup(struct drm_device *dev, int surface_id); void qxl_surface_evict(struct qxl_device *qdev, struct qxl_bo *surf, bool freeing); +/* qxl_ioctl.c */ +int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file); +int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv); +int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file); + #endif diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c index 38aabcbe2238..30f58b21372a 100644 --- a/drivers/gpu/drm/qxl/qxl_ioctl.c +++ b/drivers/gpu/drm/qxl/qxl_ioctl.c @@ -33,8 +33,7 @@ * TODO: allocating a new gem(in qxl_bo) for each request. * This is wasteful since bo's are page aligned. */ -static int qxl_alloc_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int qxl_alloc_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc *qxl_alloc = data; @@ -61,8 +60,7 @@ static int qxl_alloc_ioctl(struct drm_device *dev, void *data, return 0; } -static int qxl_map_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int qxl_map_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_map *qxl_map = data; @@ -272,8 +270,7 @@ out_free_reloc: return ret; } -static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_execbuffer *execbuffer = data; @@ -297,8 +294,7 @@ static int qxl_execbuffer_ioctl(struct drm_device *dev, void *data, return 0; } -static int qxl_update_area_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) +int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_update_area *update_area = data; @@ -347,8 +343,7 @@ out: return ret; } -static int qxl_getparam_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_getparam *param = data; @@ -366,8 +361,7 @@ static int qxl_getparam_ioctl(struct drm_device *dev, void *data, return 0; } -static int qxl_clientcap_ioctl(struct drm_device *dev, void *data, - struct drm_file *file_priv) +int 
qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = to_qxl(dev); struct pci_dev *pdev = to_pci_dev(dev->dev); @@ -388,8 +382,7 @@ static int qxl_clientcap_ioctl(struct drm_device *dev, void *data, return -ENOSYS; } -static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, - struct drm_file *file) +int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct qxl_device *qdev = to_qxl(dev); struct drm_qxl_alloc_surf *param = data; @@ -422,23 +415,3 @@ static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, param->handle = handle; return ret; } - -const struct drm_ioctl_desc qxl_ioctls[] = { - DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH), - - DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, - DRM_AUTH), - DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, - DRM_AUTH), - DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, - DRM_AUTH), - DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, - DRM_AUTH), - - DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, - DRM_AUTH), -}; - -int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls); -- cgit From 10dcc8317f6063806ce1d34235af23da5e2fdd7a Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 17 Dec 2021 15:46:13 +0100 Subject: drm/qxl: Replace module-init boiler-plate code with DRM helpers Remove custom qxl_init() and qxl_exit() functions and initialize the module with DRM module helpers. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217144615.32733-9-tzimmermann@suse.de --- drivers/gpu/drm/qxl/qxl_drv.c | 19 ++----------------- 1 file changed, 2 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c index 323671e9cfc8..1cb6f0c224bb 100644 --- a/drivers/gpu/drm/qxl/qxl_drv.c +++ b/drivers/gpu/drm/qxl/qxl_drv.c @@ -39,6 +39,7 @@ #include #include #include +#include #include #include #include @@ -303,23 +304,7 @@ static struct drm_driver qxl_driver = { .release = qxl_drm_release, }; -static int __init qxl_init(void) -{ - if (drm_firmware_drivers_only() && qxl_modeset == -1) - return -EINVAL; - - if (qxl_modeset == 0) - return -EINVAL; - return pci_register_driver(&qxl_pci_driver); -} - -static void __exit qxl_exit(void) -{ - pci_unregister_driver(&qxl_pci_driver); -} - -module_init(qxl_init); -module_exit(qxl_exit); +drm_module_pci_driver_if_modeset(qxl_pci_driver, qxl_modeset); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); -- cgit From ccecfd013a39d8b8ea837e90f7f907e4ed5abe17 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 17 Dec 2021 15:46:14 +0100 Subject: drm/vboxvideo: Replace module-init boiler-plate code with DRM helpers Remove custom vbox_init() and vbox_exit() functions and initialize the module with DRM module helpers. 
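As a usage sketch (the driver name and parameter plumbing here are hypothetical, not taken from this patch), the helper is typically paired with the driver's existing modeset module parameter:

#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_module.h>

static int example_modeset = -1;
module_param_named(modeset, example_modeset, int, 0400);
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");

static struct pci_driver example_pci_driver = {
	.name = "example-drm",
	/* .id_table, .probe, .remove, .driver.pm as usual */
};

/* Registers on module init unless modesetting was disabled; unregisters on exit. */
drm_module_pci_driver_if_modeset(example_pci_driver, example_modeset);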
Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217144615.32733-10-tzimmermann@suse.de --- drivers/gpu/drm/vboxvideo/vbox_drv.c | 20 ++------------------ 1 file changed, 2 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/vboxvideo/vbox_drv.c b/drivers/gpu/drm/vboxvideo/vbox_drv.c index f35d9e44c6b7..f4f2bd79a7cb 100644 --- a/drivers/gpu/drm/vboxvideo/vbox_drv.c +++ b/drivers/gpu/drm/vboxvideo/vbox_drv.c @@ -18,6 +18,7 @@ #include #include #include +#include #include "vbox_drv.h" @@ -190,24 +191,7 @@ static const struct drm_driver driver = { DRM_GEM_VRAM_DRIVER, }; -static int __init vbox_init(void) -{ - if (drm_firmware_drivers_only() && vbox_modeset == -1) - return -EINVAL; - - if (vbox_modeset == 0) - return -EINVAL; - - return pci_register_driver(&vbox_pci_driver); -} - -static void __exit vbox_exit(void) -{ - pci_unregister_driver(&vbox_pci_driver); -} - -module_init(vbox_init); -module_exit(vbox_exit); +drm_module_pci_driver_if_modeset(vbox_pci_driver, vbox_modeset); MODULE_AUTHOR("Oracle Corporation"); MODULE_AUTHOR("Hans de Goede "); -- cgit From df8d1d0abd9439479ae1a0d8812ed57debe48a86 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Fri, 17 Dec 2021 15:46:15 +0100 Subject: drm/vmwgfx: Replace module-init boiler-plate code with DRM helpers Remove custom vmwgfx_init() and vmwgfx_exit() functions and initialize the module with DRM_module helpers. Signed-off-by: Thomas Zimmermann Reviewed-by: Javier Martinez Canillas Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217144615.32733-11-tzimmermann@suse.de --- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 24 +++--------------------- 1 file changed, 3 insertions(+), 21 deletions(-) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index fe36efdb7ff5..26eb5478394a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -32,9 +32,10 @@ #include #include +#include #include +#include #include -#include #include #include #include @@ -1643,26 +1644,7 @@ out_error: return ret; } -static int __init vmwgfx_init(void) -{ - int ret; - - if (drm_firmware_drivers_only()) - return -EINVAL; - - ret = pci_register_driver(&vmw_pci_driver); - if (ret) - DRM_ERROR("Failed initializing DRM.\n"); - return ret; -} - -static void __exit vmwgfx_exit(void) -{ - pci_unregister_driver(&vmw_pci_driver); -} - -module_init(vmwgfx_init); -module_exit(vmwgfx_exit); +drm_module_pci_driver(vmw_pci_driver); MODULE_AUTHOR("VMware Inc. and others"); MODULE_DESCRIPTION("Standalone drm driver for the VMware SVGA device"); -- cgit From 94afe983b5aa56a841f208a6b455691a44eafc7e Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:21 +0100 Subject: drm/aspeed: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. 
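The platform-bus flavour used by the following patches looks like this in a driver (hypothetical names); the trailing comment sketches, as an approximation only, what the DRM-specific init handler adds over plain module_platform_driver().

#include <linux/platform_device.h>

#include <drm/drm_drv.h>
#include <drm/drm_module.h>

static struct platform_driver example_platform_driver = {
	.driver = {
		.name = "example-kms",
	},
	/* .probe / .remove as usual */
};

drm_module_platform_driver(example_platform_driver);

/*
 * Approximately equivalent to the open-coded form below, plus the
 * matching platform_driver_unregister() on module exit:
 *
 *	static int __init example_init(void)
 *	{
 *		if (drm_firmware_drivers_only())
 *			return -ENODEV;
 *		return platform_driver_register(&example_platform_driver);
 *	}
 *	module_init(example_init);
 */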
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-7-javierm@redhat.com --- drivers/gpu/drm/aspeed/aspeed_gfx_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c index 65f172807a0d..13f496473b9e 100644 --- a/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c +++ b/drivers/gpu/drm/aspeed/aspeed_gfx_drv.c @@ -20,6 +20,7 @@ #include #include #include +#include #include #include #include @@ -359,7 +360,7 @@ static struct platform_driver aspeed_gfx_platform_driver = { }, }; -module_platform_driver(aspeed_gfx_platform_driver); +drm_module_platform_driver(aspeed_gfx_platform_driver); MODULE_AUTHOR("Joel Stanley "); MODULE_DESCRIPTION("ASPEED BMC DRM/KMS driver"); -- cgit From d5410d6974acd0aaea6742ecd8e3f7bdedbddf4b Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:22 +0100 Subject: drm/atmel-hlcdc: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-8-javierm@redhat.com --- drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c index 1656d27b78b6..651e3c109360 100644 --- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c +++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -833,7 +834,7 @@ static struct platform_driver atmel_hlcdc_dc_platform_driver = { .of_match_table = atmel_hlcdc_dc_of_match, }, }; -module_platform_driver(atmel_hlcdc_dc_platform_driver); +drm_module_platform_driver(atmel_hlcdc_dc_platform_driver); MODULE_AUTHOR("Jean-Jacques Hiblot "); MODULE_AUTHOR("Boris Brezillon "); -- cgit From f4b5091def94d95bdae0ff092a3e4a9d77cb03f4 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:23 +0100 Subject: drm/fsl-dcu: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. 
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-9-javierm@redhat.com --- drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c index 660fe573db96..7a503bf08d0f 100644 --- a/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c +++ b/drivers/gpu/drm/fsl-dcu/fsl_dcu_drm_drv.c @@ -24,6 +24,7 @@ #include #include #include +#include #include #include @@ -368,7 +369,7 @@ static struct platform_driver fsl_dcu_drm_platform_driver = { }, }; -module_platform_driver(fsl_dcu_drm_platform_driver); +drm_module_platform_driver(fsl_dcu_drm_platform_driver); MODULE_DESCRIPTION("Freescale DCU DRM Driver"); MODULE_LICENSE("GPL"); -- cgit From fdb5713c7d6f6d60bf23596eafb1ada154869ae9 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:24 +0100 Subject: drm/hisilicon/kirin: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-10-javierm@redhat.com --- drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c index 98ae9a48f3fe..3cf057269f2a 100644 --- a/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c +++ b/drivers/gpu/drm/hisilicon/kirin/kirin_drm_drv.c @@ -23,6 +23,7 @@ #include #include #include +#include #include #include #include @@ -307,7 +308,7 @@ static struct platform_driver kirin_drm_platform_driver = { }, }; -module_platform_driver(kirin_drm_platform_driver); +drm_module_platform_driver(kirin_drm_platform_driver); MODULE_AUTHOR("Xinliang Liu "); MODULE_AUTHOR("Xinliang Liu "); -- cgit From 1439e3bea7b1201a9461ffbff2a9d59f3e65dc1e Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:26 +0100 Subject: drm/kmb: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-12-javierm@redhat.com --- drivers/gpu/drm/kmb/kmb_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/kmb/kmb_drv.c b/drivers/gpu/drm/kmb/kmb_drv.c index ed2424350773..76fef0880504 100644 --- a/drivers/gpu/drm/kmb/kmb_drv.c +++ b/drivers/gpu/drm/kmb/kmb_drv.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -628,7 +629,7 @@ static struct platform_driver kmb_platform_driver = { }, }; -module_platform_driver(kmb_platform_driver); +drm_module_platform_driver(kmb_platform_driver); MODULE_AUTHOR("Intel Corporation"); MODULE_DESCRIPTION("Keembay Display driver"); -- cgit From a9b19b0d707bf93d42e13c22d44eb5132d7cdbab Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:27 +0100 Subject: drm/meson: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. 
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-13-javierm@redhat.com --- drivers/gpu/drm/meson/meson_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/meson/meson_drv.c b/drivers/gpu/drm/meson/meson_drv.c index 26aeaf0ab86e..93a7a033a3e8 100644 --- a/drivers/gpu/drm/meson/meson_drv.c +++ b/drivers/gpu/drm/meson/meson_drv.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -542,7 +543,7 @@ static struct platform_driver meson_drm_platform_driver = { }, }; -module_platform_driver(meson_drm_platform_driver); +drm_module_platform_driver(meson_drm_platform_driver); MODULE_AUTHOR("Jasper St. Pierre "); MODULE_AUTHOR("Neil Armstrong "); -- cgit From d405054dc72f57b7e248779eed671a179d821886 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:28 +0100 Subject: drm: mxsfb: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-14-javierm@redhat.com --- drivers/gpu/drm/mxsfb/mxsfb_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/mxsfb/mxsfb_drv.c b/drivers/gpu/drm/mxsfb/mxsfb_drv.c index 375f26d4a417..a7b49ba6b8e2 100644 --- a/drivers/gpu/drm/mxsfb/mxsfb_drv.c +++ b/drivers/gpu/drm/mxsfb/mxsfb_drv.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -419,7 +420,7 @@ static struct platform_driver mxsfb_platform_driver = { }, }; -module_platform_driver(mxsfb_platform_driver); +drm_module_platform_driver(mxsfb_platform_driver); MODULE_AUTHOR("Marek Vasut "); MODULE_DESCRIPTION("Freescale MXS DRM/KMS driver"); -- cgit From 233a32534513567c0daaecb0b92e8e6e18752060 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:29 +0100 Subject: drm/shmobile: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-15-javierm@redhat.com --- drivers/gpu/drm/shmobile/shmob_drm_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/shmobile/shmob_drm_drv.c b/drivers/gpu/drm/shmobile/shmob_drm_drv.c index 80078a9fd7f6..731cbad7520f 100644 --- a/drivers/gpu/drm/shmobile/shmob_drm_drv.c +++ b/drivers/gpu/drm/shmobile/shmob_drm_drv.c @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -295,7 +296,7 @@ static struct platform_driver shmob_drm_platform_driver = { }, }; -module_platform_driver(shmob_drm_platform_driver); +drm_module_platform_driver(shmob_drm_platform_driver); MODULE_AUTHOR("Laurent Pinchart "); MODULE_DESCRIPTION("Renesas SH Mobile DRM Driver"); -- cgit From ba497a551a4907f6a9a0ca70e203f2fbe223f258 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:30 +0100 Subject: drm/stm: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. 
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-16-javierm@redhat.com --- drivers/gpu/drm/stm/drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index 9f441aadf2d5..0da7cce2a1a2 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -241,7 +242,7 @@ static struct platform_driver stm_drm_platform_driver = { }, }; -module_platform_driver(stm_drm_platform_driver); +drm_module_platform_driver(stm_drm_platform_driver); MODULE_AUTHOR("Philippe Cornu "); MODULE_AUTHOR("Yannick Fertre "); -- cgit From ab41e6aa9128ef5b55e109e73cb3edaa5ca02b0d Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:31 +0100 Subject: drm/sun4i: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-17-javierm@redhat.com --- drivers/gpu/drm/sun4i/sun4i_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c index b630614b3d72..a3fd441dd9ad 100644 --- a/drivers/gpu/drm/sun4i/sun4i_drv.c +++ b/drivers/gpu/drm/sun4i/sun4i_drv.c @@ -19,6 +19,7 @@ #include #include #include +#include #include #include #include @@ -441,7 +442,7 @@ static struct platform_driver sun4i_drv_platform_driver = { .pm = &sun4i_drv_drm_pm_ops, }, }; -module_platform_driver(sun4i_drv_platform_driver); +drm_module_platform_driver(sun4i_drv_platform_driver); MODULE_AUTHOR("Boris Brezillon "); MODULE_AUTHOR("Maxime Ripard "); -- cgit From d9c7853593a44cbe7d1c3a63981ae1b8186293d8 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:32 +0100 Subject: drm/tidss: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-18-javierm@redhat.com --- drivers/gpu/drm/tidss/tidss_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tidss/tidss_drv.c b/drivers/gpu/drm/tidss/tidss_drv.c index 7c784e90e40e..04cfff89ee51 100644 --- a/drivers/gpu/drm/tidss/tidss_drv.c +++ b/drivers/gpu/drm/tidss/tidss_drv.c @@ -17,6 +17,7 @@ #include #include #include +#include #include #include "tidss_dispc.h" @@ -251,7 +252,7 @@ static struct platform_driver tidss_platform_driver = { }, }; -module_platform_driver(tidss_platform_driver); +drm_module_platform_driver(tidss_platform_driver); MODULE_AUTHOR("Tomi Valkeinen "); MODULE_DESCRIPTION("TI Keystone DSS Driver"); -- cgit From 8acd15a0c8c647ed4cb07c53c3ea4a8768c974ce Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:33 +0100 Subject: drm/arc: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. 
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-19-javierm@redhat.com --- drivers/gpu/drm/tiny/arcpgu.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tiny/arcpgu.c b/drivers/gpu/drm/tiny/arcpgu.c index f8531c50a072..f0fa3b15c341 100644 --- a/drivers/gpu/drm/tiny/arcpgu.c +++ b/drivers/gpu/drm/tiny/arcpgu.c @@ -15,6 +15,7 @@ #include #include #include +#include #include #include #include @@ -427,7 +428,7 @@ static struct platform_driver arcpgu_platform_driver = { }, }; -module_platform_driver(arcpgu_platform_driver); +drm_module_platform_driver(arcpgu_platform_driver); MODULE_AUTHOR("Carlos Palminha "); MODULE_DESCRIPTION("ARC PGU DRM driver"); -- cgit From 8a843011d196dc1da826144266b833a84208c0d3 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:34 +0100 Subject: drm/tve200: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. Signed-off-by: Javier Martinez Canillas Reviewed-by: Linus Walleij Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-20-javierm@redhat.com --- drivers/gpu/drm/tve200/tve200_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/tve200/tve200_drv.c b/drivers/gpu/drm/tve200/tve200_drv.c index 7fa71c8bb828..6d9d2921abf4 100644 --- a/drivers/gpu/drm/tve200/tve200_drv.c +++ b/drivers/gpu/drm/tve200/tve200_drv.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include #include @@ -266,7 +267,7 @@ static struct platform_driver tve200_driver = { .probe = tve200_probe, .remove = tve200_remove, }; -module_platform_driver(tve200_driver); +drm_module_platform_driver(tve200_driver); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR("Linus Walleij "); -- cgit From fad5453444fd95fb63e3cb6033c2489b6d9f4f14 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:35 +0100 Subject: drm/xlnx: Use drm_module_platform_driver() to register the driver The macro calls to a DRM specific platform driver init handler that checks whether the driver is allowed to be registered or not. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-21-javierm@redhat.com --- drivers/gpu/drm/xlnx/zynqmp_dpsub.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c index ac37053412a1..824b510e337b 100644 --- a/drivers/gpu/drm/xlnx/zynqmp_dpsub.c +++ b/drivers/gpu/drm/xlnx/zynqmp_dpsub.c @@ -25,6 +25,7 @@ #include #include #include +#include #include #include @@ -286,7 +287,7 @@ static struct platform_driver zynqmp_dpsub_driver = { }, }; -module_platform_driver(zynqmp_dpsub_driver); +drm_module_platform_driver(zynqmp_dpsub_driver); MODULE_AUTHOR("Xilinx, Inc."); MODULE_DESCRIPTION("ZynqMP DP Subsystem Driver"); -- cgit From d593767e6b1d50909ce398b9b07f16eec9d84954 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:36 +0100 Subject: drm/armada: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. 
DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-22-javierm@redhat.com --- drivers/gpu/drm/armada/armada_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c index 8e3e98f13db4..4f9b0a9f13e3 100644 --- a/drivers/gpu/drm/armada/armada_drv.c +++ b/drivers/gpu/drm/armada/armada_drv.c @@ -273,6 +273,9 @@ static int __init armada_drm_init(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + ret = platform_driver_register(&armada_lcd_platform_driver); if (ret) return ret; -- cgit From 87a628abd8b038e414dab7de379fc2ebd0971372 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:37 +0100 Subject: drm/exynos: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-23-javierm@redhat.com --- drivers/gpu/drm/exynos/exynos_drm_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c index d8f1cf4d6b69..f9f10413a4f2 100644 --- a/drivers/gpu/drm/exynos/exynos_drm_drv.c +++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c @@ -464,6 +464,9 @@ static int exynos_drm_init(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + ret = exynos_drm_register_devices(); if (ret) return ret; -- cgit From ba4a28bb168aefa98ee422be8e04a0c964256f95 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:38 +0100 Subject: drm/gma500: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. 
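Across the rest of the series the change is the same early check. For drivers that keep a hand-written init function because they register several sub-drivers (as the armada and exynos patches above do), the pattern looks roughly like the sketch below; all names are hypothetical.

static struct platform_driver example_lcd_driver;	/* sub-drivers, definitions omitted */
static struct platform_driver example_kms_driver;

static int __init example_drm_init(void)
{
	int ret;

	/* Respect the nomodeset kernel parameter before registering anything. */
	if (drm_firmware_drivers_only())
		return -ENODEV;

	ret = platform_driver_register(&example_lcd_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&example_kms_driver);
	if (ret)
		platform_driver_unregister(&example_lcd_driver);

	return ret;
}
module_init(example_drm_init);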
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-24-javierm@redhat.com --- drivers/gpu/drm/gma500/psb_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/gma500/psb_drv.c b/drivers/gpu/drm/gma500/psb_drv.c index 65cf1c79dd7c..eeb681be9c95 100644 --- a/drivers/gpu/drm/gma500/psb_drv.c +++ b/drivers/gpu/drm/gma500/psb_drv.c @@ -538,6 +538,9 @@ static struct pci_driver psb_pci_driver = { static int __init psb_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + return pci_register_driver(&psb_pci_driver); } -- cgit From 5f825973b491a457c7233e808ecf64726abbeb86 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:39 +0100 Subject: drm/hyperv: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Deepak Rawat Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-25-javierm@redhat.com --- drivers/gpu/drm/hyperv/hyperv_drm_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c index 00e53de4812b..4a8941fa0815 100644 --- a/drivers/gpu/drm/hyperv/hyperv_drm_drv.c +++ b/drivers/gpu/drm/hyperv/hyperv_drm_drv.c @@ -305,6 +305,9 @@ static int __init hyperv_init(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + ret = pci_register_driver(&hyperv_pci_driver); if (ret != 0) return ret; -- cgit From 56dcbfd934ad57a46f2039c8c466a3c56293480a Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:40 +0100 Subject: drm/imx: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. 
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-26-javierm@redhat.com --- drivers/gpu/drm/imx/imx-drm-core.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c index cb685fe2039b..a57812ec36b1 100644 --- a/drivers/gpu/drm/imx/imx-drm-core.c +++ b/drivers/gpu/drm/imx/imx-drm-core.c @@ -341,6 +341,9 @@ static struct platform_driver * const drivers[] = { static int __init imx_drm_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(imx_drm_init); -- cgit From fcf5cc92b015684de7c3f45106013b35f54f2fa1 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:41 +0100 Subject: drm/ingenic: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-27-javierm@redhat.com --- drivers/gpu/drm/ingenic/ingenic-drm-drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c index 542c4af70661..7f10d6eed549 100644 --- a/drivers/gpu/drm/ingenic/ingenic-drm-drv.c +++ b/drivers/gpu/drm/ingenic/ingenic-drm-drv.c @@ -1543,6 +1543,9 @@ static int ingenic_drm_init(void) { int err; + if (drm_firmware_drivers_only()) + return -ENODEV; + if (IS_ENABLED(CONFIG_DRM_INGENIC_IPU)) { err = platform_driver_register(ingenic_ipu_driver_ptr); if (err) -- cgit From ab120b9264f9519d8aa255534cc4c0d11ac959a7 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:42 +0100 Subject: drm/mcde: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. 
Signed-off-by: Javier Martinez Canillas Reviewed-by: Linus Walleij Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-28-javierm@redhat.com --- drivers/gpu/drm/mcde/mcde_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/mcde/mcde_drv.c b/drivers/gpu/drm/mcde/mcde_drv.c index 5b5afc6aaf8e..0b2910e69b42 100644 --- a/drivers/gpu/drm/mcde/mcde_drv.c +++ b/drivers/gpu/drm/mcde/mcde_drv.c @@ -491,6 +491,9 @@ static int __init mcde_drm_register(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + ret = platform_register_drivers(component_drivers, ARRAY_SIZE(component_drivers)); if (ret) -- cgit From c0a23916f4ae880d0265e19255997825b374baf0 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:43 +0100 Subject: drm/mediatek: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-29-javierm@redhat.com --- drivers/gpu/drm/mediatek/mtk_drm_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c index aec39724ebeb..e336358fee20 100644 --- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c +++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c @@ -210,6 +210,9 @@ static int mtk_drm_kms_init(struct drm_device *drm) struct device *dma_dev; int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + if (!iommu_present(&platform_bus_type)) return -EPROBE_DEFER; -- cgit From 5d40a4b8d824e346bb5db96d651f2f5c57953d63 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:44 +0100 Subject: drm/msm: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. 
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-30-javierm@redhat.com --- drivers/gpu/drm/msm/msm_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 892c04365239..8f30e68ae3b5 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -518,6 +518,9 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) struct msm_mdss *mdss; int ret, i; + if (drm_firmware_drivers_only()) + return -ENODEV; + ddev = drm_dev_alloc(drv, dev); if (IS_ERR(ddev)) { DRM_DEV_ERROR(dev, "failed to allocate drm_device\n"); -- cgit From 7fd7d70181b3796e456bef38397caf947b7de061 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:45 +0100 Subject: drm/omap: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-31-javierm@redhat.com --- drivers/gpu/drm/omapdrm/omap_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 2720a58ccd90..eaf67b9e5f12 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -727,6 +727,9 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev) DBG("%s", dev_name(dev)); + if (drm_firmware_drivers_only()) + return -ENODEV; + /* Allocate and initialize the DRM device. */ ddev = drm_dev_alloc(&omap_drm_driver, dev); if (IS_ERR(ddev)) -- cgit From fce8bfffa23943dfcc2339a748db4f296dc7bea7 Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:46 +0100 Subject: drm: rcar-du: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. 
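For background, the flag all of these checks consult is set from the kernel command line. The sketch below is an approximation of that plumbing with assumed names; it is not part of this patch and not the DRM core's actual source.

#include <linux/init.h>
#include <linux/printk.h>

static bool example_nomodeset;	/* stands in for the core's internal flag */

bool example_firmware_drivers_only(void)	/* cf. drm_firmware_drivers_only() */
{
	return example_nomodeset;
}

static int __init example_disable_modeset(char *str)
{
	example_nomodeset = true;
	pr_warn("Booted with nomodeset: only the system framebuffer will be available\n");
	return 1;	/* option consumed */
}
__setup("nomodeset", example_disable_modeset);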
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-32-javierm@redhat.com --- drivers/gpu/drm/rcar-du/rcar_du_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/rcar-du/rcar_du_drv.c b/drivers/gpu/drm/rcar-du/rcar_du_drv.c index 5a8131ef81d5..982e450233ed 100644 --- a/drivers/gpu/drm/rcar-du/rcar_du_drv.c +++ b/drivers/gpu/drm/rcar-du/rcar_du_drv.c @@ -701,6 +701,9 @@ static struct platform_driver rcar_du_platform_driver = { static int __init rcar_du_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + rcar_du_of_init(rcar_du_of_table); return platform_driver_register(&rcar_du_platform_driver); -- cgit From 09037781c1094cc95a48da220abb23ee9ae92ede Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:47 +0100 Subject: drm/rockchip: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-33-javierm@redhat.com --- drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index bec207de4544..ac190e2b1f7a 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -457,6 +457,9 @@ static int __init rockchip_drm_init(void) { int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + num_rockchip_sub_drivers = 0; ADD_ROCKCHIP_SUB_DRIVER(vop_platform_driver, CONFIG_DRM_ROCKCHIP); ADD_ROCKCHIP_SUB_DRIVER(rockchip_lvds_driver, -- cgit From 5e66e818e0358fe42704404580b70e1ffc7afb6a Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:48 +0100 Subject: drm/sprd: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. 
Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-34-javierm@redhat.com --- drivers/gpu/drm/sprd/sprd_drm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/sprd/sprd_drm.c b/drivers/gpu/drm/sprd/sprd_drm.c index a077e2d4d721..dd7e3de780f3 100644 --- a/drivers/gpu/drm/sprd/sprd_drm.c +++ b/drivers/gpu/drm/sprd/sprd_drm.c @@ -186,6 +186,9 @@ static struct platform_driver *sprd_drm_drivers[] = { static int __init sprd_drm_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + return platform_register_drivers(sprd_drm_drivers, ARRAY_SIZE(sprd_drm_drivers)); } -- cgit From 89ec0023200e20ab4f601aaae1e457a0026209bc Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:49 +0100 Subject: drm/sti: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-35-javierm@redhat.com --- drivers/gpu/drm/sti/sti_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/sti/sti_drv.c b/drivers/gpu/drm/sti/sti_drv.c index c7efb43b83ee..860b2230aa08 100644 --- a/drivers/gpu/drm/sti/sti_drv.c +++ b/drivers/gpu/drm/sti/sti_drv.c @@ -287,6 +287,9 @@ static struct platform_driver * const drivers[] = { static int sti_drm_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + return platform_register_drivers(drivers, ARRAY_SIZE(drivers)); } module_init(sti_drm_init); -- cgit From 93804f5d2dd2f9c1b7f45fe630a2f5302ae0dcfb Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:50 +0100 Subject: drm/tegra: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-36-javierm@redhat.com --- drivers/gpu/drm/tegra/drm.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c index 8d37d6b00562..48e35d686473 100644 --- a/drivers/gpu/drm/tegra/drm.c +++ b/drivers/gpu/drm/tegra/drm.c @@ -1382,6 +1382,9 @@ static int __init host1x_drm_init(void) { int err; + if (drm_firmware_drivers_only()) + return -ENODEV; + err = host1x_driver_register(&host1x_drm_driver); if (err < 0) return err; -- cgit From 9b71ce89b55565ecce1b189e216048cd1348e36a Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:51 +0100 Subject: drm/tilcdc: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. 
DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Acked-by: Jyri Sarha Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-37-javierm@redhat.com --- drivers/gpu/drm/tilcdc/tilcdc_drv.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/tilcdc/tilcdc_drv.c b/drivers/gpu/drm/tilcdc/tilcdc_drv.c index cc567c87057d..eee3c447fbac 100644 --- a/drivers/gpu/drm/tilcdc/tilcdc_drv.c +++ b/drivers/gpu/drm/tilcdc/tilcdc_drv.c @@ -604,6 +604,9 @@ static struct platform_driver tilcdc_platform_driver = { static int __init tilcdc_drm_init(void) { + if (drm_firmware_drivers_only()) + return -ENODEV; + DBG("init"); tilcdc_panel_init(); return platform_driver_register(&tilcdc_platform_driver); -- cgit From 9d6bf794084d9258dadf2754d911fcfeb13ea8fc Mon Sep 17 00:00:00 2001 From: Javier Martinez Canillas Date: Fri, 17 Dec 2021 01:37:52 +0100 Subject: drm/xen: Add support for the nomodeset kernel parameter According to disable Documentation/admin-guide/kernel-parameters.txt, this parameter can be used to disable kernel modesetting. DRM drivers will not perform display-mode changes or accelerated rendering and only the system framebuffer will be available if it was set-up. But only a few DRM drivers currently check for nomodeset, make this driver to also support the command line parameter. Signed-off-by: Javier Martinez Canillas Reviewed-by: Oleksandr Andrushchenko Acked-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20211217003752.3946210-38-javierm@redhat.com --- drivers/gpu/drm/xen/xen_drm_front.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/xen/xen_drm_front.c b/drivers/gpu/drm/xen/xen_drm_front.c index e63088c2121d..0d8e6bd1ccbf 100644 --- a/drivers/gpu/drm/xen/xen_drm_front.c +++ b/drivers/gpu/drm/xen/xen_drm_front.c @@ -495,6 +495,9 @@ static int xen_drm_drv_init(struct xen_drm_front_info *front_info) struct drm_device *drm_dev; int ret; + if (drm_firmware_drivers_only()) + return -ENODEV; + DRM_INFO("Creating %s\n", xen_drm_driver.desc); drm_info = kzalloc(sizeof(*drm_info), GFP_KERNEL); -- cgit From 039cacd2393971fe11f855118eea6c83c8f506fa Mon Sep 17 00:00:00 2001 From: Victor Zhao Date: Mon, 24 Jan 2022 12:13:58 +0800 Subject: drm/amdgpu: add determine passthrough under arm64 add determine for passthrough mode under arm64 by reading CurrentEL register v2: squash in warning fix (Alex) Signed-off-by: Victor Zhao Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index c5edd84c1c12..645093610aa0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -294,8 +294,10 @@ struct amdgpu_video_codec_info; static inline bool is_virtual_machine(void) { -#ifdef CONFIG_X86 +#if defined(CONFIG_X86) return boot_cpu_has(X86_FEATURE_HYPERVISOR); +#elif defined(CONFIG_ARM64) + return !is_kernel_in_hyp_mode(); #else return false; #endif -- cgit From 1790b649b01ba7e44af7c08ffcfc0eaa9d4f5ac6 Mon Sep 17 00:00:00 2001 From: Eric Huang Date: Fri, 21 Jan 2022 10:39:54 -0500 Subject: drm/amdkfd: enable heavy-weight TLB flush on 
Vega20 It is to meet the requirement for memory allocation optimization on MI50. Signed-off-by: Eric Huang Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 337953af7c2f..26a593b3d7dc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1419,7 +1419,8 @@ err_unlock: static bool kfd_flush_tlb_after_unmap(struct kfd_dev *dev) { return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 1) && - dev->adev->sdma.instance[0].fw_version >= 18); + dev->adev->sdma.instance[0].fw_version >= 18) || + KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 0); } static int kfd_ioctl_map_memory_to_gpu(struct file *filep, -- cgit From b6dca949b88ee69561fc4ed853ab7a2ae13d842f Mon Sep 17 00:00:00 2001 From: Bas Nieuwenhuizen Date: Sun, 23 Jan 2022 03:38:28 +0100 Subject: drm/amdgpu/display: Remove t_srx_delay_us. Unused. Convert the divisions into asserts on the divisor, to debug why it is zero. The divide by zero is suspected of causing kernel panics. While I have no idea where the zero is coming from I think this patch is a positive either way. Reviewed-by: Harry Wentland Signed-off-by: Bas Nieuwenhuizen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c | 1 - drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c | 2 -- drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c | 2 -- drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c | 2 -- drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c | 2 -- drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h | 1 - drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c | 3 --- drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c | 4 ---- 8 files changed, 17 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c index ec19678a0702..e447c74be713 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c +++ b/drivers/gpu/drm/amd/display/dc/dml/calcs/dcn_calcs.c @@ -503,7 +503,6 @@ static void dcn_bw_calc_rq_dlg_ttu( //input[in_idx].dout.output_standard; /*todo: soc->sr_enter_plus_exit_time??*/ - dlg_sys_param->t_srx_delay_us = dc->dcn_ip->dcfclk_cstate_latency / v->dcf_clk_deep_sleep; dml1_rq_dlg_get_rq_params(dml, rq_param, &input->pipe.src); dml1_extract_rq_regs(dml, rq_regs, rq_param); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c index 246071c72f6b..548cdef8a8ad 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20.c @@ -1576,8 +1576,6 @@ void dml20_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes); - dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency - / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated print__dlg_sys_params_st(mode_lib, &dlg_sys_param); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c index 015e7f2c0b16..0fc9f3e3ffae 100644 --- 
a/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn20/display_rq_dlg_calc_20v2.c @@ -1577,8 +1577,6 @@ void dml20v2_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes); - dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency - / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated print__dlg_sys_params_st(mode_lib, &dlg_sys_param); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c index 8bc27de4c104..618f4b682ab1 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn21/display_rq_dlg_calc_21.c @@ -1688,8 +1688,6 @@ void dml21_rq_dlg_get_dlg_reg( mode_lib, e2e_pipe_param, num_pipes); - dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency - / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated print__dlg_sys_params_st(mode_lib, &dlg_sys_param); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c index aef854270054..747167083dea 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dcn30/display_rq_dlg_calc_30.c @@ -1858,8 +1858,6 @@ void dml30_rq_dlg_get_dlg_reg(struct display_mode_lib *mode_lib, dlg_sys_param.total_flip_bytes = get_total_immediate_flip_bytes(mode_lib, e2e_pipe_param, num_pipes); - dlg_sys_param.t_srx_delay_us = mode_lib->ip.dcfclk_cstate_latency - / dlg_sys_param.deepsleep_dcfclk_mhz; // TODO: Deprecated print__dlg_sys_params_st(mode_lib, &dlg_sys_param); diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h index d46a2733024c..8f9f1d607f7c 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h +++ b/drivers/gpu/drm/amd/display/dc/dml/display_mode_structs.h @@ -546,7 +546,6 @@ struct _vcs_dpi_display_dlg_sys_params_st { double t_sr_wm_us; double t_extra_us; double mem_trip_us; - double t_srx_delay_us; double deepsleep_dcfclk_mhz; double total_flip_bw; unsigned int total_flip_bytes; diff --git a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c index 71ea503cb32f..412e75eb4704 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c +++ b/drivers/gpu/drm/amd/display/dc/dml/display_rq_dlg_helpers.c @@ -141,9 +141,6 @@ void print__dlg_sys_params_st(struct display_mode_lib *mode_lib, const struct _v dml_print("DML_RQ_DLG_CALC: t_urg_wm_us = %3.2f\n", dlg_sys_param->t_urg_wm_us); dml_print("DML_RQ_DLG_CALC: t_sr_wm_us = %3.2f\n", dlg_sys_param->t_sr_wm_us); dml_print("DML_RQ_DLG_CALC: t_extra_us = %3.2f\n", dlg_sys_param->t_extra_us); - dml_print( - "DML_RQ_DLG_CALC: t_srx_delay_us = %3.2f\n", - dlg_sys_param->t_srx_delay_us); dml_print( "DML_RQ_DLG_CALC: deepsleep_dcfclk_mhz = %3.2f\n", dlg_sys_param->deepsleep_dcfclk_mhz); diff --git a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c index 59dc2c5b58dd..3df559c591f8 100644 --- a/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c +++ b/drivers/gpu/drm/amd/display/dc/dml/dml1_display_rq_dlg_calc.c @@ -1331,10 +1331,6 @@ void 
dml1_rq_dlg_get_dlg_params( if (dual_plane) DTRACE("DLG: %s: swath_height_c = %d", __func__, swath_height_c); - DTRACE( - "DLG: %s: t_srx_delay_us = %3.2f", - __func__, - (double) dlg_sys_param->t_srx_delay_us); DTRACE("DLG: %s: line_time_in_us = %3.2f", __func__, (double) line_time_in_us); DTRACE("DLG: %s: vupdate_offset = %d", __func__, vupdate_offset); DTRACE("DLG: %s: vupdate_width = %d", __func__, vupdate_width); -- cgit From ac7c48c0cce00d03b3c95fddcccb0a45257e33e3 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Mon, 24 Jan 2022 16:40:44 -0500 Subject: drm/amdkfd: Don't take process mutex for svm ioctls SVM ioctls take the proper svms->lock to handle race conditions, so they don't need to take the process mutex to serialize ioctls. This also fixes a circular locking warning: WARNING: possible circular locking dependency detected Possible unsafe locking scenario: CPU0 CPU1 ---- ---- lock((work_completion)(&svms->deferred_list_work)); lock(&process->mutex); lock((work_completion)(&svms->deferred_list_work)); lock(&process->mutex); *** DEADLOCK *** Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 4 ---- 1 file changed, 4 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 26a593b3d7dc..214a2c67fba4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1847,13 +1847,9 @@ static int kfd_ioctl_svm(struct file *filep, struct kfd_process *p, void *data) if (!args->start_addr || !args->size) return -EINVAL; - mutex_lock(&p->mutex); - r = svm_ioctl(p, args->op, args->start_addr, args->size, args->nattr, args->attrs); - mutex_unlock(&p->mutex); - return r; } #else -- cgit From 367c9b0f1b8750a704070e7ae85234d591290434 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Tue, 18 Jan 2022 12:15:24 -0500 Subject: drm/amdkfd: Ensure mm remains valid in svm deferred_list work The svm deferred_list work should continue to handle the deferred_range_list, which may be split into child ranges, to avoid leaking child ranges, and should remove the ranges' mmu interval notifiers to avoid leaking mm mm_count references. So take an mm reference when adding a range to the deferred list, to ensure mm is valid in the scheduled deferred_list_work, and drop the mm reference after the range is handled.
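
A rough sketch of the lifetime rule this describes, separate from the actual patch below; queue_deferred_range() and handle_deferred_range() are made-up names for illustration, while mmget(), mmput() and the mmap lock calls are the real kernel APIs involved:

  /* Pin the mm when the range is queued for deferred handling ... */
  static void queue_deferred_range(struct svm_range *prange, struct mm_struct *mm)
  {
          mmget(mm);                  /* keep mm_users elevated while the work is pending */
          prange->work_item.mm = mm;
          /* ... add prange to svms->deferred_range_list and schedule the work ... */
  }

  /* ... and release it only after the deferred work has handled the range. */
  static void handle_deferred_range(struct svm_range *prange)
  {
          struct mm_struct *mm = prange->work_item.mm;

          mmap_write_lock(mm);
          /* ... handle prange and any child ranges it was split into ... */
          mmap_write_unlock(mm);

          mmput(mm);                  /* pairs with the mmget() above */
  }
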
Signed-off-by: Philip Yang Reported-by: Ruili Ji Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 62 +++++++++++++++++++++--------------- 1 file changed, 36 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index f2805ba74c80..225affcddbc1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1985,10 +1985,9 @@ svm_range_update_notifier_and_interval_tree(struct mm_struct *mm, } static void -svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange) +svm_range_handle_list_op(struct svm_range_list *svms, struct svm_range *prange, + struct mm_struct *mm) { - struct mm_struct *mm = prange->work_item.mm; - switch (prange->work_item.op) { case SVM_OP_NULL: pr_debug("NULL OP 0x%p prange 0x%p [0x%lx 0x%lx]\n", @@ -2071,34 +2070,41 @@ static void svm_range_deferred_list_work(struct work_struct *work) pr_debug("enter svms 0x%p\n", svms); p = container_of(svms, struct kfd_process, svms); - /* Avoid mm is gone when inserting mmu notifier */ - mm = get_task_mm(p->lead_thread); - if (!mm) { - pr_debug("svms 0x%p process mm gone\n", svms); - return; - } -retry: - mmap_write_lock(mm); - - /* Checking for the need to drain retry faults must be inside - * mmap write lock to serialize with munmap notifiers. - */ - if (unlikely(atomic_read(&svms->drain_pagefaults))) { - mmap_write_unlock(mm); - svm_range_drain_retry_fault(svms); - goto retry; - } spin_lock(&svms->deferred_list_lock); while (!list_empty(&svms->deferred_range_list)) { prange = list_first_entry(&svms->deferred_range_list, struct svm_range, deferred_list); - list_del_init(&prange->deferred_list); spin_unlock(&svms->deferred_list_lock); pr_debug("prange 0x%p [0x%lx 0x%lx] op %d\n", prange, prange->start, prange->last, prange->work_item.op); + mm = prange->work_item.mm; +retry: + mmap_write_lock(mm); + + /* Checking for the need to drain retry faults must be inside + * mmap write lock to serialize with munmap notifiers. + */ + if (unlikely(atomic_read(&svms->drain_pagefaults))) { + mmap_write_unlock(mm); + svm_range_drain_retry_fault(svms); + goto retry; + } + + /* Remove from deferred_list must be inside mmap write lock, for + * two race cases: + * 1. unmap_from_cpu may change work_item.op and add the range + * to deferred_list again, cause use after free bug. + * 2. svm_range_list_lock_and_flush_work may hold mmap write + * lock and continue because deferred_list is empty, but + * deferred_list work is actually waiting for mmap lock. 
+ */ + spin_lock(&svms->deferred_list_lock); + list_del_init(&prange->deferred_list); + spin_unlock(&svms->deferred_list_lock); + mutex_lock(&svms->lock); mutex_lock(&prange->migrate_mutex); while (!list_empty(&prange->child_list)) { @@ -2109,19 +2115,20 @@ retry: pr_debug("child prange 0x%p op %d\n", pchild, pchild->work_item.op); list_del_init(&pchild->child_list); - svm_range_handle_list_op(svms, pchild); + svm_range_handle_list_op(svms, pchild, mm); } mutex_unlock(&prange->migrate_mutex); - svm_range_handle_list_op(svms, prange); + svm_range_handle_list_op(svms, prange, mm); mutex_unlock(&svms->lock); + mmap_write_unlock(mm); + + /* Pairs with mmget in svm_range_add_list_work */ + mmput(mm); spin_lock(&svms->deferred_list_lock); } spin_unlock(&svms->deferred_list_lock); - - mmap_write_unlock(mm); - mmput(mm); pr_debug("exit svms 0x%p\n", svms); } @@ -2139,6 +2146,9 @@ svm_range_add_list_work(struct svm_range_list *svms, struct svm_range *prange, prange->work_item.op = op; } else { prange->work_item.op = op; + + /* Pairs with mmput in deferred_list_work */ + mmget(mm); prange->work_item.mm = mm; list_add_tail(&prange->deferred_list, &prange->svms->deferred_range_list); -- cgit From 6225bb3a88d22594aacea2485dc28ca12d596721 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Wed, 12 Jan 2022 11:50:50 -0500 Subject: drm/amdkfd: svm range restore work deadlock when process exit kfd_process_notifier_release flush svm_range_restore_work which calls svm_range_list_lock_and_flush_work to flush deferred_list work, but if deferred_list work mmput release the last user, it will call exit_mmap -> notifier_release, it is deadlock with below backtrace. Move flush svm_range_restore_work to kfd_process_wq_release to avoid deadlock. Then svm_range_restore_work take task->mm ref to avoid mm is gone while validating and mapping ranges to GPU. Workqueue: events svm_range_deferred_list_work [amdgpu] Call Trace: wait_for_completion+0x94/0x100 __flush_work+0x12a/0x1e0 __cancel_work_timer+0x10e/0x190 cancel_delayed_work_sync+0x13/0x20 kfd_process_notifier_release+0x98/0x2a0 [amdgpu] __mmu_notifier_release+0x74/0x1f0 exit_mmap+0x170/0x200 mmput+0x5d/0x130 svm_range_deferred_list_work+0x104/0x230 [amdgpu] process_one_work+0x220/0x3c0 Signed-off-by: Philip Yang Reported-by: Ruili Ji Tested-by: Ruili Ji Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 1 - drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 15 +++++++++------ 2 files changed, 9 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d1145da5348f..74f162887d3b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1150,7 +1150,6 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn, cancel_delayed_work_sync(&p->eviction_work); cancel_delayed_work_sync(&p->restore_work); - cancel_delayed_work_sync(&p->svms.restore_work); mutex_lock(&p->mutex); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 225affcddbc1..1cf9041c9727 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1643,13 +1643,14 @@ static void svm_range_restore_work(struct work_struct *work) pr_debug("restore svm ranges\n"); - /* kfd_process_notifier_release destroys this worker thread. So during - * the lifetime of this thread, kfd_process and mm will be valid. 
- */ p = container_of(svms, struct kfd_process, svms); - mm = p->mm; - if (!mm) + + /* Keep mm reference when svm_range_validate_and_map ranges */ + mm = get_task_mm(p->lead_thread); + if (!mm) { + pr_debug("svms 0x%p process mm gone\n", svms); return; + } svm_range_list_lock_and_flush_work(svms, mm); mutex_lock(&svms->lock); @@ -1703,6 +1704,7 @@ static void svm_range_restore_work(struct work_struct *work) out_reschedule: mutex_unlock(&svms->lock); mmap_write_unlock(mm); + mmput(mm); /* If validation failed, reschedule another attempt */ if (evicted_ranges) { @@ -2840,6 +2842,8 @@ void svm_range_list_fini(struct kfd_process *p) pr_debug("pasid 0x%x svms 0x%p\n", p->pasid, &p->svms); + cancel_delayed_work_sync(&p->svms.restore_work); + /* Ensure list work is finished before process is destroyed */ flush_work(&p->svms.deferred_list_work); @@ -2850,7 +2854,6 @@ void svm_range_list_fini(struct kfd_process *p) atomic_inc(&p->svms.drain_pagefaults); svm_range_drain_retry_fault(&p->svms); - list_for_each_entry_safe(prange, next, &p->svms.list, list) { svm_range_unlink(prange); svm_range_remove_notifier(prange); -- cgit From a154bf6eda9881aa3dd74cea83dcff6de3ce9680 Mon Sep 17 00:00:00 2001 From: Fangzhi Zuo Date: Tue, 25 Jan 2022 00:57:36 -0500 Subject: drm/amd/display: Add Missing HPO Stream Encoder Function Hook [Why] configure_dp_hpo_throttled_vcp_size() was missing promotion before, but it was covered by not calling the missing function hook in the old interface hpo_dp_link_encoder->funcs. Recent refactor replaces with new caller link_hwss->set_throttled_vcp_size which needs that hook, and that causes null ptr hang. Signed-off-by: Fangzhi Zuo Reviewed-by: Harry Wentland Signed-off-by: Alex Deucher --- .../drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c | 11 +++++++++++ .../drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h | 9 ++++++--- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c index 5065904c7833..23621ff08c90 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.c @@ -710,6 +710,16 @@ static void dcn31_hpo_dp_stream_enc_read_state( } } +static void dcn31_set_hblank_min_symbol_width( + struct hpo_dp_stream_encoder *enc, + uint16_t width) +{ + struct dcn31_hpo_dp_stream_encoder *enc3 = DCN3_1_HPO_DP_STREAM_ENC_FROM_HPO_STREAM_ENC(enc); + + REG_SET(DP_SYM32_ENC_HBLANK_CONTROL, 0, + HBLANK_MINIMUM_SYMBOL_WIDTH, width); +} + static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { .enable_stream = dcn31_hpo_dp_stream_enc_enable_stream, .dp_unblank = dcn31_hpo_dp_stream_enc_dp_unblank, @@ -725,6 +735,7 @@ static const struct hpo_dp_stream_encoder_funcs dcn30_str_enc_funcs = { .dp_audio_enable = dcn31_hpo_dp_stream_enc_audio_enable, .dp_audio_disable = dcn31_hpo_dp_stream_enc_audio_disable, .read_state = dcn31_hpo_dp_stream_enc_read_state, + .set_hblank_min_symbol_width = dcn31_set_hblank_min_symbol_width, }; void dcn31_hpo_dp_stream_encoder_construct( diff --git a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h index 70b94fc25304..7c77c71591a0 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h +++ b/drivers/gpu/drm/amd/display/dc/dcn31/dcn31_hpo_dp_stream_encoder.h @@ -80,7 +80,8 @@ 
SRI(DP_SYM32_ENC_SDP_GSP_CONTROL11, DP_SYM32_ENC, id),\ SRI(DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL, DP_SYM32_ENC, id),\ SRI(DP_SYM32_ENC_SDP_AUDIO_CONTROL0, DP_SYM32_ENC, id),\ - SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id) + SRI(DP_SYM32_ENC_VID_CRC_CONTROL, DP_SYM32_ENC, id), \ + SRI(DP_SYM32_ENC_HBLANK_CONTROL, DP_SYM32_ENC, id) #define DCN3_1_HPO_DP_STREAM_ENC_REGS \ uint32_t DP_STREAM_MAPPER_CONTROL0;\ @@ -116,7 +117,8 @@ uint32_t DP_SYM32_ENC_SDP_GSP_CONTROL11;\ uint32_t DP_SYM32_ENC_SDP_METADATA_PACKET_CONTROL;\ uint32_t DP_SYM32_ENC_SDP_AUDIO_CONTROL0;\ - uint32_t DP_SYM32_ENC_VID_CRC_CONTROL + uint32_t DP_SYM32_ENC_VID_CRC_CONTROL;\ + uint32_t DP_SYM32_ENC_HBLANK_CONTROL #define DCN3_1_HPO_DP_STREAM_ENC_MASK_SH_LIST(mask_sh)\ @@ -202,7 +204,8 @@ type GSP_SOF_REFERENCE;\ type METADATA_PACKET_ENABLE;\ type CRC_ENABLE;\ - type CRC_CONT_MODE_ENABLE + type CRC_CONT_MODE_ENABLE;\ + type HBLANK_MINIMUM_SYMBOL_WIDTH struct dcn31_hpo_dp_stream_encoder_registers { -- cgit From f4e2a66dae996b4fa2cc21b1904798ad1dc83049 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 24 Jan 2022 13:40:35 +0800 Subject: drm/amd/pm: correct the MGpuFanBoost support for Beige Goby The existing way cannot handle Beige Goby well as a different PPTable data structure(PPTable_beige_goby_t instead of PPTable_t) is used there. Signed-off-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 651fe748e423..dcd35c68e59b 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -3729,14 +3729,14 @@ static ssize_t sienna_cichlid_get_ecc_info(struct smu_context *smu, } static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu) { - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *smc_pptable = table_context->driver_pptable; + uint16_t *mgpu_fan_boost_limit_rpm; + GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm); /* * Skip the MGpuFanBoost setting for those ASICs * which do not support it */ - if (!smc_pptable->MGpuFanBoostLimitRpm) + if (*mgpu_fan_boost_limit_rpm == 0) return 0; return smu_cmn_send_smc_msg_with_param(smu, -- cgit From 75513bf5d72cd1a81401866642f4a8052b2d4420 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Tue, 25 Jan 2022 16:09:00 +0800 Subject: drm/amd/pm: fix the deadlock observed on performance_level setting The sub-routine(amdgpu_gfx_off_ctrl) tried to obtain the lock adev->pm.mutex which was actually hold by amdgpu_dpm_force_performance_level. A deadlock happened then. 
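
To make the cycle concrete, this is the reported scenario in the same style as the lockdep splat quoted earlier in this series (a simplified sketch, not an exact trace):

  amdgpu_dpm_force_performance_level()
    mutex_lock(&adev->pm.mutex);
      ...
      amdgpu_gfx_off_ctrl()
        mutex_lock(&adev->pm.mutex);   <-- already held by the same task: deadlock

The fix below narrows the critical section so adev->pm.mutex is only held around the low-level force_performance_level() callback instead of across the helpers that end up in amdgpu_gfx_off_ctrl().
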
Signed-off-by: Evan Quan Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 50 +++++++++++++------------------------ 1 file changed, 17 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c index 5fc33893a68c..ef574c96b41c 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c @@ -692,25 +692,16 @@ void amdgpu_dpm_set_power_state(struct amdgpu_device *adev, amdgpu_dpm_compute_clocks(adev); } -static enum amd_dpm_forced_level amdgpu_dpm_get_performance_level_locked(struct amdgpu_device *adev) +enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) { const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs; enum amd_dpm_forced_level level; + mutex_lock(&adev->pm.mutex); if (pp_funcs->get_performance_level) level = pp_funcs->get_performance_level(adev->powerplay.pp_handle); else level = adev->pm.dpm.forced_level; - - return level; -} - -enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev) -{ - enum amd_dpm_forced_level level; - - mutex_lock(&adev->pm.mutex); - level = amdgpu_dpm_get_performance_level_locked(adev); mutex_unlock(&adev->pm.mutex); return level; @@ -725,23 +716,16 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; - int ret = 0; if (!pp_funcs->force_performance_level) return 0; - mutex_lock(&adev->pm.mutex); - - if (adev->pm.dpm.thermal_active) { - ret = -EINVAL; - goto out; - } + if (adev->pm.dpm.thermal_active) + return -EINVAL; - current_level = amdgpu_dpm_get_performance_level_locked(adev); - if (current_level == level) { - ret = 0; - goto out; - } + current_level = amdgpu_dpm_get_performance_level(adev); + if (current_level == level) + return 0; if (adev->asic_type == CHIP_RAVEN) { if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) { @@ -755,10 +739,8 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, } if (!(current_level & profile_mode_mask) && - (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { - ret = -EINVAL; - goto out; - } + (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) + return -EINVAL; if (!(current_level & profile_mode_mask) && (level & profile_mode_mask)) { @@ -780,17 +762,19 @@ int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev, AMD_PG_STATE_GATE); } + mutex_lock(&adev->pm.mutex); + if (pp_funcs->force_performance_level(adev->powerplay.pp_handle, - level)) - ret = -EINVAL; + level)) { + mutex_unlock(&adev->pm.mutex); + return -EINVAL; + } - if (!ret) - adev->pm.dpm.forced_level = level; + adev->pm.dpm.forced_level = level; -out: mutex_unlock(&adev->pm.mutex); - return ret; + return 0; } int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev, -- cgit From 7270e8957eb9aacf5914605d04865f3829a14bce Mon Sep 17 00:00:00 2001 From: "Tianci.Yin" Date: Tue, 25 Jan 2022 16:54:03 +0800 Subject: drm/amdgpu: Fix an error message in rmmod [why] In rmmod procedure, kfd sends cp a dequeue request, but the request does not get response, then an error message "cp queue pipe 4 queue 0 preemption failed" printed. [how] Performing kfd suspending after disabling gfxoff can fix it. 
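
For reference, the resulting teardown order, condensed from the hunk below (no new code, just the intended sequence in one place):

  amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);  /* ungating also disables gfxoff */
  amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
  amdgpu_amdkfd_suspend(adev, false);                     /* CP can now answer the dequeue request */
  amdgpu_device_smu_fini_early(adev);
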
Acked-by: Felix Kuehling Reviewed-by: Yang Wang Signed-off-by: Tianci.Yin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 7cec3a0f61b9..6ba57ad88640 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2721,11 +2721,11 @@ static int amdgpu_device_ip_fini_early(struct amdgpu_device *adev) } } - amdgpu_amdkfd_suspend(adev, false); - amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE); amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE); + amdgpu_amdkfd_suspend(adev, false); + /* Workaroud for ASICs need to disable SMC first */ amdgpu_device_smu_fini_early(adev); -- cgit From fc6ea4bee130710a77ec16a86d2013e964602503 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Tue, 25 Jan 2022 10:51:49 -0500 Subject: drm/amdgpu: Wipe all VRAM on free when RAS is enabled MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit On GPUs with RAS, poison can propagate between processes if VRAM is not cleared when it is freed or allocated. The reason is, that not all write accesses clear RAS poison. 32-byte writes by the SDMA engine do clear RAS poison. Clearing memory in the background when it is freed should avoid major performance impact. KFD has been doing this already for a long time. Signed-off-by: Felix Kuehling Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 5661b82d84d4..ec29365d108d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -575,6 +575,9 @@ int amdgpu_bo_create(struct amdgpu_device *adev, if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; + if (adev->ras_enabled) + bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE; + bo->tbo.bdev = &adev->mman.bdev; if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA | AMDGPU_GEM_DOMAIN_GDS)) -- cgit From 9a17696049889550ce76a987562e679535943c96 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Tue, 25 Jan 2022 15:49:47 -0600 Subject: display/amd: decrease message verbosity about watermarks table failure A number of BIOS versions have a problem with the watermarks table not being configured properly. This manifests as a very scary looking warning during resume from s0i3. This should be harmless in most cases and is well understood, so decrease the assertion to a clearer warning about the problem. 
Reviewed-by: Harry Wentland Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c index 9b4836350547..c5d7d075026f 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn31/dcn31_smu.c @@ -120,7 +120,11 @@ static int dcn31_smu_send_msg_with_param(struct clk_mgr_internal *clk_mgr, result = dcn31_smu_wait_for_response(clk_mgr, 10, 200000); if (result == VBIOSSMC_Result_Failed) { - ASSERT(0); + if (msg_id == VBIOSSMC_MSG_TransferTableDram2Smu && + param == TABLE_WATERMARKS) + DC_LOG_WARNING("Watermarks table not configured properly by SMU"); + else + ASSERT(0); REG_WRITE(MP1_SMN_C2PMSG_91, VBIOSSMC_Result_OK); return -1; } -- cgit From 400013b268cb666a44c0827b136bfd4bb741b13d Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 19 Jan 2022 15:42:55 +0800 Subject: drm/amdgpu: add umc_fill_error_record to make code more simple Create common amdgpu_umc_fill_error_record function for all versions of UMC and clean up related codes. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c | 23 +++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c | 21 +++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h | 5 +++++ drivers/gpu/drm/amd/amdgpu/umc_v6_1.c | 18 +++-------------- drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 36 ++++++--------------------------- drivers/gpu/drm/amd/amdgpu/umc_v8_7.c | 36 ++++++--------------------------- 6 files changed, 46 insertions(+), 93 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c index d4e07d0acb66..e6324995fc54 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras.c @@ -158,14 +158,9 @@ static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t addre } memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); - - err_rec.address = address; - err_rec.retired_page = address >> AMDGPU_GPU_PAGE_SHIFT; - err_rec.ts = (uint64_t)ktime_get_real_seconds(); - err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; - err_data.err_addr = &err_rec; - err_data.err_addr_cnt = 1; + amdgpu_umc_fill_error_record(&err_data, address, + (address >> AMDGPU_GPU_PAGE_SHIFT), 0, 0); if (amdgpu_bad_page_threshold != 0) { amdgpu_ras_add_bad_pages(adev, err_data.err_addr, @@ -2660,8 +2655,6 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb, dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d", umc_inst, ch_inst); - memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); - /* * Translate UMC channel address to Physical address */ @@ -2673,16 +2666,10 @@ static int amdgpu_bad_page_notifier(struct notifier_block *nb, ADDR_OF_256B_BLOCK(channel_index) | OFFSET_IN_256B_BLOCK(m->addr); - err_rec.address = m->addr; - err_rec.retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; - err_rec.ts = (uint64_t)ktime_get_real_seconds(); - err_rec.err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; - err_rec.cu = 0; - err_rec.mem_channel = channel_index; - err_rec.mcumc_id = umc_inst; - + memset(&err_rec, 0x0, sizeof(struct eeprom_table_record)); err_data.err_addr = &err_rec; - err_data.err_addr_cnt = 1; + amdgpu_umc_fill_error_record(&err_data, m->addr, + 
retired_page, channel_index, umc_inst); if (amdgpu_bad_page_threshold != 0) { amdgpu_ras_add_bad_pages(adev, err_data.err_addr, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c index b4c68c09e071..ff7805beda38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.c @@ -218,3 +218,24 @@ int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, amdgpu_ras_interrupt_dispatch(adev, &ih_data); return 0; } + +void amdgpu_umc_fill_error_record(struct ras_err_data *err_data, + uint64_t err_addr, + uint64_t retired_page, + uint32_t channel_index, + uint32_t umc_inst) +{ + struct eeprom_table_record *err_rec = + &err_data->err_addr[err_data->err_addr_cnt]; + + err_rec->address = err_addr; + /* page frame address is saved */ + err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; + err_rec->ts = (uint64_t)ktime_get_real_seconds(); + err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; + err_rec->cu = 0; + err_rec->mem_channel = channel_index; + err_rec->mcumc_id = umc_inst; + + err_data->err_addr_cnt++; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h index 195740a6d97d..4db0526d0be4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_umc.h @@ -80,4 +80,9 @@ int amdgpu_umc_poison_handler(struct amdgpu_device *adev, int amdgpu_umc_process_ecc_irq(struct amdgpu_device *adev, struct amdgpu_irq_src *source, struct amdgpu_iv_entry *entry); +void amdgpu_umc_fill_error_record(struct ras_err_data *err_data, + uint64_t err_addr, + uint64_t retired_page, + uint32_t channel_index, + uint32_t umc_inst); #endif diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c index 4776301972d4..939cb203f7ad 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_1.c @@ -300,7 +300,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, { uint32_t lsb, mc_umc_status_addr; uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0; - struct eeprom_table_record *err_rec; uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; if (adev->asic_type == CHIP_ARCTURUS) { @@ -328,8 +327,6 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, return; } - err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - /* calculate error address if ue/ce error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || @@ -348,18 +345,9 @@ static void umc_v6_1_query_error_address(struct amdgpu_device *adev, /* we only save ue error information currently, ce is skipped */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) - == 1) { - err_rec->address = err_addr; - /* page frame address is saved */ - err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; - err_rec->ts = (uint64_t)ktime_get_real_seconds(); - err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; - err_rec->cu = 0; - err_rec->mem_channel = channel_index; - err_rec->mcumc_id = umc_inst; - - err_data->err_addr_cnt++; - } + == 1) + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, umc_inst); } /* clear umc status */ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index f5a1ba7db75a..300dee9ec6b4 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -120,7 +120,6 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, uint32_t umc_inst) { uint64_t mc_umc_status, err_addr, retired_page; - struct eeprom_table_record *err_rec; uint32_t channel_index; uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -137,8 +136,6 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, if (!err_data->err_addr) return; - err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - /* calculate error address if ue/ce error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || @@ -154,18 +151,9 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, /* we only save ue error information currently, ce is skipped */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) - == 1) { - err_rec->address = err_addr; - /* page frame address is saved */ - err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; - err_rec->ts = (uint64_t)ktime_get_real_seconds(); - err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; - err_rec->cu = 0; - err_rec->mem_channel = channel_index; - err_rec->mcumc_id = umc_inst; - - err_data->err_addr_cnt++; - } + == 1) + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, umc_inst); } } @@ -345,7 +333,6 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev, { uint32_t mc_umc_status_addr; uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0; - struct eeprom_table_record *err_rec; uint32_t channel_index; mc_umc_status_addr = @@ -364,8 +351,6 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev, return; } - err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; @@ -384,18 +369,9 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev, /* we only save ue error information currently, ce is skipped */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) - == 1) { - err_rec->address = err_addr; - /* page frame address is saved */ - err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; - err_rec->ts = (uint64_t)ktime_get_real_seconds(); - err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; - err_rec->cu = 0; - err_rec->mem_channel = channel_index; - err_rec->mcumc_id = umc_inst; - - err_data->err_addr_cnt++; - } + == 1) + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, umc_inst); } /* clear umc status */ diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c index d70417196662..de85a998ef99 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v8_7.c @@ -121,7 +121,6 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev, uint32_t umc_inst) { uint64_t mc_umc_status, err_addr, retired_page; - struct eeprom_table_record *err_rec; uint32_t channel_index; uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -138,8 +137,6 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev, if (!err_data->err_addr) return; - err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - /* calculate error address if ue/ce error is detected */ if 
(REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || @@ -155,18 +152,9 @@ static void umc_v8_7_ecc_info_query_error_address(struct amdgpu_device *adev, /* we only save ue error information currently, ce is skipped */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) - == 1) { - err_rec->address = err_addr; - /* page frame address is saved */ - err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; - err_rec->ts = (uint64_t)ktime_get_real_seconds(); - err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; - err_rec->cu = 0; - err_rec->mem_channel = channel_index; - err_rec->mcumc_id = umc_inst; - - err_data->err_addr_cnt++; - } + == 1) + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, umc_inst); } } @@ -344,7 +332,6 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev, { uint32_t lsb, mc_umc_status_addr; uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0; - struct eeprom_table_record *err_rec; uint32_t channel_index = adev->umc.channel_idx_tbl[umc_inst * adev->umc.channel_inst_num + ch_inst]; mc_umc_status_addr = @@ -363,8 +350,6 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev, return; } - err_rec = &err_data->err_addr[err_data->err_addr_cnt]; - /* calculate error address if ue/ce error is detected */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, Val) == 1 && (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) == 1 || @@ -383,18 +368,9 @@ static void umc_v8_7_query_error_address(struct amdgpu_device *adev, /* we only save ue error information currently, ce is skipped */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) - == 1) { - err_rec->address = err_addr; - /* page frame address is saved */ - err_rec->retired_page = retired_page >> AMDGPU_GPU_PAGE_SHIFT; - err_rec->ts = (uint64_t)ktime_get_real_seconds(); - err_rec->err_type = AMDGPU_RAS_EEPROM_ERR_NON_RECOVERABLE; - err_rec->cu = 0; - err_rec->mem_channel = channel_index; - err_rec->mcumc_id = umc_inst; - - err_data->err_addr_cnt++; - } + == 1) + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, umc_inst); } /* clear umc status */ -- cgit From 498d46fe7aa7eda5807352d62af133a2f432b814 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 19 Jan 2022 16:00:11 +0800 Subject: drm/amdgpu: increase bad page number for umc ras query One piece of umc normalizing address can be mapped to 16 pieces of physical address in each umc channel on ALDEBARAN. 
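
A quick derivation of the new per-channel count: the three column bits [C4 C3 C2] allow 2^3 = 8 candidate physical pages per normalized address (UMC_V6_7_NA_MAP_PA_NUM below), and each candidate may additionally have the R14 bit flipped, doubling that to 8 * 2 = 16 (UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL). Hence max_ras_err_cnt_per_query grows to UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL.
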
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 3 ++- drivers/gpu/drm/amd/amdgpu/umc_v6_7.h | 4 ++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 600ff658ab1b..4595027a8c63 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1213,7 +1213,8 @@ static void gmc_v9_0_set_umc_funcs(struct amdgpu_device *adev) adev->umc.ras = &umc_v6_1_ras; break; case IP_VERSION(6, 7, 0): - adev->umc.max_ras_err_cnt_per_query = UMC_V6_7_TOTAL_CHANNEL_NUM; + adev->umc.max_ras_err_cnt_per_query = + UMC_V6_7_TOTAL_CHANNEL_NUM * UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL; adev->umc.channel_inst_num = UMC_V6_7_CHANNEL_INSTANCE_NUM; adev->umc.umc_inst_num = UMC_V6_7_UMC_INSTANCE_NUM; adev->umc.channel_offs = UMC_V6_7_PER_CHANNEL_OFFSET; diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h index 1f2edf625370..9adebcf98582 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h @@ -41,6 +41,10 @@ #define UMC_V6_7_CHANNEL_INSTANCE_NUM 8 /* total channel instances in one umc block */ #define UMC_V6_7_TOTAL_CHANNEL_NUM (UMC_V6_7_CHANNEL_INSTANCE_NUM * UMC_V6_7_UMC_INSTANCE_NUM) +/* one piece of normalizing address is mapped to 8 pieces of physical address */ +#define UMC_V6_7_NA_MAP_PA_NUM 8 +/* R14 bit shift should be considered, double the number */ +#define UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL (UMC_V6_7_NA_MAP_PA_NUM * 2) /* UMC regiser per channel offset */ #define UMC_V6_7_PER_CHANNEL_OFFSET 0x400 extern struct amdgpu_umc_ras umc_v6_7_ras; -- cgit From e63fa4dcea2f7afcbf8f2d013dfae23a61a273d8 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Wed, 19 Jan 2022 17:00:09 +0800 Subject: drm/amdgpu: update algorithm of umc address conversion On ALDEBARAN, we need to traverse all column bits higher than BIT11(C4C3C2) in a row, the shift of R14 bit should be also taken into account. Retire all pages we find. 
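
The traversal boils down to a handful of bit operations; the following condensed sketch is for illustration only, the real loops are in the hunks below:

  /* retire_page() stands in for amdgpu_umc_fill_error_record() in the real code */
  extern void retire_page(uint64_t pa);

  static void retire_all_aliases(uint64_t soc_pa)
  {
          uint64_t retired_page, column;

          /* clear [C4 C3 C2], i.e. bits 19:17 of the soc physical address */
          soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT);

          /* loop over the 8 possible [C4 C3 C2] values */
          for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) {
                  retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT);
                  retire_page(retired_page);                                    /* column variant  */
                  retire_page(retired_page ^ (0x1ULL << UMC_V6_7_PA_R14_BIT));  /* R14 bit flipped */
          }
  }
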
Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 41 ++++++++++++++++++++++++++++------- drivers/gpu/drm/amd/amdgpu/umc_v6_7.h | 4 ++++ 2 files changed, 37 insertions(+), 8 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index 300dee9ec6b4..1ecba7b5df1c 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -119,7 +119,7 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, uint32_t ch_inst, uint32_t umc_inst) { - uint64_t mc_umc_status, err_addr, retired_page; + uint64_t mc_umc_status, err_addr, soc_pa, retired_page, column; uint32_t channel_index; uint32_t eccinfo_table_idx; struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); @@ -145,15 +145,27 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); /* translate umc channel address to soc pa, 3 parts are included */ - retired_page = ADDR_OF_8KB_BLOCK(err_addr) | + soc_pa = ADDR_OF_8KB_BLOCK(err_addr) | ADDR_OF_256B_BLOCK(channel_index) | OFFSET_IN_256B_BLOCK(err_addr); + /* clear [C4 C3 C2] in soc physical address */ + soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT); /* we only save ue error information currently, ce is skipped */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) - == 1) - amdgpu_umc_fill_error_record(err_data, err_addr, + == 1) { + /* loop for all possibilities of [C4 C3 C2] */ + for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) { + retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT); + amdgpu_umc_fill_error_record(err_data, err_addr, retired_page, channel_index, umc_inst); + + /* shift R14 bit */ + retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT); + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, umc_inst); + } + } } } @@ -332,8 +344,9 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev, uint32_t umc_inst) { uint32_t mc_umc_status_addr; - uint64_t mc_umc_status, err_addr, retired_page, mc_umc_addrt0; uint32_t channel_index; + uint64_t mc_umc_status, mc_umc_addrt0; + uint64_t err_addr, soc_pa, retired_page, column; mc_umc_status_addr = SOC15_REG_OFFSET(UMC, 0, regMCA_UMC_UMC0_MCUMC_STATUST0); @@ -363,15 +376,27 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev, err_addr = REG_GET_FIELD(err_addr, MCA_UMC_UMC0_MCUMC_ADDRT0, ErrorAddr); /* translate umc channel address to soc pa, 3 parts are included */ - retired_page = ADDR_OF_8KB_BLOCK(err_addr) | + soc_pa = ADDR_OF_8KB_BLOCK(err_addr) | ADDR_OF_256B_BLOCK(channel_index) | OFFSET_IN_256B_BLOCK(err_addr); + /* clear [C4 C3 C2] in soc physical address */ + soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT); /* we only save ue error information currently, ce is skipped */ if (REG_GET_FIELD(mc_umc_status, MCA_UMC_UMC0_MCUMC_STATUST0, UECC) - == 1) - amdgpu_umc_fill_error_record(err_data, err_addr, + == 1) { + /* loop for all possibilities of [C4 C3 C2] */ + for (column = 0; column < UMC_V6_7_NA_MAP_PA_NUM; column++) { + retired_page = soc_pa | (column << UMC_V6_7_PA_C2_BIT); + amdgpu_umc_fill_error_record(err_data, err_addr, + retired_page, channel_index, umc_inst); + + /* shift R14 bit */ + retired_page ^= (0x1ULL << UMC_V6_7_PA_R14_BIT); + amdgpu_umc_fill_error_record(err_data, err_addr, retired_page, channel_index, umc_inst); + } + } } /* clear umc status */ diff --git 
a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h index 9adebcf98582..b67677867b45 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h @@ -45,6 +45,10 @@ #define UMC_V6_7_NA_MAP_PA_NUM 8 /* R14 bit shift should be considered, double the number */ #define UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL (UMC_V6_7_NA_MAP_PA_NUM * 2) +/* The C2 bit in SOC physical address */ +#define UMC_V6_7_PA_C2_BIT 17 +/* The R14 bit in SOC physical address */ +#define UMC_V6_7_PA_R14_BIT 34 /* UMC regiser per channel offset */ #define UMC_V6_7_PER_CHANNEL_OFFSET 0x400 extern struct amdgpu_umc_ras umc_v6_7_ras; -- cgit From bee7f8d09268dc80da0e841ca99d79f500d03b84 Mon Sep 17 00:00:00 2001 From: Tao Zhou Date: Mon, 24 Jan 2022 16:41:40 +0800 Subject: drm/amdgpu: get hash bit for CH4 in umc channel index On ALDEBARAN, the umc channel bits are not original values, they are hashed. Signed-off-by: Tao Zhou Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/umc_v6_7.c | 8 ++++++++ drivers/gpu/drm/amd/amdgpu/umc_v6_7.h | 15 +++++++++++++++ 2 files changed, 23 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c index 1ecba7b5df1c..47452b61b615 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.c @@ -148,6 +148,10 @@ static void umc_v6_7_ecc_info_query_error_address(struct amdgpu_device *adev, soc_pa = ADDR_OF_8KB_BLOCK(err_addr) | ADDR_OF_256B_BLOCK(channel_index) | OFFSET_IN_256B_BLOCK(err_addr); + + /* The umc channel bits are not original values, they are hashed */ + SET_CHANNEL_HASH(channel_index, soc_pa); + /* clear [C4 C3 C2] in soc physical address */ soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT); @@ -379,6 +383,10 @@ static void umc_v6_7_query_error_address(struct amdgpu_device *adev, soc_pa = ADDR_OF_8KB_BLOCK(err_addr) | ADDR_OF_256B_BLOCK(channel_index) | OFFSET_IN_256B_BLOCK(err_addr); + + /* The umc channel bits are not original values, they are hashed */ + SET_CHANNEL_HASH(channel_index, soc_pa); + /* clear [C4 C3 C2] in soc physical address */ soc_pa &= ~(0x7ULL << UMC_V6_7_PA_C2_BIT); diff --git a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h index b67677867b45..fe41ed2f5945 100644 --- a/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h +++ b/drivers/gpu/drm/amd/amdgpu/umc_v6_7.h @@ -45,12 +45,27 @@ #define UMC_V6_7_NA_MAP_PA_NUM 8 /* R14 bit shift should be considered, double the number */ #define UMC_V6_7_BAD_PAGE_NUM_PER_CHANNEL (UMC_V6_7_NA_MAP_PA_NUM * 2) +/* The CH4 bit in SOC physical address */ +#define UMC_V6_7_PA_CH4_BIT 12 /* The C2 bit in SOC physical address */ #define UMC_V6_7_PA_C2_BIT 17 /* The R14 bit in SOC physical address */ #define UMC_V6_7_PA_R14_BIT 34 /* UMC regiser per channel offset */ #define UMC_V6_7_PER_CHANNEL_OFFSET 0x400 + +/* XOR bit 20, 25, 34 of PA into CH4 bit (bit 12 of PA), + * hash bit is only effective when related setting is enabled + */ +#define CHANNEL_HASH(channel_idx, pa) (((channel_idx) >> 4) ^ \ + (((pa) >> 20) & 0x1ULL & adev->df.hash_status.hash_64k) ^ \ + (((pa) >> 25) & 0x1ULL & adev->df.hash_status.hash_2m) ^ \ + (((pa) >> 34) & 0x1ULL & adev->df.hash_status.hash_1g)) +#define SET_CHANNEL_HASH(channel_idx, pa) do { \ + (pa) &= ~(0x1ULL << UMC_V6_7_PA_CH4_BIT); \ + (pa) |= (CHANNEL_HASH(channel_idx, pa) << UMC_V6_7_PA_CH4_BIT); \ + } while (0) + extern struct amdgpu_umc_ras umc_v6_7_ras; extern const uint32_t 
umc_v6_7_channel_idx_tbl_second[UMC_V6_7_UMC_INSTANCE_NUM][UMC_V6_7_CHANNEL_INSTANCE_NUM]; -- cgit From 7367540b26214ba5f7236e0f212fd60ec6d07c3f Mon Sep 17 00:00:00 2001 From: Tim Huang Date: Thu, 27 Jan 2022 10:48:41 +0800 Subject: drm/amd/display: Fix unused variable warning [Why] It will build failed with unused variable 'dc' with '-Werror=unused-variable'enabled when CONFIG_DRM_AMD_DC_DCN is not defined. Signed-off-by: Tim Huang Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Reviewed-by: Aaron Liu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 84342f27ee64..322333b1eb08 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -1027,7 +1027,6 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) const unsigned char *fw_inst_const, *fw_bss_data; uint32_t i, fw_inst_const_size, fw_bss_data_size; bool has_hw_support; - struct dc *dc = adev->dm.dc; if (!dmub_srv) /* DMUB isn't supported on the ASIC. */ @@ -1123,7 +1122,7 @@ static int dm_dmub_hw_init(struct amdgpu_device *adev) case IP_VERSION(3, 1, 3): /* Only for this asic hw internal rev B0 */ hw_params.dpia_supported = true; #if defined(CONFIG_DRM_AMD_DC_DCN) - hw_params.disable_dpia = dc->debug.dpia_debug.bits.disable_dpia; + hw_params.disable_dpia = adev->dm.dc->debug.dpia_debug.bits.disable_dpia; #endif break; default: -- cgit From 4e13b063d2e510b54e3ffc2e975315d08d14c5af Mon Sep 17 00:00:00 2001 From: Aaron Liu Date: Thu, 27 Jan 2022 09:40:44 +0800 Subject: drm/amdgpu: convert code name to ip version for athub Use IP version rather than codename for athub. Signed-off-by: Aaron Liu Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/athub_v1_0.c | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c index 3ea557864320..88642e7ecdf4 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v1_0.c @@ -68,12 +68,13 @@ int athub_v1_0_set_clockgating(struct amdgpu_device *adev, if (amdgpu_sriov_vf(adev)) return 0; - switch (adev->asic_type) { - case CHIP_VEGA10: - case CHIP_VEGA12: - case CHIP_VEGA20: - case CHIP_RAVEN: - case CHIP_RENOIR: + switch (adev->ip_versions[ATHUB_HWIP][0]) { + case IP_VERSION(9, 0, 0): + case IP_VERSION(9, 1, 0): + case IP_VERSION(9, 2, 0): + case IP_VERSION(9, 3, 0): + case IP_VERSION(9, 4, 0): + case IP_VERSION(1, 5, 0): athub_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); athub_update_medium_grain_light_sleep(adev, -- cgit From f06d9e4eec7320f5a560e49ed652e785c8ab5c45 Mon Sep 17 00:00:00 2001 From: Aaron Liu Date: Thu, 27 Jan 2022 09:30:33 +0800 Subject: drm/amdgpu: add 1.3.1/2.4.0 athub CG support This patch adds 1.3.1/2.4.0 athub clock gating support. 
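
Both this change and the code-name conversion above key the athub clockgating paths off adev->ip_versions[ATHUB_HWIP][0]. For reference, and going from memory of amd_shared.h in this timeframe, IP_VERSION(mj, mn, rv) simply packs the three numbers into a single integer, roughly (((mj) << 16) | ((mn) << 8) | (rv)), so the new case labels are ordinary integer comparisons rather than chip-name enum values.
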
Signed-off-by: Aaron Liu Reviewed-by: Alex Deucher Reviewed-by: Huang Rui Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/athub_v2_0.c | 1 + drivers/gpu/drm/amd/amdgpu/athub_v2_1.c | 1 + 2 files changed, 2 insertions(+) diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c index ab6a07e5e8c4..a720436857b4 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_0.c @@ -78,6 +78,7 @@ int athub_v2_0_set_clockgating(struct amdgpu_device *adev, return 0; switch (adev->ip_versions[ATHUB_HWIP][0]) { + case IP_VERSION(1, 3, 1): case IP_VERSION(2, 0, 0): case IP_VERSION(2, 0, 2): athub_v2_0_update_medium_grain_clock_gating(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c index 2edefd10e56c..ad8e87d3d2cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c +++ b/drivers/gpu/drm/amd/amdgpu/athub_v2_1.c @@ -74,6 +74,7 @@ int athub_v2_1_set_clockgating(struct amdgpu_device *adev, case IP_VERSION(2, 1, 0): case IP_VERSION(2, 1, 1): case IP_VERSION(2, 1, 2): + case IP_VERSION(2, 4, 0): athub_v2_1_update_medium_grain_clock_gating(adev, state == AMD_CG_STATE_GATE); athub_v2_1_update_medium_grain_light_sleep(adev, state == AMD_CG_STATE_GATE); break; -- cgit From 2f60dd50769efcd6eedd0dc6b3f419cdd1f1f1fa Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Wed, 19 Jan 2022 20:20:53 -0500 Subject: drm/amd: Expose the FRU SMU I2C bus Expose both SMU I2C buses. Some boards use the same bus for both the RAS and FRU EEPROMs and others use different buses. This enables the additional I2C bus and sets the right buses to use for RAS and FRU EEPROM access. Cc: Roy Sun Co-developed-by: Alex Deucher Signed-off-by: Luben Tuikov Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 6 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c | 14 ++-- drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c | 80 ++++++++++++++-------- drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h | 6 +- drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 14 +++- .../drm/amd/pm/powerplay/smumgr/vega20_smumgr.c | 4 +- drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 4 +- drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h | 4 +- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 69 ++++++++++++++----- drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 68 +++++++++++++----- .../drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 71 ++++++++++++++----- drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 44 +++++++++--- drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 4 +- 13 files changed, 273 insertions(+), 115 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 60e7e637eaa3..40180648be38 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -75,7 +75,7 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr, { int ret, size; - ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr, buff, 1); + ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr, buff, 1); if (ret < 1) { DRM_WARN("FRU: Failed to get size field"); return ret; @@ -86,7 +86,7 @@ static int amdgpu_fru_read_eeprom(struct amdgpu_device *adev, uint32_t addrptr, */ size = buff[0] - I2C_PRODUCT_INFO_OFFSET; - ret = amdgpu_eeprom_read(&adev->pm.smu_i2c, addrptr + 1, buff, size); + ret = amdgpu_eeprom_read(adev->pm.fru_eeprom_i2c_bus, addrptr + 1, buff, size); if (ret < 1) { 
DRM_WARN("FRU: Failed to get data field"); return ret; @@ -109,7 +109,7 @@ int amdgpu_fru_get_product_info(struct amdgpu_device *adev) offset = 0; /* If algo exists, it means that the i2c_adapter's initialized */ - if (!adev->pm.smu_i2c.algo) { + if (!adev->pm.fru_eeprom_i2c_bus || !adev->pm.fru_eeprom_i2c_bus->algo) { DRM_WARN("Cannot access FRU, EEPROM accessor not initialized"); return -ENODEV; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c index 05117eda105b..c09d047272b2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ras_eeprom.c @@ -194,7 +194,7 @@ static int __write_table_header(struct amdgpu_ras_eeprom_control *control) /* i2c may be unstable in gpu reset */ down_read(&adev->reset_sem); - res = amdgpu_eeprom_write(&adev->pm.smu_i2c, + res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + control->ras_header_offset, buf, RAS_TABLE_HEADER_SIZE); @@ -389,7 +389,7 @@ static int __amdgpu_ras_eeprom_write(struct amdgpu_ras_eeprom_control *control, /* i2c may be unstable in gpu reset */ down_read(&adev->reset_sem); buf_size = num * RAS_TABLE_RECORD_SIZE; - res = amdgpu_eeprom_write(&adev->pm.smu_i2c, + res = amdgpu_eeprom_write(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri), buf, buf_size); @@ -548,7 +548,7 @@ amdgpu_ras_eeprom_update_header(struct amdgpu_ras_eeprom_control *control) } down_read(&adev->reset_sem); - res = amdgpu_eeprom_read(&adev->pm.smu_i2c, + res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + control->ras_record_offset, buf, buf_size); @@ -644,7 +644,7 @@ static int __amdgpu_ras_eeprom_read(struct amdgpu_ras_eeprom_control *control, /* i2c may be unstable in gpu reset */ down_read(&adev->reset_sem); buf_size = num * RAS_TABLE_RECORD_SIZE; - res = amdgpu_eeprom_read(&adev->pm.smu_i2c, + res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + RAS_INDEX_TO_OFFSET(control, fri), buf, buf_size); @@ -1009,7 +1009,7 @@ static int __verify_ras_table_checksum(struct amdgpu_ras_eeprom_control *control return -ENOMEM; } - res = amdgpu_eeprom_read(&adev->pm.smu_i2c, + res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + control->ras_header_offset, buf, buf_size); @@ -1045,7 +1045,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, return 0; /* Verify i2c adapter is initialized */ - if (!adev->pm.smu_i2c.algo) + if (!adev->pm.ras_eeprom_i2c_bus || !adev->pm.ras_eeprom_i2c_bus->algo) return -ENOENT; if (!__get_eeprom_i2c_addr(adev, control)) @@ -1057,7 +1057,7 @@ int amdgpu_ras_eeprom_init(struct amdgpu_ras_eeprom_control *control, mutex_init(&control->ras_tbl_mutex); /* Read the table header from EEPROM address */ - res = amdgpu_eeprom_read(&adev->pm.smu_i2c, + res = amdgpu_eeprom_read(adev->pm.ras_eeprom_i2c_bus, control->i2c_address + control->ras_header_offset, buf, RAS_TABLE_HEADER_SIZE); if (res < RAS_TABLE_HEADER_SIZE) { diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c index 73ffa8fde3df..87acb089cfcc 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.c @@ -26,6 +26,7 @@ #include "smu_v11_0_i2c.h" #include "amdgpu.h" +#include "amdgpu_dpm.h" #include "soc15_common.h" #include #include @@ -43,11 +44,10 @@ #define I2C_X_RESTART BIT(31) -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, 
pm.smu_i2c)) - static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; uint32_t reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_PWRMGT); reg = REG_SET_FIELD(reg, SMUIO_PWRMGT, i2c_clk_gate_en, en ? 1 : 0); @@ -75,7 +75,8 @@ static void smu_v11_0_i2c_set_clock_gating(struct i2c_adapter *control, bool en) static int smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; WREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_ENABLE, enable ? 1 : 0); @@ -100,7 +101,8 @@ static int smu_v11_0_i2c_enable(struct i2c_adapter *control, bool enable) static void smu_v11_0_i2c_clear_status(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; /* do */ { RREG32_SOC15(SMUIO, 0, mmCKSVII2C_IC_CLR_INTR); @@ -110,7 +112,8 @@ static void smu_v11_0_i2c_clear_status(struct i2c_adapter *control) static void smu_v11_0_i2c_configure(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; uint32_t reg = 0; reg = REG_SET_FIELD(reg, CKSVII2C_IC_CON, IC_SLAVE_DISABLE, 1); @@ -131,7 +134,8 @@ static void smu_v11_0_i2c_configure(struct i2c_adapter *control) static void smu_v11_0_i2c_set_clock(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; /* * Standard mode speed, These values are taken from SMUIO MAS, @@ -154,7 +158,8 @@ static void smu_v11_0_i2c_set_clock(struct i2c_adapter *control) static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, u16 address) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; /* The IC_TAR::IC_TAR field is 10-bits wide. 
* It takes a 7-bit or 10-bit addresses as an address, @@ -165,7 +170,8 @@ static void smu_v11_0_i2c_set_address(struct i2c_adapter *control, u16 address) static uint32_t smu_v11_0_i2c_poll_tx_status(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; uint32_t ret = I2C_OK; uint32_t reg, reg_c_tx_abrt_source; @@ -216,7 +222,8 @@ static uint32_t smu_v11_0_i2c_poll_tx_status(struct i2c_adapter *control) static uint32_t smu_v11_0_i2c_poll_rx_status(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; uint32_t ret = I2C_OK; uint32_t reg_ic_status, reg_c_tx_abrt_source; @@ -262,7 +269,8 @@ static uint32_t smu_v11_0_i2c_transmit(struct i2c_adapter *control, u16 address, u8 *data, u32 numbytes, u32 i2c_flag) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; u32 bytes_sent, reg, ret = I2C_OK; unsigned long timeout_counter; @@ -360,7 +368,8 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control, u16 address, u8 *data, u32 numbytes, u32 i2c_flag) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; uint32_t bytes_received, ret = I2C_OK; bytes_received = 0; @@ -431,7 +440,8 @@ static uint32_t smu_v11_0_i2c_receive(struct i2c_adapter *control, static void smu_v11_0_i2c_abort(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; uint32_t reg = 0; /* Enable I2C engine; */ @@ -447,7 +457,8 @@ static void smu_v11_0_i2c_abort(struct i2c_adapter *control) static bool smu_v11_0_i2c_activity_done(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; const uint32_t IDLE_TIMEOUT = 1024; uint32_t timeout_count = 0; @@ -508,7 +519,8 @@ static void smu_v11_0_i2c_init(struct i2c_adapter *control) static void smu_v11_0_i2c_fini(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; u32 status, enable, en_stat; int res; @@ -543,7 +555,8 @@ static void smu_v11_0_i2c_fini(struct i2c_adapter *control) static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; /* Send PPSMC_MSG_RequestI2CBus */ if (!amdgpu_dpm_smu_i2c_bus_access(adev, true)) @@ -554,7 +567,8 @@ static bool smu_v11_0_i2c_bus_lock(struct i2c_adapter *control) static bool smu_v11_0_i2c_bus_unlock(struct i2c_adapter *control) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(control); + struct amdgpu_device *adev = smu_i2c->adev; /* Send PPSMC_MSG_ReleaseI2CBus */ if (!amdgpu_dpm_smu_i2c_bus_access(adev, false)) @@ -587,16 +601,17 @@ static 
uint32_t smu_v11_0_i2c_write_data(struct i2c_adapter *control, if (ret != I2C_OK) DRM_ERROR("WriteI2CData() - I2C error occurred :%x", ret); - + return ret; } static void lock_bus(struct i2c_adapter *i2c, unsigned int flags) { - struct amdgpu_device *adev = to_amdgpu_device(i2c); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c); + struct amdgpu_device *adev = smu_i2c->adev; - mutex_lock(&adev->pm.smu_i2c_mutex); + mutex_lock(&smu_i2c->mutex); if (!smu_v11_0_i2c_bus_lock(i2c)) DRM_ERROR("Failed to lock the bus from SMU"); else @@ -611,13 +626,14 @@ static int trylock_bus(struct i2c_adapter *i2c, unsigned int flags) static void unlock_bus(struct i2c_adapter *i2c, unsigned int flags) { - struct amdgpu_device *adev = to_amdgpu_device(i2c); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c); + struct amdgpu_device *adev = smu_i2c->adev; if (!smu_v11_0_i2c_bus_unlock(i2c)) DRM_ERROR("Failed to unlock the bus from SMU"); else adev->pm.bus_locked = false; - mutex_unlock(&adev->pm.smu_i2c_mutex); + mutex_unlock(&smu_i2c->mutex); } static const struct i2c_lock_operations smu_v11_0_i2c_i2c_lock_ops = { @@ -706,19 +722,26 @@ static const struct i2c_adapter_quirks smu_v11_0_i2c_control_quirks = { .flags = I2C_AQ_NO_ZERO_LEN, }; -int smu_v11_0_i2c_control_init(struct i2c_adapter *control) +int smu_v11_0_i2c_control_init(struct amdgpu_device *adev) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0]; + struct i2c_adapter *control = &smu_i2c->adapter; int res; - mutex_init(&adev->pm.smu_i2c_mutex); + smu_i2c->adev = adev; + smu_i2c->port = 0; + mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; control->class = I2C_CLASS_HWMON; control->dev.parent = &adev->pdev->dev; control->algo = &smu_v11_0_i2c_algo; - snprintf(control->name, sizeof(control->name), "AMDGPU SMU"); + snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0"); control->lock_ops = &smu_v11_0_i2c_i2c_lock_ops; control->quirks = &smu_v11_0_i2c_control_quirks; + i2c_set_adapdata(control, smu_i2c); + + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + adev->pm.fru_eeprom_i2c_bus = NULL; res = i2c_add_adapter(control); if (res) @@ -727,9 +750,12 @@ int smu_v11_0_i2c_control_init(struct i2c_adapter *control) return res; } -void smu_v11_0_i2c_control_fini(struct i2c_adapter *control) +void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev) { + struct i2c_adapter *control = adev->pm.ras_eeprom_i2c_bus; + i2c_del_adapter(control); + adev->pm.ras_eeprom_i2c_bus = NULL; } /* diff --git a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h index 44467c05f642..96ad14288a0c 100644 --- a/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h +++ b/drivers/gpu/drm/amd/amdgpu/smu_v11_0_i2c.h @@ -26,9 +26,9 @@ #include -struct i2c_adapter; +struct amdgpu_device; -int smu_v11_0_i2c_control_init(struct i2c_adapter *control); -void smu_v11_0_i2c_control_fini(struct i2c_adapter *control); +int smu_v11_0_i2c_control_init(struct amdgpu_device *adev); +void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev); #endif diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index ba857ca75392..2766b88ecf96 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -284,6 +284,15 @@ enum ip_power_state { /* Used to mask smu debug modes */ #define SMU_DEBUG_HALT_ON_ERROR 0x1 +#define MAX_SMU_I2C_BUSES 2 + +struct amdgpu_smu_i2c_bus { + struct 
i2c_adapter adapter; + struct amdgpu_device *adev; + int port; + struct mutex mutex; +}; + struct amdgpu_pm { struct mutex mutex; u32 current_sclk; @@ -316,8 +325,9 @@ struct amdgpu_pm { uint32_t pp_feature; /* Used for I2C access to various EEPROMs on relevant ASICs */ - struct i2c_adapter smu_i2c; - struct mutex smu_i2c_mutex; + struct amdgpu_smu_i2c_bus smu_i2c[MAX_SMU_I2C_BUSES]; + struct i2c_adapter *ras_eeprom_i2c_bus; + struct i2c_adapter *fru_eeprom_i2c_bus; struct list_head pm_attr_list; atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM]; diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c index 9ad26c285ecd..a5c95b180672 100644 --- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c +++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c @@ -520,7 +520,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr) priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01; priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t); - ret = smu_v11_0_i2c_control_init(&adev->pm.smu_i2c); + ret = smu_v11_0_i2c_control_init(adev); if (ret) goto err4; @@ -558,7 +558,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr) (struct vega20_smumgr *)(hwmgr->smu_backend); struct amdgpu_device *adev = hwmgr->adev; - smu_v11_0_i2c_control_fini(&adev->pm.smu_i2c); + smu_v11_0_i2c_control_fini(adev); if (priv) { amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle, diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c index 264eb09ccfd5..9861d70a5dad 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c @@ -886,7 +886,7 @@ static int smu_smc_table_sw_init(struct smu_context *smu) if (ret) return ret; - ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c); + ret = smu_i2c_init(smu); if (ret) return ret; @@ -897,7 +897,7 @@ static int smu_smc_table_sw_fini(struct smu_context *smu) { int ret; - smu_i2c_fini(smu, &smu->adev->pm.smu_i2c); + smu_i2c_fini(smu); smu_free_dummy_read_table(smu); diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h index 18f24db7d202..4c585241ef66 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h +++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h @@ -816,12 +816,12 @@ struct pptable_funcs { * other devices. The i2c's EEPROM also stores bad page tables on boards * with ECC. */ - int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control); + int (*i2c_init)(struct smu_context *smu); /** * @i2c_fini: Tear down i2c. */ - void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control); + void (*i2c_fini)(struct smu_context *smu); /** * @get_unique_id: Get the GPU's unique id. Used for asset tracking. diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 2c78d04d5611..0cf385e8b4e9 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -25,6 +25,7 @@ #include #include "amdgpu.h" +#include "amdgpu_dpm.h" #include "amdgpu_smu.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" @@ -45,6 +46,7 @@ #include #include "amdgpu_ras.h" #include "smu_cmn.h" +#include "amdgpu_dpm.h" /* * DO NOT use these for err/warn/info/debug messages. 
@@ -56,8 +58,6 @@ #undef pr_info #undef pr_debug -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) - #define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \ [smu_feature] = {1, (arcturus_feature)} @@ -2062,7 +2062,8 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable) static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { - struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); + struct amdgpu_device *adev = smu_i2c->adev; struct smu_context *smu = adev->powerplay.pp_handle; struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = &smu_table->driver_table; @@ -2074,7 +2075,7 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap, if (!req) return -ENOMEM; - req->I2CcontrollerPort = 0; + req->I2CcontrollerPort = smu_i2c->port; req->I2CSpeed = I2C_SPEED_FAST_400K; req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ dir = msg[0].flags & I2C_M_RD; @@ -2153,28 +2154,60 @@ static const struct i2c_adapter_quirks arcturus_i2c_control_quirks = { .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, }; -static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) +static int arcturus_i2c_control_init(struct smu_context *smu) { - struct amdgpu_device *adev = to_amdgpu_device(control); - int res; + struct amdgpu_device *adev = smu->adev; + int res, i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + smu_i2c->adev = adev; + smu_i2c->port = i; + mutex_init(&smu_i2c->mutex); + control->owner = THIS_MODULE; + control->class = I2C_CLASS_HWMON; + control->dev.parent = &adev->pdev->dev; + control->algo = &arcturus_i2c_algo; + control->quirks = &arcturus_i2c_control_quirks; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); + i2c_set_adapdata(control, smu_i2c); + + res = i2c_add_adapter(control); + if (res) { + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + goto Out_err; + } + } - control->owner = THIS_MODULE; - control->class = I2C_CLASS_HWMON; - control->dev.parent = &adev->pdev->dev; - control->algo = &arcturus_i2c_algo; - control->quirks = &arcturus_i2c_control_quirks; - snprintf(control->name, sizeof(control->name), "AMDGPU SMU"); + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; - res = i2c_add_adapter(control); - if (res) - DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + return 0; +Out_err: + for ( ; i >= 0; i--) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + i2c_del_adapter(control); + } return res; } -static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) +static void arcturus_i2c_control_fini(struct smu_context *smu) { - i2c_del_adapter(control); + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + adev->pm.ras_eeprom_i2c_bus = NULL; + adev->pm.fru_eeprom_i2c_bus = NULL; } static void arcturus_get_unique_id(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 
84834c24a7e9..16a5d4d35981 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -27,6 +27,7 @@ #include #include #include "amdgpu.h" +#include "amdgpu_dpm.h" #include "amdgpu_smu.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" @@ -57,8 +58,6 @@ #undef pr_info #undef pr_debug -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) - #define FEATURE_MASK(feature) (1ULL << feature) #define SMC_DPM_FEATURE ( \ FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \ @@ -2746,7 +2745,8 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu, static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { - struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); + struct amdgpu_device *adev = smu_i2c->adev; struct smu_context *smu = adev->powerplay.pp_handle; struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = &smu_table->driver_table; @@ -2758,7 +2758,7 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap, if (!req) return -ENOMEM; - req->I2CcontrollerPort = 0; + req->I2CcontrollerPort = smu_i2c->port; req->I2CSpeed = I2C_SPEED_FAST_400K; req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ dir = msg[0].flags & I2C_M_RD; @@ -2836,28 +2836,60 @@ static const struct i2c_adapter_quirks navi10_i2c_control_quirks = { .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, }; -static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) +static int navi10_i2c_control_init(struct smu_context *smu) { - struct amdgpu_device *adev = to_amdgpu_device(control); - int res; + struct amdgpu_device *adev = smu->adev; + int res, i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + smu_i2c->adev = adev; + smu_i2c->port = i; + mutex_init(&smu_i2c->mutex); + control->owner = THIS_MODULE; + control->class = I2C_CLASS_HWMON; + control->dev.parent = &adev->pdev->dev; + control->algo = &navi10_i2c_algo; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); + control->quirks = &navi10_i2c_control_quirks; + i2c_set_adapdata(control, smu_i2c); + + res = i2c_add_adapter(control); + if (res) { + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + goto Out_err; + } + } - control->owner = THIS_MODULE; - control->class = I2C_CLASS_HWMON; - control->dev.parent = &adev->pdev->dev; - control->algo = &navi10_i2c_algo; - snprintf(control->name, sizeof(control->name), "AMDGPU SMU"); - control->quirks = &navi10_i2c_control_quirks; + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; - res = i2c_add_adapter(control); - if (res) - DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + return 0; +Out_err: + for ( ; i >= 0; i--) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + i2c_del_adapter(control); + } return res; } -static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) +static void navi10_i2c_control_fini(struct smu_context *smu) { - i2c_del_adapter(control); + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = 
&smu_i2c->adapter; + + i2c_del_adapter(control); + } + adev->pm.ras_eeprom_i2c_bus = NULL; + adev->pm.fru_eeprom_i2c_bus = NULL; } static ssize_t navi10_get_gpu_metrics(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index dcd35c68e59b..1c71ea97d06f 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -27,6 +27,7 @@ #include #include #include "amdgpu.h" +#include "amdgpu_dpm.h" #include "amdgpu_smu.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" @@ -59,8 +60,6 @@ #undef pr_info #undef pr_debug -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) - #define FEATURE_MASK(feature) (1ULL << feature) #define SMC_DPM_FEATURE ( \ FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \ @@ -3441,7 +3440,8 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu) static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { - struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); + struct amdgpu_device *adev = smu_i2c->adev; struct smu_context *smu = adev->powerplay.pp_handle; struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = &smu_table->driver_table; @@ -3453,7 +3453,7 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, if (!req) return -ENOMEM; - req->I2CcontrollerPort = 1; + req->I2CcontrollerPort = smu_i2c->port; req->I2CSpeed = I2C_SPEED_FAST_400K; req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ dir = msg[0].flags & I2C_M_RD; @@ -3531,28 +3531,61 @@ static const struct i2c_adapter_quirks sienna_cichlid_i2c_control_quirks = { .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, }; -static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) +static int sienna_cichlid_i2c_control_init(struct smu_context *smu) { - struct amdgpu_device *adev = to_amdgpu_device(control); - int res; - - control->owner = THIS_MODULE; - control->class = I2C_CLASS_HWMON; - control->dev.parent = &adev->pdev->dev; - control->algo = &sienna_cichlid_i2c_algo; - snprintf(control->name, sizeof(control->name), "AMDGPU SMU"); - control->quirks = &sienna_cichlid_i2c_control_quirks; + struct amdgpu_device *adev = smu->adev; + int res, i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + smu_i2c->adev = adev; + smu_i2c->port = i; + mutex_init(&smu_i2c->mutex); + control->owner = THIS_MODULE; + control->class = I2C_CLASS_HWMON; + control->dev.parent = &adev->pdev->dev; + control->algo = &sienna_cichlid_i2c_algo; + snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i); + control->quirks = &sienna_cichlid_i2c_control_quirks; + i2c_set_adapdata(control, smu_i2c); + + res = i2c_add_adapter(control); + if (res) { + DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + goto Out_err; + } + } + /* assign the buses used for the FRU EEPROM and RAS EEPROM */ + /* XXX ideally this would be something in a vbios data table */ + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter; + adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; - res = i2c_add_adapter(control); - if (res) - DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + return 0; +Out_err: + for ( ; i >= 0; i--) { 
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + i2c_del_adapter(control); + } return res; } -static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) +static void sienna_cichlid_i2c_control_fini(struct smu_context *smu) { - i2c_del_adapter(control); + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + adev->pm.ras_eeprom_i2c_bus = NULL; + adev->pm.fru_eeprom_i2c_bus = NULL; } static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 8c02adbf446a..450049bcedcb 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -25,6 +25,7 @@ #include #include "amdgpu.h" +#include "amdgpu_dpm.h" #include "amdgpu_smu.h" #include "atomfirmware.h" #include "amdgpu_atomfirmware.h" @@ -56,8 +57,6 @@ #undef pr_info #undef pr_debug -#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c)) - #define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature) \ [smu_feature] = {1, (aldebaran_feature)} @@ -1465,7 +1464,8 @@ static bool aldebaran_is_dpm_running(struct smu_context *smu) static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msg, int num_msgs) { - struct amdgpu_device *adev = to_amdgpu_device(i2c_adap); + struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap); + struct amdgpu_device *adev = smu_i2c->adev; struct smu_context *smu = adev->powerplay.pp_handle; struct smu_table_context *smu_table = &smu->smu_table; struct smu_table *table = &smu_table->driver_table; @@ -1477,7 +1477,7 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, if (!req) return -ENOMEM; - req->I2CcontrollerPort = 0; + req->I2CcontrollerPort = smu_i2c->port; req->I2CSpeed = I2C_SPEED_FAST_400K; req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */ dir = msg[0].flags & I2C_M_RD; @@ -1555,28 +1555,52 @@ static const struct i2c_adapter_quirks aldebaran_i2c_control_quirks = { .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2, }; -static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control) +static int aldebaran_i2c_control_init(struct smu_context *smu) { - struct amdgpu_device *adev = to_amdgpu_device(control); + struct amdgpu_device *adev = smu->adev; + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0]; + struct i2c_adapter *control = &smu_i2c->adapter; int res; + smu_i2c->adev = adev; + smu_i2c->port = 0; + mutex_init(&smu_i2c->mutex); control->owner = THIS_MODULE; control->class = I2C_CLASS_SPD; control->dev.parent = &adev->pdev->dev; control->algo = &aldebaran_i2c_algo; - snprintf(control->name, sizeof(control->name), "AMDGPU SMU"); + snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0"); control->quirks = &aldebaran_i2c_control_quirks; + i2c_set_adapdata(control, smu_i2c); res = i2c_add_adapter(control); - if (res) + if (res) { DRM_ERROR("Failed to register hw i2c, err: %d\n", res); + goto Out_err; + } + + adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter; + + return 0; +Out_err: + i2c_del_adapter(control); return res; } -static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control) 
+static void aldebaran_i2c_control_fini(struct smu_context *smu) { - i2c_del_adapter(control); + struct amdgpu_device *adev = smu->adev; + int i; + + for (i = 0; i < MAX_SMU_I2C_BUSES; i++) { + struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i]; + struct i2c_adapter *control = &smu_i2c->adapter; + + i2c_del_adapter(control); + } + adev->pm.ras_eeprom_i2c_bus = NULL; + adev->pm.fru_eeprom_i2c_bus = NULL; } static void aldebaran_get_unique_id(struct smu_context *smu) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h index 59f9cfff3d61..a290b5789195 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h +++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h @@ -78,8 +78,8 @@ #define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu) #define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap) #define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src) -#define smu_i2c_init(smu, control) smu_ppt_funcs(i2c_init, 0, smu, control) -#define smu_i2c_fini(smu, control) smu_ppt_funcs(i2c_fini, 0, smu, control) +#define smu_i2c_init(smu) smu_ppt_funcs(i2c_init, 0, smu) +#define smu_i2c_fini(smu) smu_ppt_funcs(i2c_fini, 0, smu) #define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu) #define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu) #define smu_get_asic_power_limits(smu, current, default, max) smu_ppt_funcs(get_power_limit, 0, smu, current, default, max) -- cgit From 3ed893396b0132fa5a4d3fe3f9ba358678c6dba3 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Thu, 27 Jan 2022 13:49:30 -0500 Subject: drm/amd: Enable FRU EEPROM for Sienna Cichlid Enable the FRU EEPROM I2C bus for Sienna Cichlid server boards, for which it is enabled by checking the VBIOS version. Cc: Roy Sun Cc: Alex Deucher Signed-off-by: Luben Tuikov Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c index 40180648be38..ce5d5ee336a9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fru_eeprom.c @@ -46,7 +46,7 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev) if (amdgpu_sriov_vf(adev)) return false; - /* VBIOS is of the format ###-DXXXYY-##. For SKU identification, + /* VBIOS is of the format ###-DXXXYYYY-##. For SKU identification, * we can use just the "DXXX" portion. If there were more models, we * could convert the 3 characters to a hex integer and use a switch * for ease/speed/readability. For now, 2 string comparisons are @@ -65,6 +65,12 @@ static bool is_fru_eeprom_supported(struct amdgpu_device *adev) case CHIP_ALDEBARAN: /* All Aldebaran SKUs have the FRU */ return true; + case CHIP_SIENNA_CICHLID: + if (strnstr(atom_ctx->vbios_version, "D603", + sizeof(atom_ctx->vbios_version))) + return true; + else + return false; default: return false; } -- cgit From e281d5940ae7f2ceff99d5e001a69b5f0884d2f0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 27 Jan 2022 12:17:23 -0500 Subject: drm/amdgpu/swsmu/i2c: return an error if the SMU is not running Return an error if someone tries to use the i2c bus when the SMU is not running. 
Otherwise we can end up sending commands to the SMU which will either get ignored or could cause other issues depending on what state the GPU and SMU are in. Cc: Luben.Tuikov@amd.com Reviewed-by: Luben Tuikov Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 3 +++ drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 3 +++ drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 3 +++ drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 3 +++ 4 files changed, 12 insertions(+) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c index 0cf385e8b4e9..ee296441c5bc 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c @@ -2071,6 +2071,9 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap, int i, j, r, c; u16 dir; + if (!adev->pm.dpm_enabled) + return -EBUSY; + req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c index 16a5d4d35981..4a86520d01c3 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c @@ -2754,6 +2754,9 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap, int i, j, r, c; u16 dir; + if (!adev->pm.dpm_enabled) + return -EBUSY; + req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 1c71ea97d06f..22dd372b8d24 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -3449,6 +3449,9 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap, int i, j, r, c; u16 dir; + if (!adev->pm.dpm_enabled) + return -EBUSY; + req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c index 450049bcedcb..ba4c9771ffc5 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c @@ -1473,6 +1473,9 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap, int i, j, r, c; u16 dir; + if (!adev->pm.dpm_enabled) + return -EBUSY; + req = kzalloc(sizeof(*req), GFP_KERNEL); if (!req) return -ENOMEM; -- cgit From 8cda7a4f96e435be2fd074009d69521d973d7d31 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Fri, 7 Jan 2022 17:57:33 -0500 Subject: drm/amdgpu/UAPI: add new CTX OP to get/set stable pstates Add a new CTX ioctl operation to set stable pstates for profiling. When creating traces for tools like RGP or using SPM or doing performance profiling, it's required to enable a special stable profiling power state on the GPU. These profiling states set fixed clocks and disable certain other power features like powergating which may impact the results. Historically, these profiling pstates were enabled via sysfs, but this adds an interface to enable it via the CTX ioctl from the application. Since the power state is global only one application can set it at a time, so if multiple applications try and use it only the first will get it, the ioctl will return -EBUSY for others. The sysfs interface will override whatever has been set by this interface. 
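As a rough illustration of the intended usage (not part of the patch itself): a profiling tool could drive the new op through the existing DRM_IOCTL_AMDGPU_CTX ioctl along the lines sketched below. The open render-node fd and the ctx_id (obtained earlier via AMDGPU_CTX_OP_ALLOC_CTX) are assumed here; only the op/flags/ctx_id fields added by the uAPI hunk later in this patch are exercised, and the helper name is hypothetical.

/*
 * Hypothetical userspace sketch: pin a stable profiling pstate on one
 * context using the uAPI added by this patch. Pass
 * AMDGPU_CTX_STABLE_PSTATE_NONE to release it again; destroying the
 * context also drops it.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>     /* drmIoctl() from libdrm */
#include <amdgpu_drm.h>  /* the uAPI header extended below; install path may vary */

static int amdgpu_set_stable_pstate(int fd, uint32_t ctx_id, uint32_t pstate)
{
	union drm_amdgpu_ctx args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
	args.in.ctx_id = ctx_id;
	args.in.flags = pstate;	/* e.g. AMDGPU_CTX_STABLE_PSTATE_PEAK */

	/* drmIoctl() returns 0 on success, -1 with errno set on failure;
	 * EBUSY means another context already holds the stable pstate. */
	return drmIoctl(fd, DRM_IOCTL_AMDGPU_CTX, &args);
}

The matching AMDGPU_CTX_OP_GET_STABLE_PSTATE query returns the currently selected pstate in args.out.pstate.flags.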
Mesa MR: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/207 v2: don't default r = 0; v3: rebase on Evan's PM cleanup Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 129 ++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 1 + drivers/gpu/drm/amd/pm/amdgpu_pm.c | 5 ++ drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 3 + include/uapi/drm/amdgpu_drm.h | 17 +++- 6 files changed, 153 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c index 468003583b2a..2c929fa40379 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c @@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter); ctx->init_priority = priority; ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET; + ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE; return 0; } @@ -255,6 +256,86 @@ static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity) kfree(entity); } +static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx, + u32 *stable_pstate) +{ + struct amdgpu_device *adev = ctx->adev; + enum amd_dpm_forced_level current_level; + + if (!ctx) + return -EINVAL; + + current_level = amdgpu_dpm_get_performance_level(adev); + + switch (current_level) { + case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD: + *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK: + *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK: + *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK; + break; + case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK: + *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK; + break; + default: + *stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE; + break; + } + return 0; +} + +static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx, + u32 stable_pstate) +{ + struct amdgpu_device *adev = ctx->adev; + enum amd_dpm_forced_level level; + int r; + + if (!ctx) + return -EINVAL; + + mutex_lock(&adev->pm.stable_pstate_ctx_lock); + if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) { + r = -EBUSY; + goto done; + } + + switch (stable_pstate) { + case AMDGPU_CTX_STABLE_PSTATE_NONE: + level = AMD_DPM_FORCED_LEVEL_AUTO; + break; + case AMDGPU_CTX_STABLE_PSTATE_STANDARD: + level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; + break; + case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK: + level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; + break; + case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK: + level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; + break; + case AMDGPU_CTX_STABLE_PSTATE_PEAK: + level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; + break; + default: + r = -EINVAL; + goto done; + } + + r = amdgpu_dpm_force_performance_level(adev, level); + + if (level == AMD_DPM_FORCED_LEVEL_AUTO) + adev->pm.stable_pstate_ctx = NULL; + else + adev->pm.stable_pstate_ctx = ctx; +done: + mutex_unlock(&adev->pm.stable_pstate_ctx_lock); + + return r; +} + static void amdgpu_ctx_fini(struct kref *ref) { struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount); @@ -270,7 +351,7 @@ static void amdgpu_ctx_fini(struct kref *ref) ctx->entities[i][j] = NULL; } } - + amdgpu_ctx_set_stable_pstate(ctx, AMDGPU_CTX_STABLE_PSTATE_NONE); mutex_destroy(&ctx->lock); kfree(ctx); } @@ -467,11 +548,41 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev, return 0; 
} + + +static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev, + struct amdgpu_fpriv *fpriv, uint32_t id, + bool set, u32 *stable_pstate) +{ + struct amdgpu_ctx *ctx; + struct amdgpu_ctx_mgr *mgr; + int r; + + if (!fpriv) + return -EINVAL; + + mgr = &fpriv->ctx_mgr; + mutex_lock(&mgr->lock); + ctx = idr_find(&mgr->ctx_handles, id); + if (!ctx) { + mutex_unlock(&mgr->lock); + return -EINVAL; + } + + if (set) + r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate); + else + r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate); + + mutex_unlock(&mgr->lock); + return r; +} + int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, struct drm_file *filp) { int r; - uint32_t id; + uint32_t id, stable_pstate; int32_t priority; union drm_amdgpu_ctx *args = data; @@ -500,6 +611,20 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data, case AMDGPU_CTX_OP_QUERY_STATE2: r = amdgpu_ctx_query2(adev, fpriv, id, &args->out); break; + case AMDGPU_CTX_OP_GET_STABLE_PSTATE: + if (args->in.flags) + return -EINVAL; + r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate); + args->out.pstate.flags = stable_pstate; + break; + case AMDGPU_CTX_OP_SET_STABLE_PSTATE: + if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK) + return -EINVAL; + stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK; + if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK) + return -EINVAL; + r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate); + break; default: return -EINVAL; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h index a44b8b8ed39c..142f2f87d44c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.h @@ -53,6 +53,7 @@ struct amdgpu_ctx { atomic_t guilty; unsigned long ras_counter_ce; unsigned long ras_counter_ue; + uint32_t stable_pstate; }; struct amdgpu_ctx_mgr { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 6ba57ad88640..660fb4085c97 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -3510,6 +3510,7 @@ int amdgpu_device_init(struct amdgpu_device *adev, init_rwsem(&adev->reset_sem); mutex_init(&adev->psp.mutex); mutex_init(&adev->notifier_lock); + mutex_init(&adev->pm.stable_pstate_ctx_lock); amdgpu_device_init_apu_flags(adev); diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c index 837a31a46596..d68e7132da2c 100644 --- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c +++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c @@ -336,11 +336,16 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev, return ret; } + mutex_lock(&adev->pm.stable_pstate_ctx_lock); if (amdgpu_dpm_force_performance_level(adev, level)) { pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); + mutex_unlock(&adev->pm.stable_pstate_ctx_lock); return -EINVAL; } + /* override whatever a user ctx may have set */ + adev->pm.stable_pstate_ctx = NULL; + mutex_unlock(&adev->pm.stable_pstate_ctx_lock); pm_runtime_mark_last_busy(ddev->dev); pm_runtime_put_autosuspend(ddev->dev); diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h index 2766b88ecf96..5cc05110cdae 100644 --- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h +++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h @@ -338,6 +338,9 @@ struct amdgpu_pm { uint32_t smu_debug_mask; bool pp_force_state_enabled; + + struct mutex stable_pstate_ctx_lock; + struct 
amdgpu_ctx *stable_pstate_ctx; }; u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev); diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h index be4f9111f478..76b580d10a52 100644 --- a/include/uapi/drm/amdgpu_drm.h +++ b/include/uapi/drm/amdgpu_drm.h @@ -206,6 +206,8 @@ union drm_amdgpu_bo_list { #define AMDGPU_CTX_OP_FREE_CTX 2 #define AMDGPU_CTX_OP_QUERY_STATE 3 #define AMDGPU_CTX_OP_QUERY_STATE2 4 +#define AMDGPU_CTX_OP_GET_STABLE_PSTATE 5 +#define AMDGPU_CTX_OP_SET_STABLE_PSTATE 6 /* GPU reset status */ #define AMDGPU_CTX_NO_RESET 0 @@ -238,10 +240,18 @@ union drm_amdgpu_bo_list { #define AMDGPU_CTX_PRIORITY_HIGH 512 #define AMDGPU_CTX_PRIORITY_VERY_HIGH 1023 +/* select a stable profiling pstate for perfmon tools */ +#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK 0xf +#define AMDGPU_CTX_STABLE_PSTATE_NONE 0 +#define AMDGPU_CTX_STABLE_PSTATE_STANDARD 1 +#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK 2 +#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK 3 +#define AMDGPU_CTX_STABLE_PSTATE_PEAK 4 + struct drm_amdgpu_ctx_in { /** AMDGPU_CTX_OP_* */ __u32 op; - /** For future use, no flags defined so far */ + /** Flags */ __u32 flags; __u32 ctx_id; /** AMDGPU_CTX_PRIORITY_* */ @@ -262,6 +272,11 @@ union drm_amdgpu_ctx_out { /** Reset status since the last call of the ioctl. */ __u32 reset_status; } state; + + struct { + __u32 flags; + __u32 _pad; + } pstate; }; union drm_amdgpu_ctx { -- cgit From ded81d5b2b67e6e6fce0a1e8b73e4565a28dbfd8 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Thu, 2 Dec 2021 11:02:23 -0500 Subject: drm/amdgpu: bump driver version for new CTX OP to set/get stable pstates So mesa and tools know when this is available. Mesa MR: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/207 Reviewed-by: Evan Quan Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 9c799645f2e7..f3cada3f743b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -98,9 +98,10 @@ * - 3.42.0 - Add 16bpc fixed point display support * - 3.43.0 - Add device hot plug/unplug support * - 3.44.0 - DCN3 supports DCC independent block settings: !64B && 128B, 64B && 128B + * - 3.45.0 - Add context ioctl stable pstate interface */ #define KMS_DRIVER_MAJOR 3 -#define KMS_DRIVER_MINOR 44 +#define KMS_DRIVER_MINOR 45 #define KMS_DRIVER_PATCHLEVEL 0 int amdgpu_vram_limit; -- cgit From 1ec5a44331af283b1cd3b0f0981cf65f0903ec8f Mon Sep 17 00:00:00 2001 From: tangmeng Date: Thu, 27 Jan 2022 15:12:39 +0800 Subject: drm/amd/amdgpu: fix spelling mistake "disbale" -> "disable" There is a spelling mistake. Fix it. 
Signed-off-by: tangmeng Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c index aef9d059ae52..a642c04cf17d 100644 --- a/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c +++ b/drivers/gpu/drm/amd/amdgpu/mxgpu_vi.c @@ -544,7 +544,7 @@ static int xgpu_vi_mailbox_rcv_irq(struct amdgpu_device *adev, { int r; - /* trigger gpu-reset by hypervisor only if TDR disbaled */ + /* trigger gpu-reset by hypervisor only if TDR disabled */ if (!amdgpu_gpu_recovery) { /* see what event we get */ r = xgpu_vi_mailbox_rcv_msg(adev, IDH_FLR_NOTIFICATION); -- cgit From c57f5ba2c8febe944ddebae53730667d5af179e5 Mon Sep 17 00:00:00 2001 From: huangqu Date: Wed, 26 Jan 2022 17:16:02 +0800 Subject: drm/amdgpu: Wrong order for config and counter_id parameters Wrong order for config and counter_id parameters was passed, when calling df_v3_6_pmc_set_deferred and df_v3_6_pmc_is_deferred functions. Signed-off-by: huangqu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/df_v3_6.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c index 43c5e3ec9a39..f4dfca013ec5 100644 --- a/drivers/gpu/drm/amd/amdgpu/df_v3_6.c +++ b/drivers/gpu/drm/amd/amdgpu/df_v3_6.c @@ -458,7 +458,7 @@ static int df_v3_6_pmc_add_cntr(struct amdgpu_device *adev, #define DEFERRED_ARM_MASK (1 << 31) static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev, - int counter_idx, uint64_t config, + uint64_t config, int counter_idx, bool is_deferred) { @@ -476,8 +476,8 @@ static int df_v3_6_pmc_set_deferred(struct amdgpu_device *adev, } static bool df_v3_6_pmc_is_deferred(struct amdgpu_device *adev, - int counter_idx, - uint64_t config) + uint64_t config, + int counter_idx) { return (df_v3_6_pmc_has_counter(adev, config, counter_idx) && (adev->df_perfmon_config_assign_mask[counter_idx] -- cgit From 43f2517955875be5d96b641fba33d73097fe3cd9 Mon Sep 17 00:00:00 2001 From: Anitha Chrisanthus Date: Thu, 27 Jan 2022 10:45:46 -0800 Subject: drm/kmb: Fix for build errors with Warray-bounds This fixes the following build error drivers/gpu/drm/kmb/kmb_plane.c: In function 'kmb_plane_atomic_disable': drivers/gpu/drm/kmb/kmb_plane.c:165:34: error: array subscript 3 is above array bounds of 'struct layer_status[2]' [-Werror=array-bounds] 165 | kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE; | ~~~~~~~~~~~~~~~~~^~~~~~~~~~ In file included from drivers/gpu/drm/kmb/kmb_plane.c:17: drivers/gpu/drm/kmb/kmb_drv.h:61:41: note: while referencing 'plane_status' 61 | struct layer_status plane_status[KMB_MAX_PLANES]; | ^~~~~~~~~~~~ drivers/gpu/drm/kmb/kmb_plane.c:162:34: error: array subscript 2 is above array bounds of 'struct layer_status[2]' [-Werror=array-bounds] 162 | kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE; | ~~~~~~~~~~~~~~~~~^~~~~~~~~~ In file included from drivers/gpu/drm/kmb/kmb_plane.c:17: drivers/gpu/drm/kmb/kmb_drv.h:61:41: note: while referencing 'plane_status' 61 | struct layer_status plane_status[KMB_MAX_PLANES]; | ^~~~~~~~~~~~ Fixes: 7f7b96a8a0a1 ("drm/kmb: Add support for KeemBay Display") Signed-off-by: Anitha Chrisanthus Reviewed-by: Kees Cook Link: https://patchwork.freedesktop.org/patch/msgid/20220127194227.2213608-1-anitha.chrisanthus@intel.com --- drivers/gpu/drm/kmb/kmb_plane.c | 6 ------ 1 file changed, 6 deletions(-) diff --git a/drivers/gpu/drm/kmb/kmb_plane.c 
b/drivers/gpu/drm/kmb/kmb_plane.c index 00404ba4126d..2735b8eb3537 100644 --- a/drivers/gpu/drm/kmb/kmb_plane.c +++ b/drivers/gpu/drm/kmb/kmb_plane.c @@ -158,12 +158,6 @@ static void kmb_plane_atomic_disable(struct drm_plane *plane, case LAYER_1: kmb->plane_status[plane_id].ctrl = LCD_CTRL_VL2_ENABLE; break; - case LAYER_2: - kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL1_ENABLE; - break; - case LAYER_3: - kmb->plane_status[plane_id].ctrl = LCD_CTRL_GL2_ENABLE; - break; } kmb->plane_status[plane_id].disable = true; -- cgit From b9199181a9ef8252e47e207be8c23e1f50662620 Mon Sep 17 00:00:00 2001 From: Muhammad Usama Anjum Date: Thu, 27 Jan 2022 22:44:46 +0500 Subject: selftests: futex: Use variable MAKE instead of make MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Recursive make commands should always use the variable MAKE, not the explicit command name ‘make’. This has benefits and removes the following warning when multiple jobs are used for the build: make[2]: warning: jobserver unavailable: using -j1. Add '+' to parent make rule. Fixes: a8ba798bc8ec ("selftests: enable O and KBUILD_OUTPUT") Signed-off-by: Muhammad Usama Anjum Reviewed-by: André Almeida Signed-off-by: Shuah Khan --- tools/testing/selftests/futex/Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tools/testing/selftests/futex/Makefile b/tools/testing/selftests/futex/Makefile index 12631f0076a1..11e157d7533b 100644 --- a/tools/testing/selftests/futex/Makefile +++ b/tools/testing/selftests/futex/Makefile @@ -11,7 +11,7 @@ all: @for DIR in $(SUBDIRS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ mkdir $$BUILD_TARGET -p; \ - make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ if [ -e $$DIR/$(TEST_PROGS) ]; then \ rsync -a $$DIR/$(TEST_PROGS) $$BUILD_TARGET/; \ fi \ @@ -32,6 +32,6 @@ override define CLEAN @for DIR in $(SUBDIRS); do \ BUILD_TARGET=$(OUTPUT)/$$DIR; \ mkdir $$BUILD_TARGET -p; \ - make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ + $(MAKE) OUTPUT=$$BUILD_TARGET -C $$DIR $@;\ done endef -- cgit From e051cdf655fa016692008a446a060eff06222bb5 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 26 Jan 2022 10:27:21 +0000 Subject: selftests: openat2: Print also errno in failure messages In E_func() macro, on error, print also errno in order to aid debugging. Cc: Aleksa Sarai Signed-off-by: Cristian Marussi Signed-off-by: Shuah Khan --- tools/testing/selftests/openat2/helpers.h | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/tools/testing/selftests/openat2/helpers.h b/tools/testing/selftests/openat2/helpers.h index a6ea27344db2..ad5d0ba5b6ce 100644 --- a/tools/testing/selftests/openat2/helpers.h +++ b/tools/testing/selftests/openat2/helpers.h @@ -62,11 +62,12 @@ bool needs_openat2(const struct open_how *how); (similar to chroot(2)). */ #endif /* RESOLVE_IN_ROOT */ -#define E_func(func, ...) \ - do { \ - if (func(__VA_ARGS__) < 0) \ - ksft_exit_fail_msg("%s:%d %s failed\n", \ - __FILE__, __LINE__, #func);\ +#define E_func(func, ...) \ + do { \ + errno = 0; \ + if (func(__VA_ARGS__) < 0) \ + ksft_exit_fail_msg("%s:%d %s failed - errno:%d\n", \ + __FILE__, __LINE__, #func, errno); \ } while (0) #define E_asprintf(...) 
E_func(asprintf, __VA_ARGS__) -- cgit From ea3396725aa143dd42fe388cb67e44c90d2fb719 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 26 Jan 2022 10:27:22 +0000 Subject: selftests: openat2: Add missing dependency in Makefile Add a dependency on header helpers.h to the main target; while at that add to helpers.h also a missing include for bool types. Cc: Aleksa Sarai Signed-off-by: Cristian Marussi Signed-off-by: Shuah Khan --- tools/testing/selftests/openat2/Makefile | 2 +- tools/testing/selftests/openat2/helpers.h | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/openat2/Makefile b/tools/testing/selftests/openat2/Makefile index 4b93b1417b86..843ba56d8e49 100644 --- a/tools/testing/selftests/openat2/Makefile +++ b/tools/testing/selftests/openat2/Makefile @@ -5,4 +5,4 @@ TEST_GEN_PROGS := openat2_test resolve_test rename_attack_test include ../lib.mk -$(TEST_GEN_PROGS): helpers.c +$(TEST_GEN_PROGS): helpers.c helpers.h diff --git a/tools/testing/selftests/openat2/helpers.h b/tools/testing/selftests/openat2/helpers.h index ad5d0ba5b6ce..7056340b9339 100644 --- a/tools/testing/selftests/openat2/helpers.h +++ b/tools/testing/selftests/openat2/helpers.h @@ -9,6 +9,7 @@ #define _GNU_SOURCE #include +#include #include #include #include "../kselftest.h" -- cgit From ac9e0a250bb155078601a5b999aab05f2a04d1ab Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 26 Jan 2022 10:27:23 +0000 Subject: selftests: openat2: Skip testcases that fail with EOPNOTSUPP Skip testcases that fail since the requested valid flags combination is not supported by the underlying filesystem. Cc: Aleksa Sarai Signed-off-by: Cristian Marussi Signed-off-by: Shuah Khan --- tools/testing/selftests/openat2/openat2_test.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/tools/testing/selftests/openat2/openat2_test.c b/tools/testing/selftests/openat2/openat2_test.c index 1bddbe934204..7fb902099de4 100644 --- a/tools/testing/selftests/openat2/openat2_test.c +++ b/tools/testing/selftests/openat2/openat2_test.c @@ -259,6 +259,16 @@ void test_openat2_flags(void) unlink(path); fd = sys_openat2(AT_FDCWD, path, &test->how); + if (fd < 0 && fd == -EOPNOTSUPP) { + /* + * Skip the testcase if it failed because not supported + * by FS. (e.g. a valid O_TMPFILE combination on NFS) + */ + ksft_test_result_skip("openat2 with %s fails with %d (%s)\n", + test->name, fd, strerror(-fd)); + goto next; + } + if (test->err >= 0) failed = (fd < 0); else @@ -303,7 +313,7 @@ skip: else resultfn("openat2 with %s fails with %d (%s)\n", test->name, test->err, strerror(-test->err)); - +next: free(fdpath); fflush(stdout); } -- cgit From dae1d8ac31896988e7313384c0370176a75e9b45 Mon Sep 17 00:00:00 2001 From: Cristian Marussi Date: Wed, 26 Jan 2022 10:27:19 +0000 Subject: selftests: skip mincore.check_file_mmap when fs lacks needed support MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Report mincore.check_file_mmap as SKIP instead of FAIL if the underlying filesystem lacks support of O_TMPFILE or fallocate since such failures are not really related to mincore functionality. 
Cc: Ricardo Cañuelo Signed-off-by: Cristian Marussi Signed-off-by: Shuah Khan --- tools/testing/selftests/mincore/mincore_selftest.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/tools/testing/selftests/mincore/mincore_selftest.c b/tools/testing/selftests/mincore/mincore_selftest.c index e54106643337..4c88238fc8f0 100644 --- a/tools/testing/selftests/mincore/mincore_selftest.c +++ b/tools/testing/selftests/mincore/mincore_selftest.c @@ -207,15 +207,21 @@ TEST(check_file_mmap) errno = 0; fd = open(".", O_TMPFILE | O_RDWR, 0600); - ASSERT_NE(-1, fd) { - TH_LOG("Can't create temporary file: %s", - strerror(errno)); + if (fd < 0) { + ASSERT_EQ(errno, EOPNOTSUPP) { + TH_LOG("Can't create temporary file: %s", + strerror(errno)); + } + SKIP(goto out_free, "O_TMPFILE not supported by filesystem."); } errno = 0; retval = fallocate(fd, 0, 0, FILE_SIZE); - ASSERT_EQ(0, retval) { - TH_LOG("Error allocating space for the temporary file: %s", - strerror(errno)); + if (retval) { + ASSERT_EQ(errno, EOPNOTSUPP) { + TH_LOG("Error allocating space for the temporary file: %s", + strerror(errno)); + } + SKIP(goto out_close, "fallocate not supported by filesystem."); } /* @@ -271,7 +277,9 @@ TEST(check_file_mmap) } munmap(addr, FILE_SIZE); +out_close: close(fd); +out_free: free(vec); } -- cgit From 5aac9108a180fc06e28d4e7fb00247ce603b72ee Mon Sep 17 00:00:00 2001 From: Shyam Sundar S K Date: Thu, 27 Jan 2022 14:50:03 +0530 Subject: net: amd-xgbe: Fix skb data length underflow There will be BUG_ON() triggered in include/linux/skbuff.h leading to intermittent kernel panic, when the skb length underflow is detected. Fix this by dropping the packet if such length underflows are seen because of inconsistencies in the hardware descriptors. Fixes: 622c36f143fc ("amd-xgbe: Fix jumbo MTU processing on newer hardware") Suggested-by: Tom Lendacky Signed-off-by: Shyam Sundar S K Acked-by: Tom Lendacky Link: https://lore.kernel.org/r/20220127092003.2812745-1-Shyam-sundar.S-k@amd.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index 492ac383f16d..ec3b287e3a71 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -2550,6 +2550,14 @@ read_again: buf2_len = xgbe_rx_buf2_len(rdata, packet, len); len += buf2_len; + if (buf2_len > rdata->rx.buf.dma_len) { + /* Hardware inconsistency within the descriptors + * that has resulted in a length underflow. + */ + error = 1; + goto skip_data; + } + if (!skb) { skb = xgbe_create_skb(pdata, napi, rdata, buf1_len); @@ -2579,8 +2587,10 @@ skip_data: if (!last || context_next) goto read_again; - if (!skb) + if (!skb || error) { + dev_kfree_skb(skb); goto next_packet; + } /* Be sure we don't exceed the configured MTU */ max_len = netdev->mtu + ETH_HLEN; -- cgit From 7674b7b559b683478c3832527c59bceb169e701d Mon Sep 17 00:00:00 2001 From: Raju Rangoju Date: Thu, 27 Jan 2022 11:32:22 +0530 Subject: net: amd-xgbe: ensure to reset the tx_timer_active flag Ensure to reset the tx_timer_active flag in xgbe_stop(), otherwise a port restart may result in tx timeout due to uncleared flag. 
Fixes: c635eaacbf77 ("amd-xgbe: Remove Tx coalescing") Co-developed-by: Sudheesh Mavila Signed-off-by: Sudheesh Mavila Signed-off-by: Raju Rangoju Acked-by: Tom Lendacky Link: https://lore.kernel.org/r/20220127060222.453371-1-Raju.Rangoju@amd.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/amd/xgbe/xgbe-drv.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c index ec3b287e3a71..a3593290886f 100644 --- a/drivers/net/ethernet/amd/xgbe/xgbe-drv.c +++ b/drivers/net/ethernet/amd/xgbe/xgbe-drv.c @@ -721,7 +721,9 @@ static void xgbe_stop_timers(struct xgbe_prv_data *pdata) if (!channel->tx_ring) break; + /* Deactivate the Tx timer */ del_timer_sync(&channel->tx_timer); + channel->tx_timer_active = 0; } } -- cgit From 42c9b28e6862d16db82a56f5667cf4d1f6658cf6 Mon Sep 17 00:00:00 2001 From: Fabio Estevam Date: Mon, 27 Dec 2021 13:14:02 -0300 Subject: ARM: dts: imx23-evk: Remove MX23_PAD_SSP1_DETECT from hog group Currently, SD card fails to mount due to the following pinctrl error: [ 11.170000] imx23-pinctrl 80018000.pinctrl: pin SSP1_DETECT already requested by 80018000.pinctrl; cannot claim for 80010000.spi [ 11.180000] imx23-pinctrl 80018000.pinctrl: pin-65 (80010000.spi) status -22 [ 11.190000] imx23-pinctrl 80018000.pinctrl: could not request pin 65 (SSP1_DETECT) from group mmc0-pins-fixup.0 on device 80018000.pinctrl [ 11.200000] mxs-mmc 80010000.spi: Error applying setting, reverse things back Fix it by removing the MX23_PAD_SSP1_DETECT pin from the hog group as it has already been used by the mmc0-pins-fixup pinctrl group. With this change the rootfs can be mounted and the imx23-evk board can boot successfully. Cc: Fixes: bc3875f1a61e ("ARM: dts: mxs: modify mx23/mx28 dts files to use pinctrl headers") Signed-off-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx23-evk.dts | 1 - 1 file changed, 1 deletion(-) diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts index 8cbaf1c81174..3b609d987d88 100644 --- a/arch/arm/boot/dts/imx23-evk.dts +++ b/arch/arm/boot/dts/imx23-evk.dts @@ -79,7 +79,6 @@ MX23_PAD_LCD_RESET__GPIO_1_18 MX23_PAD_PWM3__GPIO_1_29 MX23_PAD_PWM4__GPIO_1_30 - MX23_PAD_SSP1_DETECT__SSP1_DETECT >; fsl,drive-strength = ; fsl,voltage = ; -- cgit From d29c9930279df7c10348772f812154d3c41562f5 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 27 Jan 2022 11:32:50 +0200 Subject: drm/i915: Extract intel_{get,set}_m_n() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make the M/N setup/readout a bit less repetitive by extracting a few small helpers.
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220127093303.17309-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 109 ++++++++++++--------------- 1 file changed, 47 insertions(+), 62 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 2f2113b930be..c6bf474a8479 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3113,6 +3113,17 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) } } +static void intel_set_m_n(struct drm_i915_private *i915, + const struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg) +{ + intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->gmch_m); + intel_de_write(i915, data_n_reg, m_n->gmch_n); + intel_de_write(i915, link_m_reg, m_n->link_m); + intel_de_write(i915, link_n_reg, m_n->link_n); +} + static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, const struct intel_link_m_n *m_n) { @@ -3120,11 +3131,9 @@ static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - intel_de_write(dev_priv, PCH_TRANS_DATA_M1(pipe), - TU_SIZE(m_n->tu) | m_n->gmch_m); - intel_de_write(dev_priv, PCH_TRANS_DATA_N1(pipe), m_n->gmch_n); - intel_de_write(dev_priv, PCH_TRANS_LINK_M1(pipe), m_n->link_m); - intel_de_write(dev_priv, PCH_TRANS_LINK_N1(pipe), m_n->link_n); + intel_set_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), + PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); } static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, @@ -3150,35 +3159,23 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta enum transcoder transcoder = crtc_state->cpu_transcoder; if (DISPLAY_VER(dev_priv) >= 5) { - intel_de_write(dev_priv, PIPE_DATA_M1(transcoder), - TU_SIZE(m_n->tu) | m_n->gmch_m); - intel_de_write(dev_priv, PIPE_DATA_N1(transcoder), - m_n->gmch_n); - intel_de_write(dev_priv, PIPE_LINK_M1(transcoder), - m_n->link_m); - intel_de_write(dev_priv, PIPE_LINK_N1(transcoder), - m_n->link_n); + intel_set_m_n(dev_priv, m_n, + PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), + PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); /* * M2_N2 registers are set only if DRRS is supported * (to make sure the registers are not unnecessarily accessed). 
*/ if (m2_n2 && crtc_state->has_drrs && transcoder_has_m2_n2(dev_priv, transcoder)) { - intel_de_write(dev_priv, PIPE_DATA_M2(transcoder), - TU_SIZE(m2_n2->tu) | m2_n2->gmch_m); - intel_de_write(dev_priv, PIPE_DATA_N2(transcoder), - m2_n2->gmch_n); - intel_de_write(dev_priv, PIPE_LINK_M2(transcoder), - m2_n2->link_m); - intel_de_write(dev_priv, PIPE_LINK_N2(transcoder), - m2_n2->link_n); + intel_set_m_n(dev_priv, m2_n2, + PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), + PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); } } else { - intel_de_write(dev_priv, PIPE_DATA_M_G4X(pipe), - TU_SIZE(m_n->tu) | m_n->gmch_m); - intel_de_write(dev_priv, PIPE_DATA_N_G4X(pipe), m_n->gmch_n); - intel_de_write(dev_priv, PIPE_LINK_M_G4X(pipe), m_n->link_m); - intel_de_write(dev_priv, PIPE_LINK_N_G4X(pipe), m_n->link_n); + intel_set_m_n(dev_priv, m_n, + PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), + PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); } } @@ -3863,6 +3860,18 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) return DIV_ROUND_UP(bps, link_bw * 8); } +static void intel_get_m_n(struct drm_i915_private *i915, + struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg) +{ + m_n->link_m = intel_de_read(i915, link_m_reg); + m_n->link_n = intel_de_read(i915, link_n_reg); + m_n->gmch_m = intel_de_read(i915, data_m_reg) & ~TU_SIZE_MASK; + m_n->gmch_n = intel_de_read(i915, data_n_reg); + m_n->tu = ((intel_de_read(i915, data_m_reg) & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; +} + static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n) { @@ -3870,13 +3879,9 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, struct drm_i915_private *dev_priv = to_i915(dev); enum pipe pipe = crtc->pipe; - m_n->link_m = intel_de_read(dev_priv, PCH_TRANS_LINK_M1(pipe)); - m_n->link_n = intel_de_read(dev_priv, PCH_TRANS_LINK_N1(pipe)); - m_n->gmch_m = intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) - & ~TU_SIZE_MASK; - m_n->gmch_n = intel_de_read(dev_priv, PCH_TRANS_DATA_N1(pipe)); - m_n->tu = ((intel_de_read(dev_priv, PCH_TRANS_DATA_M1(pipe)) - & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + intel_get_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), + PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); } static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, @@ -3888,39 +3893,19 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, enum pipe pipe = crtc->pipe; if (DISPLAY_VER(dev_priv) >= 5) { - m_n->link_m = intel_de_read(dev_priv, - PIPE_LINK_M1(transcoder)); - m_n->link_n = intel_de_read(dev_priv, - PIPE_LINK_N1(transcoder)); - m_n->gmch_m = intel_de_read(dev_priv, - PIPE_DATA_M1(transcoder)) - & ~TU_SIZE_MASK; - m_n->gmch_n = intel_de_read(dev_priv, - PIPE_DATA_N1(transcoder)); - m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M1(transcoder)) - & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + intel_get_m_n(dev_priv, m_n, + PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), + PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { - m2_n2->link_m = intel_de_read(dev_priv, - PIPE_LINK_M2(transcoder)); - m2_n2->link_n = intel_de_read(dev_priv, - PIPE_LINK_N2(transcoder)); - m2_n2->gmch_m = intel_de_read(dev_priv, - PIPE_DATA_M2(transcoder)) - & ~TU_SIZE_MASK; - m2_n2->gmch_n = intel_de_read(dev_priv, - PIPE_DATA_N2(transcoder)); - m2_n2->tu = ((intel_de_read(dev_priv, PIPE_DATA_M2(transcoder)) - & 
TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + intel_get_m_n(dev_priv, m2_n2, + PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), + PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); } } else { - m_n->link_m = intel_de_read(dev_priv, PIPE_LINK_M_G4X(pipe)); - m_n->link_n = intel_de_read(dev_priv, PIPE_LINK_N_G4X(pipe)); - m_n->gmch_m = intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) - & ~TU_SIZE_MASK; - m_n->gmch_n = intel_de_read(dev_priv, PIPE_DATA_N_G4X(pipe)); - m_n->tu = ((intel_de_read(dev_priv, PIPE_DATA_M_G4X(pipe)) - & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + intel_get_m_n(dev_priv, m_n, + PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), + PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); } } -- cgit From c65b3affc6737c99c09925b910c7471d3db26b54 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 27 Jan 2022 14:02:19 +0200 Subject: drm/i915: Clean up M/N register defines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use REG_GENMASK() & co. for the M/N register values. There are also a lot of weird unused defines (eg. *_OFFSET) we can just throw out. Also let's mask out the unused bits during readout for good measure. Previously we only masked out the TU_SIZE from one of the registers, which was a bit too inconsistent for my taste. v2: Mention the readout masking in the commit msg (Jani) Deal wth gvt Reviewed-by: Jani Nikula Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220127120219.20143-1-ville.syrjala@linux.intel.com --- drivers/gpu/drm/i915/display/intel_display.c | 10 +++++----- drivers/gpu/drm/i915/gvt/display.c | 4 ++-- drivers/gpu/drm/i915/i915_reg.h | 22 +++------------------- 3 files changed, 10 insertions(+), 26 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index c6bf474a8479..e1abe2d7ab96 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3865,11 +3865,11 @@ static void intel_get_m_n(struct drm_i915_private *i915, i915_reg_t data_m_reg, i915_reg_t data_n_reg, i915_reg_t link_m_reg, i915_reg_t link_n_reg) { - m_n->link_m = intel_de_read(i915, link_m_reg); - m_n->link_n = intel_de_read(i915, link_n_reg); - m_n->gmch_m = intel_de_read(i915, data_m_reg) & ~TU_SIZE_MASK; - m_n->gmch_n = intel_de_read(i915, data_n_reg); - m_n->tu = ((intel_de_read(i915, data_m_reg) & TU_SIZE_MASK) >> TU_SIZE_SHIFT) + 1; + m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; + m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; + m_n->gmch_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; + m_n->gmch_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; + m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; } static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c index 7198d02edc74..3ce88dea525c 100644 --- a/drivers/gpu/drm/i915/gvt/display.c +++ b/drivers/gpu/drm/i915/gvt/display.c @@ -253,7 +253,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * DP link clk 1620 MHz and non-constant_n. * TODO: calculate DP link symbol clk and stream clk m/n. 
*/ - vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = TU_SIZE(64); vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; @@ -387,7 +387,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu) * DP link clk 1620 MHz and non-constant_n. * TODO: calculate DP link symbol clk and stream clk m/n. */ - vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT; + vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = TU_SIZE(64); vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e; vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000; vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e; diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index d0286cb55d83..ef8ae4076d9c 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -5130,16 +5130,14 @@ enum { #define _PIPEB_DATA_M_G4X 0x71050 /* Transfer unit size for display port - 1, default is 0x3f (for TU size 64) */ -#define TU_SIZE(x) (((x) - 1) << 25) /* default size 64 */ -#define TU_SIZE_SHIFT 25 -#define TU_SIZE_MASK (0x3f << 25) +#define TU_SIZE_MASK REG_GENMASK(30, 25) +#define TU_SIZE(x) REG_FIELD_PREP(TU_SIZE_MASK, (x) - 1) /* default size 64 */ -#define DATA_LINK_M_N_MASK (0xffffff) +#define DATA_LINK_M_N_MASK REG_GENMASK(23, 0) #define DATA_LINK_N_MAX (0x800000) #define _PIPEA_DATA_N_G4X 0x70054 #define _PIPEB_DATA_N_G4X 0x71054 -#define PIPE_GMCH_DATA_N_MASK (0xffffff) /* * Computing Link M and N values for the Display Port link @@ -5154,11 +5152,8 @@ enum { #define _PIPEA_LINK_M_G4X 0x70060 #define _PIPEB_LINK_M_G4X 0x71060 -#define PIPEA_DP_LINK_M_MASK (0xffffff) - #define _PIPEA_LINK_N_G4X 0x70064 #define _PIPEB_LINK_N_G4X 0x71064 -#define PIPEA_DP_LINK_N_MASK (0xffffff) #define PIPE_DATA_M_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_M_G4X, _PIPEB_DATA_M_G4X) #define PIPE_DATA_N_G4X(pipe) _MMIO_PIPE(pipe, _PIPEA_DATA_N_G4X, _PIPEB_DATA_N_G4X) @@ -6761,24 +6756,13 @@ enum { #define _PIPEA_DATA_M1 0x60030 -#define PIPE_DATA_M1_OFFSET 0 #define _PIPEA_DATA_N1 0x60034 -#define PIPE_DATA_N1_OFFSET 0 - #define _PIPEA_DATA_M2 0x60038 -#define PIPE_DATA_M2_OFFSET 0 #define _PIPEA_DATA_N2 0x6003c -#define PIPE_DATA_N2_OFFSET 0 - #define _PIPEA_LINK_M1 0x60040 -#define PIPE_LINK_M1_OFFSET 0 #define _PIPEA_LINK_N1 0x60044 -#define PIPE_LINK_N1_OFFSET 0 - #define _PIPEA_LINK_M2 0x60048 -#define PIPE_LINK_M2_OFFSET 0 #define _PIPEA_LINK_N2 0x6004c -#define PIPE_LINK_N2_OFFSET 0 /* PIPEB timing regs are same start from 0x61000 */ -- cgit From 5f721a5d1bb2e3ada83f04a119908b66d909300a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 27 Jan 2022 11:32:52 +0200 Subject: drm/i915: s/gmch_{m,n}/data_{m,n}/ MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Rename the gmch_* M/N members to data_* to match the register definitions and thus make life a little less confusing. 
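To make the reworked field layout concrete, a small standalone C sketch of how TU_SIZE / DATA_LINK_M_N style fields are packed and unpacked; GENMASK32/FIELD_PREP32/FIELD_GET32 are simplified userspace stand-ins for the kernel's REG_GENMASK()/REG_FIELD_PREP()/REG_FIELD_GET(), and the example register value reuses the data M constant from the gvt hunk above:

#include <stdio.h>
#include <stdint.h>

/* simplified userspace stand-ins for the REG_* helpers */
#define GENMASK32(h, l)        (((~0u) >> (31 - (h))) & ((~0u) << (l)))
#define FIELD_PREP32(mask, v)  (((uint32_t)(v) << __builtin_ctz(mask)) & (mask)) /* gcc/clang builtin */
#define FIELD_GET32(mask, r)   (((r) & (mask)) >> __builtin_ctz(mask))

#define TU_SIZE_MASK           GENMASK32(30, 25)                   /* same bits as the old (0x3f << 25) */
#define TU_SIZE(x)             FIELD_PREP32(TU_SIZE_MASK, (x) - 1) /* default size 64 */
#define DATA_LINK_M_N_MASK     GENMASK32(23, 0)

int main(void)
{
        /* example PIPE_DATA_M1 value: TU size 64, data M 0x5b425e as in the gvt code */
        uint32_t reg = TU_SIZE(64) | 0x5b425e;

        printf("tu=%u data_m=0x%06x\n",
               FIELD_GET32(TU_SIZE_MASK, reg) + 1,  /* -> 64 */
               reg & DATA_LINK_M_N_MASK);           /* -> 0x5b425e */
        return 0;
}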
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220127093303.17309-4-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_ddi.c | 4 +-- drivers/gpu/drm/i915/display/intel_display.c | 48 ++++++++++++++-------------- drivers/gpu/drm/i915/display/intel_display.h | 4 +-- drivers/gpu/drm/i915/display/intel_dp.c | 2 +- drivers/gpu/drm/i915/display/intel_drrs.c | 2 +- 5 files changed, 30 insertions(+), 30 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 5d1f7d6218c5..ca8becb07e45 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3684,8 +3684,8 @@ static bool m_n_equal(const struct intel_link_m_n *m_n_1, const struct intel_link_m_n *m_n_2) { return m_n_1->tu == m_n_2->tu && - m_n_1->gmch_m == m_n_2->gmch_m && - m_n_1->gmch_n == m_n_2->gmch_n && + m_n_1->data_m == m_n_2->data_m && + m_n_1->data_n == m_n_2->data_n && m_n_1->link_m == m_n_2->link_m && m_n_1->link_n == m_n_2->link_n; } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index e1abe2d7ab96..49be51c32a62 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3082,7 +3082,7 @@ intel_link_compute_m_n(u16 bits_per_pixel, int nlanes, m_n->tu = 64; compute_m_n(data_clock, link_clock * nlanes * 8, - &m_n->gmch_m, &m_n->gmch_n, + &m_n->data_m, &m_n->data_n, constant_n); compute_m_n(pixel_clock, link_clock, @@ -3118,8 +3118,8 @@ static void intel_set_m_n(struct drm_i915_private *i915, i915_reg_t data_m_reg, i915_reg_t data_n_reg, i915_reg_t link_m_reg, i915_reg_t link_n_reg) { - intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->gmch_m); - intel_de_write(i915, data_n_reg, m_n->gmch_n); + intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); + intel_de_write(i915, data_n_reg, m_n->data_n); intel_de_write(i915, link_m_reg, m_n->link_m); intel_de_write(i915, link_n_reg, m_n->link_n); } @@ -3867,8 +3867,8 @@ static void intel_get_m_n(struct drm_i915_private *i915, { m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; - m_n->gmch_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; - m_n->gmch_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; + m_n->data_m = intel_de_read(i915, data_m_reg) & DATA_LINK_M_N_MASK; + m_n->data_n = intel_de_read(i915, data_n_reg) & DATA_LINK_M_N_MASK; m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; } @@ -5498,9 +5498,9 @@ intel_dump_m_n_config(const struct intel_crtc_state *pipe_config, struct drm_i915_private *i915 = to_i915(pipe_config->uapi.crtc->dev); drm_dbg_kms(&i915->drm, - "%s: lanes: %i; gmch_m: %u, gmch_n: %u, link_m: %u, link_n: %u, tu: %u\n", + "%s: lanes: %i; data_m: %u, data_n: %u, link_m: %u, link_n: %u, tu: %u\n", id, lane_count, - m_n->gmch_m, m_n->gmch_n, + m_n->data_m, m_n->data_n, m_n->link_m, m_n->link_n, m_n->tu); } @@ -6196,8 +6196,8 @@ intel_compare_link_m_n(const struct intel_link_m_n *m_n, bool exact) { return m_n->tu == m2_n2->tu && - intel_compare_m_n(m_n->gmch_m, m_n->gmch_n, - m2_n2->gmch_m, m2_n2->gmch_n, exact) && + intel_compare_m_n(m_n->data_m, m_n->data_n, + m2_n2->data_m, m2_n2->data_n, exact) && intel_compare_m_n(m_n->link_m, m_n->link_n, m2_n2->link_m, m2_n2->link_n, exact); } @@ -6396,16 +6396,16 @@ intel_pipe_config_compare(const struct 
intel_crtc_state *current_config, &pipe_config->name,\ !fastset)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ - "(expected tu %i gmch %i/%i link %i/%i, " \ - "found tu %i, gmch %i/%i link %i/%i)", \ + "(expected tu %i data %i/%i link %i/%i, " \ + "found tu %i, data %i/%i link %i/%i)", \ current_config->name.tu, \ - current_config->name.gmch_m, \ - current_config->name.gmch_n, \ + current_config->name.data_m, \ + current_config->name.data_n, \ current_config->name.link_m, \ current_config->name.link_n, \ pipe_config->name.tu, \ - pipe_config->name.gmch_m, \ - pipe_config->name.gmch_n, \ + pipe_config->name.data_m, \ + pipe_config->name.data_n, \ pipe_config->name.link_m, \ pipe_config->name.link_n); \ ret = false; \ @@ -6423,22 +6423,22 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, !intel_compare_link_m_n(¤t_config->alt_name, \ &pipe_config->name, !fastset)) { \ pipe_config_mismatch(fastset, crtc, __stringify(name), \ - "(expected tu %i gmch %i/%i link %i/%i, " \ - "or tu %i gmch %i/%i link %i/%i, " \ - "found tu %i, gmch %i/%i link %i/%i)", \ + "(expected tu %i data %i/%i link %i/%i, " \ + "or tu %i data %i/%i link %i/%i, " \ + "found tu %i, data %i/%i link %i/%i)", \ current_config->name.tu, \ - current_config->name.gmch_m, \ - current_config->name.gmch_n, \ + current_config->name.data_m, \ + current_config->name.data_n, \ current_config->name.link_m, \ current_config->name.link_n, \ current_config->alt_name.tu, \ - current_config->alt_name.gmch_m, \ - current_config->alt_name.gmch_n, \ + current_config->alt_name.data_m, \ + current_config->alt_name.data_n, \ current_config->alt_name.link_m, \ current_config->alt_name.link_n, \ pipe_config->name.tu, \ - pipe_config->name.gmch_m, \ - pipe_config->name.gmch_n, \ + pipe_config->name.data_m, \ + pipe_config->name.data_n, \ pipe_config->name.link_m, \ pipe_config->name.link_n); \ ret = false; \ diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index b61b75248ded..a241007f5c82 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -317,8 +317,8 @@ enum aux_ch { /* Used by dp and fdi links */ struct intel_link_m_n { u32 tu; - u32 gmch_m; - u32 gmch_n; + u32 data_m; + u32 data_n; u32 link_m; u32 link_n; }; diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index d6f11fe4130a..c3173a0d38e0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -1895,7 +1895,7 @@ intel_dp_compute_config(struct intel_encoder *encoder, /* FIXME: abstract this better */ if (pipe_config->splitter.enable) - pipe_config->dp_m_n.gmch_m *= pipe_config->splitter.link_count; + pipe_config->dp_m_n.data_m *= pipe_config->splitter.link_count; if (!HAS_DDI(dev_priv)) g4x_dp_set_clock(encoder, pipe_config); diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index c1439fcb5a95..46be46f2c47e 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -84,7 +84,7 @@ intel_drrs_compute_config(struct intel_dp *intel_dp, /* FIXME: abstract this better */ if (pipe_config->splitter.enable) - pipe_config->dp_m2_n2.gmch_m *= pipe_config->splitter.link_count; + pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count; } static void intel_drrs_set_state(struct drm_i915_private *dev_priv, -- cgit From 14683babf8ee356a232ee76b0acd332aef51fdc4 Mon 
Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Thu, 27 Jan 2022 11:32:53 +0200 Subject: drm/i915: Move drrs hardware bit frobbing to small helpers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Split the drrs code that actually changes the refresh rate (via PIPECONF or M/N values) to small helper functions that only deal with the hardware details and nothing else. We'll soon have a third way of doing this, and it's less confusing when each different method lives in its own function. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220127093303.17309-5-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_drrs.c | 67 +++++++++++++++++-------------- 1 file changed, 36 insertions(+), 31 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index 46be46f2c47e..0cacdb174fd0 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -87,6 +87,38 @@ intel_drrs_compute_config(struct intel_dp *intel_dp, pipe_config->dp_m2_n2.data_m *= pipe_config->splitter.link_count; } +static void +intel_drrs_set_refresh_rate_pipeconf(const struct intel_crtc_state *crtc_state, + enum drrs_refresh_rate_type refresh_type) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + u32 val, bit; + + if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) + bit = PIPECONF_EDP_RR_MODE_SWITCH_VLV; + else + bit = PIPECONF_EDP_RR_MODE_SWITCH; + + val = intel_de_read(dev_priv, PIPECONF(cpu_transcoder)); + + if (refresh_type == DRRS_LOW_RR) + val |= bit; + else + val &= ~bit; + + intel_de_write(dev_priv, PIPECONF(cpu_transcoder), val); +} + +static void +intel_drrs_set_refresh_rate_m_n(const struct intel_crtc_state *crtc_state, + enum drrs_refresh_rate_type refresh_type) +{ + intel_dp_set_m_n(crtc_state, + refresh_type == DRRS_LOW_RR ?
M2_N2 : M1_N1); +} + static void intel_drrs_set_state(struct drm_i915_private *dev_priv, const struct intel_crtc_state *crtc_state, enum drrs_refresh_rate_type refresh_type) @@ -120,37 +152,10 @@ static void intel_drrs_set_state(struct drm_i915_private *dev_priv, return; } - if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) { - switch (refresh_type) { - case DRRS_HIGH_RR: - intel_dp_set_m_n(crtc_state, M1_N1); - break; - case DRRS_LOW_RR: - intel_dp_set_m_n(crtc_state, M2_N2); - break; - case DRRS_MAX_RR: - default: - drm_err(&dev_priv->drm, - "Unsupported refreshrate type\n"); - } - } else if (DISPLAY_VER(dev_priv) > 6) { - i915_reg_t reg = PIPECONF(crtc_state->cpu_transcoder); - u32 val; - - val = intel_de_read(dev_priv, reg); - if (refresh_type == DRRS_LOW_RR) { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val |= PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val |= PIPECONF_EDP_RR_MODE_SWITCH; - } else { - if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) - val &= ~PIPECONF_EDP_RR_MODE_SWITCH_VLV; - else - val &= ~PIPECONF_EDP_RR_MODE_SWITCH; - } - intel_de_write(dev_priv, reg, val); - } + if (DISPLAY_VER(dev_priv) >= 8 && !IS_CHERRYVIEW(dev_priv)) + intel_drrs_set_refresh_rate_m_n(crtc_state, refresh_type); + else if (DISPLAY_VER(dev_priv) > 6) + intel_drrs_set_refresh_rate_pipeconf(crtc_state, refresh_type); dev_priv->drrs.refresh_rate_type = refresh_type; -- cgit From 0444f82766f0b5b9c8302ad802dafa5dd0e722d0 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Thu, 27 Jan 2022 14:57:17 +0100 Subject: ALSA: hda: Fix signedness of sscanf() arguments The %x format of sscanf() takes an unsigned int pointer, while we pass a signed int pointer. Practically it's OK, but this may result in a compile warning. Let's fix it. Fixes: a235d5b8e550 ("ALSA: hda: Allow model option to specify PCI SSID alias") Reported-by: kernel test robot Link: https://lore.kernel.org/r/20220127135717.31751-1-tiwai@suse.de Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_auto_parser.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/hda/hda_auto_parser.c b/sound/pci/hda/hda_auto_parser.c index 82c492b05667..cd1db943b7e0 100644 --- a/sound/pci/hda/hda_auto_parser.c +++ b/sound/pci/hda/hda_auto_parser.c @@ -981,7 +981,7 @@ void snd_hda_pick_fixup(struct hda_codec *codec, int id = HDA_FIXUP_ID_NOT_SET; const char *name = NULL; const char *type = NULL; - int vendor, device; + unsigned int vendor, device; if (codec->fixup_id != HDA_FIXUP_ID_NOT_SET) return; -- cgit From c8980fcb210851138cb34c9a8cb0cf0c09f07bf9 Mon Sep 17 00:00:00 2001 From: Roger Pau Monne Date: Fri, 21 Jan 2022 10:01:46 +0100 Subject: xen/x2apic: enable x2apic mode when supported for HVM MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit There's no point in disabling x2APIC mode when running as a Xen HVM guest, just enable it when available. Remove some unneeded wrapping around the detection functions, and simply provide a xen_x2apic_available helper that's a wrapper around x2apic_supported. 
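Going back one patch to the sscanf() signedness fix: a minimal userspace illustration of the point — %x expects an unsigned int pointer, so the vendor/device variables must be unsigned to keep -Wformat quiet (the alias string below is only an example, not a real SSID):

#include <stdio.h>

int main(void)
{
        const char *alias = "1022:15e3";        /* example "vendor:device" string */
        unsigned int vendor, device;            /* plain "int" here would trip -Wformat */

        if (sscanf(alias, "%x:%x", &vendor, &device) != 2)
                return 1;

        printf("vendor=0x%04x device=0x%04x\n", vendor, device);
        return 0;
}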
Signed-off-by: Roger Pau Monné Reviewed-by: Boris Ostrovsky Link: https://lore.kernel.org/r/20220121090146.13697-1-roger.pau@citrix.com Signed-off-by: Juergen Gross --- arch/x86/include/asm/xen/hypervisor.h | 14 -------------- arch/x86/xen/enlighten_hvm.c | 13 ++++--------- 2 files changed, 4 insertions(+), 23 deletions(-) diff --git a/arch/x86/include/asm/xen/hypervisor.h b/arch/x86/include/asm/xen/hypervisor.h index 1bf2ad34188a..16f548a661cf 100644 --- a/arch/x86/include/asm/xen/hypervisor.h +++ b/arch/x86/include/asm/xen/hypervisor.h @@ -43,20 +43,6 @@ static inline uint32_t xen_cpuid_base(void) return hypervisor_cpuid_base("XenVMMXenVMM", 2); } -#ifdef CONFIG_XEN -extern bool __init xen_hvm_need_lapic(void); - -static inline bool __init xen_x2apic_para_available(void) -{ - return xen_hvm_need_lapic(); -} -#else -static inline bool __init xen_x2apic_para_available(void) -{ - return (xen_cpuid_base() != 0); -} -#endif - struct pci_dev; #ifdef CONFIG_XEN_PV_DOM0 diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c index 42300941ec29..6448c5071117 100644 --- a/arch/x86/xen/enlighten_hvm.c +++ b/arch/x86/xen/enlighten_hvm.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -242,15 +243,9 @@ static __init int xen_parse_no_vector_callback(char *arg) } early_param("xen_no_vector_callback", xen_parse_no_vector_callback); -bool __init xen_hvm_need_lapic(void) +static __init bool xen_x2apic_available(void) { - if (xen_pv_domain()) - return false; - if (!xen_hvm_domain()) - return false; - if (xen_feature(XENFEAT_hvm_pirqs) && xen_have_vector_callback) - return false; - return true; + return x2apic_supported(); } static __init void xen_hvm_guest_late_init(void) @@ -312,7 +307,7 @@ struct hypervisor_x86 x86_hyper_xen_hvm __initdata = { .detect = xen_platform_hvm, .type = X86_HYPER_XEN_HVM, .init.init_platform = xen_hvm_guest_init, - .init.x2apic_available = xen_x2apic_para_available, + .init.x2apic_available = xen_x2apic_available, .init.init_mem_mapping = xen_hvm_init_mem_mapping, .init.guest_late_init = xen_hvm_guest_late_init, .runtime.pin_vcpu = xen_pin_vcpu, -- cgit From fb25621da5702c104ce0a48de5b174ced09e5b4e Mon Sep 17 00:00:00 2001 From: Miaoqian Lin Date: Thu, 27 Jan 2022 13:13:34 +0000 Subject: ASoC: fsl: Add missing error handling in pcm030_fabric_probe Add the missing platform_device_put() and platform_device_del() before return from pcm030_fabric_probe in the error handling case. 
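As a hedged sketch of the unwind rule the fix follows (illustrative names, not the actual probe code, which carries on and returns ret rather than returning early): a codec device that has only been allocated is dropped with platform_device_put(), while one that has also been added needs platform_device_del() before the final put.

#include <linux/platform_device.h>
#include <sound/soc.h>

static int example_register_codec(struct platform_device **codec_dev,
                                  struct snd_soc_card *card)
{
        int ret;

        *codec_dev = platform_device_alloc("wm9712-codec", -1);
        if (!*codec_dev)
                return -ENOMEM;

        ret = platform_device_add(*codec_dev);
        if (ret) {
                /* only allocated: a put is enough */
                platform_device_put(*codec_dev);
                return ret;
        }

        ret = snd_soc_register_card(card);
        if (ret) {
                /* allocated and added: delete, then drop the last reference */
                platform_device_del(*codec_dev);
                platform_device_put(*codec_dev);
                return ret;
        }

        return 0;
}

The del-then-put pair in the second error leg is the same sequence platform_device_unregister() performs for a fully registered device.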
Fixes: c912fa913446 ("ASoC: fsl: register the wm9712-codec") Signed-off-by: Miaoqian Lin Link: https://lore.kernel.org/r/20220127131336.30214-1-linmq006@gmail.com Signed-off-by: Mark Brown --- sound/soc/fsl/pcm030-audio-fabric.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/sound/soc/fsl/pcm030-audio-fabric.c b/sound/soc/fsl/pcm030-audio-fabric.c index af3c3b90c0ac..83b4a22bf15a 100644 --- a/sound/soc/fsl/pcm030-audio-fabric.c +++ b/sound/soc/fsl/pcm030-audio-fabric.c @@ -93,16 +93,21 @@ static int pcm030_fabric_probe(struct platform_device *op) dev_err(&op->dev, "platform_device_alloc() failed\n"); ret = platform_device_add(pdata->codec_device); - if (ret) + if (ret) { dev_err(&op->dev, "platform_device_add() failed: %d\n", ret); + platform_device_put(pdata->codec_device); + } ret = snd_soc_register_card(card); - if (ret) + if (ret) { dev_err(&op->dev, "snd_soc_register_card() failed: %d\n", ret); + platform_device_del(pdata->codec_device); + platform_device_put(pdata->codec_device); + } platform_set_drvdata(op, pdata); - return ret; + } static int pcm030_fabric_remove(struct platform_device *op) -- cgit From 2cbd27267ffe020af1442b95ec57f59a157ba85c Mon Sep 17 00:00:00 2001 From: Kamal Dasu Date: Thu, 27 Jan 2022 13:53:59 -0500 Subject: spi: bcm-qspi: check for valid cs before applying chip select Apply only valid chip select value. This change fixes case where chip select is set to initial value of '-1' during probe and PM supend and subsequent resume can try to use the value with undefined behaviour. Also in case where gpio based chip select, the check in bcm_qspi_chip_select() shall prevent undefined behaviour on resume. Fixes: fa236a7ef240 ("spi: bcm-qspi: Add Broadcom MSPI driver") Signed-off-by: Kamal Dasu Acked-by: Florian Fainelli Link: https://lore.kernel.org/r/20220127185359.27322-1-kdasu.kdev@gmail.com Signed-off-by: Mark Brown --- drivers/spi/spi-bcm-qspi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index c9a769b8594b..86c76211b3d3 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -585,7 +585,7 @@ static void bcm_qspi_chip_select(struct bcm_qspi *qspi, int cs) u32 rd = 0; u32 wr = 0; - if (qspi->base[CHIP_SELECT]) { + if (cs >= 0 && qspi->base[CHIP_SELECT]) { rd = bcm_qspi_read(qspi, CHIP_SELECT, 0); wr = (rd & ~0xff) | (1 << cs); if (rd == wr) -- cgit From 60b1e97140a487608b7cbde774b3cff1b5a99c00 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Wed, 26 Jan 2022 17:13:26 -0600 Subject: spi: dt-bindings: Fix 'reg' child node schema The schema for SPI child nodes' 'reg' property is not complete. 'reg' is a matrix of cells. The schema needs to define both the number of 'reg' entries and constraints on each entry. 
Signed-off-by: Rob Herring Link: https://lore.kernel.org/r/20220126231326.1636199-1-robh@kernel.org Signed-off-by: Mark Brown --- Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml index 5dd209206e88..3ec2d7b83775 100644 --- a/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml +++ b/Documentation/devicetree/bindings/spi/spi-peripheral-props.yaml @@ -23,8 +23,9 @@ properties: minItems: 1 maxItems: 256 items: - minimum: 0 - maximum: 256 + items: + - minimum: 0 + maximum: 256 description: Chip select used by the device. -- cgit From ab451ea952fe9d7afefae55ddb28943a148247fe Mon Sep 17 00:00:00 2001 From: Dai Ngo Date: Wed, 26 Jan 2022 13:13:38 -0800 Subject: nfsd: nfsd4_setclientid_confirm mistakenly expires confirmed client. From RFC 7530 Section 16.34.5: o The server has not recorded an unconfirmed { v, x, c, *, * } and has recorded a confirmed { v, x, c, *, s }. If the principals of the record and of SETCLIENTID_CONFIRM do not match, the server returns NFS4ERR_CLID_INUSE without removing any relevant leased client state, and without changing recorded callback and callback_ident values for client { x }. The current code intends to do what the spec describes above but it forgot to set 'old' to NULL resulting to the confirmed client to be expired. Fixes: 2b63482185e6 ("nfsd: fix clid_inuse on mount with security change") Signed-off-by: Dai Ngo Signed-off-by: Chuck Lever Reviewed-by: Bruce Fields --- fs/nfsd/nfs4state.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 72900b89cf84..32063733443d 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -4130,8 +4130,10 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp, status = nfserr_clid_inuse; if (client_has_state(old) && !same_creds(&unconf->cl_cred, - &old->cl_cred)) + &old->cl_cred)) { + old = NULL; goto out; + } status = mark_client_expired_locked(old); if (status) { old = NULL; -- cgit From 928d6fe996f69330ded6b887baf4534c5fac7988 Mon Sep 17 00:00:00 2001 From: Yuji Ishikawa Date: Thu, 27 Jan 2022 21:17:14 +0900 Subject: net: stmmac: dwmac-visconti: No change to ETHER_CLOCK_SEL for unexpected speed request. Variable clk_sel_val is not initialized in the default case of the first switch statement. In that case, the function should return immediately without any changes to the hardware. Reported-by: kernel test robot Reported-by: Dan Carpenter Fixes: b38dd98ff8d0 ("net: stmmac: Add Toshiba Visconti SoCs glue driver") Signed-off-by: Yuji Ishikawa Reviewed-by: Nobuhiro Iwamatsu Signed-off-by: David S. 
Miller --- drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c index dde5b772a5af..c3f10a92b62b 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-visconti.c @@ -49,13 +49,15 @@ struct visconti_eth { void __iomem *reg; u32 phy_intf_sel; struct clk *phy_ref_clk; + struct device *dev; spinlock_t lock; /* lock to protect register update */ }; static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed) { struct visconti_eth *dwmac = priv; - unsigned int val, clk_sel_val; + struct net_device *netdev = dev_get_drvdata(dwmac->dev); + unsigned int val, clk_sel_val = 0; unsigned long flags; spin_lock_irqsave(&dwmac->lock, flags); @@ -85,7 +87,9 @@ static void visconti_eth_fix_mac_speed(void *priv, unsigned int speed) break; default: /* No bit control */ - break; + netdev_err(netdev, "Unsupported speed request (%d)", speed); + spin_unlock_irqrestore(&dwmac->lock, flags); + return; } writel(val, dwmac->reg + MAC_CTRL_REG); @@ -229,6 +233,7 @@ static int visconti_eth_dwmac_probe(struct platform_device *pdev) spin_lock_init(&dwmac->lock); dwmac->reg = stmmac_res.addr; + dwmac->dev = &pdev->dev; plat_dat->bsp_priv = dwmac; plat_dat->fix_mac_speed = visconti_eth_fix_mac_speed; -- cgit From 500c77eed0feabddd5b3afb48e32c204614a8eab Mon Sep 17 00:00:00 2001 From: Gerhard Engleder Date: Thu, 27 Jan 2022 20:46:02 +0100 Subject: pinctrl: zynqmp: Revert "Unify pin naming" This reverts commit 54784ff24971ed5bd3f1056edce998148709d0a7. This patch changes the pin names from "MIO%d" to "MIO-%d", but all dts in arch/arm64/boot/dts/xilinx still use the old name. As a result my ZCU104 has no output on serial terminal and is not reachable over network. Signed-off-by: Gerhard Engleder Signed-off-by: Andy Shevchenko --- drivers/pinctrl/pinctrl-zynqmp.c | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/drivers/pinctrl/pinctrl-zynqmp.c b/drivers/pinctrl/pinctrl-zynqmp.c index 42da6bd399ee..e14012209992 100644 --- a/drivers/pinctrl/pinctrl-zynqmp.c +++ b/drivers/pinctrl/pinctrl-zynqmp.c @@ -809,7 +809,6 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev, unsigned int *npins) { struct pinctrl_pin_desc *pins, *pin; - char **pin_names; int ret; int i; @@ -821,14 +820,13 @@ static int zynqmp_pinctrl_prepare_pin_desc(struct device *dev, if (!pins) return -ENOMEM; - pin_names = devm_kasprintf_strarray(dev, ZYNQMP_PIN_PREFIX, *npins); - if (IS_ERR(pin_names)) - return PTR_ERR(pin_names); - for (i = 0; i < *npins; i++) { pin = &pins[i]; pin->number = i; - pin->name = pin_names[i]; + pin->name = devm_kasprintf(dev, GFP_KERNEL, "%s%d", + ZYNQMP_PIN_PREFIX, i); + if (!pin->name) + return -ENOMEM; } *zynqmp_pins = pins; -- cgit From 4e0f718daf97d47cf7dec122da1be970f145c809 Mon Sep 17 00:00:00 2001 From: Duoming Zhou Date: Fri, 28 Jan 2022 12:47:15 +0800 Subject: ax25: improve the incomplete fix to avoid UAF and NPD bugs The previous commit 1ade48d0c27d ("ax25: NPD bug when detaching AX25 device") introduce lock_sock() into ax25_kill_by_device to prevent NPD bug. But the concurrency NPD or UAF bug will occur, when lock_sock() or release_sock() dereferences the ax25_cb->sock. The NULL pointer dereference bug can be shown as below: ax25_kill_by_device() | ax25_release() | ax25_destroy_socket() | ax25_cb_del() ... | ... 
| ax25->sk=NULL; lock_sock(s->sk); //(1) | s->ax25_dev = NULL; | ... release_sock(s->sk); //(2) | ... | The root cause is that the sock is set to null before dereference site (1) or (2). Therefore, this patch extracts the ax25_cb->sock in advance, and uses ax25_list_lock to protect it, which can synchronize with ax25_cb_del() and ensure the value of sock is not null before the dereference sites. The concurrency UAF bug can be shown as below: ax25_kill_by_device() | ax25_release() | ax25_destroy_socket() ... | ... | sock_put(sk); //FREE lock_sock(s->sk); //(1) | s->ax25_dev = NULL; | ... release_sock(s->sk); //(2) | ... | The root cause is that the sock is released before dereference site (1) or (2). Therefore, this patch uses sock_hold() to increase the refcount of sock and uses ax25_list_lock to protect it, which can synchronize with ax25_cb_del() in ax25_destroy_socket() and ensure the sock will not be released before the dereference sites. Signed-off-by: Duoming Zhou Signed-off-by: David S. Miller --- net/ax25/af_ax25.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 02f43f3e2c56..44a8730c26ac 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -77,6 +77,7 @@ static void ax25_kill_by_device(struct net_device *dev) { ax25_dev *ax25_dev; ax25_cb *s; + struct sock *sk; if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) return; @@ -85,13 +86,15 @@ again: ax25_for_each(s, &ax25_list) { if (s->ax25_dev == ax25_dev) { + sk = s->sk; + sock_hold(sk); spin_unlock_bh(&ax25_list_lock); - lock_sock(s->sk); + lock_sock(sk); s->ax25_dev = NULL; - release_sock(s->sk); + release_sock(sk); ax25_disconnect(s, ENETUNREACH); spin_lock_bh(&ax25_list_lock); - + sock_put(sk); /* The entry could have been deleted from the * list meanwhile and thus the next pointer is * no longer valid. Play it safe and restart -- cgit From d01ffb9eee4af165d83b08dd73ebdf9fe94a519b Mon Sep 17 00:00:00 2001 From: Duoming Zhou Date: Fri, 28 Jan 2022 12:47:16 +0800 Subject: ax25: add refcount in ax25_dev to avoid UAF bugs If we dereference ax25_dev after we call kfree(ax25_dev) in ax25_dev_device_down(), it will lead to concurrency UAF bugs. There are eight syscall functions that suffer from UAF bugs, including ax25_bind(), ax25_release(), ax25_connect(), ax25_ioctl(), ax25_getname(), ax25_sendmsg(), ax25_getsockopt() and ax25_info_show(). One of the concurrency UAF bugs can be shown as below: (USE) | (FREE) | ax25_device_event | ax25_dev_device_down ax25_bind | ... ... | kfree(ax25_dev) ax25_fillin_cb() | ... ax25_fillin_cb_from_dev() | ... | The root cause of the UAF bugs is that kfree(ax25_dev) in ax25_dev_device_down() is not protected by any locks. When ax25_dev, which still has pointers pointing to it, is released, the concurrency UAF bug will happen. This patch introduces a refcount into ax25_dev in order to guarantee that there are no pointers pointing to it when ax25_dev is released. Signed-off-by: Duoming Zhou Signed-off-by: David S.
Miller --- include/net/ax25.h | 10 ++++++++++ net/ax25/af_ax25.c | 2 ++ net/ax25/ax25_dev.c | 12 ++++++++++-- net/ax25/ax25_route.c | 3 +++ 4 files changed, 25 insertions(+), 2 deletions(-) diff --git a/include/net/ax25.h b/include/net/ax25.h index 526e49589197..50b417df6221 100644 --- a/include/net/ax25.h +++ b/include/net/ax25.h @@ -239,6 +239,7 @@ typedef struct ax25_dev { #if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER) ax25_dama_info dama; #endif + refcount_t refcount; } ax25_dev; typedef struct ax25_cb { @@ -293,6 +294,15 @@ static __inline__ void ax25_cb_put(ax25_cb *ax25) } } +#define ax25_dev_hold(__ax25_dev) \ + refcount_inc(&((__ax25_dev)->refcount)) + +static __inline__ void ax25_dev_put(ax25_dev *ax25_dev) +{ + if (refcount_dec_and_test(&ax25_dev->refcount)) { + kfree(ax25_dev); + } +} static inline __be16 ax25_type_trans(struct sk_buff *skb, struct net_device *dev) { skb->dev = dev; diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index 44a8730c26ac..32f61978ff29 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -91,6 +91,7 @@ again: spin_unlock_bh(&ax25_list_lock); lock_sock(sk); s->ax25_dev = NULL; + ax25_dev_put(ax25_dev); release_sock(sk); ax25_disconnect(s, ENETUNREACH); spin_lock_bh(&ax25_list_lock); @@ -439,6 +440,7 @@ static int ax25_ctl_ioctl(const unsigned int cmd, void __user *arg) } out_put: + ax25_dev_put(ax25_dev); ax25_cb_put(ax25); return ret; diff --git a/net/ax25/ax25_dev.c b/net/ax25/ax25_dev.c index 256fadb94df3..770b787fb7bb 100644 --- a/net/ax25/ax25_dev.c +++ b/net/ax25/ax25_dev.c @@ -37,6 +37,7 @@ ax25_dev *ax25_addr_ax25dev(ax25_address *addr) for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next) if (ax25cmp(addr, (const ax25_address *)ax25_dev->dev->dev_addr) == 0) { res = ax25_dev; + ax25_dev_hold(ax25_dev); } spin_unlock_bh(&ax25_dev_lock); @@ -56,6 +57,7 @@ void ax25_dev_device_up(struct net_device *dev) return; } + refcount_set(&ax25_dev->refcount, 1); dev->ax25_ptr = ax25_dev; ax25_dev->dev = dev; dev_hold_track(dev, &ax25_dev->dev_tracker, GFP_ATOMIC); @@ -83,6 +85,7 @@ void ax25_dev_device_up(struct net_device *dev) spin_lock_bh(&ax25_dev_lock); ax25_dev->next = ax25_dev_list; ax25_dev_list = ax25_dev; + ax25_dev_hold(ax25_dev); spin_unlock_bh(&ax25_dev_lock); ax25_register_dev_sysctl(ax25_dev); @@ -112,20 +115,22 @@ void ax25_dev_device_down(struct net_device *dev) if ((s = ax25_dev_list) == ax25_dev) { ax25_dev_list = s->next; + ax25_dev_put(ax25_dev); spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; dev_put_track(dev, &ax25_dev->dev_tracker); - kfree(ax25_dev); + ax25_dev_put(ax25_dev); return; } while (s != NULL && s->next != NULL) { if (s->next == ax25_dev) { s->next = ax25_dev->next; + ax25_dev_put(ax25_dev); spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; dev_put_track(dev, &ax25_dev->dev_tracker); - kfree(ax25_dev); + ax25_dev_put(ax25_dev); return; } @@ -133,6 +138,7 @@ void ax25_dev_device_down(struct net_device *dev) } spin_unlock_bh(&ax25_dev_lock); dev->ax25_ptr = NULL; + ax25_dev_put(ax25_dev); } int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) @@ -149,6 +155,7 @@ int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) if (ax25_dev->forward != NULL) return -EINVAL; ax25_dev->forward = fwd_dev->dev; + ax25_dev_put(fwd_dev); break; case SIOCAX25DELFWD: @@ -161,6 +168,7 @@ int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd) return -EINVAL; } + ax25_dev_put(ax25_dev); return 0; } diff --git a/net/ax25/ax25_route.c 
b/net/ax25/ax25_route.c index d0b2e094bd55..1e32693833e5 100644 --- a/net/ax25/ax25_route.c +++ b/net/ax25/ax25_route.c @@ -116,6 +116,7 @@ static int __must_check ax25_rt_add(struct ax25_routes_struct *route) ax25_rt->dev = ax25_dev->dev; ax25_rt->digipeat = NULL; ax25_rt->ip_mode = ' '; + ax25_dev_put(ax25_dev); if (route->digi_count != 0) { if ((ax25_rt->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { write_unlock_bh(&ax25_route_lock); @@ -172,6 +173,7 @@ static int ax25_rt_del(struct ax25_routes_struct *route) } } } + ax25_dev_put(ax25_dev); write_unlock_bh(&ax25_route_lock); return 0; @@ -214,6 +216,7 @@ static int ax25_rt_opt(struct ax25_route_opt_struct *rt_option) } out: + ax25_dev_put(ax25_dev); write_unlock_bh(&ax25_route_lock); return err; } -- cgit From 8c83d39cc730378bbac64d67a551897b203a606e Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sat, 15 Jan 2022 18:02:33 -0500 Subject: IB/hfi1: Fix panic with larger ipoib send_queue_size When the ipoib send_queue_size is increased from the default the following panic happens: RIP: 0010:hfi1_ipoib_drain_tx_ring+0x45/0xf0 [hfi1] Code: 31 e4 eb 0f 8b 85 c8 02 00 00 41 83 c4 01 44 39 e0 76 60 8b 8d cc 02 00 00 44 89 e3 be 01 00 00 00 d3 e3 48 03 9d c0 02 00 00 83 18 01 00 00 00 00 00 00 48 8b bb 30 01 00 00 e8 25 af a7 e0 RSP: 0018:ffffc9000798f4a0 EFLAGS: 00010286 RAX: 0000000000008000 RBX: ffffc9000aa0f000 RCX: 000000000000000f RDX: 0000000000000000 RSI: 0000000000000001 RDI: 0000000000000000 RBP: ffff88810ff08000 R08: ffff88889476d900 R09: 0000000000000101 R10: 0000000000000000 R11: ffffc90006590ff8 R12: 0000000000000200 R13: ffffc9000798fba8 R14: 0000000000000000 R15: 0000000000000001 FS: 00007fd0f79cc3c0(0000) GS:ffff88885fb00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: ffffc9000aa0f118 CR3: 0000000889c84001 CR4: 00000000001706e0 Call Trace: hfi1_ipoib_napi_tx_disable+0x45/0x60 [hfi1] hfi1_ipoib_dev_stop+0x18/0x80 [hfi1] ipoib_ib_dev_stop+0x1d/0x40 [ib_ipoib] ipoib_stop+0x48/0xc0 [ib_ipoib] __dev_close_many+0x9e/0x110 __dev_change_flags+0xd9/0x210 dev_change_flags+0x21/0x60 do_setlink+0x31c/0x10f0 ? __nla_validate_parse+0x12d/0x1a0 ? __nla_parse+0x21/0x30 ? inet6_validate_link_af+0x5e/0xf0 ? cpumask_next+0x1f/0x20 ? __snmp6_fill_stats64.isra.53+0xbb/0x140 ? __nla_validate_parse+0x47/0x1a0 __rtnl_newlink+0x530/0x910 ? pskb_expand_head+0x73/0x300 ? __kmalloc_node_track_caller+0x109/0x280 ? __nla_put+0xc/0x20 ? cpumask_next_and+0x20/0x30 ? update_sd_lb_stats.constprop.144+0xd3/0x820 ? _raw_spin_unlock_irqrestore+0x25/0x37 ? __wake_up_common_lock+0x87/0xc0 ? kmem_cache_alloc_trace+0x3d/0x3d0 rtnl_newlink+0x43/0x60 The issue happens when the shift that should have been a function of the txq item size mistakenly used the ring size. Fix by using the item size. 
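A standalone sketch of the arithmetic behind that one-line fix: the ring addresses item N at byte offset N << shift, so the shift must come from the per-item size rather than from the number of items; both sizes below are made up for illustration.

#include <stdio.h>

static unsigned int ilog2_u32(unsigned int v)
{
        unsigned int r = 0;

        while (v >>= 1)
                r++;
        return r;
}

int main(void)
{
        unsigned int ring_size = 2048;  /* number of items (from send_queue_size) */
        unsigned int item_size = 128;   /* bytes per ring item, power of two */
        unsigned int idx = ring_size - 1;

        unsigned int good = idx << ilog2_u32(item_size); /*  262016: last item, in bounds */
        unsigned int bad  = idx << ilog2_u32(ring_size); /* 4192256: far past the buffer  */

        printf("buffer=%u bytes, good offset=%u, bad offset=%u\n",
               ring_size * item_size, good, bad);
        return 0;
}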
Cc: stable@vger.kernel.org Fixes: d47dfc2b00e6 ("IB/hfi1: Remove cache and embed txreq in ring") Link: https://lore.kernel.org/r/1642287756-182313-2-git-send-email-mike.marciniszyn@cornelisnetworks.com Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/ipoib_tx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c index f4010890309f..bf62956c8667 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c @@ -731,7 +731,7 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) goto free_txqs; txq->tx_ring.max_items = tx_ring_size; - txq->tx_ring.shift = ilog2(tx_ring_size); + txq->tx_ring.shift = ilog2(tx_item_size); txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq); netif_tx_napi_add(dev, &txq->napi, -- cgit From 1f84a9450d75e08af70d9e2f2d5e1c0ac0c881d2 Mon Sep 17 00:00:00 2001 From: Haiyue Wang Date: Fri, 28 Jan 2022 18:47:14 +0800 Subject: gve: fix the wrong AdminQ buffer queue index check The 'tail' and 'head' are 'unsigned int' type free-running count, when 'head' is overflow, the 'int i (= tail) < u32 head' will be false: Only '- loop 0: idx = 63' result is shown, so it needs to use 'int' type to compare, it can handle the overflow correctly. typedef uint32_t u32; int main() { u32 tail, head; int stail, shead; int i, loop; tail = 0xffffffff; head = 0x00000000; for (i = tail, loop = 0; i < head; i++) { unsigned int idx = i & 63; printf("+ loop %d: idx = %u\n", loop++, idx); } stail = tail; shead = head; for (i = stail, loop = 0; i < shead; i++) { unsigned int idx = i & 63; printf("- loop %d: idx = %u\n", loop++, idx); } return 0; } Fixes: 5cdad90de62c ("gve: Batch AQ commands for creating and destroying queues.") Signed-off-by: Haiyue Wang Signed-off-by: David S. Miller --- drivers/net/ethernet/google/gve/gve_adminq.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/google/gve/gve_adminq.c b/drivers/net/ethernet/google/gve/gve_adminq.c index 2ad7f57f7e5b..f7621ab672b9 100644 --- a/drivers/net/ethernet/google/gve/gve_adminq.c +++ b/drivers/net/ethernet/google/gve/gve_adminq.c @@ -301,7 +301,7 @@ static int gve_adminq_parse_err(struct gve_priv *priv, u32 status) */ static int gve_adminq_kick_and_wait(struct gve_priv *priv) { - u32 tail, head; + int tail, head; int i; tail = ioread32be(&priv->reg_bar0->adminq_event_counter); -- cgit From b1151b74ff68cc83c2a8e1a618efe7d056e4f237 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sat, 15 Jan 2022 18:02:34 -0500 Subject: IB/hfi1: Fix alloc failure with larger txqueuelen The following allocation with large txqueuelen will result in the following warning: Call Trace: __alloc_pages_nodemask+0x283/0x2c0 kmalloc_large_node+0x3c/0xa0 __kmalloc_node+0x22a/0x2f0 hfi1_ipoib_txreq_init+0x19f/0x330 [hfi1] hfi1_ipoib_setup_rn+0xd3/0x1a0 [hfi1] rdma_init_netdev+0x5a/0x80 [ib_core] ipoib_intf_init+0x6c/0x350 [ib_ipoib] ipoib_intf_alloc+0x5c/0xc0 [ib_ipoib] ipoib_add_one+0xbe/0x300 [ib_ipoib] add_client_context+0x12c/0x1a0 [ib_core] ib_register_client+0x147/0x190 [ib_core] ipoib_init_module+0xdd/0x132 [ib_ipoib] do_one_initcall+0x46/0x1c3 do_init_module+0x5a/0x220 load_module+0x14c5/0x17f0 __do_sys_init_module+0x13b/0x180 do_syscall_64+0x5b/0x1a0 entry_SYSCALL_64_after_hwframe+0x65/0xca For ipoib, the txqueuelen is modified with the module parameter send_queue_size. 
Fix by changing to use kv versions of the same allocator to handle the large allocations. The allocation embeds a hdr struct that is dma mapped. Change that struct to a pointer to a kzalloced struct. Cc: stable@vger.kernel.org Fixes: d99dc602e2a5 ("IB/hfi1: Add functions to transmit datagram ipoib packets") Link: https://lore.kernel.org/r/1642287756-182313-3-git-send-email-mike.marciniszyn@cornelisnetworks.com Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/ipoib.h | 2 +- drivers/infiniband/hw/hfi1/ipoib_tx.c | 36 ++++++++++++++++++++++++----------- 2 files changed, 26 insertions(+), 12 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/ipoib.h b/drivers/infiniband/hw/hfi1/ipoib.h index 909122934246..aec60d4888eb 100644 --- a/drivers/infiniband/hw/hfi1/ipoib.h +++ b/drivers/infiniband/hw/hfi1/ipoib.h @@ -55,7 +55,7 @@ union hfi1_ipoib_flow { */ struct ipoib_txreq { struct sdma_txreq txreq; - struct hfi1_sdma_header sdma_hdr; + struct hfi1_sdma_header *sdma_hdr; int sdma_status; int complete; struct hfi1_ipoib_dev_priv *priv; diff --git a/drivers/infiniband/hw/hfi1/ipoib_tx.c b/drivers/infiniband/hw/hfi1/ipoib_tx.c index bf62956c8667..d6bbdb8fcb50 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_tx.c +++ b/drivers/infiniband/hw/hfi1/ipoib_tx.c @@ -122,7 +122,7 @@ static void hfi1_ipoib_free_tx(struct ipoib_txreq *tx, int budget) dd_dev_warn(priv->dd, "%s: Status = 0x%x pbc 0x%llx txq = %d sde = %d\n", __func__, tx->sdma_status, - le64_to_cpu(tx->sdma_hdr.pbc), tx->txq->q_idx, + le64_to_cpu(tx->sdma_hdr->pbc), tx->txq->q_idx, tx->txq->sde->this_idx); } @@ -231,7 +231,7 @@ static int hfi1_ipoib_build_tx_desc(struct ipoib_txreq *tx, { struct hfi1_devdata *dd = txp->dd; struct sdma_txreq *txreq = &tx->txreq; - struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr; + struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; u16 pkt_bytes = sizeof(sdma_hdr->pbc) + (txp->hdr_dwords << 2) + tx->skb->len; int ret; @@ -256,7 +256,7 @@ static void hfi1_ipoib_build_ib_tx_headers(struct ipoib_txreq *tx, struct ipoib_txparms *txp) { struct hfi1_ipoib_dev_priv *priv = tx->txq->priv; - struct hfi1_sdma_header *sdma_hdr = &tx->sdma_hdr; + struct hfi1_sdma_header *sdma_hdr = tx->sdma_hdr; struct sk_buff *skb = tx->skb; struct hfi1_pportdata *ppd = ppd_from_ibp(txp->ibp); struct rdma_ah_attr *ah_attr = txp->ah_attr; @@ -483,7 +483,7 @@ static int hfi1_ipoib_send_dma_single(struct net_device *dev, if (likely(!ret)) { tx_ok: trace_sdma_output_ibhdr(txq->priv->dd, - &tx->sdma_hdr.hdr, + &tx->sdma_hdr->hdr, ib_is_sc5(txp->flow.sc5)); hfi1_ipoib_check_queue_depth(txq); return NETDEV_TX_OK; @@ -547,7 +547,7 @@ static int hfi1_ipoib_send_dma_list(struct net_device *dev, hfi1_ipoib_check_queue_depth(txq); trace_sdma_output_ibhdr(txq->priv->dd, - &tx->sdma_hdr.hdr, + &tx->sdma_hdr->hdr, ib_is_sc5(txp->flow.sc5)); if (!netdev_xmit_more()) @@ -683,7 +683,8 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) { struct net_device *dev = priv->netdev; u32 tx_ring_size, tx_item_size; - int i; + struct hfi1_ipoib_circ_buf *tx_ring; + int i, j; /* * Ring holds 1 less than tx_ring_size @@ -701,7 +702,9 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) for (i = 0; i < dev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; + struct ipoib_txreq *tx; + tx_ring = &txq->tx_ring; iowait_init(&txq->wait, 0, hfi1_ipoib_flush_txq, @@ -725,14 +728,19 @@ int hfi1_ipoib_txreq_init(struct hfi1_ipoib_dev_priv *priv) priv->dd->node); 
txq->tx_ring.items = - kcalloc_node(tx_ring_size, tx_item_size, - GFP_KERNEL, priv->dd->node); + kvzalloc_node(array_size(tx_ring_size, tx_item_size), + GFP_KERNEL, priv->dd->node); if (!txq->tx_ring.items) goto free_txqs; txq->tx_ring.max_items = tx_ring_size; txq->tx_ring.shift = ilog2(tx_item_size); txq->tx_ring.avail = hfi1_ipoib_ring_hwat(txq); + tx_ring = &txq->tx_ring; + for (j = 0; j < tx_ring_size; j++) + hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr = + kzalloc_node(sizeof(*tx->sdma_hdr), + GFP_KERNEL, priv->dd->node); netif_tx_napi_add(dev, &txq->napi, hfi1_ipoib_poll_tx_ring, @@ -746,7 +754,10 @@ free_txqs: struct hfi1_ipoib_txq *txq = &priv->txqs[i]; netif_napi_del(&txq->napi); - kfree(txq->tx_ring.items); + tx_ring = &txq->tx_ring; + for (j = 0; j < tx_ring_size; j++) + kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr); + kvfree(tx_ring->items); } kfree(priv->txqs); @@ -780,17 +791,20 @@ static void hfi1_ipoib_drain_tx_list(struct hfi1_ipoib_txq *txq) void hfi1_ipoib_txreq_deinit(struct hfi1_ipoib_dev_priv *priv) { - int i; + int i, j; for (i = 0; i < priv->netdev->num_tx_queues; i++) { struct hfi1_ipoib_txq *txq = &priv->txqs[i]; + struct hfi1_ipoib_circ_buf *tx_ring = &txq->tx_ring; iowait_cancel_work(&txq->wait); iowait_sdma_drain(&txq->wait); hfi1_ipoib_drain_tx_list(txq); netif_napi_del(&txq->napi); hfi1_ipoib_drain_tx_ring(txq); - kfree(txq->tx_ring.items); + for (j = 0; j < tx_ring->max_items; j++) + kfree(hfi1_txreq_from_idx(tx_ring, j)->sdma_hdr); + kvfree(tx_ring->items); } kfree(priv->txqs); -- cgit From 5f8f55b92edd621f056bdf09e572092849fabd83 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sat, 15 Jan 2022 18:02:35 -0500 Subject: IB/hfi1: Fix AIP early init panic An early failure in hfi1_ipoib_setup_rn() can lead to the following panic: BUG: unable to handle kernel NULL pointer dereference at 00000000000001b0 PGD 0 P4D 0 Oops: 0002 [#1] SMP NOPTI Workqueue: events work_for_cpu_fn RIP: 0010:try_to_grab_pending+0x2b/0x140 Code: 1f 44 00 00 41 55 41 54 55 48 89 d5 53 48 89 fb 9c 58 0f 1f 44 00 00 48 89 c2 fa 66 0f 1f 44 00 00 48 89 55 00 40 84 f6 75 77 48 0f ba 2b 00 72 09 31 c0 5b 5d 41 5c 41 5d c3 48 89 df e8 6c RSP: 0018:ffffb6b3cf7cfa48 EFLAGS: 00010046 RAX: 0000000000000246 RBX: 00000000000001b0 RCX: 0000000000000000 RDX: 0000000000000246 RSI: 0000000000000000 RDI: 00000000000001b0 RBP: ffffb6b3cf7cfa70 R08: 0000000000000f09 R09: 0000000000000001 R10: 0000000000000000 R11: 0000000000000001 R12: 0000000000000000 R13: ffffb6b3cf7cfa90 R14: ffffffff9b2fbfc0 R15: ffff8a4fdf244690 FS: 0000000000000000(0000) GS:ffff8a527f400000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 00000000000001b0 CR3: 00000017e2410003 CR4: 00000000007706f0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 PKRU: 55555554 Call Trace: __cancel_work_timer+0x42/0x190 ? dev_printk_emit+0x4e/0x70 iowait_cancel_work+0x15/0x30 [hfi1] hfi1_ipoib_txreq_deinit+0x5a/0x220 [hfi1] ? dev_err+0x6c/0x90 hfi1_ipoib_netdev_dtor+0x15/0x30 [hfi1] hfi1_ipoib_setup_rn+0x10e/0x150 [hfi1] rdma_init_netdev+0x5a/0x80 [ib_core] ? 
hfi1_ipoib_free_rdma_netdev+0x20/0x20 [hfi1] ipoib_intf_init+0x6c/0x350 [ib_ipoib] ipoib_intf_alloc+0x5c/0xc0 [ib_ipoib] ipoib_add_one+0xbe/0x300 [ib_ipoib] add_client_context+0x12c/0x1a0 [ib_core] enable_device_and_get+0xdc/0x1d0 [ib_core] ib_register_device+0x572/0x6b0 [ib_core] rvt_register_device+0x11b/0x220 [rdmavt] hfi1_register_ib_device+0x6b4/0x770 [hfi1] do_init_one.isra.20+0x3e3/0x680 [hfi1] local_pci_probe+0x41/0x90 work_for_cpu_fn+0x16/0x20 process_one_work+0x1a7/0x360 ? create_worker+0x1a0/0x1a0 worker_thread+0x1cf/0x390 ? create_worker+0x1a0/0x1a0 kthread+0x116/0x130 ? kthread_flush_work_fn+0x10/0x10 ret_from_fork+0x1f/0x40 The panic happens in hfi1_ipoib_txreq_deinit() because there is a NULL deref when hfi1_ipoib_netdev_dtor() is called in this error case. hfi1_ipoib_txreq_init() and hfi1_ipoib_rxq_init() are self unwinding so fix by adjusting the error paths accordingly. Other changes: - hfi1_ipoib_free_rdma_netdev() is deleted including the free_netdev() since the netdev core code deletes calls free_netdev() - The switch to the accelerated entrances is moved to the success path. Cc: stable@vger.kernel.org Fixes: d99dc602e2a5 ("IB/hfi1: Add functions to transmit datagram ipoib packets") Link: https://lore.kernel.org/r/1642287756-182313-4-git-send-email-mike.marciniszyn@cornelisnetworks.com Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/ipoib_main.c | 13 +++---------- 1 file changed, 3 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c index e1a2b02bbd91..8306ed5c1b80 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_main.c +++ b/drivers/infiniband/hw/hfi1/ipoib_main.c @@ -168,12 +168,6 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev) free_percpu(dev->tstats); } -static void hfi1_ipoib_free_rdma_netdev(struct net_device *dev) -{ - hfi1_ipoib_netdev_dtor(dev); - free_netdev(dev); -} - static void hfi1_ipoib_set_id(struct net_device *dev, int id) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); @@ -211,24 +205,23 @@ static int hfi1_ipoib_setup_rn(struct ib_device *device, priv->port_num = port_num; priv->netdev_ops = netdev->netdev_ops; - netdev->netdev_ops = &hfi1_ipoib_netdev_ops; - ib_query_pkey(device, port_num, priv->pkey_index, &priv->pkey); rc = hfi1_ipoib_txreq_init(priv); if (rc) { dd_dev_err(dd, "IPoIB netdev TX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); return rc; } rc = hfi1_ipoib_rxq_init(netdev); if (rc) { dd_dev_err(dd, "IPoIB netdev RX init - failed(%d)\n", rc); - hfi1_ipoib_free_rdma_netdev(netdev); + hfi1_ipoib_txreq_deinit(priv); return rc; } + netdev->netdev_ops = &hfi1_ipoib_netdev_ops; + netdev->priv_destructor = hfi1_ipoib_netdev_dtor; netdev->needs_free_netdev = true; -- cgit From e5cce44aff3be9ad2cd52f63f35edbd706181d50 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Sat, 15 Jan 2022 18:02:36 -0500 Subject: IB/hfi1: Fix tstats alloc and dealloc The tstats allocation is done in the accelerated ndo_init function but the allocation is not tested to succeed. The deallocation is not done in the accelerated ndo_uninit function. Resolve issues by testing for an allocation failure and adding the free_percpu in the uninit function. 
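In sketch form (illustrative, not the driver code itself), the balance the fix establishes: the per-CPU stats are checked at allocation time and released on every teardown path, with the pointer cleared so that a later free_percpu(NULL) in the destructor stays a harmless no-op.

#include <linux/netdevice.h>
#include <linux/percpu.h>

static int example_ndo_init(struct net_device *dev)
{
        dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
        if (!dev->tstats)
                return -ENOMEM;

        return 0;
}

static void example_ndo_uninit(struct net_device *dev)
{
        free_percpu(dev->tstats);
        dev->tstats = NULL;     /* a second free_percpu(NULL) later is harmless */
}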
Fixes: aa0616a9bd52 ("IB/hfi1: switch to core handling of rx/tx byte/packet counters") Link: https://lore.kernel.org/r/1642287756-182313-5-git-send-email-mike.marciniszyn@cornelisnetworks.com Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/hfi1/ipoib_main.c | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/drivers/infiniband/hw/hfi1/ipoib_main.c b/drivers/infiniband/hw/hfi1/ipoib_main.c index 8306ed5c1b80..5d814afdf7f3 100644 --- a/drivers/infiniband/hw/hfi1/ipoib_main.c +++ b/drivers/infiniband/hw/hfi1/ipoib_main.c @@ -22,26 +22,35 @@ static int hfi1_ipoib_dev_init(struct net_device *dev) int ret; dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats); + if (!dev->tstats) + return -ENOMEM; ret = priv->netdev_ops->ndo_init(dev); if (ret) - return ret; + goto out_ret; ret = hfi1_netdev_add_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr), dev); if (ret < 0) { priv->netdev_ops->ndo_uninit(dev); - return ret; + goto out_ret; } return 0; +out_ret: + free_percpu(dev->tstats); + dev->tstats = NULL; + return ret; } static void hfi1_ipoib_dev_uninit(struct net_device *dev) { struct hfi1_ipoib_dev_priv *priv = hfi1_ipoib_priv(dev); + free_percpu(dev->tstats); + dev->tstats = NULL; + hfi1_netdev_remove_data(priv->dd, qpn_from_mac(priv->netdev->dev_addr)); priv->netdev_ops->ndo_uninit(dev); @@ -166,6 +175,7 @@ static void hfi1_ipoib_netdev_dtor(struct net_device *dev) hfi1_ipoib_rxq_deinit(priv->netdev); free_percpu(dev->tstats); + dev->tstats = NULL; } static void hfi1_ipoib_set_id(struct net_device *dev, int id) -- cgit From 6449520391dfc3d2cef134f11a91251a054ff7d0 Mon Sep 17 00:00:00 2001 From: Jisheng Zhang Date: Fri, 28 Jan 2022 22:15:50 +0800 Subject: net: stmmac: properly handle with runtime pm in stmmac_dvr_remove() There are two issues with runtime pm handling in stmmac_dvr_remove(): 1. the mac is runtime suspended before stopping dma and rx/tx. We need to ensure the device is properly resumed back. 2. the stmmaceth clk enable/disable isn't balanced in both exit and error handling code path. Take the exit code path for example, when we unbind the driver or rmmod the driver module, the mac is runtime suspended as said above, so the stmmaceth clk is disabled, but stmmac_dvr_remove() stmmac_remove_config_dt() clk_disable_unprepare() CCF will complain this time. The error handling code path suffers from the similar situtaion. 
Here are kernel warnings in error handling code path on Allwinner D1 platform: [ 1.604695] ------------[ cut here ]------------ [ 1.609328] bus-emac already disabled [ 1.613015] WARNING: CPU: 0 PID: 38 at drivers/clk/clk.c:952 clk_core_disable+0xcc/0xec [ 1.621039] CPU: 0 PID: 38 Comm: kworker/u2:1 Not tainted 5.14.0-rc4#1 [ 1.627653] Hardware name: Allwinner D1 NeZha (DT) [ 1.632443] Workqueue: events_unbound deferred_probe_work_func [ 1.638286] epc : clk_core_disable+0xcc/0xec [ 1.642561] ra : clk_core_disable+0xcc/0xec [ 1.646835] epc : ffffffff8023c2ec ra : ffffffff8023c2ec sp : ffffffd00411bb10 [ 1.654054] gp : ffffffff80ec9988 tp : ffffffe00143a800 t0 : ffffffff80ed6a6f [ 1.661272] t1 : ffffffff80ed6a60 t2 : 0000000000000000 s0 : ffffffe001509e00 [ 1.668489] s1 : 0000000000000001 a0 : 0000000000000019 a1 : ffffffff80e80bd8 [ 1.675707] a2 : 00000000ffffefff a3 : 00000000000000f4 a4 : 0000000000000002 [ 1.682924] a5 : 0000000000000001 a6 : 0000000000000030 a7 : 00000000028f5c29 [ 1.690141] s2 : 0000000000000800 s3 : ffffffe001375000 s4 : ffffffe01fdf7a80 [ 1.697358] s5 : ffffffe001375010 s6 : ffffffff8001fc10 s7 : ffffffffffffffff [ 1.704577] s8 : 0000000000000001 s9 : ffffffff80ecb248 s10: ffffffe001b80000 [ 1.711794] s11: ffffffe001b80760 t3 : 0000000000000062 t4 : ffffffffffffffff [ 1.719012] t5 : ffffffff80e0f6d8 t6 : ffffffd00411b8f0 [ 1.724321] status: 8000000201800100 badaddr: 0000000000000000 cause: 0000000000000003 [ 1.732233] [] clk_core_disable+0xcc/0xec [ 1.737810] [] clk_disable+0x38/0x78 [ 1.742956] [] worker_thread+0x1a8/0x4d8 [ 1.748451] [] stmmac_remove_config_dt+0x1c/0x4c [ 1.754646] [] sun8i_dwmac_probe+0x378/0x82c [ 1.760484] [] worker_thread+0x1a8/0x4d8 [ 1.765975] [] platform_probe+0x64/0xf0 [ 1.771382] [] really_probe.part.0+0x8c/0x30c [ 1.777305] [] __driver_probe_device+0xa0/0x148 [ 1.783402] [] driver_probe_device+0x38/0x138 [ 1.789324] [] __device_attach_driver+0xd0/0x170 [ 1.795508] [] __driver_attach_async_helper+0xbc/0xc0 [ 1.802125] [] bus_for_each_drv+0x68/0xb4 [ 1.807701] [] __device_attach+0xd8/0x184 [ 1.813277] [] bus_probe_device+0x98/0xbc [ 1.818852] [] deferred_probe_work_func+0x90/0xd4 [ 1.825122] [] process_one_work+0x1e4/0x390 [ 1.830872] [] worker_thread+0x31c/0x4d8 [ 1.836362] [] kthreadd+0x94/0x188 [ 1.841335] [] kthreadd+0x94/0x188 [ 1.846304] [] process_one_work+0x38c/0x390 [ 1.852054] [] kthread+0x124/0x160 [ 1.857021] [] set_kthread_struct+0x5c/0x60 [ 1.862770] [] ret_from_syscall_rejected+0x8/0xc [ 1.868956] ---[ end trace 8d5c6046255f84a0 ]--- [ 1.873675] ------------[ cut here ]------------ [ 1.878366] bus-emac already unprepared [ 1.882378] WARNING: CPU: 0 PID: 38 at drivers/clk/clk.c:810 clk_core_unprepare+0xe4/0x168 [ 1.890673] CPU: 0 PID: 38 Comm: kworker/u2:1 Tainted: G W 5.14.0-rc4 #1 [ 1.898674] Hardware name: Allwinner D1 NeZha (DT) [ 1.903464] Workqueue: events_unbound deferred_probe_work_func [ 1.909305] epc : clk_core_unprepare+0xe4/0x168 [ 1.913840] ra : clk_core_unprepare+0xe4/0x168 [ 1.918375] epc : ffffffff8023d6cc ra : ffffffff8023d6cc sp : ffffffd00411bb10 [ 1.925593] gp : ffffffff80ec9988 tp : ffffffe00143a800 t0 : 0000000000000002 [ 1.932811] t1 : ffffffe01f743be0 t2 : 0000000000000040 s0 : ffffffe001509e00 [ 1.940029] s1 : 0000000000000001 a0 : 000000000000001b a1 : ffffffe00143a800 [ 1.947246] a2 : 0000000000000000 a3 : 00000000000000f4 a4 : 0000000000000001 [ 1.954463] a5 : 0000000000000000 a6 : 0000000005fce2a5 a7 : 0000000000000001 [ 1.961680] s2 : 0000000000000800 s3 : ffffffff80afeb90 s4 : ffffffe01fdf7a80 [ 
1.968898] s5 : ffffffe001375010 s6 : ffffffff8001fc10 s7 : ffffffffffffffff [ 1.976115] s8 : 0000000000000001 s9 : ffffffff80ecb248 s10: ffffffe001b80000 [ 1.983333] s11: ffffffe001b80760 t3 : ffffffff80b39120 t4 : 0000000000000001 [ 1.990550] t5 : 0000000000000000 t6 : ffffffe001600002 [ 1.995859] status: 8000000201800120 badaddr: 0000000000000000 cause: 0000000000000003 [ 2.003771] [] clk_core_unprepare+0xe4/0x168 [ 2.009609] [] clk_unprepare+0x24/0x3c [ 2.014929] [] stmmac_remove_config_dt+0x24/0x4c [ 2.021125] [] sun8i_dwmac_probe+0x378/0x82c [ 2.026965] [] worker_thread+0x1a8/0x4d8 [ 2.032463] [] platform_probe+0x64/0xf0 [ 2.037871] [] really_probe.part.0+0x8c/0x30c [ 2.043795] [] __driver_probe_device+0xa0/0x148 [ 2.049892] [] driver_probe_device+0x38/0x138 [ 2.055815] [] __device_attach_driver+0xd0/0x170 [ 2.061999] [] __driver_attach_async_helper+0xbc/0xc0 [ 2.068616] [] bus_for_each_drv+0x68/0xb4 [ 2.074193] [] __device_attach+0xd8/0x184 [ 2.079769] [] bus_probe_device+0x98/0xbc [ 2.085345] [] deferred_probe_work_func+0x90/0xd4 [ 2.091616] [] process_one_work+0x1e4/0x390 [ 2.097367] [] worker_thread+0x31c/0x4d8 [ 2.102858] [] kthreadd+0x94/0x188 [ 2.107830] [] kthreadd+0x94/0x188 [ 2.112800] [] process_one_work+0x38c/0x390 [ 2.118551] [] kthread+0x124/0x160 [ 2.123520] [] set_kthread_struct+0x5c/0x60 [ 2.129268] [] ret_from_syscall_rejected+0x8/0xc [ 2.135455] ---[ end trace 8d5c6046255f84a1 ]--- Fixes: 5ec55823438e ("net: stmmac: add clocks management for gmac driver") Signed-off-by: Jisheng Zhang Signed-off-by: David S. Miller --- drivers/net/ethernet/stmicro/stmmac/stmmac_main.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index 639a753266e6..bde76ea2deec 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -7252,6 +7252,10 @@ int stmmac_dvr_remove(struct device *dev) netdev_info(priv->dev, "%s: removing driver", __func__); + pm_runtime_get_sync(dev); + pm_runtime_disable(dev); + pm_runtime_put_noidle(dev); + stmmac_stop_all_dma(priv); stmmac_mac_set(priv, priv->ioaddr, false); netif_carrier_off(ndev); @@ -7270,8 +7274,6 @@ int stmmac_dvr_remove(struct device *dev) if (priv->plat->stmmac_rst) reset_control_assert(priv->plat->stmmac_rst); reset_control_assert(priv->plat->stmmac_ahb_rst); - pm_runtime_put(dev); - pm_runtime_disable(dev); if (priv->hw->pcs != STMMAC_PCS_TBI && priv->hw->pcs != STMMAC_PCS_RTBI) stmmac_mdio_unregister(ndev); -- cgit From d9e410ebbed9d091b97bdf45b8a3792e2878dc48 Mon Sep 17 00:00:00 2001 From: Maor Gottlieb Date: Tue, 18 Jan 2022 09:35:00 +0200 Subject: RDMA/cma: Use correct address when leaving multicast group In RoCE we should use cma_iboe_set_mgid() and not cma_set_mgid to generate the mgid, otherwise we will generate an IGMP for an incorrect address. 
Fixes: b5de0c60cc30 ("RDMA/cma: Fix use after free race in roce multicast join") Link: https://lore.kernel.org/r/913bc6783fd7a95fe71ad9454e01653ee6fb4a9a.1642491047.git.leonro@nvidia.com Signed-off-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cma.c | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c index 27a00ce2e101..c447526288f4 100644 --- a/drivers/infiniband/core/cma.c +++ b/drivers/infiniband/core/cma.c @@ -67,8 +67,8 @@ static const char * const cma_events[] = { [RDMA_CM_EVENT_TIMEWAIT_EXIT] = "timewait exit", }; -static void cma_set_mgid(struct rdma_id_private *id_priv, struct sockaddr *addr, - union ib_gid *mgid); +static void cma_iboe_set_mgid(struct sockaddr *addr, union ib_gid *mgid, + enum ib_gid_type gid_type); const char *__attribute_const__ rdma_event_msg(enum rdma_cm_event_type event) { @@ -1846,17 +1846,19 @@ static void destroy_mc(struct rdma_id_private *id_priv, if (dev_addr->bound_dev_if) ndev = dev_get_by_index(dev_addr->net, dev_addr->bound_dev_if); - if (ndev) { + if (ndev && !send_only) { + enum ib_gid_type gid_type; union ib_gid mgid; - cma_set_mgid(id_priv, (struct sockaddr *)&mc->addr, - &mgid); - - if (!send_only) - cma_igmp_send(ndev, &mgid, false); - - dev_put(ndev); + gid_type = id_priv->cma_dev->default_gid_type + [id_priv->id.port_num - + rdma_start_port( + id_priv->cma_dev->device)]; + cma_iboe_set_mgid((struct sockaddr *)&mc->addr, &mgid, + gid_type); + cma_igmp_send(ndev, &mgid, false); } + dev_put(ndev); cancel_work_sync(&mc->iboe_join.work); } -- cgit From 36e8169ec973359f671f9ec7213547059cae972e Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Tue, 18 Jan 2022 09:35:01 +0200 Subject: RDMA/ucma: Protect mc during concurrent multicast leaves Partially revert the commit mentioned in the Fixes line to make sure that allocation and erasing multicast struct are locked. BUG: KASAN: use-after-free in ucma_cleanup_multicast drivers/infiniband/core/ucma.c:491 [inline] BUG: KASAN: use-after-free in ucma_destroy_private_ctx+0x914/0xb70 drivers/infiniband/core/ucma.c:579 Read of size 8 at addr ffff88801bb74b00 by task syz-executor.1/25529 CPU: 0 PID: 25529 Comm: syz-executor.1 Not tainted 5.16.0-rc7-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106 print_address_description.constprop.0.cold+0x8d/0x320 mm/kasan/report.c:247 __kasan_report mm/kasan/report.c:433 [inline] kasan_report.cold+0x83/0xdf mm/kasan/report.c:450 ucma_cleanup_multicast drivers/infiniband/core/ucma.c:491 [inline] ucma_destroy_private_ctx+0x914/0xb70 drivers/infiniband/core/ucma.c:579 ucma_destroy_id+0x1e6/0x280 drivers/infiniband/core/ucma.c:614 ucma_write+0x25c/0x350 drivers/infiniband/core/ucma.c:1732 vfs_write+0x28e/0xae0 fs/read_write.c:588 ksys_write+0x1ee/0x250 fs/read_write.c:643 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Currently the xarray search can touch a concurrently freeing mc as the xa_for_each() is not surrounded by any lock. Rather than hold the lock for a full scan hold it only for the effected items, which is usually an empty list. 
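The shape of the fix generalizes: keep a per-context list next to the xarray and update both under xa_lock(), so teardown walks only its own entries while holding the lock. A stripped-down sketch of that pattern with invented ctx/obj names (an illustration only, not the ucma code itself):

  #include <linux/list.h>
  #include <linux/slab.h>
  #include <linux/xarray.h>

  static DEFINE_XARRAY_ALLOC(obj_table);

  struct ctx { struct list_head obj_list; };      /* objects owned by this context */

  struct obj {
          struct ctx *ctx;
          u32 id;
          struct list_head list;                  /* linked into ctx->obj_list */
  };

  static int obj_add(struct ctx *ctx, struct obj *obj)
  {
          int ret;

          xa_lock(&obj_table);
          /* __xa_alloc() is the caller-locked variant of xa_alloc(). */
          ret = __xa_alloc(&obj_table, &obj->id, NULL, xa_limit_32b, GFP_KERNEL);
          if (!ret)
                  list_add_tail(&obj->list, &ctx->obj_list);
          xa_unlock(&obj_table);
          return ret;
  }

  static void ctx_cleanup(struct ctx *ctx)
  {
          struct obj *obj, *tmp;

          /* Erase only this context's entries, all inside one locked
           * section, so a concurrent leave cannot race with the free.
           */
          xa_lock(&obj_table);
          list_for_each_entry_safe(obj, tmp, &ctx->obj_list, list) {
                  list_del(&obj->list);
                  __xa_erase(&obj_table, obj->id);
                  kfree(obj);
          }
          xa_unlock(&obj_table);
  }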
Fixes: 95fe51096b7a ("RDMA/ucma: Remove mc_list and rely on xarray") Link: https://lore.kernel.org/r/1cda5fabb1081e8d16e39a48d3a4f8160cea88b8.1642491047.git.leonro@nvidia.com Reported-by: syzbot+e3f96c43d19782dd14a7@syzkaller.appspotmail.com Suggested-by: Jason Gunthorpe Reviewed-by: Maor Gottlieb Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/ucma.c | 34 +++++++++++++++++++++++----------- 1 file changed, 23 insertions(+), 11 deletions(-) diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c index 2b72c4fa9550..9d6ac9dff39a 100644 --- a/drivers/infiniband/core/ucma.c +++ b/drivers/infiniband/core/ucma.c @@ -95,6 +95,7 @@ struct ucma_context { u64 uid; struct list_head list; + struct list_head mc_list; struct work_struct close_work; }; @@ -105,6 +106,7 @@ struct ucma_multicast { u64 uid; u8 join_state; + struct list_head list; struct sockaddr_storage addr; }; @@ -198,6 +200,7 @@ static struct ucma_context *ucma_alloc_ctx(struct ucma_file *file) INIT_WORK(&ctx->close_work, ucma_close_id); init_completion(&ctx->comp); + INIT_LIST_HEAD(&ctx->mc_list); /* So list_del() will work if we don't do ucma_finish_ctx() */ INIT_LIST_HEAD(&ctx->list); ctx->file = file; @@ -484,19 +487,19 @@ err1: static void ucma_cleanup_multicast(struct ucma_context *ctx) { - struct ucma_multicast *mc; - unsigned long index; + struct ucma_multicast *mc, *tmp; - xa_for_each(&multicast_table, index, mc) { - if (mc->ctx != ctx) - continue; + xa_lock(&multicast_table); + list_for_each_entry_safe(mc, tmp, &ctx->mc_list, list) { + list_del(&mc->list); /* * At this point mc->ctx->ref is 0 so the mc cannot leave the * lock on the reader and this is enough serialization */ - xa_erase(&multicast_table, index); + __xa_erase(&multicast_table, mc->id); kfree(mc); } + xa_unlock(&multicast_table); } static void ucma_cleanup_mc_events(struct ucma_multicast *mc) @@ -1469,12 +1472,16 @@ static ssize_t ucma_process_join(struct ucma_file *file, mc->uid = cmd->uid; memcpy(&mc->addr, addr, cmd->addr_size); - if (xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, + xa_lock(&multicast_table); + if (__xa_alloc(&multicast_table, &mc->id, NULL, xa_limit_32b, GFP_KERNEL)) { ret = -ENOMEM; goto err_free_mc; } + list_add_tail(&mc->list, &ctx->mc_list); + xa_unlock(&multicast_table); + mutex_lock(&ctx->mutex); ret = rdma_join_multicast(ctx->cm_id, (struct sockaddr *)&mc->addr, join_state, mc); @@ -1500,8 +1507,11 @@ err_leave_multicast: mutex_unlock(&ctx->mutex); ucma_cleanup_mc_events(mc); err_xa_erase: - xa_erase(&multicast_table, mc->id); + xa_lock(&multicast_table); + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); err_free_mc: + xa_unlock(&multicast_table); kfree(mc); err_put_ctx: ucma_put_ctx(ctx); @@ -1569,15 +1579,17 @@ static ssize_t ucma_leave_multicast(struct ucma_file *file, mc = ERR_PTR(-EINVAL); else if (!refcount_inc_not_zero(&mc->ctx->ref)) mc = ERR_PTR(-ENXIO); - else - __xa_erase(&multicast_table, mc->id); - xa_unlock(&multicast_table); if (IS_ERR(mc)) { + xa_unlock(&multicast_table); ret = PTR_ERR(mc); goto out; } + list_del(&mc->list); + __xa_erase(&multicast_table, mc->id); + xa_unlock(&multicast_table); + mutex_lock(&mc->ctx->mutex); rdma_leave_multicast(mc->ctx->cm_id, (struct sockaddr *) &mc->addr); mutex_unlock(&mc->ctx->mutex); -- cgit From a75badebfdc0b3823054bedf112edb54d6357c75 Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Tue, 18 Jan 2022 12:11:04 +0300 Subject: RDMA/siw: Fix refcounting leak in siw_create_qp() The atomic_inc() needs 
to be paired with an atomic_dec() on the error path. Fixes: 514aee660df4 ("RDMA: Globally allocate and release QP memory") Link: https://lore.kernel.org/r/20220118091104.GA11671@kili Signed-off-by: Dan Carpenter Reviewed-by: Leon Romanovsky Reviewed-by: Bernard Metzler Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/siw/siw_verbs.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/infiniband/sw/siw/siw_verbs.c b/drivers/infiniband/sw/siw/siw_verbs.c index a3dd2cb6d5c9..54ef367b074a 100644 --- a/drivers/infiniband/sw/siw/siw_verbs.c +++ b/drivers/infiniband/sw/siw/siw_verbs.c @@ -313,7 +313,8 @@ int siw_create_qp(struct ib_qp *ibqp, struct ib_qp_init_attr *attrs, if (atomic_inc_return(&sdev->num_qp) > SIW_MAX_QP) { siw_dbg(base_dev, "too many QP's\n"); - return -ENOMEM; + rv = -ENOMEM; + goto err_atomic; } if (attrs->qp_type != IB_QPT_RC) { siw_dbg(base_dev, "only RC QP's supported\n"); -- cgit From 3c75c0ea5da749bd1efebd1387f2e5011b8c7d78 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 19 Jan 2022 16:52:48 +0100 Subject: ASoC: soc-pcm: Fix DPCM lockdep warning due to nested stream locks The recent change for DPCM locking caused spurious lockdep warnings. Actually the warnings are false-positive, as those are triggered due to the nested stream locks for FE and BE. Since both locks belong to the same lock class, lockdep sees it as if a deadlock. For fixing this, we need to take PCM stream locks for BE with the nested lock primitives. Since currently snd_pcm_stream_lock*() helper assumes only the top-level single locking, a new helper function snd_pcm_stream_lock_irqsave_nested() is defined for a single-depth nested lock, which is now used in the BE DAI trigger that is always performed inside a FE stream lock. Fixes: b2ae80663008 ("ASoC: soc-pcm: serialize BE triggers") Reported-and-tested-by: Hans de Goede Reported-and-tested-by: Marek Szyprowski Link: https://lore.kernel.org/r/73018f3c-9769-72ea-0325-b3f8e2381e30@redhat.com Link: https://lore.kernel.org/alsa-devel/9a0abddd-49e9-872d-2f00-a1697340f786@samsung.com Signed-off-by: Takashi Iwai Link: https://lore.kernel.org/r/20220119155249.26754-2-tiwai@suse.de Signed-off-by: Mark Brown --- include/sound/pcm.h | 15 +++++++++++++++ sound/core/pcm_native.c | 13 +++++++++++++ sound/soc/soc-pcm.c | 6 +++--- 3 files changed, 31 insertions(+), 3 deletions(-) diff --git a/include/sound/pcm.h b/include/sound/pcm.h index 33451f8ff755..524220fe1af6 100644 --- a/include/sound/pcm.h +++ b/include/sound/pcm.h @@ -614,6 +614,7 @@ void snd_pcm_stream_unlock(struct snd_pcm_substream *substream); void snd_pcm_stream_lock_irq(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock_irq(struct snd_pcm_substream *substream); unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream); +unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream); /** * snd_pcm_stream_lock_irqsave - Lock the PCM stream @@ -632,6 +633,20 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream); void snd_pcm_stream_unlock_irqrestore(struct snd_pcm_substream *substream, unsigned long flags); +/** + * snd_pcm_stream_lock_irqsave_nested - Single-nested PCM stream locking + * @substream: PCM substream + * @flags: irq flags + * + * This locks the PCM stream like snd_pcm_stream_lock_irqsave() but with + * the single-depth lockdep subclass. 
+ */ #define snd_pcm_stream_lock_irqsave_nested(substream, flags) \ + do { \ + typecheck(unsigned long, flags); \ + flags = _snd_pcm_stream_lock_irqsave_nested(substream); \ + } while (0) + /** * snd_pcm_group_for_each_entry - iterate over the linked substreams * @s: the iterator diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 621883e71194..a056b3ef3c84 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -172,6 +172,19 @@ unsigned long _snd_pcm_stream_lock_irqsave(struct snd_pcm_substream *substream) } EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave); +unsigned long _snd_pcm_stream_lock_irqsave_nested(struct snd_pcm_substream *substream) +{ + unsigned long flags = 0; + if (substream->pcm->nonatomic) + mutex_lock_nested(&substream->self_group.mutex, + SINGLE_DEPTH_NESTING); + else + spin_lock_irqsave_nested(&substream->self_group.lock, flags, + SINGLE_DEPTH_NESTING); + return flags; +} +EXPORT_SYMBOL_GPL(_snd_pcm_stream_lock_irqsave_nested); + /** * snd_pcm_stream_unlock_irqrestore - Unlock the PCM stream * @substream: PCM substream diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index 7abfc48b26ca..e8876e65c649 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -46,8 +46,8 @@ static inline void snd_soc_dpcm_stream_lock_irq(struct snd_soc_pcm_runtime *rtd, snd_pcm_stream_lock_irq(snd_soc_dpcm_get_substream(rtd, stream)); } -#define snd_soc_dpcm_stream_lock_irqsave(rtd, stream, flags) \ - snd_pcm_stream_lock_irqsave(snd_soc_dpcm_get_substream(rtd, stream), flags) +#define snd_soc_dpcm_stream_lock_irqsave_nested(rtd, stream, flags) \ + snd_pcm_stream_lock_irqsave_nested(snd_soc_dpcm_get_substream(rtd, stream), flags) static inline void snd_soc_dpcm_stream_unlock_irq(struct snd_soc_pcm_runtime *rtd, int stream) @@ -2094,7 +2094,7 @@ int dpcm_be_dai_trigger(struct snd_soc_pcm_runtime *fe, int stream, be = dpcm->be; be_substream = snd_soc_dpcm_get_substream(be, stream); - snd_soc_dpcm_stream_lock_irqsave(be, stream, flags); + snd_soc_dpcm_stream_lock_irqsave_nested(be, stream, flags); /* is this op for this BE ? */ if (!snd_soc_dpcm_be_can_update(fe, be, stream)) -- cgit From 9f620684c1ef5a002b6622ecc7b5818e81252f48 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Wed, 19 Jan 2022 16:52:49 +0100 Subject: ASoC: soc-pcm: Move debugfs removal out of spinlock The recent fix for DPCM locking also covered the loop in dpcm_be_disconnect() with the FE stream lock. This caused an unexpected side effect, though: calling debugfs_remove_recursive() inside the spinlock may lead to lockdep splats, as the code there assumes a softirq-safe context. To avoid the problem, this patch changes the disconnection procedure to two phases: at first, the matching entries are removed from the linked list, then the resources are freed outside the lock.
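The same two-phase shape is useful anywhere the destructors may sleep while the list itself is guarded by a spinlock: unlink everything onto a private list under the lock, then do the heavy freeing outside it. A minimal sketch with invented names (an illustration only, not the DPCM code):

  #include <linux/list.h>
  #include <linux/slab.h>
  #include <linux/spinlock.h>

  struct item { struct list_head node; };

  static void disconnect_all(spinlock_t *lock, struct list_head *live)
  {
          struct item *it, *tmp;
          LIST_HEAD(doomed);

          /* Phase 1: unlink under the lock; nothing is freed yet. */
          spin_lock_irq(lock);
          list_for_each_entry_safe(it, tmp, live, node)
                  list_move(&it->node, &doomed);
          spin_unlock_irq(lock);

          /* Phase 2: the heavyweight cleanup (debugfs removal, kfree, ...)
           * runs outside the spinlock, where sleeping is allowed.
           */
          while (!list_empty(&doomed)) {
                  it = list_first_entry(&doomed, struct item, node);
                  list_del(&it->node);
                  kfree(it);
          }
  }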
Fixes: b7898396f4bb ("ASoC: soc-pcm: Fix and cleanup DPCM locking") Reported-and-tested-by: Marek Szyprowski Signed-off-by: Takashi Iwai Link: https://lore.kernel.org/r/20220119155249.26754-3-tiwai@suse.de Signed-off-by: Mark Brown --- sound/soc/soc-pcm.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c index e8876e65c649..9a954680d492 100644 --- a/sound/soc/soc-pcm.c +++ b/sound/soc/soc-pcm.c @@ -1268,6 +1268,7 @@ static void dpcm_be_reparent(struct snd_soc_pcm_runtime *fe, void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream) { struct snd_soc_dpcm *dpcm, *d; + LIST_HEAD(deleted_dpcms); snd_soc_dpcm_mutex_assert_held(fe); @@ -1287,13 +1288,18 @@ void dpcm_be_disconnect(struct snd_soc_pcm_runtime *fe, int stream) /* BEs still alive need new FE */ dpcm_be_reparent(fe, dpcm->be, stream); - dpcm_remove_debugfs_state(dpcm); - list_del(&dpcm->list_be); + list_move(&dpcm->list_fe, &deleted_dpcms); + } + snd_soc_dpcm_stream_unlock_irq(fe, stream); + + while (!list_empty(&deleted_dpcms)) { + dpcm = list_first_entry(&deleted_dpcms, struct snd_soc_dpcm, + list_fe); list_del(&dpcm->list_fe); + dpcm_remove_debugfs_state(dpcm); kfree(dpcm); } - snd_soc_dpcm_stream_unlock_irq(fe, stream); } /* get BE for DAI widget and stream */ -- cgit From 06feec6005c9d9500cd286ec440aabf8b2ddd94d Mon Sep 17 00:00:00 2001 From: Dmitry Osipenko Date: Wed, 12 Jan 2022 22:50:39 +0300 Subject: ASoC: hdmi-codec: Fix OOB memory accesses Correct size of iec_status array by changing it to the size of status array of the struct snd_aes_iec958. This fixes out-of-bounds slab read accesses made by memcpy() of the hdmi-codec driver. This problem is reported by KASAN. Cc: stable@vger.kernel.org Signed-off-by: Dmitry Osipenko Link: https://lore.kernel.org/r/20220112195039.1329-1-digetx@gmail.com Signed-off-by: Mark Brown --- include/uapi/sound/asound.h | 4 +++- sound/soc/codecs/hdmi-codec.c | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h index ff7e638221c5..228279ea0670 100644 --- a/include/uapi/sound/asound.h +++ b/include/uapi/sound/asound.h @@ -56,8 +56,10 @@ * * ****************************************************************************/ +#define AES_IEC958_STATUS_SIZE 24 + struct snd_aes_iec958 { - unsigned char status[24]; /* AES/IEC958 channel status bits */ + unsigned char status[AES_IEC958_STATUS_SIZE]; /* AES/IEC958 channel status bits */ unsigned char subcode[147]; /* AES/IEC958 subcode bits */ unsigned char pad; /* nothing */ unsigned char dig_subframe[4]; /* AES/IEC958 subframe bits */ diff --git a/sound/soc/codecs/hdmi-codec.c b/sound/soc/codecs/hdmi-codec.c index b61f980cabdc..b07607a9ecea 100644 --- a/sound/soc/codecs/hdmi-codec.c +++ b/sound/soc/codecs/hdmi-codec.c @@ -277,7 +277,7 @@ struct hdmi_codec_priv { bool busy; struct snd_soc_jack *jack; unsigned int jack_status; - u8 iec_status[5]; + u8 iec_status[AES_IEC958_STATUS_SIZE]; }; static const struct snd_soc_dapm_widget hdmi_widgets[] = { -- cgit From 4045daf0fa87846a27f56329fddad2deeb5ca354 Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Wed, 26 Jan 2022 12:03:25 +0200 Subject: ASoC: rt5682: Fix deadlock on resume On resume from suspend the following chain of events can happen: A rt5682_resume() -> mod_delayed_work() for jack_detect_work B DAPM sequence starts ( DAPM is locked now) A1. 
rt5682_jack_detect_handler() scheduled - Takes both jdet_mutex and calibrate_mutex - Calls in to rt5682_headset_detect() which tries to take DAPM lock, it starts to wait for it as B path took it already. B1. DAPM sequence reaches the "HP Amp", rt5682_hp_event() tries to take the jdet_mutex, but it is locked in A1, so it waits. Deadlock. To solve the deadlock, drop the jdet_mutex, use the jack_detect_work to do the jack removal handling, move the dapm lock up one level to protect the most of the rt5682_jack_detect_handler(), but not the jack reporting as it might trigger a DAPM sequence. The rt5682_headset_detect() can be changed to static as well. Fixes: 8deb34a90f063 ("ASoC: rt5682: fix the wrong jack type detected") Signed-off-by: Peter Ujfalusi Link: https://lore.kernel.org/r/20220126100325.16513-1-peter.ujfalusi@linux.intel.com Signed-off-by: Mark Brown --- sound/soc/codecs/rt5682-i2c.c | 15 ++++----------- sound/soc/codecs/rt5682.c | 24 ++++++++---------------- sound/soc/codecs/rt5682.h | 2 -- 3 files changed, 12 insertions(+), 29 deletions(-) diff --git a/sound/soc/codecs/rt5682-i2c.c b/sound/soc/codecs/rt5682-i2c.c index 20e0f90ea498..20fc0f3766de 100644 --- a/sound/soc/codecs/rt5682-i2c.c +++ b/sound/soc/codecs/rt5682-i2c.c @@ -59,18 +59,12 @@ static void rt5682_jd_check_handler(struct work_struct *work) struct rt5682_priv *rt5682 = container_of(work, struct rt5682_priv, jd_check_work.work); - if (snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL) - & RT5682_JDH_RS_MASK) { + if (snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL) & RT5682_JDH_RS_MASK) /* jack out */ - rt5682->jack_type = rt5682_headset_detect(rt5682->component, 0); - - snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type, - SND_JACK_HEADSET | - SND_JACK_BTN_0 | SND_JACK_BTN_1 | - SND_JACK_BTN_2 | SND_JACK_BTN_3); - } else { + mod_delayed_work(system_power_efficient_wq, + &rt5682->jack_detect_work, 0); + else schedule_delayed_work(&rt5682->jd_check_work, 500); - } } static irqreturn_t rt5682_irq(int irq, void *data) @@ -198,7 +192,6 @@ static int rt5682_i2c_probe(struct i2c_client *i2c, } mutex_init(&rt5682->calibrate_mutex); - mutex_init(&rt5682->jdet_mutex); rt5682_calibrate(rt5682); rt5682_apply_patch_list(rt5682, &i2c->dev); diff --git a/sound/soc/codecs/rt5682.c b/sound/soc/codecs/rt5682.c index 415ec564c82e..0a0ec4a021e1 100644 --- a/sound/soc/codecs/rt5682.c +++ b/sound/soc/codecs/rt5682.c @@ -922,15 +922,13 @@ static void rt5682_enable_push_button_irq(struct snd_soc_component *component, * * Returns detect status. 
*/ -int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert) +static int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert) { struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component); struct snd_soc_dapm_context *dapm = &component->dapm; unsigned int val, count; if (jack_insert) { - snd_soc_dapm_mutex_lock(dapm); - snd_soc_component_update_bits(component, RT5682_PWR_ANLG_1, RT5682_PWR_VREF2 | RT5682_PWR_MB, RT5682_PWR_VREF2 | RT5682_PWR_MB); @@ -981,8 +979,6 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert) snd_soc_component_update_bits(component, RT5682_MICBIAS_2, RT5682_PWR_CLK25M_MASK | RT5682_PWR_CLK1M_MASK, RT5682_PWR_CLK25M_PU | RT5682_PWR_CLK1M_PU); - - snd_soc_dapm_mutex_unlock(dapm); } else { rt5682_enable_push_button_irq(component, false); snd_soc_component_update_bits(component, RT5682_CBJ_CTRL_1, @@ -1011,7 +1007,6 @@ int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert) dev_dbg(component->dev, "jack_type = %d\n", rt5682->jack_type); return rt5682->jack_type; } -EXPORT_SYMBOL_GPL(rt5682_headset_detect); static int rt5682_set_jack_detect(struct snd_soc_component *component, struct snd_soc_jack *hs_jack, void *data) @@ -1094,6 +1089,7 @@ void rt5682_jack_detect_handler(struct work_struct *work) { struct rt5682_priv *rt5682 = container_of(work, struct rt5682_priv, jack_detect_work.work); + struct snd_soc_dapm_context *dapm; int val, btn_type; while (!rt5682->component) @@ -1102,7 +1098,9 @@ void rt5682_jack_detect_handler(struct work_struct *work) while (!rt5682->component->card->instantiated) usleep_range(10000, 15000); - mutex_lock(&rt5682->jdet_mutex); + dapm = snd_soc_component_get_dapm(rt5682->component); + + snd_soc_dapm_mutex_lock(dapm); mutex_lock(&rt5682->calibrate_mutex); val = snd_soc_component_read(rt5682->component, RT5682_AJD1_CTRL) @@ -1162,6 +1160,9 @@ void rt5682_jack_detect_handler(struct work_struct *work) rt5682->irq_work_delay_time = 50; } + mutex_unlock(&rt5682->calibrate_mutex); + snd_soc_dapm_mutex_unlock(dapm); + snd_soc_jack_report(rt5682->hs_jack, rt5682->jack_type, SND_JACK_HEADSET | SND_JACK_BTN_0 | SND_JACK_BTN_1 | @@ -1174,9 +1175,6 @@ void rt5682_jack_detect_handler(struct work_struct *work) else cancel_delayed_work_sync(&rt5682->jd_check_work); } - - mutex_unlock(&rt5682->calibrate_mutex); - mutex_unlock(&rt5682->jdet_mutex); } EXPORT_SYMBOL_GPL(rt5682_jack_detect_handler); @@ -1526,7 +1524,6 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w, { struct snd_soc_component *component = snd_soc_dapm_to_component(w->dapm); - struct rt5682_priv *rt5682 = snd_soc_component_get_drvdata(component); switch (event) { case SND_SOC_DAPM_PRE_PMU: @@ -1538,17 +1535,12 @@ static int rt5682_hp_event(struct snd_soc_dapm_widget *w, RT5682_DEPOP_1, 0x60, 0x60); snd_soc_component_update_bits(component, RT5682_DAC_ADC_DIG_VOL1, 0x00c0, 0x0080); - - mutex_lock(&rt5682->jdet_mutex); - snd_soc_component_update_bits(component, RT5682_HP_CTRL_2, RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN, RT5682_HP_C2_DAC_L_EN | RT5682_HP_C2_DAC_R_EN); usleep_range(5000, 10000); snd_soc_component_update_bits(component, RT5682_CHARGE_PUMP_1, RT5682_CP_SW_SIZE_MASK, RT5682_CP_SW_SIZE_L); - - mutex_unlock(&rt5682->jdet_mutex); break; case SND_SOC_DAPM_POST_PMD: diff --git a/sound/soc/codecs/rt5682.h b/sound/soc/codecs/rt5682.h index c917c76200ea..52ff0d9c36c5 100644 --- a/sound/soc/codecs/rt5682.h +++ b/sound/soc/codecs/rt5682.h @@ -1463,7 +1463,6 @@ struct 
rt5682_priv { int jack_type; int irq_work_delay_time; - struct mutex jdet_mutex; }; extern const char *rt5682_supply_names[RT5682_NUM_SUPPLIES]; @@ -1473,7 +1472,6 @@ int rt5682_sel_asrc_clk_src(struct snd_soc_component *component, void rt5682_apply_patch_list(struct rt5682_priv *rt5682, struct device *dev); -int rt5682_headset_detect(struct snd_soc_component *component, int jack_insert); void rt5682_jack_detect_handler(struct work_struct *work); bool rt5682_volatile_register(struct device *dev, unsigned int reg); -- cgit From 483529f3209f56d4c7a465d045278a2546ae7ed9 Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 27 Jan 2022 16:02:34 +0000 Subject: Fix a warning about a malformed kernel doc comment in cifs Fix by removing the extra asterisk. Signed-off-by: David Howells Acked-by: Jeff Layton Reviewed-by: Rohith Surabattula Signed-off-by: Steve French --- fs/cifs/connect.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index 11a22a30ee14..ed210d774a21 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -162,7 +162,7 @@ static void cifs_resolve_server(struct work_struct *work) mutex_unlock(&server->srv_mutex); } -/** +/* * Mark all sessions and tcons for reconnect. * * @server needs to be previously set to CifsNeedReconnect. -- cgit From b856101a1774b5f1c8c99e8dfdef802856520732 Mon Sep 17 00:00:00 2001 From: Mark Zhang Date: Wed, 19 Jan 2022 10:37:55 +0200 Subject: IB/cm: Release previously acquired reference counter in the cm_id_priv In failure flow, the reference counter acquired was not released, and the following error was reported: drivers/infiniband/core/cm.c:3373 cm_lap_handler() warn: inconsistent refcounting 'cm_id_priv->refcount.refs.counter': Fixes: 7345201c3963 ("IB/cm: Improve the calling of cm_init_av_for_lap and cm_init_av_by_path") Link: https://lore.kernel.org/r/7615f23bbb5c5b66d03f6fa13e1c99d51dae6916.1642581448.git.leonro@nvidia.com Reported-by: Dan Carpenter Signed-off-by: Mark Zhang Signed-off-by: Leon Romanovsky Signed-off-by: Jason Gunthorpe --- drivers/infiniband/core/cm.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c index c903b74f46a4..35f0d5e7533d 100644 --- a/drivers/infiniband/core/cm.c +++ b/drivers/infiniband/core/cm.c @@ -3322,7 +3322,7 @@ static int cm_lap_handler(struct cm_work *work) ret = cm_init_av_by_path(param->alternate_path, NULL, &alt_av); if (ret) { rdma_destroy_ah_attr(&ah_attr); - return -EINVAL; + goto deref; } spin_lock_irq(&cm_id_priv->lock); -- cgit From 4028bccb003cf67e46632dee7f97ddc5d7b6e685 Mon Sep 17 00:00:00 2001 From: Mike Marciniszyn Date: Wed, 19 Jan 2022 04:28:09 -0500 Subject: IB/rdmavt: Validate remote_addr during loopback atomic tests The rdma-core test suite sends an unaligned remote address and expects a failure. ERROR: test_atomic_non_aligned_addr (tests.test_atomic.AtomicTest) The qib/hfi1 rc handling validates properly, but the test has the client and server on the same system. The loopback of these operations is a distinct code path. Fix by syntaxing the proposed remote address in the loopback code path. 
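The validation itself is the usual power-of-two alignment test: for an 8-byte atomic, the low three bits of the remote address must be zero. A small sketch of the check (hypothetical helper name, not the rdmavt code):

  #include <linux/types.h>

  /* True if @remote_addr is naturally aligned for a 64-bit atomic. */
  static bool atomic_addr_ok(u64 remote_addr)
  {
          return !(remote_addr & (sizeof(u64) - 1));
  }

  /* e.g. 0x1000 passes, 0x1004 is rejected, which is what the
   * rdma-core test_atomic_non_aligned_addr case expects to see.
   */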
Fixes: 15703461533a ("IB/{hfi1, qib, rdmavt}: Move ruc_loopback to rdmavt") Link: https://lore.kernel.org/r/1642584489-141005-1-git-send-email-mike.marciniszyn@cornelisnetworks.com Reviewed-by: Dennis Dalessandro Signed-off-by: Mike Marciniszyn Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/rdmavt/qp.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/infiniband/sw/rdmavt/qp.c b/drivers/infiniband/sw/rdmavt/qp.c index 3305f2744bfa..ae50b56e8913 100644 --- a/drivers/infiniband/sw/rdmavt/qp.c +++ b/drivers/infiniband/sw/rdmavt/qp.c @@ -3073,6 +3073,8 @@ do_write: case IB_WR_ATOMIC_FETCH_AND_ADD: if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto inv_err; + if (unlikely(wqe->atomic_wr.remote_addr & (sizeof(u64) - 1))) + goto inv_err; if (unlikely(!rvt_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), wqe->atomic_wr.remote_addr, wqe->atomic_wr.rkey, -- cgit From 279eb8575fdaa92c314a54c0d583c65e26229107 Mon Sep 17 00:00:00 2001 From: Sergey Shtylyov Date: Mon, 24 Jan 2022 21:55:02 +0300 Subject: EDAC/altera: Fix deferred probing The driver overrides the error codes returned by platform_get_irq() to -ENODEV for some strange reason, so if it returns -EPROBE_DEFER, the driver will fail the probe permanently instead of the deferred probing. Switch to propagating the proper error codes to platform driver code upwards. [ bp: Massage commit message. ] Fixes: 71bcada88b0f ("edac: altera: Add Altera SDRAM EDAC support") Signed-off-by: Sergey Shtylyov Signed-off-by: Borislav Petkov Acked-by: Dinh Nguyen Cc: Link: https://lore.kernel.org/r/20220124185503.6720-2-s.shtylyov@omp.ru --- drivers/edac/altera_edac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/edac/altera_edac.c b/drivers/edac/altera_edac.c index 3a6d2416cb0f..5dd29789f97d 100644 --- a/drivers/edac/altera_edac.c +++ b/drivers/edac/altera_edac.c @@ -350,7 +350,7 @@ static int altr_sdram_probe(struct platform_device *pdev) if (irq < 0) { edac_printk(KERN_ERR, EDAC_MC, "No irq %d in DT\n", irq); - return -ENODEV; + return irq; } /* Arria10 has a 2nd IRQ */ -- cgit From 1601033da2dd2052e0489137f7788a46a8fcd82f Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Fri, 28 Jan 2022 19:24:43 +0000 Subject: ASoC: ops: Check for negative values before reading them The controls allow inputs to be specified as negative, but our manipulation of them into register fields needs to be done on unsigned variables, so the checks for negative numbers weren't taking effect properly. Do the checks for negative values on the variable in the ABI struct rather than on our local unsigned copy.
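The pitfall is easy to reproduce in isolation: once the control value has been copied into an unsigned variable, a negative test on that copy can never fire. A stand-alone illustration in plain C (not the soc-ops code itself):

  #include <errno.h>
  #include <stdio.h>

  static int put_value(long abi_val)
  {
          unsigned int val = abi_val;     /* unsigned working copy: -1 becomes 0xffffffff */

          if (val < 0)                    /* dead code: an unsigned value is never negative */
                  return -EINVAL;
          if (abi_val < 0)                /* the check has to look at the signed ABI value */
                  return -EINVAL;
          return 0;                       /* val would now be masked into a register field */
  }

  int main(void)
  {
          /* -1 is rejected only by the check on the signed ABI value. */
          printf("put_value(-1) = %d\n", put_value(-1));
          return 0;
  }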
Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220128192443.3504823-1-broonie@kernel.org Signed-off-by: Mark Brown --- sound/soc/soc-ops.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/sound/soc/soc-ops.c b/sound/soc/soc-ops.c index dc0e7c8d31f3..9833611b83d1 100644 --- a/sound/soc/soc-ops.c +++ b/sound/soc/soc-ops.c @@ -316,26 +316,26 @@ int snd_soc_put_volsw(struct snd_kcontrol *kcontrol, if (sign_bit) mask = BIT(sign_bit + 1) - 1; + if (ucontrol->value.integer.value[0] < 0) + return -EINVAL; val = ucontrol->value.integer.value[0]; if (mc->platform_max && val > mc->platform_max) return -EINVAL; if (val > max - min) return -EINVAL; - if (val < 0) - return -EINVAL; val = (val + min) & mask; if (invert) val = max - val; val_mask = mask << shift; val = val << shift; if (snd_soc_volsw_is_stereo(mc)) { + if (ucontrol->value.integer.value[1] < 0) + return -EINVAL; val2 = ucontrol->value.integer.value[1]; if (mc->platform_max && val2 > mc->platform_max) return -EINVAL; if (val2 > max - min) return -EINVAL; - if (val2 < 0) - return -EINVAL; val2 = (val2 + min) & mask; if (invert) val2 = max - val2; @@ -423,13 +423,13 @@ int snd_soc_put_volsw_sx(struct snd_kcontrol *kcontrol, int err = 0; unsigned int val, val_mask; + if (ucontrol->value.integer.value[0] < 0) + return -EINVAL; val = ucontrol->value.integer.value[0]; if (mc->platform_max && val > mc->platform_max) return -EINVAL; if (val > max - min) return -EINVAL; - if (val < 0) - return -EINVAL; val_mask = mask << shift; val = (val + min) & mask; val = val << shift; -- cgit From dbe0d009d8c23c7408da9721c1378a5f661aaa83 Mon Sep 17 00:00:00 2001 From: Alexander Stein Date: Fri, 14 Jan 2022 09:59:06 +0100 Subject: arm64: dts: freescale: Fix sound card model for MBa8Mx The audio codec connection on MBa8Mx is identical to MBa7 (imx7) and MBa6 (imx6). Use the same sound card model as well. Fixes commit dfcd1b6f7620 ("arm64: dts: freescale: add initial device tree for TQMa8MQML with i.MX8MM") Signed-off-by: Alexander Stein Signed-off-by: Shawn Guo --- arch/arm64/boot/dts/freescale/mba8mx.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/freescale/mba8mx.dtsi b/arch/arm64/boot/dts/freescale/mba8mx.dtsi index f27e3c8de916..ce6d5bdba0a8 100644 --- a/arch/arm64/boot/dts/freescale/mba8mx.dtsi +++ b/arch/arm64/boot/dts/freescale/mba8mx.dtsi @@ -91,7 +91,7 @@ sound { compatible = "fsl,imx-audio-tlv320aic32x4"; - model = "tqm-tlv320aic32"; + model = "imx-audio-tlv320aic32x4"; ssi-controller = <&sai3>; audio-codec = <&tlv320aic3x04>; }; -- cgit From ff3cfc35a4dd4b432e33eee4f6d772411d70399f Mon Sep 17 00:00:00 2001 From: Michael Walle Date: Sat, 15 Jan 2022 22:04:32 +0100 Subject: arm64: dts: ls1028a: sl28: re-enable ftm_alarm0 Commit dd3d936a1b17 ("arm64: dts: ls1028a: add ftm_alarm1 node to be used as wakeup source") disables ftm_alarm0 in the SoC dtsi but doesn't enable it on the board which is still using it. Re-enable it on the sl28 board. 
Fixes: dd3d936a1b17 ("arm64: dts: ls1028a: add ftm_alarm1 node to be used as wakeup source") Reported-by: Guillaume Tucker Reported-by: "kernelci.org bot" Signed-off-by: Michael Walle Signed-off-by: Shawn Guo --- arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts index d74e738e4070..c03f4e183389 100644 --- a/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts +++ b/arch/arm64/boot/dts/freescale/fsl-ls1028a-kontron-sl28.dts @@ -157,6 +157,10 @@ }; }; +&ftm_alarm0 { + status = "okay"; +}; + &gpio1 { gpio-line-names = "", "", "", "", "", "", "", "", -- cgit From 6d58c5e21a3fe355ce6d1808e96d02a610265218 Mon Sep 17 00:00:00 2001 From: Rob Herring Date: Thu, 20 Jan 2022 11:23:55 -0600 Subject: ARM: dts: imx7ulp: Fix 'assigned-clocks-parents' typo The correct property name is 'assigned-clock-parents', not 'assigned-clocks-parents'. Though if the platform works with the typo, one has to wonder if the property is even needed. Signed-off-by: Rob Herring Fixes: 8b8c7d97e2c7 ("ARM: dts: imx7ulp: Add wdog1 node") Signed-off-by: Shawn Guo --- arch/arm/boot/dts/imx7ulp.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/boot/dts/imx7ulp.dtsi b/arch/arm/boot/dts/imx7ulp.dtsi index b7ea37ad4e55..bcec98b96411 100644 --- a/arch/arm/boot/dts/imx7ulp.dtsi +++ b/arch/arm/boot/dts/imx7ulp.dtsi @@ -259,7 +259,7 @@ interrupts = ; clocks = <&pcc2 IMX7ULP_CLK_WDG1>; assigned-clocks = <&pcc2 IMX7ULP_CLK_WDG1>; - assigned-clocks-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>; + assigned-clock-parents = <&scg1 IMX7ULP_CLK_FIRC_BUS_CLK>; timeout-sec = <40>; }; -- cgit From 283d45145fbf460dbaf0229cacd7ed60ec52f364 Mon Sep 17 00:00:00 2001 From: Martin Kepplinger Date: Fri, 21 Jan 2022 10:33:25 +0100 Subject: arm64: dts: imx8mq: fix mipi_csi bidirectional port numbers The port numbers for the imx8mq mipi csi controller are wrong and the mipi driver can't find any media devices as port@1 is connected to the CSI bridge, not port@0. And port@0 is connected to the source - the sensor. Fix this. Fixes: bcadd5f66c2a ("arm64: dts: imx8mq: add mipi csi phy and csi bridge descriptions") Signed-off-by: Martin Kepplinger Signed-off-by: Shawn Guo --- arch/arm64/boot/dts/freescale/imx8mq.dtsi | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index 2df2510d0118..bb68c94c2fc9 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -1151,8 +1151,8 @@ #address-cells = <1>; #size-cells = <0>; - port@0 { - reg = <0>; + port@1 { + reg = <1>; csi1_mipi_ep: endpoint { remote-endpoint = <&csi1_ep>; @@ -1203,8 +1203,8 @@ #address-cells = <1>; #size-cells = <0>; - port@0 { - reg = <0>; + port@1 { + reg = <1>; csi2_mipi_ep: endpoint { remote-endpoint = <&csi2_ep>; -- cgit From 5ea62d06b1899f63c4374f52c8d40c43cad69ec0 Mon Sep 17 00:00:00 2001 From: Martin Kepplinger Date: Fri, 21 Jan 2022 10:33:26 +0100 Subject: arm64: dts: imx8mq-librem5: fix mipi_csi1 port number to sensor Since the previous commit fixed a hardware description bug for imx8mq, we need to fix up all DT users like this. The mipi_csi port@0 is connected to the sensor, not port@1. 
Fixes: fed7603597fa ("arm64: dts: imx8mq-librem5: describe the selfie cam") Signed-off-by: Martin Kepplinger Signed-off-by: Shawn Guo --- arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi index f3e3418f7edc..2d4a472af6a9 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq-librem5.dtsi @@ -1115,8 +1115,8 @@ status = "okay"; ports { - port@1 { - reg = <1>; + port@0 { + reg = <0>; mipi1_sensor_ep: endpoint { remote-endpoint = <&camera1_ep>; -- cgit From 91f6d5f181f6629dd74ab71759fe92d3f4eff966 Mon Sep 17 00:00:00 2001 From: Alexander Stein Date: Sat, 29 Jan 2022 14:39:05 +0800 Subject: arm64: dts: imx8mq: fix lcdif port node The port node does not have a unit-address, remove it. This fixes the warnings: lcd-controller@30320000: 'port' is a required property lcd-controller@30320000: 'port@0' does not match any of the regexes: 'pinctrl-[0-9]+' Fixes: commit d0081bd02a03 ("arm64: dts: imx8mq: Add NWL MIPI DSI controller") Signed-off-by: Alexander Stein Reviewed-by: Fabio Estevam Signed-off-by: Shawn Guo --- arch/arm64/boot/dts/freescale/imx8mq.dtsi | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm64/boot/dts/freescale/imx8mq.dtsi b/arch/arm64/boot/dts/freescale/imx8mq.dtsi index bb68c94c2fc9..e92ebb6147e6 100644 --- a/arch/arm64/boot/dts/freescale/imx8mq.dtsi +++ b/arch/arm64/boot/dts/freescale/imx8mq.dtsi @@ -554,7 +554,7 @@ assigned-clock-rates = <0>, <0>, <0>, <594000000>; status = "disabled"; - port@0 { + port { lcdif_mipi_dsi: endpoint { remote-endpoint = <&mipi_dsi_lcdif_in>; }; -- cgit From 489f710a738e24d887823a010b8b206b4124e26f Mon Sep 17 00:00:00 2001 From: Shyam Prasad N Date: Sat, 29 Jan 2022 09:32:33 +0000 Subject: cifs: unlock chan_lock before calling cifs_put_tcp_session While removing an smb session, we need to free up the tcp session for each channel for that session. We were doing this with chan_lock held. This results in a cyclic dependency with cifs_tcp_ses_lock. For now, unlock the chan_lock temporarily before calling cifs_put_tcp_session. This should not cause any problem for now, since we do not remove channels anywhere else. And this code segment will not be called by two threads. When we do implement the code for removing channels, we will need to execute proper ref counting here. Signed-off-by: Shyam Prasad N Signed-off-by: Steve French --- fs/cifs/connect.c | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index ed210d774a21..5a51a098b845 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -1831,13 +1831,9 @@ void cifs_put_smb_ses(struct cifs_ses *ses) int i; for (i = 1; i < chan_count; i++) { - /* - * note: for now, we're okay accessing ses->chans - * without chan_lock. But when chans can go away, we'll - * need to introduce ref counting to make sure that chan - * is not freed from under us. 
- */ + spin_unlock(&ses->chan_lock); cifs_put_tcp_session(ses->chans[i].server, 0); + spin_lock(&ses->chan_lock); ses->chans[i].server = NULL; } } -- cgit From dfd0dfb9a7cc04acf93435b440dd34c2ca7b4424 Mon Sep 17 00:00:00 2001 From: Sergey Shtylyov Date: Mon, 24 Jan 2022 21:55:03 +0300 Subject: EDAC/xgene: Fix deferred probing The driver overrides error codes returned by platform_get_irq_optional() to -EINVAL for some strange reason, so if it returns -EPROBE_DEFER, the driver will fail the probe permanently instead of the deferred probing. Switch to propagating the proper error codes to platform driver code upwards. [ bp: Massage commit message. ] Fixes: 0d4429301c4a ("EDAC: Add APM X-Gene SoC EDAC driver") Signed-off-by: Sergey Shtylyov Signed-off-by: Borislav Petkov Cc: Link: https://lore.kernel.org/r/20220124185503.6720-3-s.shtylyov@omp.ru --- drivers/edac/xgene_edac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/edac/xgene_edac.c b/drivers/edac/xgene_edac.c index 2ccd1db5e98f..7197f9fa0245 100644 --- a/drivers/edac/xgene_edac.c +++ b/drivers/edac/xgene_edac.c @@ -1919,7 +1919,7 @@ static int xgene_edac_probe(struct platform_device *pdev) irq = platform_get_irq_optional(pdev, i); if (irq < 0) { dev_err(&pdev->dev, "No IRQ resource\n"); - rc = -EINVAL; + rc = irq; goto out_err; } rc = devm_request_irq(&pdev->dev, irq, -- cgit From 5297c693d8c8e08fa742e3112cf70723f7a04da2 Mon Sep 17 00:00:00 2001 From: Florian Fainelli Date: Thu, 27 Jan 2022 13:50:31 -0800 Subject: pinctrl: bcm2835: Fix a few error paths After commit 266423e60ea1 ("pinctrl: bcm2835: Change init order for gpio hogs") a few error paths would not unwind properly the registration of gpio ranges. Correct that by assigning a single error label and goto it whenever we encounter a fatal error. 
Fixes: 266423e60ea1 ("pinctrl: bcm2835: Change init order for gpio hogs") Signed-off-by: Florian Fainelli Link: https://lore.kernel.org/r/20220127215033.267227-1-f.fainelli@gmail.com Signed-off-by: Linus Walleij --- drivers/pinctrl/bcm/pinctrl-bcm2835.c | 23 +++++++++++++++-------- 1 file changed, 15 insertions(+), 8 deletions(-) diff --git a/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/drivers/pinctrl/bcm/pinctrl-bcm2835.c index c4ebfa852b42..47e433e09c5c 100644 --- a/drivers/pinctrl/bcm/pinctrl-bcm2835.c +++ b/drivers/pinctrl/bcm/pinctrl-bcm2835.c @@ -1269,16 +1269,18 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) sizeof(*girq->parents), GFP_KERNEL); if (!girq->parents) { - pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); - return -ENOMEM; + err = -ENOMEM; + goto out_remove; } if (is_7211) { pc->wake_irq = devm_kcalloc(dev, BCM2835_NUM_IRQS, sizeof(*pc->wake_irq), GFP_KERNEL); - if (!pc->wake_irq) - return -ENOMEM; + if (!pc->wake_irq) { + err = -ENOMEM; + goto out_remove; + } } /* @@ -1306,8 +1308,10 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) len = strlen(dev_name(pc->dev)) + 16; name = devm_kzalloc(pc->dev, len, GFP_KERNEL); - if (!name) - return -ENOMEM; + if (!name) { + err = -ENOMEM; + goto out_remove; + } snprintf(name, len, "%s:bank%d", dev_name(pc->dev), i); @@ -1326,11 +1330,14 @@ static int bcm2835_pinctrl_probe(struct platform_device *pdev) err = gpiochip_add_data(&pc->gpio_chip, pc); if (err) { dev_err(dev, "could not add GPIO chip\n"); - pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); - return err; + goto out_remove; } return 0; + +out_remove: + pinctrl_remove_gpio_range(pc->pctl_dev, &pc->gpio_range); + return err; } static struct platform_driver bcm2835_pinctrl_driver = { -- cgit From 3a5286955bf5febc3d151bcb2c5e272e383b64aa Mon Sep 17 00:00:00 2001 From: Julian Braha Date: Mon, 17 Jan 2022 01:25:57 -0500 Subject: pinctrl: bcm63xx: fix unmet dependency on REGMAP for GPIO_REGMAP When PINCTRL_BCM63XX is selected, and REGMAP is not selected, Kbuild gives the following warning: WARNING: unmet direct dependencies detected for GPIO_REGMAP Depends on [n]: GPIOLIB [=y] && REGMAP [=n] Selected by [y]: - PINCTRL_BCM63XX [=y] && PINCTRL [=y] This is because PINCTRL_BCM63XX selects GPIO_REGMAP without selecting or depending on REGMAP, despite GPIO_REGMAP depending on REGMAP. This unmet dependency bug was detected by Kismet, a static analysis tool for Kconfig. Please advise if this is not the appropriate solution. Signed-off-by: Julian Braha Link: https://lore.kernel.org/r/20220117062557.89568-1-julianbraha@gmail.com Signed-off-by: Linus Walleij --- drivers/pinctrl/bcm/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/pinctrl/bcm/Kconfig b/drivers/pinctrl/bcm/Kconfig index 5123f4c33854..ac1e400bbbac 100644 --- a/drivers/pinctrl/bcm/Kconfig +++ b/drivers/pinctrl/bcm/Kconfig @@ -35,6 +35,7 @@ config PINCTRL_BCM63XX select PINCONF select GENERIC_PINCONF select GPIOLIB + select REGMAP select GPIO_REGMAP config PINCTRL_BCM6318 -- cgit From 22e424feb6658c5d6789e45121830357809c59cb Mon Sep 17 00:00:00 2001 From: Dominique Martinet Date: Sat, 29 Jan 2022 18:42:59 +0900 Subject: Revert "fs/9p: search open fids first" This reverts commit 478ba09edc1f2f2ee27180a06150cb2d1a686f9c. That commit was meant as a fix for setattrs with by fd (e.g. ftruncate) to use an open fid instead of the first fid it found on lookup. 
The proper fix for that is to use the fid associated with the open file struct, available in iattr->ia_file for such operations, and was actually done just before in 66246641609b ("9p: retrieve fid from file when file instance exist.") As such, this commit is no longer required. Furthermore, changing lookup to return open fids first had unwanted side effects, as it turns out the protocol forbids the use of open fids for further walks (e.g. clone_fid) and we broke mounts for some servers enforcing this rule. Note this only reverts to the old working behaviour, but it's still possible for lookup to return open fids if dentry->d_fsdata is not set, so more work is needed to make sure we respect this rule in the future, for example by adding a flag to the lookup functions to only match certain fid open modes depending on caller requirements. Link: https://lkml.kernel.org/r/20220130130651.712293-1-asmadeus@codewreck.org Fixes: 478ba09edc1f ("fs/9p: search open fids first") Cc: stable@vger.kernel.org # v5.11+ Reported-by: ron minnich Reported-by: ng@0x80.stream Signed-off-by: Dominique Martinet --- fs/9p/fid.c | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/fs/9p/fid.c b/fs/9p/fid.c index 6aab046c98e2..79df61fe0e59 100644 --- a/fs/9p/fid.c +++ b/fs/9p/fid.c @@ -96,12 +96,8 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any) dentry, dentry, from_kuid(&init_user_ns, uid), any); ret = NULL; - - if (d_inode(dentry)) - ret = v9fs_fid_find_inode(d_inode(dentry), uid); - /* we'll recheck under lock if there's anything to look in */ - if (!ret && dentry->d_fsdata) { + if (dentry->d_fsdata) { struct hlist_head *h = (struct hlist_head *)&dentry->d_fsdata; spin_lock(&dentry->d_lock); @@ -113,6 +109,9 @@ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, kuid_t uid, int any) } } spin_unlock(&dentry->d_lock); + } else { + if (dentry->d_inode) + ret = v9fs_fid_find_inode(dentry->d_inode, uid); } return ret; -- cgit From 2719c7160dcfaae1f73a1c0c210ad3281c19022e Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Sun, 30 Jan 2022 08:53:16 -0800 Subject: vfs: make freeze_super abort when sync_filesystem returns error If we fail to synchronize the filesystem while preparing to freeze the fs, abort the freeze. Signed-off-by: Darrick J. 
Wong Reviewed-by: Jan Kara Reviewed-by: Christoph Hellwig Acked-by: Christian Brauner --- fs/super.c | 19 ++++++++++++------- 1 file changed, 12 insertions(+), 7 deletions(-) diff --git a/fs/super.c b/fs/super.c index 7af820ba5ad5..f1d4a193602d 100644 --- a/fs/super.c +++ b/fs/super.c @@ -1616,11 +1616,9 @@ static void lockdep_sb_freeze_acquire(struct super_block *sb) percpu_rwsem_acquire(sb->s_writers.rw_sem + level, 0, _THIS_IP_); } -static void sb_freeze_unlock(struct super_block *sb) +static void sb_freeze_unlock(struct super_block *sb, int level) { - int level; - - for (level = SB_FREEZE_LEVELS - 1; level >= 0; level--) + for (level--; level >= 0; level--) percpu_up_write(sb->s_writers.rw_sem + level); } @@ -1691,7 +1689,14 @@ int freeze_super(struct super_block *sb) sb_wait_write(sb, SB_FREEZE_PAGEFAULT); /* All writers are done so after syncing there won't be dirty data */ - sync_filesystem(sb); + ret = sync_filesystem(sb); + if (ret) { + sb->s_writers.frozen = SB_UNFROZEN; + sb_freeze_unlock(sb, SB_FREEZE_PAGEFAULT); + wake_up(&sb->s_writers.wait_unfrozen); + deactivate_locked_super(sb); + return ret; + } /* Now wait for internal filesystem counter */ sb->s_writers.frozen = SB_FREEZE_FS; @@ -1703,7 +1708,7 @@ int freeze_super(struct super_block *sb) printk(KERN_ERR "VFS:Filesystem freeze failed\n"); sb->s_writers.frozen = SB_UNFROZEN; - sb_freeze_unlock(sb); + sb_freeze_unlock(sb, SB_FREEZE_FS); wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); return ret; @@ -1748,7 +1753,7 @@ static int thaw_super_locked(struct super_block *sb) } sb->s_writers.frozen = SB_UNFROZEN; - sb_freeze_unlock(sb); + sb_freeze_unlock(sb, SB_FREEZE_FS); out: wake_up(&sb->s_writers.wait_unfrozen); deactivate_locked_super(sb); -- cgit From 5679897eb104cec9e99609c3f045a0c20603da4c Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Sun, 30 Jan 2022 08:53:16 -0800 Subject: vfs: make sync_filesystem return errors from ->sync_fs Strangely, sync_filesystem ignores the return code from the ->sync_fs call, which means that syscalls like syncfs(2) never see the error. This doesn't seem right, so fix that. Signed-off-by: Darrick J. Wong Reviewed-by: Jan Kara Reviewed-by: Christoph Hellwig Acked-by: Christian Brauner --- fs/sync.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/fs/sync.c b/fs/sync.c index 3ce8e2137f31..c7690016453e 100644 --- a/fs/sync.c +++ b/fs/sync.c @@ -29,7 +29,7 @@ */ int sync_filesystem(struct super_block *sb) { - int ret; + int ret = 0; /* * We need to be protected against the filesystem going from @@ -52,15 +52,21 @@ int sync_filesystem(struct super_block *sb) * at a time. */ writeback_inodes_sb(sb, WB_REASON_SYNC); - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 0); + if (sb->s_op->sync_fs) { + ret = sb->s_op->sync_fs(sb, 0); + if (ret) + return ret; + } ret = sync_blockdev_nowait(sb->s_bdev); - if (ret < 0) + if (ret) return ret; sync_inodes_sb(sb); - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); + if (sb->s_op->sync_fs) { + ret = sb->s_op->sync_fs(sb, 1); + if (ret) + return ret; + } return sync_blockdev(sb->s_bdev); } EXPORT_SYMBOL(sync_filesystem); -- cgit From dd5532a4994bfda0386eb2286ec00758cee08444 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Sun, 30 Jan 2022 08:53:16 -0800 Subject: quota: make dquot_quota_sync return errors from ->sync_fs Strangely, dquot_quota_sync ignores the return code from the ->sync_fs call, which means that quotacalls like Q_SYNC never see the error. This doesn't seem right, so fix that. 
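Seen from userspace, the point of the series is simply that the error now reaches the caller. A small demo of checking it, ordinary userspace C rather than kernel code (the path argument is arbitrary):

  #include <errno.h>
  #include <fcntl.h>
  #include <stdio.h>
  #include <string.h>
  #include <unistd.h>

  int main(int argc, char **argv)
  {
          const char *path = argc > 1 ? argv[1] : ".";
          int fd = open(path, O_RDONLY);

          if (fd < 0) {
                  perror("open");
                  return 1;
          }
          /* With ->sync_fs errors propagated, a failed sync shows up here
           * instead of being silently swallowed.
           */
          if (syncfs(fd) < 0)
                  fprintf(stderr, "syncfs(%s): %s\n", path, strerror(errno));
          else
                  printf("syncfs(%s): ok\n", path);
          close(fd);
          return 0;
  }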
Signed-off-by: Darrick J. Wong Reviewed-by: Jan Kara Reviewed-by: Christoph Hellwig Acked-by: Christian Brauner --- fs/quota/dquot.c | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c index 22d904bde6ab..a74aef99bd3d 100644 --- a/fs/quota/dquot.c +++ b/fs/quota/dquot.c @@ -690,9 +690,14 @@ int dquot_quota_sync(struct super_block *sb, int type) /* This is not very clever (and fast) but currently I don't know about * any other simple way of getting quota data to disk and we must get * them there for userspace to be visible... */ - if (sb->s_op->sync_fs) - sb->s_op->sync_fs(sb, 1); - sync_blockdev(sb->s_bdev); + if (sb->s_op->sync_fs) { + ret = sb->s_op->sync_fs(sb, 1); + if (ret) + return ret; + } + ret = sync_blockdev(sb->s_bdev); + if (ret) + return ret; /* * Now when everything is written we can discard the pagecache so -- cgit From 2d86293c70750e4331e9616aded33ab6b47c299d Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Sun, 30 Jan 2022 08:53:17 -0800 Subject: xfs: return errors in xfs_fs_sync_fs Now that the VFS will do something with the return values from ->sync_fs, make ours pass on error codes. Signed-off-by: Darrick J. Wong Reviewed-by: Jan Kara Reviewed-by: Christoph Hellwig Acked-by: Christian Brauner --- fs/xfs/xfs_super.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c index e8f37bdc8354..4c0dee78b2f8 100644 --- a/fs/xfs/xfs_super.c +++ b/fs/xfs/xfs_super.c @@ -735,6 +735,7 @@ xfs_fs_sync_fs( int wait) { struct xfs_mount *mp = XFS_M(sb); + int error; trace_xfs_fs_sync_fs(mp, __return_address); @@ -744,7 +745,10 @@ xfs_fs_sync_fs( if (!wait) return 0; - xfs_log_force(mp, XFS_LOG_SYNC); + error = xfs_log_force(mp, XFS_LOG_SYNC); + if (error) + return error; + if (laptop_mode) { /* * The disk must be active because we're syncing. -- cgit From 47307c31d90ae7d52cebbbc7c1d4ff213213d4e9 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Wed, 12 Jan 2022 12:38:11 -0800 Subject: crypto: octeontx2 - Avoid stack variable overflow Building with -Warray-bounds showed a stack variable array index overflow. 
Increase the expected size of the array to avoid the warning: In file included from ./include/linux/printk.h:555, from ./include/asm-generic/bug.h:22, from ./arch/x86/include/asm/bug.h:84, from ./include/linux/bug.h:5, from ./include/linux/mmdebug.h:5, from ./include/linux/gfp.h:5, from ./include/linux/firmware.h:7, from drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c:5: drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c: In function 'otx2_cpt_print_uc_dbg_info': ./include/linux/dynamic_debug.h:162:33: warning: array subscript 4 is above array bounds of 'u32[4]' {aka 'unsigned int[4]'} [-Warray-bounds] 162 | _dynamic_func_call(fmt, __dynamic_pr_debug, \ | ^ ./include/linux/dynamic_debug.h:134:17: note: in definition of macro '__dynamic_func_call' 134 | func(&id, ##__VA_ARGS__); \ | ^~~~ ./include/linux/dynamic_debug.h:162:9: note: in expansion of macro '_dynamic_func_call' 162 | _dynamic_func_call(fmt, __dynamic_pr_debug, \ | ^~~~~~~~~~~~~~~~~~ ./include/linux/printk.h:570:9: note: in expansion of macro 'dynamic_pr_debug' 570 | dynamic_pr_debug(fmt, ##__VA_ARGS__) | ^~~~~~~~~~~~~~~~ drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c:1807:41: note: in expansion of macro 'pr_debug' 1807 | pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x %8.8x", | ^~~~~~~~ drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c:1765:13: note: while referencing 'mask' 1765 | u32 mask[4]; | ^~~~ This is justified because the mask size (eng_grps->engs_num) can be at most 144 (OTX2_CPT_MAX_ENGINES bits), which is larger than available storage. 4 * 32 == 128, so this must be 5: 5 * 32bit = 160. Additionally clear the mask before conversion so trailing bits are zero. Cc: Herbert Xu Cc: Boris Brezillon Cc: Arnaud Ebalard Cc: Srujana Challa Cc: "David S. Miller" Cc: Suheil Chandran Cc: Shijith Thotton Cc: Lukasz Bartosik Cc: linux-crypto@vger.kernel.org Fixes: d9d7749773e8 ("crypto: octeontx2 - add apis for custom engine groups") Acked-by: Ard Biesheuvel Signed-off-by: Kees Cook Signed-off-by: Herbert Xu --- drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c index 4c8ebdf671ca..1b4d425bbf0e 100644 --- a/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c +++ b/drivers/crypto/marvell/octeontx2/otx2_cptpf_ucode.c @@ -1753,7 +1753,6 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf) char engs_info[2 * OTX2_CPT_NAME_LENGTH]; struct otx2_cpt_eng_grp_info *grp; struct otx2_cpt_engs_rsvd *engs; - u32 mask[4]; int i, j; pr_debug("Engine groups global info"); @@ -1785,6 +1784,8 @@ void otx2_cpt_print_uc_dbg_info(struct otx2_cptpf_dev *cptpf) for (j = 0; j < OTX2_CPT_MAX_ETYPES_PER_GRP; j++) { engs = &grp->engs[j]; if (engs->type) { + u32 mask[5] = { }; + get_engs_info(grp, engs_info, 2 * OTX2_CPT_NAME_LENGTH, j); pr_debug("Slot%d: %s", j, engs_info); -- cgit From b837a9f5ab3bdfab9233c9f98a6bef717673a3e5 Mon Sep 17 00:00:00 2001 From: Takashi Iwai Date: Mon, 31 Jan 2022 08:57:38 +0100 Subject: ALSA: hda: realtek: Fix race at concurrent COEF updates The COEF access is done with two steps: setting the index then read or write the data. When multiple COEF accesses are performed concurrently, the index and data might be paired unexpectedly. In most cases, this isn't a big problem as the COEF setup is done at the initialization, but some dynamic changes like the mute LED may hit such a race. 
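The race is the classic index/data register pairing problem: every COEF access is a two-verb sequence (set the index, then read or write the data), and two updaters interleaving those verbs can end up writing through each other's index. A simplified sketch of the interleaving and of the serialized accessor shape, assuming the coef_mutex and helpers introduced in the diff below:

  /*
   * Thread A                    Thread B
   * set COEF index = 0x0b
   *                             set COEF index = 0x46
   * write COEF data      ---->  lands in coef 0x46, not 0x0b
   *
   * Holding one mutex across both verbs closes the window:
   */
  static unsigned int coef_read_locked(struct hda_codec *codec, unsigned int idx)
  {
          struct alc_spec *spec = codec->spec;
          unsigned int val;

          mutex_lock(&spec->coef_mutex);
          snd_hda_codec_write(codec, 0x20, 0, AC_VERB_SET_COEF_INDEX, idx);
          val = snd_hda_codec_read(codec, 0x20, 0, AC_VERB_GET_PROC_COEF, 0);
          mutex_unlock(&spec->coef_mutex);
          return val;
  }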
For avoiding the racy COEF accesses, this patch introduces a new mutex coef_mutex to alc_spec, and wrap the COEF accessing functions with it. Reported-by: Alexander Sergeyev Cc: Link: https://lore.kernel.org/r/20220111195229.a77wrpjclqwrx4bx@localhost.localdomain Link: https://lore.kernel.org/r/20220131075738.24323-1-tiwai@suse.de Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 61 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 50 insertions(+), 11 deletions(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 668274e52674..a5677be0a405 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -98,6 +98,7 @@ struct alc_spec { unsigned int gpio_mic_led_mask; struct alc_coef_led mute_led_coef; struct alc_coef_led mic_led_coef; + struct mutex coef_mutex; hda_nid_t headset_mic_pin; hda_nid_t headphone_mic_pin; @@ -137,8 +138,8 @@ struct alc_spec { * COEF access helper functions */ -static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, - unsigned int coef_idx) +static int __alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx) { unsigned int val; @@ -147,28 +148,61 @@ static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, return val; } +static int alc_read_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx) +{ + struct alc_spec *spec = codec->spec; + unsigned int val; + + mutex_lock(&spec->coef_mutex); + val = __alc_read_coefex_idx(codec, nid, coef_idx); + mutex_unlock(&spec->coef_mutex); + return val; +} + #define alc_read_coef_idx(codec, coef_idx) \ alc_read_coefex_idx(codec, 0x20, coef_idx) -static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, - unsigned int coef_idx, unsigned int coef_val) +static void __alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx, unsigned int coef_val) { snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_COEF_INDEX, coef_idx); snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_PROC_COEF, coef_val); } +static void alc_write_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx, unsigned int coef_val) +{ + struct alc_spec *spec = codec->spec; + + mutex_lock(&spec->coef_mutex); + __alc_write_coefex_idx(codec, nid, coef_idx, coef_val); + mutex_unlock(&spec->coef_mutex); +} + #define alc_write_coef_idx(codec, coef_idx, coef_val) \ alc_write_coefex_idx(codec, 0x20, coef_idx, coef_val) +static void __alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid, + unsigned int coef_idx, unsigned int mask, + unsigned int bits_set) +{ + unsigned int val = __alc_read_coefex_idx(codec, nid, coef_idx); + + if (val != -1) + __alc_write_coefex_idx(codec, nid, coef_idx, + (val & ~mask) | bits_set); +} + static void alc_update_coefex_idx(struct hda_codec *codec, hda_nid_t nid, unsigned int coef_idx, unsigned int mask, unsigned int bits_set) { - unsigned int val = alc_read_coefex_idx(codec, nid, coef_idx); + struct alc_spec *spec = codec->spec; - if (val != -1) - alc_write_coefex_idx(codec, nid, coef_idx, - (val & ~mask) | bits_set); + mutex_lock(&spec->coef_mutex); + __alc_update_coefex_idx(codec, nid, coef_idx, mask, bits_set); + mutex_unlock(&spec->coef_mutex); } #define alc_update_coef_idx(codec, coef_idx, mask, bits_set) \ @@ -201,13 +235,17 @@ struct coef_fw { static void alc_process_coef_fw(struct hda_codec *codec, const struct coef_fw *fw) { + struct alc_spec *spec = codec->spec; + + mutex_lock(&spec->coef_mutex); for (; fw->nid; fw++) { if 
(fw->mask == (unsigned short)-1) - alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val); + __alc_write_coefex_idx(codec, fw->nid, fw->idx, fw->val); else - alc_update_coefex_idx(codec, fw->nid, fw->idx, - fw->mask, fw->val); + __alc_update_coefex_idx(codec, fw->nid, fw->idx, + fw->mask, fw->val); } + mutex_unlock(&spec->coef_mutex); } /* @@ -1153,6 +1191,7 @@ static int alc_alloc_spec(struct hda_codec *codec, hda_nid_t mixer_nid) codec->spdif_status_reset = 1; codec->forced_resume = 1; codec->patch_ops = alc_patch_ops; + mutex_init(&spec->coef_mutex); err = alc_codec_rename_from_preset(codec); if (err < 0) { -- cgit From 63394a16086fc2152869d7902621e2525e14bc40 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Sat, 29 Jan 2022 12:32:41 +0100 Subject: ALSA: hda/realtek: Add missing fixup-model entry for Gigabyte X570 ALC1220 quirks The initial commit of the new Gigabyte X570 ALC1220 quirks lacked the fixup-model entry in alc882_fixup_models[]. It seemed not to cause any ill effects but for completeness sake this commit makes up for that. Signed-off-by: Christian Lachner Cc: Link: https://lore.kernel.org/r/20220129113243.93068-2-gladiac@gmail.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index a5677be0a405..d662ad805960 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2665,6 +2665,7 @@ static const struct hda_model_fixup alc882_fixup_models[] = { {.id = ALC882_FIXUP_NO_PRIMARY_HP, .name = "no-primary-hp"}, {.id = ALC887_FIXUP_ASUS_BASS, .name = "asus-bass"}, {.id = ALC1220_FIXUP_GB_DUAL_CODECS, .name = "dual-codecs"}, + {.id = ALC1220_FIXUP_GB_X570, .name = "gb-x570"}, {.id = ALC1220_FIXUP_CLEVO_P950, .name = "clevo-p950"}, {} }; -- cgit From 41a8601302ecbe704ac970552c33dc942300fc37 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Sat, 29 Jan 2022 12:32:42 +0100 Subject: ALSA: hda/realtek: Fix silent output on Gigabyte X570S Aorus Master (newer chipset) Newer versions of the X570 Master come with a newer revision of the mainboard chipset - the X570S. These boards have the same ALC1220 codec but seem to initialize the codec with a different parameter in Coef 0x7 which causes the output audio to be very low. We therefore write a known-good value to Coef 0x7 to fix that. As the value is the exact same as on the other X570(non-S) boards the same quirk-function can be shared between both generations. This commit adds the Gigabyte X570S Aorus Master to the list of boards using the ALC1220_FIXUP_GB_X570 quirk. This fixes both, the silent output and the no-audio after reboot from windows problems. 
This work has been tested by the folks over at the level1techs forum here: https://forum.level1techs.com/t/has-anybody-gotten-audio-working-in-linux-on-aorus-x570-master/154072 Signed-off-by: Christian Lachner Cc: Link: https://lore.kernel.org/r/20220129113243.93068-3-gladiac@gmail.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index d662ad805960..54301d208d2b 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2164,6 +2164,7 @@ static void alc1220_fixup_gb_x570(struct hda_codec *codec, { static const hda_nid_t conn1[] = { 0x0c }; static const struct coef_fw gb_x570_coefs[] = { + WRITE_COEF(0x07, 0x03c0), WRITE_COEF(0x1a, 0x01c1), WRITE_COEF(0x1b, 0x0202), WRITE_COEF(0x43, 0x3005), @@ -2591,6 +2592,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570), SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570), SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1229, "MSI-GP73", ALC1220_FIXUP_CLEVO_P950), -- cgit From ea3541961376f733373839cc90493aafa8a7f733 Mon Sep 17 00:00:00 2001 From: Christian Lachner Date: Sat, 29 Jan 2022 12:32:43 +0100 Subject: ALSA: hda/realtek: Fix silent output on Gigabyte X570 Aorus Xtreme after reboot from Windows This commit switches the Gigabyte X570 Aorus Xtreme from using the ALC1220_FIXUP_CLEVO_P950 to the ALC1220_FIXUP_GB_X570 quirk. This fixes the no-audio after reboot from windows problem. BugLink: https://bugzilla.kernel.org/show_bug.cgi?id=205275 Signed-off-by: Christian Lachner Cc: Link: https://lore.kernel.org/r/20220129113243.93068-4-gladiac@gmail.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 54301d208d2b..0d52eca57bbc 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2591,7 +2591,7 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE), SND_PCI_QUIRK(0x1458, 0xa0b8, "Gigabyte AZ370-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK(0x1458, 0xa0cd, "Gigabyte X570 Aorus Master", ALC1220_FIXUP_GB_X570), - SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_CLEVO_P950), + SND_PCI_QUIRK(0x1458, 0xa0ce, "Gigabyte X570 Aorus Xtreme", ALC1220_FIXUP_GB_X570), SND_PCI_QUIRK(0x1458, 0xa0d5, "Gigabyte X570S Aorus Master", ALC1220_FIXUP_GB_X570), SND_PCI_QUIRK(0x1462, 0x11f7, "MSI-GE63", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1228, "MSI-GP63", ALC1220_FIXUP_CLEVO_P950), -- cgit From 94db9cc8f8fa2d5426ce79ec4ca16028f7084224 Mon Sep 17 00:00:00 2001 From: Albert Geantă Date: Mon, 31 Jan 2022 03:05:23 +0200 Subject: ALSA: hda/realtek: Add quirk for ASUS GU603 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The ASUS GU603 (Zephyrus M16 - SSID 1043:16b2) requires a quirk similar to other ASUS devices for correctly routing the 4 integrated speakers. 
This fixes it by adding a corresponding quirk entry, which connects the bass speakers to the proper DAC. Signed-off-by: Albert Geantă Cc: Link: https://lore.kernel.org/r/20220131010523.546386-1-albertgeanta@gmail.com Signed-off-by: Takashi Iwai --- sound/pci/hda/patch_realtek.c | 1 + 1 file changed, 1 insertion(+) diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 0d52eca57bbc..8315bf7d4c38 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -9011,6 +9011,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = { SND_PCI_QUIRK(0x1043, 0x1e51, "ASUS Zephyrus M15", ALC294_FIXUP_ASUS_GU502_PINS), SND_PCI_QUIRK(0x1043, 0x1e8e, "ASUS Zephyrus G15", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x1f11, "ASUS Zephyrus G14", ALC289_FIXUP_ASUS_GA401), + SND_PCI_QUIRK(0x1043, 0x16b2, "ASUS GU603", ALC289_FIXUP_ASUS_GA401), SND_PCI_QUIRK(0x1043, 0x3030, "ASUS ZN270IE", ALC256_FIXUP_ASUS_AIO_GPIO2), SND_PCI_QUIRK(0x1043, 0x831a, "ASUS P901", ALC269_FIXUP_STEREO_DMIC), SND_PCI_QUIRK(0x1043, 0x834a, "ASUS S101", ALC269_FIXUP_STEREO_DMIC), -- cgit From b470947c3672f7eb7c4c271d510383d896831cc2 Mon Sep 17 00:00:00 2001 From: Robert Hancock Date: Thu, 27 Jan 2022 16:15:00 -0600 Subject: usb: dwc3: xilinx: fix uninitialized return value A previous patch to skip part of the initialization when a USB3 PHY was not present could result in the return value being uninitialized in that case, causing spurious probe failures. Initialize ret to 0 to avoid this. Fixes: 9678f3361afc ("usb: dwc3: xilinx: Skip resets and USB3 register settings for USB2.0 mode") Cc: Reviewed-by: Nathan Chancellor Signed-off-by: Robert Hancock Link: https://lore.kernel.org/r/20220127221500.177021-1-robert.hancock@calian.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/dwc3/dwc3-xilinx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/dwc3/dwc3-xilinx.c b/drivers/usb/dwc3/dwc3-xilinx.c index e14ac15e24c3..a6f3a9b38789 100644 --- a/drivers/usb/dwc3/dwc3-xilinx.c +++ b/drivers/usb/dwc3/dwc3-xilinx.c @@ -99,7 +99,7 @@ static int dwc3_xlnx_init_zynqmp(struct dwc3_xlnx *priv_data) struct device *dev = priv_data->dev; struct reset_control *crst, *hibrst, *apbrst; struct phy *usb3_phy; - int ret; + int ret = 0; u32 reg; usb3_phy = devm_phy_optional_get(dev, "usb3-phy"); -- cgit From 8172f41859cf7516e73eb957297e6752b3073119 Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Thu, 20 Jan 2022 20:31:16 -0800 Subject: drm/i915: Allocate intel_engine_coredump_alloc with ALLOW_FAIL Allocate intel_engine_coredump_alloc with ALLOW_FAIL rather than GFP_KERNEL to fully decouple the error capture from fence signalling. 
v2: (John Harrison) - Fix typo in commit message (s/do/to) Fixes: 8b91cdd4f8649 ("drm/i915: Use __GFP_KSWAPD_RECLAIM in the capture code") Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20220121043118.24886-2-matthew.brost@intel.com (cherry picked from commit 4f72fc3c7f3d9f29a438bb0e17c7773f2fc8242a) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/i915_gpu_error.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 5ae812d60abe..0633888a411e 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -1522,7 +1522,7 @@ capture_engine(struct intel_engine_cs *engine, struct i915_request *rq = NULL; unsigned long flags; - ee = intel_engine_coredump_alloc(engine, GFP_KERNEL); + ee = intel_engine_coredump_alloc(engine, ALLOW_FAIL); if (!ee) return NULL; -- cgit From 5ae13c305ef8cb54efc4f0ba4565709b9f320fed Mon Sep 17 00:00:00 2001 From: Matthew Brost Date: Tue, 11 Jan 2022 08:39:29 -0800 Subject: drm/i915: Lock timeline mutex directly in error path of eb_pin_timeline Don't use the interruptable version of the timeline mutex lock in the error path of eb_pin_timeline as the cleanup must always happen. v2: (John Harrison) - Don't check for interrupt during mutex lock v3: (Tvrtko) - A comment explaining why lock helper isn't used Fixes: 544460c33821 ("drm/i915: Multi-BB execbuf") Signed-off-by: Matthew Brost Reviewed-by: John Harrison Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20220111163929.14017-1-matthew.brost@intel.com (cherry picked from commit cb935c4618bd2ff9058feee4af7088446da6a763) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c index 3a5b247be738..1736efa43339 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_execbuffer.c @@ -2505,9 +2505,14 @@ static int eb_pin_timeline(struct i915_execbuffer *eb, struct intel_context *ce, timeout) < 0) { i915_request_put(rq); - tl = intel_context_timeline_lock(ce); + /* + * Error path, cannot use intel_context_timeline_lock as + * that is user interruptable and this clean up step + * must be done. + */ + mutex_lock(&ce->timeline->mutex); intel_context_exit(ce); - intel_context_timeline_unlock(tl); + mutex_unlock(&ce->timeline->mutex); if (nonblock) return -EWOULDBLOCK; -- cgit From 90a3d22ff02b196d5884e111f39271a1d4ee8e3e Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Mon, 24 Jan 2022 15:24:09 +0300 Subject: drm/i915/overlay: Prevent divide by zero bugs in scaling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Smatch detected a divide by zero bug in check_overlay_scaling(). drivers/gpu/drm/i915/display/intel_overlay.c:976 check_overlay_scaling() error: potential divide by zero bug '/ rec->dst_height'. drivers/gpu/drm/i915/display/intel_overlay.c:980 check_overlay_scaling() error: potential divide by zero bug '/ rec->dst_width'. Prevent this by ensuring that the dst height and width are non-zero. 
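A minimal standalone sketch of the guard (not the intel_overlay.c code; compute_scale() is an invented name) shows the destination size being validated before it is ever used as a divisor:

------------------------< overlay-scale-check.c >------------------------
#include <stdio.h>

/* Reject a zero-sized destination before it becomes a divisor. */
static int compute_scale(unsigned int src, unsigned int dst, unsigned int *ratio)
{
        if (dst == 0)
                return -1;                  /* mirrors returning -EINVAL */
        *ratio = (src << 12) / dst;         /* 12.12 fixed-point ratio   */
        return 0;
}

int main(void)
{
        unsigned int ratio;

        if (compute_scale(1920, 0, &ratio))
                puts("rejected zero-sized destination");
        if (compute_scale(1920, 1280, &ratio) == 0)
                printf("ratio = %u\n", ratio);
        return 0;
}
------------------------------------------------------------------------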
Fixes: 02e792fbaadb ("drm/i915: implement drmmode overlay support v4") Signed-off-by: Dan Carpenter Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124122409.GA31673@kili (cherry picked from commit cf5b64f7f10b28bebb9b7c9d25e7aee5cbe43918) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/display/intel_overlay.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 1a376e9a1ff3..d610e48cab94 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -959,6 +959,9 @@ static int check_overlay_dst(struct intel_overlay *overlay, const struct intel_crtc_state *pipe_config = overlay->crtc->config; + if (rec->dst_height == 0 || rec->dst_width == 0) + return -EINVAL; + if (rec->dst_x < pipe_config->pipe_src_w && rec->dst_x + rec->dst_width <= pipe_config->pipe_src_w && rec->dst_y < pipe_config->pipe_src_h && -- cgit From b3f74938d65665f892d1b7807c51140f68dc911c Mon Sep 17 00:00:00 2001 From: Umesh Nerlige Ramappa Date: Mon, 10 Jan 2022 17:55:23 -0800 Subject: drm/i915/pmu: Use PM timestamp instead of RING TIMESTAMP for reference All timestamps returned by GuC for GuC PMU busyness are captured from GUC PM TIMESTAMP. Since this timestamp does not tick when GuC goes idle, kmd uses RING_TIMESTAMP to measure busyness of an engine with an active context. In further stress testing, the MMIO read of the RING_TIMESTAMP is seen to cause a rare hang. Resolve the issue by using gt specific timestamp from PM which is in sync with the GuC PM timestamp. Fixes: 77cdd054dd2c ("drm/i915/pmu: Connect engine busyness stats from GuC to pmu") Signed-off-by: Umesh Nerlige Ramappa Reviewed-by: Alan Previn Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20220111015523.225562-1-umesh.nerlige.ramappa@intel.com (cherry picked from commit 721fd84ea1fe957453587efad5fdc44dfba58e04) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gt/uc/intel_guc.h | 5 ++ drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 56 +++++++++++++++++------ drivers/gpu/drm/i915/i915_reg.h | 3 +- 3 files changed, 50 insertions(+), 14 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc.h b/drivers/gpu/drm/i915/gt/uc/intel_guc.h index f9240d4baa69..3aabe164c329 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc.h +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc.h @@ -206,6 +206,11 @@ struct intel_guc { * context usage for overflows. 
*/ struct delayed_work work; + + /** + * @shift: Right shift value for the gpm timestamp + */ + u32 shift; } timestamp; #ifdef CONFIG_DRM_I915_SELFTEST diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index e7517206af82..fcec2cb833af 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1149,23 +1149,51 @@ static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) } } -static void guc_update_pm_timestamp(struct intel_guc *guc, - struct intel_engine_cs *engine, - ktime_t *now) +static u32 gpm_timestamp_shift(struct intel_gt *gt) { - u32 gt_stamp_now, gt_stamp_hi; + intel_wakeref_t wakeref; + u32 reg, shift; + + with_intel_runtime_pm(gt->uncore->rpm, wakeref) + reg = intel_uncore_read(gt->uncore, RPM_CONFIG0); + + shift = (reg & GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_MASK) >> + GEN10_RPM_CONFIG0_CTC_SHIFT_PARAMETER_SHIFT; + + return 3 - shift; +} + +static u64 gpm_timestamp(struct intel_gt *gt) +{ + u32 lo, hi, old_hi, loop = 0; + + hi = intel_uncore_read(gt->uncore, MISC_STATUS1); + do { + lo = intel_uncore_read(gt->uncore, MISC_STATUS0); + old_hi = hi; + hi = intel_uncore_read(gt->uncore, MISC_STATUS1); + } while (old_hi != hi && loop++ < 2); + + return ((u64)hi << 32) | lo; +} + +static void guc_update_pm_timestamp(struct intel_guc *guc, ktime_t *now) +{ + struct intel_gt *gt = guc_to_gt(guc); + u32 gt_stamp_lo, gt_stamp_hi; + u64 gpm_ts; lockdep_assert_held(&guc->timestamp.lock); gt_stamp_hi = upper_32_bits(guc->timestamp.gt_stamp); - gt_stamp_now = intel_uncore_read(engine->uncore, - RING_TIMESTAMP(engine->mmio_base)); + gpm_ts = gpm_timestamp(gt) >> guc->timestamp.shift; + gt_stamp_lo = lower_32_bits(gpm_ts); *now = ktime_get(); - if (gt_stamp_now < lower_32_bits(guc->timestamp.gt_stamp)) + if (gt_stamp_lo < lower_32_bits(guc->timestamp.gt_stamp)) gt_stamp_hi++; - guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_now; + guc->timestamp.gt_stamp = ((u64)gt_stamp_hi << 32) | gt_stamp_lo; } /* @@ -1209,7 +1237,7 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) stats_saved = *stats; gt_stamp_saved = guc->timestamp.gt_stamp; guc_update_engine_gt_clks(engine); - guc_update_pm_timestamp(guc, engine, now); + guc_update_pm_timestamp(guc, now); intel_gt_pm_put_async(gt); if (i915_reset_count(gpu_error) != reset_count) { *stats = stats_saved; @@ -1241,8 +1269,8 @@ static void __reset_guc_busyness_stats(struct intel_guc *guc) spin_lock_irqsave(&guc->timestamp.lock, flags); + guc_update_pm_timestamp(guc, &unused); for_each_engine(engine, gt, id) { - guc_update_pm_timestamp(guc, engine, &unused); guc_update_engine_gt_clks(engine); engine->stats.guc.prev_total = 0; } @@ -1259,10 +1287,11 @@ static void __update_guc_busyness_stats(struct intel_guc *guc) ktime_t unused; spin_lock_irqsave(&guc->timestamp.lock, flags); - for_each_engine(engine, gt, id) { - guc_update_pm_timestamp(guc, engine, &unused); + + guc_update_pm_timestamp(guc, &unused); + for_each_engine(engine, gt, id) guc_update_engine_gt_clks(engine); - } + spin_unlock_irqrestore(&guc->timestamp.lock, flags); } @@ -1783,6 +1812,7 @@ int intel_guc_submission_init(struct intel_guc *guc) spin_lock_init(&guc->timestamp.lock); INIT_DELAYED_WORK(&guc->timestamp.work, guc_timestamp_ping); guc->timestamp.ping_delay = (POLL_TIME_CLKS / gt->clock_frequency + 1) * HZ; + guc->timestamp.shift = gpm_timestamp_shift(gt); return 0; } diff --git 
a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index c32420cb8ed5..c2bb33febb68 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -2684,7 +2684,8 @@ static inline bool i915_mmio_reg_valid(i915_reg_t reg) #define RING_WAIT (1 << 11) /* gen3+, PRBx_CTL */ #define RING_WAIT_SEMAPHORE (1 << 10) /* gen6+ */ -#define GUCPMTIMESTAMP _MMIO(0xC3E8) +#define MISC_STATUS0 _MMIO(0xA500) +#define MISC_STATUS1 _MMIO(0xA504) /* There are 16 64-bit CS General Purpose Registers per-engine on Gen8+ */ #define GEN8_RING_CS_GPR(base, n) _MMIO((base) + 0x600 + (n) * 8) -- cgit From 3c6f13ad723e7206f03bb2752b01d18202b7fc9d Mon Sep 17 00:00:00 2001 From: Imre Deak Date: Wed, 26 Jan 2022 12:43:56 +0200 Subject: drm/i915/adlp: Fix TypeC PHY-ready status readout MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The TCSS_DDI_STATUS register is indexed by tc_port not by the FIA port index, fix this up. This only caused an issue on TC#3/4 ports in legacy mode, as in all other cases the two indices either match (on TC#1/2) or the TCSS_DDI_STATUS_READY flag is set regardless of something being connected or not (on TC#1/2/3/4 in dp-alt and tbt-alt modes). Reported-and-tested-by: Chia-Lin Kao (AceLan) Fixes: 55ce306c2aa1 ("drm/i915/adl_p: Implement TC sequences") Closes: https://gitlab.freedesktop.org/drm/intel/-/issues/4698 Cc: José Roberto de Souza Cc: # v5.14+ Signed-off-by: Imre Deak Reviewed-by: José Roberto de Souza Link: https://patchwork.freedesktop.org/patch/msgid/20220126104356.2022975-1-imre.deak@intel.com (cherry picked from commit 516b33460c5bee78b2055637b0547bdb0e6af754) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/display/intel_tc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 40faa18947c9..dbd7d0d83a14 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -345,10 +345,11 @@ static bool icl_tc_phy_status_complete(struct intel_digital_port *dig_port) static bool adl_tc_phy_status_complete(struct intel_digital_port *dig_port) { struct drm_i915_private *i915 = to_i915(dig_port->base.base.dev); + enum tc_port tc_port = intel_port_to_tc(i915, dig_port->base.port); struct intel_uncore *uncore = &i915->uncore; u32 val; - val = intel_uncore_read(uncore, TCSS_DDI_STATUS(dig_port->tc_phy_fia_idx)); + val = intel_uncore_read(uncore, TCSS_DDI_STATUS(tc_port)); if (val == 0xffffffff) { drm_dbg_kms(&i915->drm, "Port %s: PHY in TCCOLD, assuming not complete\n", -- cgit From ebe2b1add1055b903e2acd86b290a85297edc0b3 Mon Sep 17 00:00:00 2001 From: Udipto Goswami Date: Thu, 27 Jan 2022 09:39:55 +0530 Subject: usb: f_fs: Fix use-after-free for epfile Consider a case where ffs_func_eps_disable is called from ffs_func_disable as part of composition switch and at the same time ffs_epfile_release get called from userspace. ffs_epfile_release will free up the read buffer and call ffs_data_closed which in turn destroys ffs->epfiles and mark it as NULL. While this was happening the driver has already initialized the local epfile in ffs_func_eps_disable which is now freed and waiting to acquire the spinlock. Once spinlock is acquired the driver proceeds with the stale value of epfile and tries to free the already freed read buffer causing use-after-free. 
Following is the illustration of the race: CPU1 CPU2 ffs_func_eps_disable epfiles (local copy) ffs_epfile_release ffs_data_closed if (last file closed) ffs_data_reset ffs_data_clear ffs_epfiles_destroy spin_lock dereference epfiles Fix this races by taking epfiles local copy & assigning it under spinlock and if epfiles(local) is null then update it in ffs->epfiles then finally destroy it. Extending the scope further from the race, protecting the ep related structures, and concurrent accesses. Fixes: a9e6f83c2df1 ("usb: gadget: f_fs: stop sleeping in ffs_func_eps_disable") Co-developed-by: Udipto Goswami Reviewed-by: John Keeping Signed-off-by: Pratham Pratap Signed-off-by: Udipto Goswami Link: https://lore.kernel.org/r/1643256595-10797-1-git-send-email-quic_ugoswami@quicinc.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_fs.c | 56 ++++++++++++++++++++++++++++---------- 1 file changed, 42 insertions(+), 14 deletions(-) diff --git a/drivers/usb/gadget/function/f_fs.c b/drivers/usb/gadget/function/f_fs.c index 25ad1e97a458..1922fd02043c 100644 --- a/drivers/usb/gadget/function/f_fs.c +++ b/drivers/usb/gadget/function/f_fs.c @@ -1711,16 +1711,24 @@ static void ffs_data_put(struct ffs_data *ffs) static void ffs_data_closed(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); if (atomic_dec_and_test(&ffs->opened)) { if (ffs->no_disconnect) { ffs->state = FFS_DEACTIVATED; - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, - ffs->eps_count); - ffs->epfiles = NULL; - } + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, + flags); + + if (epfiles) + ffs_epfiles_destroy(epfiles, + ffs->eps_count); + if (ffs->setup_state == FFS_SETUP_PENDING) __ffs_ep0_stall(ffs); } else { @@ -1767,14 +1775,27 @@ static struct ffs_data *ffs_data_new(const char *dev_name) static void ffs_data_clear(struct ffs_data *ffs) { + struct ffs_epfile *epfiles; + unsigned long flags; + ENTER(); ffs_closed(ffs); BUG_ON(ffs->gadget); - if (ffs->epfiles) { - ffs_epfiles_destroy(ffs->epfiles, ffs->eps_count); + spin_lock_irqsave(&ffs->eps_lock, flags); + epfiles = ffs->epfiles; + ffs->epfiles = NULL; + spin_unlock_irqrestore(&ffs->eps_lock, flags); + + /* + * potential race possible between ffs_func_eps_disable + * & ffs_epfile_release therefore maintaining a local + * copy of epfile will save us from use-after-free. 
+ */ + if (epfiles) { + ffs_epfiles_destroy(epfiles, ffs->eps_count); ffs->epfiles = NULL; } @@ -1922,12 +1943,15 @@ static void ffs_epfiles_destroy(struct ffs_epfile *epfiles, unsigned count) static void ffs_func_eps_disable(struct ffs_function *func) { - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = func->ffs->epfiles; - unsigned count = func->ffs->eps_count; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; spin_lock_irqsave(&func->ffs->eps_lock, flags); + count = func->ffs->eps_count; + epfile = func->ffs->epfiles; + ep = func->eps; while (count--) { /* pending requests get nuked */ if (ep->ep) @@ -1945,14 +1969,18 @@ static void ffs_func_eps_disable(struct ffs_function *func) static int ffs_func_eps_enable(struct ffs_function *func) { - struct ffs_data *ffs = func->ffs; - struct ffs_ep *ep = func->eps; - struct ffs_epfile *epfile = ffs->epfiles; - unsigned count = ffs->eps_count; + struct ffs_data *ffs; + struct ffs_ep *ep; + struct ffs_epfile *epfile; + unsigned short count; unsigned long flags; int ret = 0; spin_lock_irqsave(&func->ffs->eps_lock, flags); + ffs = func->ffs; + ep = func->eps; + epfile = ffs->epfiles; + count = ffs->eps_count; while(count--) { ep->ep->driver_data = ep; -- cgit From fa77ce201f7f2d823b07753575122d1ae5597fbe Mon Sep 17 00:00:00 2001 From: Stephan Brunner Date: Sat, 8 Jan 2022 13:00:20 +0100 Subject: USB: serial: ch341: add support for GW Instek USB2.0-Serial devices Programmable lab power supplies made by GW Instek, such as the GPP-2323, have a USB port exposing a serial port to control the device. Stringing the supplied Windows driver, references to the ch341 chip are found. Binding the existing ch341 driver to the VID/PID of the GPP-2323 ("GW Instek USB2.0-Serial" as per the USB product name) works out of the box, communication and control is now possible. This patch should work with any GPP series power supply due to similarities in the product line. Signed-off-by: Stephan Brunner Link: https://lore.kernel.org/r/4a47b864-0816-6f6a-efee-aa20e74bcdc6@stephan-brunner.net Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold --- drivers/usb/serial/ch341.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/serial/ch341.c b/drivers/usb/serial/ch341.c index 29f4b87a9e74..58cba8ee0277 100644 --- a/drivers/usb/serial/ch341.c +++ b/drivers/usb/serial/ch341.c @@ -85,6 +85,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x1a86, 0x5523) }, { USB_DEVICE(0x1a86, 0x7522) }, { USB_DEVICE(0x1a86, 0x7523) }, + { USB_DEVICE(0x2184, 0x0057) }, { USB_DEVICE(0x4348, 0x5523) }, { USB_DEVICE(0x9986, 0x7523) }, { }, -- cgit From d48384c7ed6c8fe4727eaa0f3048f62afd1cd715 Mon Sep 17 00:00:00 2001 From: Pawel Dembicki Date: Tue, 11 Jan 2022 23:12:05 +0100 Subject: USB: serial: option: add ZTE MF286D modem Modem from ZTE MF286D is an Qualcomm MDM9250 based 3G/4G modem. T: Bus=02 Lev=01 Prnt=01 Port=00 Cnt=01 Dev#= 3 Spd=5000 MxCh= 0 D: Ver= 3.00 Cls=00(>ifc ) Sub=00 Prot=00 MxPS= 9 #Cfgs= 1 P: Vendor=19d2 ProdID=1485 Rev=52.87 S: Manufacturer=ZTE,Incorporated S: Product=ZTE Technologies MSM S: SerialNumber=MF286DZTED000000 C:* #Ifs= 7 Cfg#= 1 Atr=80 MxPwr=896mA A: FirstIf#= 0 IfCount= 2 Cls=02(comm.) Sub=06 Prot=00 I:* If#= 0 Alt= 0 #EPs= 1 Cls=02(comm.) Sub=02 Prot=ff Driver=rndis_host E: Ad=82(I) Atr=03(Int.) 
MxPS= 8 Ivl=32ms I:* If#= 1 Alt= 0 #EPs= 2 Cls=0a(data ) Sub=00 Prot=00 Driver=rndis_host E: Ad=81(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms E: Ad=01(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms I:* If#= 2 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=ff Prot=ff Driver=option E: Ad=83(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms E: Ad=02(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms I:* If#= 3 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=option E: Ad=85(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=84(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms E: Ad=03(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms I:* If#= 4 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=option E: Ad=87(I) Atr=03(Int.) MxPS= 10 Ivl=32ms E: Ad=86(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms E: Ad=04(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms I:* If#= 5 Alt= 0 #EPs= 3 Cls=ff(vend.) Sub=ff Prot=ff Driver=qmi_wwan E: Ad=88(I) Atr=03(Int.) MxPS= 8 Ivl=32ms E: Ad=8e(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms E: Ad=0f(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms I:* If#= 6 Alt= 0 #EPs= 2 Cls=ff(vend.) Sub=42 Prot=01 Driver=usbfs E: Ad=05(O) Atr=02(Bulk) MxPS=1024 Ivl=0ms E: Ad=89(I) Atr=02(Bulk) MxPS=1024 Ivl=0ms Signed-off-by: Pawel Dembicki Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold --- drivers/usb/serial/option.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 42420bfc983c..962e9943fc20 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -1649,6 +1649,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = RSVD(2) }, { USB_DEVICE_INTERFACE_CLASS(ZTE_VENDOR_ID, 0x1476, 0xff) }, /* GosunCn ZTE WeLink ME3630 (ECM/NCM mode) */ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0x00, 0x00) }, /* ZTE MF871A */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff), /* ZTE MF286D */ + .driver_info = RSVD(5) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1533, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1534, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1535, 0xff, 0xff, 0xff) }, -- cgit From 341adeec9adad0874f29a0a1af35638207352a39 Mon Sep 17 00:00:00 2001 From: Wen Gu Date: Wed, 26 Jan 2022 23:33:04 +0800 Subject: net/smc: Forward wakeup to smc socket waitqueue after fallback When we replace TCP with SMC and a fallback occurs, there may be some socket waitqueue entries remaining in smc socket->wq, such as eppoll_entries inserted by userspace applications. After the fallback, data flows over TCP/IP and only clcsocket->wq will be woken up. Applications can't be notified by the entries which were inserted in smc socket->wq before fallback. So we need a mechanism to wake up smc socket->wq at the same time if some entries remaining in it. The current workaround is to transfer the entries from smc socket->wq to clcsock->wq during the fallback. But this may cause a crash like this: general protection fault, probably for non-canonical address 0xdead000000000100: 0000 [#1] PREEMPT SMP PTI CPU: 3 PID: 0 Comm: swapper/3 Kdump: loaded Tainted: G E 5.16.0+ #107 RIP: 0010:__wake_up_common+0x65/0x170 Call Trace: __wake_up_common_lock+0x7a/0xc0 sock_def_readable+0x3c/0x70 tcp_data_queue+0x4a7/0xc40 tcp_rcv_established+0x32f/0x660 ? sk_filter_trim_cap+0xcb/0x2e0 tcp_v4_do_rcv+0x10b/0x260 tcp_v4_rcv+0xd2a/0xde0 ip_protocol_deliver_rcu+0x3b/0x1d0 ip_local_deliver_finish+0x54/0x60 ip_local_deliver+0x6a/0x110 ? tcp_v4_early_demux+0xa2/0x140 ? 
tcp_v4_early_demux+0x10d/0x140 ip_sublist_rcv_finish+0x49/0x60 ip_sublist_rcv+0x19d/0x230 ip_list_rcv+0x13e/0x170 __netif_receive_skb_list_core+0x1c2/0x240 netif_receive_skb_list_internal+0x1e6/0x320 napi_complete_done+0x11d/0x190 mlx5e_napi_poll+0x163/0x6b0 [mlx5_core] __napi_poll+0x3c/0x1b0 net_rx_action+0x27c/0x300 __do_softirq+0x114/0x2d2 irq_exit_rcu+0xb4/0xe0 common_interrupt+0xba/0xe0 The crash is caused by privately transferring waitqueue entries from smc socket->wq to clcsock->wq. The owners of these entries, such as epoll, have no idea that the entries have been transferred to a different socket wait queue and still use original waitqueue spinlock (smc socket->wq.wait.lock) to make the entries operation exclusive, but it doesn't work. The operations to the entries, such as removing from the waitqueue (now is clcsock->wq after fallback), may cause a crash when clcsock waitqueue is being iterated over at the moment. This patch tries to fix this by no longer transferring wait queue entries privately, but introducing own implementations of clcsock's callback functions in fallback situation. The callback functions will forward the wakeup to smc socket->wq if clcsock->wq is actually woken up and smc socket->wq has remaining entries. Fixes: 2153bd1e3d3d ("net/smc: Transfer remaining wait queue entries during fallback") Suggested-by: Karsten Graul Signed-off-by: Wen Gu Acked-by: Karsten Graul Signed-off-by: David S. Miller --- net/smc/af_smc.c | 133 ++++++++++++++++++++++++++++++++++++++++++++++++------- net/smc/smc.h | 20 ++++++++- 2 files changed, 137 insertions(+), 16 deletions(-) diff --git a/net/smc/af_smc.c b/net/smc/af_smc.c index d5ea62b82bb8..8c89d0b0ca18 100644 --- a/net/smc/af_smc.c +++ b/net/smc/af_smc.c @@ -566,17 +566,115 @@ static void smc_stat_fallback(struct smc_sock *smc) mutex_unlock(&net->smc.mutex_fback_rsn); } +/* must be called under rcu read lock */ +static void smc_fback_wakeup_waitqueue(struct smc_sock *smc, void *key) +{ + struct socket_wq *wq; + __poll_t flags; + + wq = rcu_dereference(smc->sk.sk_wq); + if (!skwq_has_sleeper(wq)) + return; + + /* wake up smc sk->sk_wq */ + if (!key) { + /* sk_state_change */ + wake_up_interruptible_all(&wq->wait); + } else { + flags = key_to_poll(key); + if (flags & (EPOLLIN | EPOLLOUT)) + /* sk_data_ready or sk_write_space */ + wake_up_interruptible_sync_poll(&wq->wait, flags); + else if (flags & EPOLLERR) + /* sk_error_report */ + wake_up_interruptible_poll(&wq->wait, flags); + } +} + +static int smc_fback_mark_woken(wait_queue_entry_t *wait, + unsigned int mode, int sync, void *key) +{ + struct smc_mark_woken *mark = + container_of(wait, struct smc_mark_woken, wait_entry); + + mark->woken = true; + mark->key = key; + return 0; +} + +static void smc_fback_forward_wakeup(struct smc_sock *smc, struct sock *clcsk, + void (*clcsock_callback)(struct sock *sk)) +{ + struct smc_mark_woken mark = { .woken = false }; + struct socket_wq *wq; + + init_waitqueue_func_entry(&mark.wait_entry, + smc_fback_mark_woken); + rcu_read_lock(); + wq = rcu_dereference(clcsk->sk_wq); + if (!wq) + goto out; + add_wait_queue(sk_sleep(clcsk), &mark.wait_entry); + clcsock_callback(clcsk); + remove_wait_queue(sk_sleep(clcsk), &mark.wait_entry); + + if (mark.woken) + smc_fback_wakeup_waitqueue(smc, mark.key); +out: + rcu_read_unlock(); +} + +static void smc_fback_state_change(struct sock *clcsk) +{ + struct smc_sock *smc = + smc_clcsock_user_data(clcsk); + + if (!smc) + return; + smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_state_change); +} + +static 
void smc_fback_data_ready(struct sock *clcsk) +{ + struct smc_sock *smc = + smc_clcsock_user_data(clcsk); + + if (!smc) + return; + smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_data_ready); +} + +static void smc_fback_write_space(struct sock *clcsk) +{ + struct smc_sock *smc = + smc_clcsock_user_data(clcsk); + + if (!smc) + return; + smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_write_space); +} + +static void smc_fback_error_report(struct sock *clcsk) +{ + struct smc_sock *smc = + smc_clcsock_user_data(clcsk); + + if (!smc) + return; + smc_fback_forward_wakeup(smc, clcsk, smc->clcsk_error_report); +} + static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code) { - wait_queue_head_t *smc_wait = sk_sleep(&smc->sk); - wait_queue_head_t *clc_wait; - unsigned long flags; + struct sock *clcsk; mutex_lock(&smc->clcsock_release_lock); if (!smc->clcsock) { mutex_unlock(&smc->clcsock_release_lock); return -EBADF; } + clcsk = smc->clcsock->sk; + smc->use_fallback = true; smc->fallback_rsn = reason_code; smc_stat_fallback(smc); @@ -587,16 +685,22 @@ static int smc_switch_to_fallback(struct smc_sock *smc, int reason_code) smc->clcsock->wq.fasync_list = smc->sk.sk_socket->wq.fasync_list; - /* There may be some entries remaining in - * smc socket->wq, which should be removed - * to clcsocket->wq during the fallback. + /* There might be some wait entries remaining + * in smc sk->sk_wq and they should be woken up + * as clcsock's wait queue is woken up. */ - clc_wait = sk_sleep(smc->clcsock->sk); - spin_lock_irqsave(&smc_wait->lock, flags); - spin_lock_nested(&clc_wait->lock, SINGLE_DEPTH_NESTING); - list_splice_init(&smc_wait->head, &clc_wait->head); - spin_unlock(&clc_wait->lock); - spin_unlock_irqrestore(&smc_wait->lock, flags); + smc->clcsk_state_change = clcsk->sk_state_change; + smc->clcsk_data_ready = clcsk->sk_data_ready; + smc->clcsk_write_space = clcsk->sk_write_space; + smc->clcsk_error_report = clcsk->sk_error_report; + + clcsk->sk_state_change = smc_fback_state_change; + clcsk->sk_data_ready = smc_fback_data_ready; + clcsk->sk_write_space = smc_fback_write_space; + clcsk->sk_error_report = smc_fback_error_report; + + smc->clcsock->sk->sk_user_data = + (void *)((uintptr_t)smc | SK_USER_DATA_NOCOPY); } mutex_unlock(&smc->clcsock_release_lock); return 0; @@ -2115,10 +2219,9 @@ out: static void smc_clcsock_data_ready(struct sock *listen_clcsock) { - struct smc_sock *lsmc; + struct smc_sock *lsmc = + smc_clcsock_user_data(listen_clcsock); - lsmc = (struct smc_sock *) - ((uintptr_t)listen_clcsock->sk_user_data & ~SK_USER_DATA_NOCOPY); if (!lsmc) return; lsmc->clcsk_data_ready(listen_clcsock); diff --git a/net/smc/smc.h b/net/smc/smc.h index 3d0b8e300deb..37b2001a0255 100644 --- a/net/smc/smc.h +++ b/net/smc/smc.h @@ -139,6 +139,12 @@ enum smc_urg_state { SMC_URG_READ = 3, /* data was already read */ }; +struct smc_mark_woken { + bool woken; + void *key; + wait_queue_entry_t wait_entry; +}; + struct smc_connection { struct rb_node alert_node; struct smc_link_group *lgr; /* link group of connection */ @@ -228,8 +234,14 @@ struct smc_connection { struct smc_sock { /* smc sock container */ struct sock sk; struct socket *clcsock; /* internal tcp socket */ + void (*clcsk_state_change)(struct sock *sk); + /* original stat_change fct. */ void (*clcsk_data_ready)(struct sock *sk); - /* original data_ready fct. **/ + /* original data_ready fct. */ + void (*clcsk_write_space)(struct sock *sk); + /* original write_space fct. 
*/ + void (*clcsk_error_report)(struct sock *sk); + /* original error_report fct. */ struct smc_connection conn; /* smc connection */ struct smc_sock *listen_smc; /* listen parent */ struct work_struct connect_work; /* handle non-blocking connect*/ @@ -264,6 +276,12 @@ static inline struct smc_sock *smc_sk(const struct sock *sk) return (struct smc_sock *)sk; } +static inline struct smc_sock *smc_clcsock_user_data(struct sock *clcsk) +{ + return (struct smc_sock *) + ((uintptr_t)clcsk->sk_user_data & ~SK_USER_DATA_NOCOPY); +} + extern struct workqueue_struct *smc_hs_wq; /* wq for handshake work */ extern struct workqueue_struct *smc_close_wq; /* wq for close work */ -- cgit From baf927a833ca2c6717795ac131079f485cb7a5dc Mon Sep 17 00:00:00 2001 From: Horatiu Vultur Date: Mon, 31 Jan 2022 09:52:01 +0100 Subject: pinctrl: microchip-sgpio: Fix support for regmap Initially the driver accessed the registers using u32 __iomem but then in the blamed commit it changed it to use regmap. The problem is that now the offset of the registers is not calculated anymore at word offset but at byte offset. Therefore make sure to multiply the offset with word size. Acked-by: Steen Hegelund Reviewed-by: Colin Foster Fixes: 2afbbab45c261a ("pinctrl: microchip-sgpio: update to support regmap") Signed-off-by: Horatiu Vultur Reviewed-by: Andy Shevchenko Link: https://lore.kernel.org/r/20220131085201.307031-1-horatiu.vultur@microchip.com Signed-off-by: Linus Walleij --- drivers/pinctrl/pinctrl-microchip-sgpio.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/pinctrl/pinctrl-microchip-sgpio.c b/drivers/pinctrl/pinctrl-microchip-sgpio.c index 8e081c90bdb2..639f1130e989 100644 --- a/drivers/pinctrl/pinctrl-microchip-sgpio.c +++ b/drivers/pinctrl/pinctrl-microchip-sgpio.c @@ -137,7 +137,8 @@ static inline int sgpio_addr_to_pin(struct sgpio_priv *priv, int port, int bit) static inline u32 sgpio_get_addr(struct sgpio_priv *priv, u32 rno, u32 off) { - return priv->properties->regoff[rno] + off; + return (priv->properties->regoff[rno] + off) * + regmap_get_reg_stride(priv->regs); } static u32 sgpio_readl(struct sgpio_priv *priv, u32 rno, u32 off) -- cgit From 092f45b13e51666fe8ecbf2d6cd247aa7e6c1f74 Mon Sep 17 00:00:00 2001 From: Sean Anderson Date: Thu, 27 Jan 2022 14:00:02 -0500 Subject: usb: ulpi: Move of_node_put to ulpi_dev_release Drivers are not unbound from the device when ulpi_unregister_interface is called. Move of_node-freeing code to ulpi_dev_release which is called only after all users are gone. 
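The rule the patch follows can be illustrated with a small refcounting sketch (plain userspace C, not the ULPI or driver-core code; every name below is invented): references held by an object are dropped in the release callback that runs when the last user goes away, while unregister only drops the registration's own reference:

------------------------< release-callback-sketch.c >------------------------
#include <stdio.h>
#include <stdlib.h>

struct node {
        const char *name;                   /* stands in for the DT node  */
};

struct object {
        int refcount;
        struct node *node;                  /* reference the object holds */
};

/* Runs only once the last reference is gone; the right place to drop
 * whatever the object itself was holding on to. */
static void object_release(struct object *obj)
{
        free(obj->node);
        free(obj);
}

static void object_put(struct object *obj)
{
        if (--obj->refcount == 0)
                object_release(obj);
}

/* Unregistering only removes the registration's reference; users that
 * still hold the object keep it (and its node) alive. */
static void object_unregister(struct object *obj)
{
        object_put(obj);
}

int main(void)
{
        struct object *obj = malloc(sizeof(*obj));
        struct node *node = malloc(sizeof(*node));

        if (!obj || !node) {
                free(obj);
                free(node);
                return 1;
        }
        node->name = "phy@0";
        obj->node = node;
        obj->refcount = 2;                  /* registration + one user */

        object_unregister(obj);             /* user still holds a reference  */
        printf("still usable: %s\n", obj->node->name);
        object_put(obj);                    /* last user drops it -> release */
        return 0;
}
------------------------------------------------------------------------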
Fixes: ef6a7bcfb01c ("usb: ulpi: Support device discovery via DT") Cc: stable Reviewed-by: Heikki Krogerus Signed-off-by: Sean Anderson Link: https://lore.kernel.org/r/20220127190004.1446909-2-sean.anderson@seco.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/common/ulpi.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 8f8405b0d608..09ad569a1a35 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -130,6 +130,7 @@ static const struct attribute_group *ulpi_dev_attr_groups[] = { static void ulpi_dev_release(struct device *dev) { + of_node_put(dev->of_node); kfree(to_ulpi_dev(dev)); } @@ -299,7 +300,6 @@ EXPORT_SYMBOL_GPL(ulpi_register_interface); */ void ulpi_unregister_interface(struct ulpi *ulpi) { - of_node_put(ulpi->dev.of_node); device_unregister(&ulpi->dev); } EXPORT_SYMBOL_GPL(ulpi_unregister_interface); -- cgit From 0a907ee9d95e3ac35eb023d71f29eae0aaa52d1b Mon Sep 17 00:00:00 2001 From: Sean Anderson Date: Thu, 27 Jan 2022 14:00:03 -0500 Subject: usb: ulpi: Call of_node_put correctly of_node_put should always be called on device nodes gotten from of_get_*. Additionally, it should only be called after there are no remaining users. To address the first issue, call of_node_put if later steps in ulpi_register fail. To address the latter, call put_device if device_register fails, which will call ulpi_dev_release if necessary. Fixes: ef6a7bcfb01c ("usb: ulpi: Support device discovery via DT") Cc: stable Reviewed-by: Heikki Krogerus Signed-off-by: Sean Anderson Link: https://lore.kernel.org/r/20220127190004.1446909-3-sean.anderson@seco.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/common/ulpi.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/drivers/usb/common/ulpi.c b/drivers/usb/common/ulpi.c index 09ad569a1a35..5509d3847af4 100644 --- a/drivers/usb/common/ulpi.c +++ b/drivers/usb/common/ulpi.c @@ -248,12 +248,16 @@ static int ulpi_register(struct device *dev, struct ulpi *ulpi) return ret; ret = ulpi_read_id(ulpi); - if (ret) + if (ret) { + of_node_put(ulpi->dev.of_node); return ret; + } ret = device_register(&ulpi->dev); - if (ret) + if (ret) { + put_device(&ulpi->dev); return ret; + } dev_dbg(&ulpi->dev, "registered ULPI PHY: vendor %04x, product %04x\n", ulpi->id.vendor, ulpi->id.product); -- cgit From a4f399a1416f645ac701064a55b0cb5203707ac9 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sun, 30 Jan 2022 09:06:36 +0100 Subject: Input: wm97xx: Simplify resource management Since the commit in the Fixes tag below, 'wm->input_dev' is a managed resource that doesn't need to be explicitly unregistered or freed (see devm_input_allocate_device() documentation) So, remove some unless line of code to slightly simplify it. 
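A toy model of the device-managed idea (a hedged sketch in plain C, not the wm97xx or devres code; devm_alloc() and the structures below are invented names): once a resource is attached to the device, teardown releases it, so an error path in setup can simply return instead of freeing it by hand:

------------------------< devres-sketch.c >------------------------
#include <stdio.h>
#include <stdlib.h>

/* Resources attached to a device are released when the device is torn
 * down, not by each error path individually. */
struct devres {
        void *data;
        struct devres *next;
};

struct device {
        struct devres *res;
};

static void *devm_alloc(struct device *dev, size_t size)
{
        struct devres *dr = malloc(sizeof(*dr));

        if (!dr)
                return NULL;
        dr->data = calloc(1, size);
        if (!dr->data) {
                free(dr);
                return NULL;
        }
        dr->next = dev->res;
        dev->res = dr;
        return dr->data;
}

static void device_teardown(struct device *dev)
{
        while (dev->res) {
                struct devres *dr = dev->res;

                dev->res = dr->next;
                free(dr->data);
                free(dr);
        }
}

int main(void)
{
        struct device dev = { NULL };
        char *input = devm_alloc(&dev, 64);     /* like a devm_*_alloc() call */

        if (!input)
                return 1;
        /* A later setup step failing can simply return; no manual free. */
        device_teardown(&dev);                  /* teardown releases everything */
        puts("all managed resources released");
        return 0;
}
------------------------------------------------------------------------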
Fixes: c72f61e74073 ("Input: wm97xx: split out touchscreen registering") Signed-off-by: Christophe JAILLET Acked-by: Charles Keepax Link: https://lore.kernel.org/r/87dce7e80ea9b191843fa22415ca3aef5f3cc2e6.1643529968.git.christophe.jaillet@wanadoo.fr Signed-off-by: Mark Brown --- drivers/input/touchscreen/wm97xx-core.c | 12 +++--------- 1 file changed, 3 insertions(+), 9 deletions(-) diff --git a/drivers/input/touchscreen/wm97xx-core.c b/drivers/input/touchscreen/wm97xx-core.c index 78d2ee99f37a..1b58611c8084 100644 --- a/drivers/input/touchscreen/wm97xx-core.c +++ b/drivers/input/touchscreen/wm97xx-core.c @@ -615,10 +615,9 @@ static int wm97xx_register_touch(struct wm97xx *wm) * extensions) */ wm->touch_dev = platform_device_alloc("wm97xx-touch", -1); - if (!wm->touch_dev) { - ret = -ENOMEM; - goto touch_err; - } + if (!wm->touch_dev) + return -ENOMEM; + platform_set_drvdata(wm->touch_dev, wm); wm->touch_dev->dev.parent = wm->dev; wm->touch_dev->dev.platform_data = pdata; @@ -629,9 +628,6 @@ static int wm97xx_register_touch(struct wm97xx *wm) return 0; touch_reg_err: platform_device_put(wm->touch_dev); -touch_err: - input_unregister_device(wm->input_dev); - wm->input_dev = NULL; return ret; } @@ -639,8 +635,6 @@ touch_err: static void wm97xx_unregister_touch(struct wm97xx *wm) { platform_device_unregister(wm->touch_dev); - input_unregister_device(wm->input_dev); - wm->input_dev = NULL; } static int _wm97xx_probe(struct wm97xx *wm) -- cgit From 5c2b9c61ae5d8ad0a196d33b66ce44543be22281 Mon Sep 17 00:00:00 2001 From: Tommaso Merciai Date: Fri, 28 Jan 2022 19:17:13 +0100 Subject: usb: usb251xb: add boost-up property support Add support for boost-up register of usb251xb hub. boost-up property control USB electrical drive strength This register can be set: - Normal mode -> 0x00 - Low -> 0x01 - Medium -> 0x10 - High -> 0x11 (Normal Default) References: - http://www.mouser.com/catalog/specsheets/2514.pdf p29 Reviewed-by: Richard Leitner Signed-off-by: Tommaso Merciai Link: https://lore.kernel.org/r/20220128181713.96856-1-tomm.merciai@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/misc/usb251xb.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/drivers/usb/misc/usb251xb.c b/drivers/usb/misc/usb251xb.c index 507deef1f709..04c4e3fed094 100644 --- a/drivers/usb/misc/usb251xb.c +++ b/drivers/usb/misc/usb251xb.c @@ -543,6 +543,9 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, if (of_property_read_u16_array(np, "language-id", &hub->lang_id, 1)) hub->lang_id = USB251XB_DEF_LANGUAGE_ID; + if (of_property_read_u8(np, "boost-up", &hub->boost_up)) + hub->boost_up = USB251XB_DEF_BOOST_UP; + cproperty_char = of_get_property(np, "manufacturer", NULL); strlcpy(str, cproperty_char ? : USB251XB_DEF_MANUFACTURER_STRING, sizeof(str)); @@ -584,7 +587,6 @@ static int usb251xb_get_ofdata(struct usb251xb *hub, * may be as soon as needed. */ hub->bat_charge_en = USB251XB_DEF_BATTERY_CHARGING_ENABLE; - hub->boost_up = USB251XB_DEF_BOOST_UP; hub->boost_57 = USB251XB_DEF_BOOST_57; hub->boost_14 = USB251XB_DEF_BOOST_14; hub->port_map12 = USB251XB_DEF_PORT_MAP_12; -- cgit From 292d2c82b105d92082c2120a44a58de9767e44f1 Mon Sep 17 00:00:00 2001 From: Jann Horn Date: Wed, 26 Jan 2022 21:52:14 +0100 Subject: usb: raw-gadget: fix handling of dual-direction-capable endpoints Under dummy_hcd, every available endpoint is *either* IN or OUT capable. But with some real hardware, there are endpoints that support both IN and OUT. 
In particular, the PLX 2380 has four available endpoints that each support both IN and OUT. raw-gadget currently gets confused and thinks that any endpoint that is usable as an IN endpoint can never be used as an OUT endpoint. Fix it by looking at the direction in the configured endpoint descriptor instead of looking at the hardware capabilities. With this change, I can use the PLX 2380 with raw-gadget. Fixes: f2c2e717642c ("usb: gadget: add raw-gadget interface") Cc: stable Tested-by: Andrey Konovalov Reviewed-by: Andrey Konovalov Signed-off-by: Jann Horn Link: https://lore.kernel.org/r/20220126205214.2149936-1-jannh@google.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/legacy/raw_gadget.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c index c5a2c734234a..d86c3a36441e 100644 --- a/drivers/usb/gadget/legacy/raw_gadget.c +++ b/drivers/usb/gadget/legacy/raw_gadget.c @@ -1004,7 +1004,7 @@ static int raw_process_ep_io(struct raw_dev *dev, struct usb_raw_ep_io *io, ret = -EBUSY; goto out_unlock; } - if ((in && !ep->ep->caps.dir_in) || (!in && ep->ep->caps.dir_in)) { + if (in != usb_endpoint_dir_in(ep->ep->desc)) { dev_dbg(&dev->gadget->dev, "fail, wrong direction\n"); ret = -EINVAL; goto out_unlock; -- cgit From 459702eea6132888b5c5b64c0e9c626da4ec2493 Mon Sep 17 00:00:00 2001 From: Adam Ford Date: Fri, 28 Jan 2022 16:36:03 -0600 Subject: usb: gadget: udc: renesas_usb3: Fix host to USB_ROLE_NONE transition The support the external role switch a variety of situations were addressed, but the transition from USB_ROLE_HOST to USB_ROLE_NONE leaves the host up which can cause some error messages when switching from host to none, to gadget, to none, and then back to host again. xhci-hcd ee000000.usb: Abort failed to stop command ring: -110 xhci-hcd ee000000.usb: xHCI host controller not responding, assume dead xhci-hcd ee000000.usb: HC died; cleaning up usb 4-1: device not accepting address 6, error -108 usb usb4-port1: couldn't allocate usb_device After this happens it will not act as a host again. Fix this by releasing the host mode when transitioning to USB_ROLE_NONE. Fixes: 0604160d8c0b ("usb: gadget: udc: renesas_usb3: Enhance role switch support") Cc: stable Reviewed-by: Yoshihiro Shimoda Signed-off-by: Adam Ford Link: https://lore.kernel.org/r/20220128223603.2362621-1-aford173@gmail.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/udc/renesas_usb3.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/usb/gadget/udc/renesas_usb3.c b/drivers/usb/gadget/udc/renesas_usb3.c index 57d417a7c3e0..601829a6b4ba 100644 --- a/drivers/usb/gadget/udc/renesas_usb3.c +++ b/drivers/usb/gadget/udc/renesas_usb3.c @@ -2378,6 +2378,8 @@ static void handle_ext_role_switch_states(struct device *dev, switch (role) { case USB_ROLE_NONE: usb3->connection_state = USB_ROLE_NONE; + if (cur_role == USB_ROLE_HOST) + device_release_driver(host); if (usb3->driver) usb3_disconnect(usb3); usb3_vbus_out(usb3, false); -- cgit From 5432184107cd0013761bdfa6cb6079527ef87b95 Mon Sep 17 00:00:00 2001 From: Pavel Hofman Date: Mon, 31 Jan 2022 08:18:13 +0100 Subject: usb: gadget: f_uac2: Define specific wTerminalType Several users have reported that their Win10 does not enumerate UAC2 gadget with the existing wTerminalType set to UAC_INPUT_TERMINAL_UNDEFINED/UAC_INPUT_TERMINAL_UNDEFINED, e.g. https://github.com/raspberrypi/linux/issues/4587#issuecomment-926567213. 
While the constant is officially defined by the USB terminal types document, e.g. XMOS firmware for UAC2 (commonly used for Win10) defines no undefined output terminal type in its usbaudio20.h header. Therefore wTerminalType of EP-IN is set to UAC_INPUT_TERMINAL_MICROPHONE and wTerminalType of EP-OUT to UAC_OUTPUT_TERMINAL_SPEAKER for the UAC2 gadget. Signed-off-by: Pavel Hofman Cc: stable Link: https://lore.kernel.org/r/20220131071813.7433-1-pavel.hofman@ivitera.com Signed-off-by: Greg Kroah-Hartman --- drivers/usb/gadget/function/f_uac2.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c index 36fa6ef0581b..097a709549d6 100644 --- a/drivers/usb/gadget/function/f_uac2.c +++ b/drivers/usb/gadget/function/f_uac2.c @@ -203,7 +203,7 @@ static struct uac2_input_terminal_descriptor io_in_it_desc = { .bDescriptorSubtype = UAC_INPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_INPUT_TERMINAL_MICROPHONE), .bAssocTerminal = 0, /* .bCSourceID = DYNAMIC */ .iChannelNames = 0, @@ -231,7 +231,7 @@ static struct uac2_output_terminal_descriptor io_out_ot_desc = { .bDescriptorSubtype = UAC_OUTPUT_TERMINAL, /* .bTerminalID = DYNAMIC */ - .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_UNDEFINED), + .wTerminalType = cpu_to_le16(UAC_OUTPUT_TERMINAL_SPEAKER), .bAssocTerminal = 0, /* .bSourceID = DYNAMIC */ /* .bCSourceID = DYNAMIC */ -- cgit From c816b2e65b0e86b95011418cad334f0524fc33b8 Mon Sep 17 00:00:00 2001 From: "TATSUKAWA KOSUKE (立川 江介)" Date: Wed, 26 Jan 2022 23:35:02 +0000 Subject: n_tty: wake up poll(POLLRDNORM) on receiving data The poll man page says POLLRDNORM is equivalent to POLLIN when used as an event. $ man poll POLLRDNORM Equivalent to POLLIN. However, in n_tty driver, POLLRDNORM does not return until timeout even if there is terminal input, whereas POLLIN returns. The following test program works until kernel-3.17, but the test stops in poll() after commit 57087d515441 ("tty: Fix spurious poll() wakeups"). [Steps to run test program] $ cc -o test-pollrdnorm test-pollrdnorm.c $ ./test-pollrdnorm foo <-- Type in something from the terminal followed by [RET]. The string should be echoed back. ------------------------< test-pollrdnorm.c >------------------------ #include #include #include #include void main(void) { int n; unsigned char buf[8]; struct pollfd fds[1] = {{ 0, POLLRDNORM, 0 }}; n = poll(fds, 1, -1); if (n < 0) perror("poll"); n = read(0, buf, 8); if (n < 0) perror("read"); if (n > 0) write(1, buf, n); } ------------------------------------------------------------------------ The attached patch fixes this problem. Many calls to wake_up_interruptible_poll() in the kernel source code already specify "POLLIN | POLLRDNORM". 
Fixes: 57087d515441 ("tty: Fix spurious poll() wakeups") Cc: stable@vger.kernel.org Signed-off-by: Kosuke Tatsukawa Link: https://lore.kernel.org/r/TYCPR01MB81901C0F932203D30E452B3EA5209@TYCPR01MB8190.jpnprd01.prod.outlook.com Signed-off-by: Greg Kroah-Hartman --- drivers/tty/n_tty.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index 8933ef1f83c0..5e988e514653 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1329,7 +1329,7 @@ handle_newline: put_tty_queue(c, ldata); smp_store_release(&ldata->canon_head, ldata->read_head); kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); return; } } @@ -1561,7 +1561,7 @@ static void __receive_buf(struct tty_struct *tty, const unsigned char *cp, if (read_cnt(ldata)) { kill_fasync(&tty->fasync, SIGIO, POLL_IN); - wake_up_interruptible_poll(&tty->read_wait, EPOLLIN); + wake_up_interruptible_poll(&tty->read_wait, EPOLLIN | EPOLLRDNORM); } } -- cgit From f6c6804c43fa18d3cee64b55490dfbd3bef1363a Mon Sep 17 00:00:00 2001 From: Janosch Frank Date: Fri, 28 Jan 2022 15:40:25 +0000 Subject: kvm: Move KVM_GET_XSAVE2 IOCTL definition at the end of kvm.h This way we can more easily find the next free IOCTL number when adding new IOCTLs. Fixes: be50b2065dfa ("kvm: x86: Add support for getting/setting expanded xstate buffer") Signed-off-by: Janosch Frank Message-Id: <20220128154025.102666-1-frankja@linux.ibm.com> Signed-off-by: Paolo Bonzini --- include/uapi/linux/kvm.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h index b46bcdb0cab1..5191b57e1562 100644 --- a/include/uapi/linux/kvm.h +++ b/include/uapi/linux/kvm.h @@ -1624,9 +1624,6 @@ struct kvm_enc_region { #define KVM_S390_NORMAL_RESET _IO(KVMIO, 0xc3) #define KVM_S390_CLEAR_RESET _IO(KVMIO, 0xc4) -/* Available with KVM_CAP_XSAVE2 */ -#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) - struct kvm_s390_pv_sec_parm { __u64 origin; __u64 length; @@ -2048,4 +2045,7 @@ struct kvm_stats_desc { #define KVM_GET_STATS_FD _IO(KVMIO, 0xce) +/* Available with KVM_CAP_XSAVE2 */ +#define KVM_GET_XSAVE2 _IOR(KVMIO, 0xcf, struct kvm_xsave) + #endif /* __LINUX_KVM_H */ -- cgit From bd2db32e7c3e35bd4d9b8bbff689434a50893546 Mon Sep 17 00:00:00 2001 From: Greg Kroah-Hartman Date: Thu, 27 Jan 2022 08:16:38 +0100 Subject: moxart: fix potential use-after-free on remove path It was reported that the mmc host structure could be accessed after it was freed in moxart_remove(), so fix this by saving the base register of the device and using it instead of the pointer dereference. 
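The moxart bug is the common teardown-ordering mistake: the register base lives inside the allocation that mmc_free_host() releases, so every register access has to happen before the free. A minimal userspace sketch of the same ordering rule, with made-up names rather than the driver's structures:

#include <stdlib.h>

/* Stand-in for the mmc host: the "register base" is part of the
 * allocation that the final free releases. */
struct fake_host {
	unsigned int regs[4];
};

static void fake_remove(struct fake_host *h)
{
	h->regs[0] = 0;		/* quiesce the "hardware" first ... */
	h->regs[1] = 0;
	free(h);		/* ... and release the memory last; touching
				 * h->regs after this point would be exactly
				 * the use-after-free being fixed */
}

int main(void)
{
	struct fake_host *h = calloc(1, sizeof(*h));

	if (h)
		fake_remove(h);
	return 0;
}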
Cc: Ulf Hansson Cc: Xiyu Yang Cc: Xin Xiong Cc: Xin Tan Cc: Tony Lindgren Cc: Yang Li Cc: linux-mmc@vger.kernel.org Cc: stable Reported-by: whitehat002 Signed-off-by: Greg Kroah-Hartman Link: https://lore.kernel.org/r/20220127071638.4057899-1-gregkh@linuxfoundation.org Signed-off-by: Ulf Hansson --- drivers/mmc/host/moxart-mmc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mmc/host/moxart-mmc.c b/drivers/mmc/host/moxart-mmc.c index 16d1c7a43d33..b6eb75f4bbfc 100644 --- a/drivers/mmc/host/moxart-mmc.c +++ b/drivers/mmc/host/moxart-mmc.c @@ -705,12 +705,12 @@ static int moxart_remove(struct platform_device *pdev) if (!IS_ERR_OR_NULL(host->dma_chan_rx)) dma_release_channel(host->dma_chan_rx); mmc_remove_host(mmc); - mmc_free_host(mmc); writel(0, host->base + REG_INTERRUPT_MASK); writel(0, host->base + REG_POWER_CONTROL); writel(readl(host->base + REG_CLOCK_CONTROL) | CLK_OFF, host->base + REG_CLOCK_CONTROL); + mmc_free_host(mmc); return 0; } -- cgit From 2d192fc4c1abeb0d04d1c8cd54405ff4a0b0255b Mon Sep 17 00:00:00 2001 From: Qu Wenruo Date: Thu, 16 Dec 2021 19:47:35 +0800 Subject: btrfs: don't start transaction for scrub if the fs is mounted read-only [BUG] The following super simple script would crash btrfs at unmount time, if CONFIG_BTRFS_ASSERT() is set. mkfs.btrfs -f $dev mount $dev $mnt xfs_io -f -c "pwrite 0 4k" $mnt/file umount $mnt mount -r ro $dev $mnt btrfs scrub start -Br $mnt umount $mnt This will trigger the following ASSERT() introduced by commit 0a31daa4b602 ("btrfs: add assertion for empty list of transactions at late stage of umount"). That patch is definitely not the cause, it just makes enough noise for developers. [CAUSE] We will start transaction for the following call chain during scrub: scrub_enumerate_chunks() |- btrfs_inc_block_group_ro() |- btrfs_join_transaction() However for RO mount, there is no running transaction at all, thus btrfs_join_transaction() will start a new transaction. Furthermore, since it's read-only mount, btrfs_sync_fs() will not call btrfs_commit_super() to commit the new but empty transaction. And leads to the ASSERT(). The bug has been there for a long time. Only the new ASSERT() makes it noisy enough to be noticed. [FIX] For read-only scrub on read-only mount, there is no need to start a transaction nor to allocate new chunks in btrfs_inc_block_group_ro(). Just do extra read-only mount check in btrfs_inc_block_group_ro(), and if it's read-only, skip all chunk allocation and go inc_block_group_ro() directly. CC: stable@vger.kernel.org # 5.4+ Signed-off-by: Qu Wenruo Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 1db24e6d6d90..68feabc83a27 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -2544,6 +2544,19 @@ int btrfs_inc_block_group_ro(struct btrfs_block_group *cache, int ret; bool dirty_bg_running; + /* + * This can only happen when we are doing read-only scrub on read-only + * mount. + * In that case we should not start a new transaction on read-only fs. + * Thus here we skip all chunk allocations. 
+ */ + if (sb_rdonly(fs_info->sb)) { + mutex_lock(&fs_info->ro_block_group_mutex); + ret = inc_block_group_ro(cache, 0); + mutex_unlock(&fs_info->ro_block_group_mutex); + return ret; + } + do { trans = btrfs_join_transaction(root); if (IS_ERR(trans)) -- cgit From e804861bd4e69cc5fe1053eedcb024982dde8e48 Mon Sep 17 00:00:00 2001 From: Shin'ichiro Kawasaki Date: Thu, 20 Jan 2022 20:09:16 +0900 Subject: btrfs: fix deadlock between quota disable and qgroup rescan worker Quota disable ioctl starts a transaction before waiting for the qgroup rescan worker completes. However, this wait can be infinite and results in deadlock because of circular dependency among the quota disable ioctl, the qgroup rescan worker and the other task with transaction such as block group relocation task. The deadlock happens with the steps following: 1) Task A calls ioctl to disable quota. It starts a transaction and waits for qgroup rescan worker completes. 2) Task B such as block group relocation task starts a transaction and joins to the transaction that task A started. Then task B commits to the transaction. In this commit, task B waits for a commit by task A. 3) Task C as the qgroup rescan worker starts its job and starts a transaction. In this transaction start, task C waits for completion of the transaction that task A started and task B committed. This deadlock was found with fstests test case btrfs/115 and a zoned null_blk device. The test case enables and disables quota, and the block group reclaim was triggered during the quota disable by chance. The deadlock was also observed by running quota enable and disable in parallel with 'btrfs balance' command on regular null_blk devices. An example report of the deadlock: [372.469894] INFO: task kworker/u16:6:103 blocked for more than 122 seconds. [372.479944] Not tainted 5.16.0-rc8 #7 [372.485067] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [372.493898] task:kworker/u16:6 state:D stack: 0 pid: 103 ppid: 2 flags:0x00004000 [372.503285] Workqueue: btrfs-qgroup-rescan btrfs_work_helper [btrfs] [372.510782] Call Trace: [372.514092] [372.521684] __schedule+0xb56/0x4850 [372.530104] ? io_schedule_timeout+0x190/0x190 [372.538842] ? lockdep_hardirqs_on+0x7e/0x100 [372.547092] ? _raw_spin_unlock_irqrestore+0x3e/0x60 [372.555591] schedule+0xe0/0x270 [372.561894] btrfs_commit_transaction+0x18bb/0x2610 [btrfs] [372.570506] ? btrfs_apply_pending_changes+0x50/0x50 [btrfs] [372.578875] ? free_unref_page+0x3f2/0x650 [372.585484] ? finish_wait+0x270/0x270 [372.591594] ? release_extent_buffer+0x224/0x420 [btrfs] [372.599264] btrfs_qgroup_rescan_worker+0xc13/0x10c0 [btrfs] [372.607157] ? lock_release+0x3a9/0x6d0 [372.613054] ? btrfs_qgroup_account_extent+0xda0/0xda0 [btrfs] [372.620960] ? do_raw_spin_lock+0x11e/0x250 [372.627137] ? rwlock_bug.part.0+0x90/0x90 [372.633215] ? lock_is_held_type+0xe4/0x140 [372.639404] btrfs_work_helper+0x1ae/0xa90 [btrfs] [372.646268] process_one_work+0x7e9/0x1320 [372.652321] ? lock_release+0x6d0/0x6d0 [372.658081] ? pwq_dec_nr_in_flight+0x230/0x230 [372.664513] ? rwlock_bug.part.0+0x90/0x90 [372.670529] worker_thread+0x59e/0xf90 [372.676172] ? process_one_work+0x1320/0x1320 [372.682440] kthread+0x3b9/0x490 [372.687550] ? _raw_spin_unlock_irq+0x24/0x50 [372.693811] ? set_kthread_struct+0x100/0x100 [372.700052] ret_from_fork+0x22/0x30 [372.705517] [372.709747] INFO: task btrfs-transacti:2347 blocked for more than 123 seconds. 
[372.729827] Not tainted 5.16.0-rc8 #7 [372.745907] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [372.767106] task:btrfs-transacti state:D stack: 0 pid: 2347 ppid: 2 flags:0x00004000 [372.787776] Call Trace: [372.801652] [372.812961] __schedule+0xb56/0x4850 [372.830011] ? io_schedule_timeout+0x190/0x190 [372.852547] ? lockdep_hardirqs_on+0x7e/0x100 [372.871761] ? _raw_spin_unlock_irqrestore+0x3e/0x60 [372.886792] schedule+0xe0/0x270 [372.901685] wait_current_trans+0x22c/0x310 [btrfs] [372.919743] ? btrfs_put_transaction+0x3d0/0x3d0 [btrfs] [372.938923] ? finish_wait+0x270/0x270 [372.959085] ? join_transaction+0xc75/0xe30 [btrfs] [372.977706] start_transaction+0x938/0x10a0 [btrfs] [372.997168] transaction_kthread+0x19d/0x3c0 [btrfs] [373.013021] ? btrfs_cleanup_transaction.isra.0+0xfc0/0xfc0 [btrfs] [373.031678] kthread+0x3b9/0x490 [373.047420] ? _raw_spin_unlock_irq+0x24/0x50 [373.064645] ? set_kthread_struct+0x100/0x100 [373.078571] ret_from_fork+0x22/0x30 [373.091197] [373.105611] INFO: task btrfs:3145 blocked for more than 123 seconds. [373.114147] Not tainted 5.16.0-rc8 #7 [373.120401] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [373.130393] task:btrfs state:D stack: 0 pid: 3145 ppid: 3141 flags:0x00004000 [373.140998] Call Trace: [373.145501] [373.149654] __schedule+0xb56/0x4850 [373.155306] ? io_schedule_timeout+0x190/0x190 [373.161965] ? lockdep_hardirqs_on+0x7e/0x100 [373.168469] ? _raw_spin_unlock_irqrestore+0x3e/0x60 [373.175468] schedule+0xe0/0x270 [373.180814] wait_for_commit+0x104/0x150 [btrfs] [373.187643] ? test_and_set_bit+0x20/0x20 [btrfs] [373.194772] ? kmem_cache_free+0x124/0x550 [373.201191] ? btrfs_put_transaction+0x69/0x3d0 [btrfs] [373.208738] ? finish_wait+0x270/0x270 [373.214704] ? __btrfs_end_transaction+0x347/0x7b0 [btrfs] [373.222342] btrfs_commit_transaction+0x44d/0x2610 [btrfs] [373.230233] ? join_transaction+0x255/0xe30 [btrfs] [373.237334] ? btrfs_record_root_in_trans+0x4d/0x170 [btrfs] [373.245251] ? btrfs_apply_pending_changes+0x50/0x50 [btrfs] [373.253296] relocate_block_group+0x105/0xc20 [btrfs] [373.260533] ? mutex_lock_io_nested+0x1270/0x1270 [373.267516] ? btrfs_wait_nocow_writers+0x85/0x180 [btrfs] [373.275155] ? merge_reloc_roots+0x710/0x710 [btrfs] [373.283602] ? btrfs_wait_ordered_extents+0xd30/0xd30 [btrfs] [373.291934] ? kmem_cache_free+0x124/0x550 [373.298180] btrfs_relocate_block_group+0x35c/0x930 [btrfs] [373.306047] btrfs_relocate_chunk+0x85/0x210 [btrfs] [373.313229] btrfs_balance+0x12f4/0x2d20 [btrfs] [373.320227] ? lock_release+0x3a9/0x6d0 [373.326206] ? btrfs_relocate_chunk+0x210/0x210 [btrfs] [373.333591] ? lock_is_held_type+0xe4/0x140 [373.340031] ? rcu_read_lock_sched_held+0x3f/0x70 [373.346910] btrfs_ioctl_balance+0x548/0x700 [btrfs] [373.354207] btrfs_ioctl+0x7f2/0x71b0 [btrfs] [373.360774] ? lockdep_hardirqs_on_prepare+0x410/0x410 [373.367957] ? lockdep_hardirqs_on_prepare+0x410/0x410 [373.375327] ? btrfs_ioctl_get_supported_features+0x20/0x20 [btrfs] [373.383841] ? find_held_lock+0x2c/0x110 [373.389993] ? lock_release+0x3a9/0x6d0 [373.395828] ? mntput_no_expire+0xf7/0xad0 [373.402083] ? lock_is_held_type+0xe4/0x140 [373.408249] ? vfs_fileattr_set+0x9f0/0x9f0 [373.414486] ? selinux_file_ioctl+0x349/0x4e0 [373.420938] ? trace_raw_output_lock+0xb4/0xe0 [373.427442] ? selinux_inode_getsecctx+0x80/0x80 [373.434224] ? lockdep_hardirqs_on+0x7e/0x100 [373.440660] ? force_qs_rnp+0x2a0/0x6b0 [373.446534] ? lock_is_held_type+0x9b/0x140 [373.452763] ? 
__blkcg_punt_bio_submit+0x1b0/0x1b0 [373.459732] ? security_file_ioctl+0x50/0x90 [373.466089] __x64_sys_ioctl+0x127/0x190 [373.472022] do_syscall_64+0x3b/0x90 [373.477513] entry_SYSCALL_64_after_hwframe+0x44/0xae [373.484823] RIP: 0033:0x7f8f4af7e2bb [373.490493] RSP: 002b:00007ffcbf936178 EFLAGS: 00000246 ORIG_RAX: 0000000000000010 [373.500197] RAX: ffffffffffffffda RBX: 0000000000000003 RCX: 00007f8f4af7e2bb [373.509451] RDX: 00007ffcbf936220 RSI: 00000000c4009420 RDI: 0000000000000003 [373.518659] RBP: 00007ffcbf93774a R08: 0000000000000013 R09: 00007f8f4b02d4e0 [373.527872] R10: 00007f8f4ae87740 R11: 0000000000000246 R12: 0000000000000001 [373.537222] R13: 00007ffcbf936220 R14: 0000000000000000 R15: 0000000000000002 [373.546506] [373.550878] INFO: task btrfs:3146 blocked for more than 123 seconds. [373.559383] Not tainted 5.16.0-rc8 #7 [373.565748] "echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message. [373.575748] task:btrfs state:D stack: 0 pid: 3146 ppid: 2168 flags:0x00000000 [373.586314] Call Trace: [373.590846] [373.595121] __schedule+0xb56/0x4850 [373.600901] ? __lock_acquire+0x23db/0x5030 [373.607176] ? io_schedule_timeout+0x190/0x190 [373.613954] schedule+0xe0/0x270 [373.619157] schedule_timeout+0x168/0x220 [373.625170] ? usleep_range_state+0x150/0x150 [373.631653] ? mark_held_locks+0x9e/0xe0 [373.637767] ? do_raw_spin_lock+0x11e/0x250 [373.643993] ? lockdep_hardirqs_on_prepare+0x17b/0x410 [373.651267] ? _raw_spin_unlock_irq+0x24/0x50 [373.657677] ? lockdep_hardirqs_on+0x7e/0x100 [373.664103] wait_for_completion+0x163/0x250 [373.670437] ? bit_wait_timeout+0x160/0x160 [373.676585] btrfs_quota_disable+0x176/0x9a0 [btrfs] [373.683979] ? btrfs_quota_enable+0x12f0/0x12f0 [btrfs] [373.691340] ? down_write+0xd0/0x130 [373.696880] ? down_write_killable+0x150/0x150 [373.703352] btrfs_ioctl+0x3945/0x71b0 [btrfs] [373.710061] ? find_held_lock+0x2c/0x110 [373.716192] ? lock_release+0x3a9/0x6d0 [373.722047] ? __handle_mm_fault+0x23cd/0x3050 [373.728486] ? btrfs_ioctl_get_supported_features+0x20/0x20 [btrfs] [373.737032] ? set_pte+0x6a/0x90 [373.742271] ? do_raw_spin_unlock+0x55/0x1f0 [373.748506] ? lock_is_held_type+0xe4/0x140 [373.754792] ? vfs_fileattr_set+0x9f0/0x9f0 [373.761083] ? selinux_file_ioctl+0x349/0x4e0 [373.767521] ? selinux_inode_getsecctx+0x80/0x80 [373.774247] ? __up_read+0x182/0x6e0 [373.780026] ? count_memcg_events.constprop.0+0x46/0x60 [373.787281] ? up_write+0x460/0x460 [373.792932] ? 
security_file_ioctl+0x50/0x90 [373.799232] __x64_sys_ioctl+0x127/0x190 [373.805237] do_syscall_64+0x3b/0x90 [373.810947] entry_SYSCALL_64_after_hwframe+0x44/0xae [373.818102] RIP: 0033:0x7f1383ea02bb [373.823847] RSP: 002b:00007fffeb4d71f8 EFLAGS: 00000202 ORIG_RAX: 0000000000000010 [373.833641] RAX: ffffffffffffffda RBX: 0000000000000000 RCX: 00007f1383ea02bb [373.842961] RDX: 00007fffeb4d7210 RSI: 00000000c0109428 RDI: 0000000000000003 [373.852179] RBP: 0000000000000003 R08: 0000000000000003 R09: 0000000000000078 [373.861408] R10: 00007f1383daec78 R11: 0000000000000202 R12: 00007fffeb4d874a [373.870647] R13: 0000000000493099 R14: 0000000000000001 R15: 0000000000000000 [373.879838] [373.884018] Showing all locks held in the system: [373.894250] 3 locks held by kworker/4:1/58: [373.900356] 1 lock held by khungtaskd/63: [373.906333] #0: ffffffff8945ff60 (rcu_read_lock){....}-{1:2}, at: debug_show_all_locks+0x53/0x260 [373.917307] 3 locks held by kworker/u16:6/103: [373.923938] #0: ffff888127b4f138 ((wq_completion)btrfs-qgroup-rescan){+.+.}-{0:0}, at: process_one_work+0x712/0x1320 [373.936555] #1: ffff88810b817dd8 ((work_completion)(&work->normal_work)){+.+.}-{0:0}, at: process_one_work+0x73f/0x1320 [373.951109] #2: ffff888102dd4650 (sb_internal#2){.+.+}-{0:0}, at: btrfs_qgroup_rescan_worker+0x1f6/0x10c0 [btrfs] [373.964027] 2 locks held by less/1803: [373.969982] #0: ffff88813ed56098 (&tty->ldisc_sem){++++}-{0:0}, at: tty_ldisc_ref_wait+0x24/0x80 [373.981295] #1: ffffc90000b3b2e8 (&ldata->atomic_read_lock){+.+.}-{3:3}, at: n_tty_read+0x9e2/0x1060 [373.992969] 1 lock held by btrfs-transacti/2347: [373.999893] #0: ffff88813d4887a8 (&fs_info->transaction_kthread_mutex){+.+.}-{3:3}, at: transaction_kthread+0xe3/0x3c0 [btrfs] [374.015872] 3 locks held by btrfs/3145: [374.022298] #0: ffff888102dd4460 (sb_writers#18){.+.+}-{0:0}, at: btrfs_ioctl_balance+0xc3/0x700 [btrfs] [374.034456] #1: ffff88813d48a0a0 (&fs_info->reclaim_bgs_lock){+.+.}-{3:3}, at: btrfs_balance+0xfe5/0x2d20 [btrfs] [374.047646] #2: ffff88813d488838 (&fs_info->cleaner_mutex){+.+.}-{3:3}, at: btrfs_relocate_block_group+0x354/0x930 [btrfs] [374.063295] 4 locks held by btrfs/3146: [374.069647] #0: ffff888102dd4460 (sb_writers#18){.+.+}-{0:0}, at: btrfs_ioctl+0x38b1/0x71b0 [btrfs] [374.081601] #1: ffff88813d488bb8 (&fs_info->subvol_sem){+.+.}-{3:3}, at: btrfs_ioctl+0x38fd/0x71b0 [btrfs] [374.094283] #2: ffff888102dd4650 (sb_internal#2){.+.+}-{0:0}, at: btrfs_quota_disable+0xc8/0x9a0 [btrfs] [374.106885] #3: ffff88813d489800 (&fs_info->qgroup_ioctl_lock){+.+.}-{3:3}, at: btrfs_quota_disable+0xd5/0x9a0 [btrfs] [374.126780] ============================================= To avoid the deadlock, wait for the qgroup rescan worker to complete before starting the transaction for the quota disable ioctl. Clear BTRFS_FS_QUOTA_ENABLE flag before the wait and the transaction to request the worker to complete. On transaction start failure, set the BTRFS_FS_QUOTA_ENABLE flag again. These BTRFS_FS_QUOTA_ENABLE flag changes can be done safely since the function btrfs_quota_disable is not called concurrently because of fs_info->subvol_sem. Also check the BTRFS_FS_QUOTA_ENABLE flag in qgroup_rescan_init to avoid another qgroup rescan worker to start after the previous qgroup worker completed. 
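The ordering the patch enforces is a general one for shutting down background workers: signal the worker to stop and wait for it to finish before entering the critical section whose resources the worker also needs, otherwise the waiter and the worker can end up blocking on each other. A small pthread sketch of that shutdown ordering (illustrative only, not btrfs code; build with -pthread):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_bool enabled = true;
static pthread_mutex_t resource = PTHREAD_MUTEX_INITIALIZER;

static void *rescan_worker(void *arg)
{
	(void)arg;
	while (atomic_load(&enabled)) {
		pthread_mutex_lock(&resource);	/* the worker needs 'resource' */
		/* ... do a slice of background work ... */
		pthread_mutex_unlock(&resource);
	}
	return NULL;
}

static void disable_feature(pthread_t worker)
{
	atomic_store(&enabled, false);	/* 1. ask the worker to finish       */
	pthread_join(worker, NULL);	/* 2. wait for it, holding no locks  */
	pthread_mutex_lock(&resource);	/* 3. only now take the resource     */
	/* ... tear the feature down ... */
	pthread_mutex_unlock(&resource);
}

int main(void)
{
	pthread_t worker;

	if (pthread_create(&worker, NULL, rescan_worker, NULL))
		return 1;
	disable_feature(worker);
	return 0;
}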
CC: stable@vger.kernel.org # 5.4+ Suggested-by: Nikolay Borisov Reviewed-by: Filipe Manana Signed-off-by: Shin'ichiro Kawasaki Signed-off-by: David Sterba --- fs/btrfs/qgroup.c | 21 +++++++++++++++++++-- 1 file changed, 19 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/qgroup.c b/fs/btrfs/qgroup.c index 8928275823a1..f12dc687350c 100644 --- a/fs/btrfs/qgroup.c +++ b/fs/btrfs/qgroup.c @@ -1185,9 +1185,24 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) struct btrfs_trans_handle *trans = NULL; int ret = 0; + /* + * We need to have subvol_sem write locked, to prevent races between + * concurrent tasks trying to disable quotas, because we will unlock + * and relock qgroup_ioctl_lock across BTRFS_FS_QUOTA_ENABLED changes. + */ + lockdep_assert_held_write(&fs_info->subvol_sem); + mutex_lock(&fs_info->qgroup_ioctl_lock); if (!fs_info->quota_root) goto out; + + /* + * Request qgroup rescan worker to complete and wait for it. This wait + * must be done before transaction start for quota disable since it may + * deadlock with transaction by the qgroup rescan worker. + */ + clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); + btrfs_qgroup_wait_for_completion(fs_info, false); mutex_unlock(&fs_info->qgroup_ioctl_lock); /* @@ -1205,14 +1220,13 @@ int btrfs_quota_disable(struct btrfs_fs_info *fs_info) if (IS_ERR(trans)) { ret = PTR_ERR(trans); trans = NULL; + set_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); goto out; } if (!fs_info->quota_root) goto out; - clear_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags); - btrfs_qgroup_wait_for_completion(fs_info, false); spin_lock(&fs_info->qgroup_lock); quota_root = fs_info->quota_root; fs_info->quota_root = NULL; @@ -3383,6 +3397,9 @@ qgroup_rescan_init(struct btrfs_fs_info *fs_info, u64 progress_objectid, btrfs_warn(fs_info, "qgroup rescan init failed, qgroup is not enabled"); ret = -EINVAL; + } else if (!test_bit(BTRFS_FS_QUOTA_ENABLED, &fs_info->flags)) { + /* Quota disable is in progress */ + ret = -EBUSY; } if (ret) { -- cgit From 0c982944af27d131d3b74242f3528169f66950ad Mon Sep 17 00:00:00 2001 From: Su Yue Date: Fri, 21 Jan 2022 17:33:34 +0800 Subject: btrfs: tree-checker: check item_size for inode_item while mounting the crafted image, out-of-bounds access happens: [350.429619] UBSAN: array-index-out-of-bounds in fs/btrfs/struct-funcs.c:161:1 [350.429636] index 1048096 is out of range for type 'page *[16]' [350.429650] CPU: 0 PID: 9 Comm: kworker/u8:1 Not tainted 5.16.0-rc4 #1 [350.429652] Hardware name: QEMU Standard PC (Q35 + ICH9, 2009), BIOS 1.13.0-1ubuntu1.1 04/01/2014 [350.429653] Workqueue: btrfs-endio-meta btrfs_work_helper [btrfs] [350.429772] Call Trace: [350.429774] [350.429776] dump_stack_lvl+0x47/0x5c [350.429780] ubsan_epilogue+0x5/0x50 [350.429786] __ubsan_handle_out_of_bounds+0x66/0x70 [350.429791] btrfs_get_16+0xfd/0x120 [btrfs] [350.429832] check_leaf+0x754/0x1a40 [btrfs] [350.429874] ? filemap_read+0x34a/0x390 [350.429878] ? load_balance+0x175/0xfc0 [350.429881] validate_extent_buffer+0x244/0x310 [btrfs] [350.429911] btrfs_validate_metadata_buffer+0xf8/0x100 [btrfs] [350.429935] end_bio_extent_readpage+0x3af/0x850 [btrfs] [350.429969] ? newidle_balance+0x259/0x480 [350.429972] end_workqueue_fn+0x29/0x40 [btrfs] [350.429995] btrfs_work_helper+0x71/0x330 [btrfs] [350.430030] ? __schedule+0x2fb/0xa40 [350.430033] process_one_work+0x1f6/0x400 [350.430035] ? process_one_work+0x400/0x400 [350.430036] worker_thread+0x2d/0x3d0 [350.430037] ? process_one_work+0x400/0x400 [350.430038] kthread+0x165/0x190 [350.430041] ? 
set_kthread_struct+0x40/0x40 [350.430043] ret_from_fork+0x1f/0x30 [350.430047] [350.430077] BTRFS warning (device loop0): bad eb member start: ptr 0xffe20f4e start 20975616 member offset 4293005178 size 2 check_leaf() is checking the leaf: corrupt leaf: root=4 block=29396992 slot=1, bad key order, prev (16140901064495857664 1 0) current (1 204 12582912) leaf 29396992 items 6 free space 3565 generation 6 owner DEV_TREE leaf 29396992 flags 0x1(WRITTEN) backref revision 1 fs uuid a62e00e8-e94e-4200-8217-12444de93c2e chunk uuid cecbd0f7-9ca0-441e-ae9f-f782f9732bd8 item 0 key (16140901064495857664 INODE_ITEM 0) itemoff 3955 itemsize 40 generation 0 transid 0 size 0 nbytes 17592186044416 block group 0 mode 52667 links 33 uid 0 gid 2104132511 rdev 94223634821136 sequence 100305 flags 0x2409000(none) atime 0.0 (1970-01-01 08:00:00) ctime 2973280098083405823.4294967295 (-269783007-01-01 21:37:03) mtime 18446744071572723616.4026825121 (1902-04-16 12:40:00) otime 9249929404488876031.4294967295 (622322949-04-16 04:25:58) item 1 key (1 DEV_EXTENT 12582912) itemoff 3907 itemsize 48 dev extent chunk_tree 3 chunk_objectid 256 chunk_offset 12582912 length 8388608 chunk_tree_uuid cecbd0f7-9ca0-441e-ae9f-f782f9732bd8 The corrupted leaf of device tree has an inode item. The leaf passed checksum and others checks in validate_extent_buffer until check_leaf_item(). Because of the key type BTRFS_INODE_ITEM, check_inode_item() is called even we are in the device tree. Since the item offset + sizeof(struct btrfs_inode_item) > eb->len, out-of-bounds access is triggered. The item end vs leaf boundary check has been done before check_leaf_item(), so fix it by checking item size in check_inode_item() before access of the inode item in extent buffer. Other check functions except check_dev_item() in check_leaf_item() have their item size checks. The commit for check_dev_item() is followed. No regression observed during running fstests. Bugzilla: https://bugzilla.kernel.org/show_bug.cgi?id=215299 CC: stable@vger.kernel.org # 5.10+ CC: Wenqing Liu Signed-off-by: Su Yue Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index 72e1c942197d..a819eb5e264a 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -1007,6 +1007,7 @@ static int check_inode_item(struct extent_buffer *leaf, struct btrfs_inode_item *iitem; u64 super_gen = btrfs_super_generation(fs_info->super_copy); u32 valid_mask = (S_IFMT | S_ISUID | S_ISGID | S_ISVTX | 0777); + const u32 item_size = btrfs_item_size(leaf, slot); u32 mode; int ret; u32 flags; @@ -1016,6 +1017,12 @@ static int check_inode_item(struct extent_buffer *leaf, if (unlikely(ret < 0)) return ret; + if (unlikely(item_size != sizeof(*iitem))) { + generic_err(leaf, slot, "invalid item size: has %u expect %zu", + item_size, sizeof(*iitem)); + return -EUCLEAN; + } + iitem = btrfs_item_ptr(leaf, slot, struct btrfs_inode_item); /* Here we use super block generation + 1 to handle log tree */ -- cgit From ea1d1ca4025ac6c075709f549f9aa036b5b6597d Mon Sep 17 00:00:00 2001 From: Su Yue Date: Fri, 21 Jan 2022 17:33:35 +0800 Subject: btrfs: tree-checker: check item_size for dev_item Check item size before accessing the device item to avoid out of bound access, similar to inode_item check. 
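Both tree-checker patches apply the standard rule for untrusted on-disk metadata: confirm that the declared item length is exactly the size of the structure you are about to overlay before any field access. A generic sketch of that validation step (not btrfs code):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

struct disk_record {
	uint64_t id;
	uint32_t flags;
} __attribute__((packed));

/* Copies one record out of an untrusted buffer only if the slot length
 * matches; rejecting first is what prevents the out-of-bounds access. */
int parse_record(const uint8_t *item, size_t item_size,
		 struct disk_record *out)
{
	if (item_size != sizeof(*out))
		return -1;
	memcpy(out, item, sizeof(*out));
	return 0;
}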
Signed-off-by: Su Yue Reviewed-by: David Sterba Signed-off-by: David Sterba --- fs/btrfs/tree-checker.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/fs/btrfs/tree-checker.c b/fs/btrfs/tree-checker.c index a819eb5e264a..9fd145f1c4bc 100644 --- a/fs/btrfs/tree-checker.c +++ b/fs/btrfs/tree-checker.c @@ -965,6 +965,7 @@ static int check_dev_item(struct extent_buffer *leaf, struct btrfs_key *key, int slot) { struct btrfs_dev_item *ditem; + const u32 item_size = btrfs_item_size(leaf, slot); if (unlikely(key->objectid != BTRFS_DEV_ITEMS_OBJECTID)) { dev_item_err(leaf, slot, @@ -972,6 +973,13 @@ static int check_dev_item(struct extent_buffer *leaf, key->objectid, BTRFS_DEV_ITEMS_OBJECTID); return -EUCLEAN; } + + if (unlikely(item_size != sizeof(*ditem))) { + dev_item_err(leaf, slot, "invalid item size: has %u expect %zu", + item_size, sizeof(*ditem)); + return -EUCLEAN; + } + ditem = btrfs_item_ptr(leaf, slot, struct btrfs_dev_item); if (unlikely(btrfs_device_id(leaf, ditem) != key->offset)) { dev_item_err(leaf, slot, -- cgit From 28b21c558a3753171097193b6f6602a94169093a Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Fri, 21 Jan 2022 15:44:39 +0000 Subject: btrfs: fix use-after-free after failure to create a snapshot At ioctl.c:create_snapshot(), we allocate a pending snapshot structure and then attach it to the transaction's list of pending snapshots. After that we call btrfs_commit_transaction(), and if that returns an error we jump to 'fail' label, where we kfree() the pending snapshot structure. This can result in a later use-after-free of the pending snapshot: 1) We allocated the pending snapshot and added it to the transaction's list of pending snapshots; 2) We call btrfs_commit_transaction(), and it fails either at the first call to btrfs_run_delayed_refs() or btrfs_start_dirty_block_groups(). In both cases, we don't abort the transaction and we release our transaction handle. We jump to the 'fail' label and free the pending snapshot structure. We return with the pending snapshot still in the transaction's list; 3) Another task commits the transaction. This time there's no error at all, and then during the transaction commit it accesses a pointer to the pending snapshot structure that the snapshot creation task has already freed, resulting in a user-after-free. This issue could actually be detected by smatch, which produced the following warning: fs/btrfs/ioctl.c:843 create_snapshot() warn: '&pending_snapshot->list' not removed from list So fix this by not having the snapshot creation ioctl directly add the pending snapshot to the transaction's list. Instead add the pending snapshot to the transaction handle, and then at btrfs_commit_transaction() we add the snapshot to the list only when we can guarantee that any error returned after that point will result in a transaction abort, in which case the ioctl code can safely free the pending snapshot and no one can access it anymore. 
CC: stable@vger.kernel.org # 5.10+ Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 5 +---- fs/btrfs/transaction.c | 24 ++++++++++++++++++++++++ fs/btrfs/transaction.h | 2 ++ 3 files changed, 27 insertions(+), 4 deletions(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index eef5b300b9a9..90c11ddff6e5 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -805,10 +805,7 @@ static int create_snapshot(struct btrfs_root *root, struct inode *dir, goto fail; } - spin_lock(&fs_info->trans_lock); - list_add(&pending_snapshot->list, - &trans->transaction->pending_snapshots); - spin_unlock(&fs_info->trans_lock); + trans->pending_snapshot = pending_snapshot; ret = btrfs_commit_transaction(trans); if (ret) diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c index 03de89b45f27..c43bbc7f623e 100644 --- a/fs/btrfs/transaction.c +++ b/fs/btrfs/transaction.c @@ -2000,6 +2000,27 @@ static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info) btrfs_wait_ordered_roots(fs_info, U64_MAX, 0, (u64)-1); } +/* + * Add a pending snapshot associated with the given transaction handle to the + * respective handle. This must be called after the transaction commit started + * and while holding fs_info->trans_lock. + * This serves to guarantee a caller of btrfs_commit_transaction() that it can + * safely free the pending snapshot pointer in case btrfs_commit_transaction() + * returns an error. + */ +static void add_pending_snapshot(struct btrfs_trans_handle *trans) +{ + struct btrfs_transaction *cur_trans = trans->transaction; + + if (!trans->pending_snapshot) + return; + + lockdep_assert_held(&trans->fs_info->trans_lock); + ASSERT(cur_trans->state >= TRANS_STATE_COMMIT_START); + + list_add(&trans->pending_snapshot->list, &cur_trans->pending_snapshots); +} + int btrfs_commit_transaction(struct btrfs_trans_handle *trans) { struct btrfs_fs_info *fs_info = trans->fs_info; @@ -2073,6 +2094,8 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) if (cur_trans->state >= TRANS_STATE_COMMIT_START) { enum btrfs_trans_state want_state = TRANS_STATE_COMPLETED; + add_pending_snapshot(trans); + spin_unlock(&fs_info->trans_lock); refcount_inc(&cur_trans->use_count); @@ -2163,6 +2186,7 @@ int btrfs_commit_transaction(struct btrfs_trans_handle *trans) * COMMIT_DOING so make sure to wait for num_writers to == 1 again. */ spin_lock(&fs_info->trans_lock); + add_pending_snapshot(trans); cur_trans->state = TRANS_STATE_COMMIT_DOING; spin_unlock(&fs_info->trans_lock); wait_event(cur_trans->writer_wait, diff --git a/fs/btrfs/transaction.h b/fs/btrfs/transaction.h index 1852ed9de7fd..9402d8d94484 100644 --- a/fs/btrfs/transaction.h +++ b/fs/btrfs/transaction.h @@ -123,6 +123,8 @@ struct btrfs_trans_handle { struct btrfs_transaction *transaction; struct btrfs_block_rsv *block_rsv; struct btrfs_block_rsv *orig_rsv; + /* Set by a task that wants to create a snapshot. */ + struct btrfs_pending_snapshot *pending_snapshot; refcount_t use_count; unsigned int type; /* -- cgit From 37b4599547e324589e011c20f74b021d6d25cb7f Mon Sep 17 00:00:00 2001 From: Tom Rix Date: Fri, 21 Jan 2022 05:45:22 -0800 Subject: btrfs: fix use of uninitialized variable at rm device ioctl Clang static analysis reports this problem ioctl.c:3333:8: warning: 3rd function call argument is an uninitialized value ret = exclop_start_or_cancel_reloc(fs_info, cancel is only set in one branch of an if-check and is always used. So initialize to false. 
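The defect class here is a conditionally-assigned variable that is read unconditionally. A tiny illustration of why the initializer matters (hypothetical function, not the ioctl code):

#include <stdbool.h>
#include <stdio.h>

int choose_cancel(int op)
{
	bool cancel = false;	/* without this initializer, the op != 1 path
				 * would read an indeterminate value below */

	if (op == 1)
		cancel = true;

	/* read on every path, so it must be defined on every path */
	printf("cancel=%d\n", (int)cancel);
	return 0;
}

int main(void)
{
	return choose_cancel(0);
}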
Fixes: 1a15eb724aae ("btrfs: use btrfs_get_dev_args_from_path in dev removal ioctls") Reviewed-by: Filipe Manana Reviewed-by: Anand Jain Signed-off-by: Tom Rix Signed-off-by: David Sterba --- fs/btrfs/ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index 90c11ddff6e5..925522756e28 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -3353,7 +3353,7 @@ static long btrfs_ioctl_rm_dev(struct file *file, void __user *arg) struct block_device *bdev = NULL; fmode_t mode; int ret; - bool cancel; + bool cancel = false; if (!capable(CAP_SYS_ADMIN)) return -EPERM; -- cgit From 40cdc509877bacb438213b83c7541c5e24a1d9ec Mon Sep 17 00:00:00 2001 From: Filipe Manana Date: Tue, 18 Jan 2022 13:39:34 +0000 Subject: btrfs: skip reserved bytes warning on unmount after log cleanup failure After the recent changes made by commit c2e39305299f01 ("btrfs: clear extent buffer uptodate when we fail to write it") and its followup fix, commit 651740a5024117 ("btrfs: check WRITE_ERR when trying to read an extent buffer"), we can now end up not cleaning up space reservations of log tree extent buffers after a transaction abort happens, as well as not cleaning up still dirty extent buffers. This happens because if writeback for a log tree extent buffer failed, then we have cleared the bit EXTENT_BUFFER_UPTODATE from the extent buffer and we have also set the bit EXTENT_BUFFER_WRITE_ERR on it. Later on, when trying to free the log tree with free_log_tree(), which iterates over the tree, we can end up getting an -EIO error when trying to read a node or a leaf, since read_extent_buffer_pages() returns -EIO if an extent buffer does not have EXTENT_BUFFER_UPTODATE set and has the EXTENT_BUFFER_WRITE_ERR bit set. Getting that -EIO means that we return immediately as we can not iterate over the entire tree. In that case we never update the reserved space for an extent buffer in the respective block group and space_info object. When this happens we get the following traces when unmounting the fs: [174957.284509] BTRFS: error (device dm-0) in cleanup_transaction:1913: errno=-5 IO failure [174957.286497] BTRFS: error (device dm-0) in free_log_tree:3420: errno=-5 IO failure [174957.399379] ------------[ cut here ]------------ [174957.402497] WARNING: CPU: 2 PID: 3206883 at fs/btrfs/block-group.c:127 btrfs_put_block_group+0x77/0xb0 [btrfs] [174957.407523] Modules linked in: btrfs overlay dm_zero (...) [174957.424917] CPU: 2 PID: 3206883 Comm: umount Tainted: G W 5.16.0-rc5-btrfs-next-109 #1 [174957.426689] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014 [174957.428716] RIP: 0010:btrfs_put_block_group+0x77/0xb0 [btrfs] [174957.429717] Code: 21 48 8b bd (...) 
[174957.432867] RSP: 0018:ffffb70d41cffdd0 EFLAGS: 00010206 [174957.433632] RAX: 0000000000000001 RBX: ffff8b09c3848000 RCX: ffff8b0758edd1c8 [174957.434689] RDX: 0000000000000001 RSI: ffffffffc0b467e7 RDI: ffff8b0758edd000 [174957.436068] RBP: ffff8b0758edd000 R08: 0000000000000000 R09: 0000000000000000 [174957.437114] R10: 0000000000000246 R11: 0000000000000000 R12: ffff8b09c3848148 [174957.438140] R13: ffff8b09c3848198 R14: ffff8b0758edd188 R15: dead000000000100 [174957.439317] FS: 00007f328fb82800(0000) GS:ffff8b0a2d200000(0000) knlGS:0000000000000000 [174957.440402] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [174957.441164] CR2: 00007fff13563e98 CR3: 0000000404f4e005 CR4: 0000000000370ee0 [174957.442117] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [174957.443076] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [174957.443948] Call Trace: [174957.444264] [174957.444538] btrfs_free_block_groups+0x255/0x3c0 [btrfs] [174957.445238] close_ctree+0x301/0x357 [btrfs] [174957.445803] ? call_rcu+0x16c/0x290 [174957.446250] generic_shutdown_super+0x74/0x120 [174957.446832] kill_anon_super+0x14/0x30 [174957.447305] btrfs_kill_super+0x12/0x20 [btrfs] [174957.447890] deactivate_locked_super+0x31/0xa0 [174957.448440] cleanup_mnt+0x147/0x1c0 [174957.448888] task_work_run+0x5c/0xa0 [174957.449336] exit_to_user_mode_prepare+0x1e5/0x1f0 [174957.449934] syscall_exit_to_user_mode+0x16/0x40 [174957.450512] do_syscall_64+0x48/0xc0 [174957.450980] entry_SYSCALL_64_after_hwframe+0x44/0xae [174957.451605] RIP: 0033:0x7f328fdc4a97 [174957.452059] Code: 03 0c 00 f7 (...) [174957.454320] RSP: 002b:00007fff13564ec8 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6 [174957.455262] RAX: 0000000000000000 RBX: 00007f328feea264 RCX: 00007f328fdc4a97 [174957.456131] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000560b8ae51dd0 [174957.457118] RBP: 0000560b8ae51ba0 R08: 0000000000000000 R09: 00007fff13563c40 [174957.458005] R10: 00007f328fe49fc0 R11: 0000000000000246 R12: 0000000000000000 [174957.459113] R13: 0000560b8ae51dd0 R14: 0000560b8ae51cb0 R15: 0000000000000000 [174957.460193] [174957.460534] irq event stamp: 0 [174957.461003] hardirqs last enabled at (0): [<0000000000000000>] 0x0 [174957.461947] hardirqs last disabled at (0): [] copy_process+0x934/0x2040 [174957.463147] softirqs last enabled at (0): [] copy_process+0x934/0x2040 [174957.465116] softirqs last disabled at (0): [<0000000000000000>] 0x0 [174957.466323] ---[ end trace bc7ee0c490bce3af ]--- [174957.467282] ------------[ cut here ]------------ [174957.468184] WARNING: CPU: 2 PID: 3206883 at fs/btrfs/block-group.c:3976 btrfs_free_block_groups+0x330/0x3c0 [btrfs] [174957.470066] Modules linked in: btrfs overlay dm_zero (...) [174957.483137] CPU: 2 PID: 3206883 Comm: umount Tainted: G W 5.16.0-rc5-btrfs-next-109 #1 [174957.484691] Hardware name: QEMU Standard PC (i440FX + PIIX, 1996), BIOS rel-1.14.0-0-g155821a1990b-prebuilt.qemu.org 04/01/2014 [174957.486853] RIP: 0010:btrfs_free_block_groups+0x330/0x3c0 [btrfs] [174957.488050] Code: 00 00 00 ad de (...) 
[174957.491479] RSP: 0018:ffffb70d41cffde0 EFLAGS: 00010206 [174957.492520] RAX: ffff8b08d79310b0 RBX: ffff8b09c3848000 RCX: 0000000000000000 [174957.493868] RDX: 0000000000000001 RSI: fffff443055ee600 RDI: ffffffffb1131846 [174957.495183] RBP: ffff8b08d79310b0 R08: 0000000000000000 R09: 0000000000000000 [174957.496580] R10: 0000000000000001 R11: 0000000000000000 R12: ffff8b08d7931000 [174957.498027] R13: ffff8b09c38492b0 R14: dead000000000122 R15: dead000000000100 [174957.499438] FS: 00007f328fb82800(0000) GS:ffff8b0a2d200000(0000) knlGS:0000000000000000 [174957.500990] CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 [174957.502117] CR2: 00007fff13563e98 CR3: 0000000404f4e005 CR4: 0000000000370ee0 [174957.503513] DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 [174957.504864] DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 [174957.506167] Call Trace: [174957.506654] [174957.507047] close_ctree+0x301/0x357 [btrfs] [174957.507867] ? call_rcu+0x16c/0x290 [174957.508567] generic_shutdown_super+0x74/0x120 [174957.509447] kill_anon_super+0x14/0x30 [174957.510194] btrfs_kill_super+0x12/0x20 [btrfs] [174957.511123] deactivate_locked_super+0x31/0xa0 [174957.511976] cleanup_mnt+0x147/0x1c0 [174957.512610] task_work_run+0x5c/0xa0 [174957.513309] exit_to_user_mode_prepare+0x1e5/0x1f0 [174957.514231] syscall_exit_to_user_mode+0x16/0x40 [174957.515069] do_syscall_64+0x48/0xc0 [174957.515718] entry_SYSCALL_64_after_hwframe+0x44/0xae [174957.516688] RIP: 0033:0x7f328fdc4a97 [174957.517413] Code: 03 0c 00 f7 d8 (...) [174957.521052] RSP: 002b:00007fff13564ec8 EFLAGS: 00000246 ORIG_RAX: 00000000000000a6 [174957.522514] RAX: 0000000000000000 RBX: 00007f328feea264 RCX: 00007f328fdc4a97 [174957.523950] RDX: 0000000000000000 RSI: 0000000000000000 RDI: 0000560b8ae51dd0 [174957.525375] RBP: 0000560b8ae51ba0 R08: 0000000000000000 R09: 00007fff13563c40 [174957.526763] R10: 00007f328fe49fc0 R11: 0000000000000246 R12: 0000000000000000 [174957.528058] R13: 0000560b8ae51dd0 R14: 0000560b8ae51cb0 R15: 0000000000000000 [174957.529404] [174957.529843] irq event stamp: 0 [174957.530256] hardirqs last enabled at (0): [<0000000000000000>] 0x0 [174957.531061] hardirqs last disabled at (0): [] copy_process+0x934/0x2040 [174957.532075] softirqs last enabled at (0): [] copy_process+0x934/0x2040 [174957.533083] softirqs last disabled at (0): [<0000000000000000>] 0x0 [174957.533865] ---[ end trace bc7ee0c490bce3b0 ]--- [174957.534452] BTRFS info (device dm-0): space_info 4 has 1070841856 free, is not full [174957.535404] BTRFS info (device dm-0): space_info total=1073741824, used=2785280, pinned=0, reserved=49152, may_use=0, readonly=65536 zone_unusable=0 [174957.537029] BTRFS info (device dm-0): global_block_rsv: size 0 reserved 0 [174957.537859] BTRFS info (device dm-0): trans_block_rsv: size 0 reserved 0 [174957.538697] BTRFS info (device dm-0): chunk_block_rsv: size 0 reserved 0 [174957.539552] BTRFS info (device dm-0): delayed_block_rsv: size 0 reserved 0 [174957.540403] BTRFS info (device dm-0): delayed_refs_rsv: size 0 reserved 0 This also means that in case we have log tree extent buffers that are still dirty, we can end up not cleaning them up in case we find an extent buffer with EXTENT_BUFFER_WRITE_ERR set on it, as in that case we have no way for iterating over the rest of the tree. This issue is very often triggered with test cases generic/475 and generic/648 from fstests. 
The issue could almost be fixed by iterating over the io tree attached to each log root which keeps tracks of the range of allocated extent buffers, log_root->dirty_log_pages, however that does not work and has some inconveniences: 1) After we sync the log, we clear the range of the extent buffers from the io tree, so we can't find them after writeback. We could keep the ranges in the io tree, with a separate bit to signal they represent extent buffers already written, but that means we need to hold into more memory until the transaction commits. How much more memory is used depends a lot on whether we are able to allocate contiguous extent buffers on disk (and how often) for a log tree - if we are able to, then a single extent state record can represent multiple extent buffers, otherwise we need multiple extent state record structures to track each extent buffer. In fact, my earlier approach did that: https://lore.kernel.org/linux-btrfs/3aae7c6728257c7ce2279d6660ee2797e5e34bbd.1641300250.git.fdmanana@suse.com/ However that can cause a very significant negative impact on performance, not only due to the extra memory usage but also because we get a larger and deeper dirty_log_pages io tree. We got a report that, on beefy machines at least, we can get such performance drop with fsmark for example: https://lore.kernel.org/linux-btrfs/20220117082426.GE32491@xsang-OptiPlex-9020/ 2) We would be doing it only to deal with an unexpected and exceptional case, which is basically failure to read an extent buffer from disk due to IO failures. On a healthy system we don't expect transaction aborts to happen after all; 3) Instead of relying on iterating the log tree or tracking the ranges of extent buffers in the dirty_log_pages io tree, using the radix tree that tracks extent buffers (fs_info->buffer_radix) to find all log tree extent buffers is not reliable either, because after writeback of an extent buffer it can be evicted from memory by the release page callback of the btree inode (btree_releasepage()). Since there's no way to be able to properly cleanup a log tree without being able to read its extent buffers from disk and without using more memory to track the logical ranges of the allocated extent buffers do the following: 1) When we fail to cleanup a log tree, setup a flag that indicates that failure; 2) Trigger writeback of all log tree extent buffers that are still dirty, and wait for the writeback to complete. This is just to cleanup their state, page states, page leaks, etc; 3) When unmounting the fs, ignore if the number of bytes reserved in a block group and in a space_info is not 0 if, and only if, we failed to cleanup a log tree. Also ignore only for metadata block groups and the metadata space_info object. This is far from a perfect solution, but it serves to silence test failures such as those from generic/475 and generic/648. However having a non-zero value for the reserved bytes counters on unmount after a transaction abort, is not such a terrible thing and it's completely harmless, it does not affect the filesystem integrity in any way. 
Signed-off-by: Filipe Manana Signed-off-by: David Sterba --- fs/btrfs/block-group.c | 26 ++++++++++++++++++++++++-- fs/btrfs/ctree.h | 6 ++++++ fs/btrfs/tree-log.c | 23 +++++++++++++++++++++++ 3 files changed, 53 insertions(+), 2 deletions(-) diff --git a/fs/btrfs/block-group.c b/fs/btrfs/block-group.c index 68feabc83a27..8202ad6aa131 100644 --- a/fs/btrfs/block-group.c +++ b/fs/btrfs/block-group.c @@ -124,7 +124,16 @@ void btrfs_put_block_group(struct btrfs_block_group *cache) { if (refcount_dec_and_test(&cache->refs)) { WARN_ON(cache->pinned > 0); - WARN_ON(cache->reserved > 0); + /* + * If there was a failure to cleanup a log tree, very likely due + * to an IO failure on a writeback attempt of one or more of its + * extent buffers, we could not do proper (and cheap) unaccounting + * of their reserved space, so don't warn on reserved > 0 in that + * case. + */ + if (!(cache->flags & BTRFS_BLOCK_GROUP_METADATA) || + !BTRFS_FS_LOG_CLEANUP_ERROR(cache->fs_info)) + WARN_ON(cache->reserved > 0); /* * A block_group shouldn't be on the discard_list anymore. @@ -3987,9 +3996,22 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info) * important and indicates a real bug if this happens. */ if (WARN_ON(space_info->bytes_pinned > 0 || - space_info->bytes_reserved > 0 || space_info->bytes_may_use > 0)) btrfs_dump_space_info(info, space_info, 0, 0); + + /* + * If there was a failure to cleanup a log tree, very likely due + * to an IO failure on a writeback attempt of one or more of its + * extent buffers, we could not do proper (and cheap) unaccounting + * of their reserved space, so don't warn on bytes_reserved > 0 in + * that case. + */ + if (!(space_info->flags & BTRFS_BLOCK_GROUP_METADATA) || + !BTRFS_FS_LOG_CLEANUP_ERROR(info)) { + if (WARN_ON(space_info->bytes_reserved > 0)) + btrfs_dump_space_info(info, space_info, 0, 0); + } + WARN_ON(space_info->reclaim_size > 0); list_del(&space_info->list); btrfs_sysfs_remove_space_info(space_info); diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h index b4a9b1c58d22..8992e0096163 100644 --- a/fs/btrfs/ctree.h +++ b/fs/btrfs/ctree.h @@ -145,6 +145,9 @@ enum { BTRFS_FS_STATE_DUMMY_FS_INFO, BTRFS_FS_STATE_NO_CSUMS, + + /* Indicates there was an error cleaning up a log tree. */ + BTRFS_FS_STATE_LOG_CLEANUP_ERROR, }; #define BTRFS_BACKREF_REV_MAX 256 @@ -3593,6 +3596,9 @@ do { \ #define BTRFS_FS_ERROR(fs_info) (unlikely(test_bit(BTRFS_FS_STATE_ERROR, \ &(fs_info)->fs_state))) +#define BTRFS_FS_LOG_CLEANUP_ERROR(fs_info) \ + (unlikely(test_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, \ + &(fs_info)->fs_state))) __printf(5, 6) __cold diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index c1ddbe800897..3ee014c06b82 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -3414,6 +3414,29 @@ static void free_log_tree(struct btrfs_trans_handle *trans, if (log->node) { ret = walk_log_tree(trans, log, &wc); if (ret) { + /* + * We weren't able to traverse the entire log tree, the + * typical scenario is getting an -EIO when reading an + * extent buffer of the tree, due to a previous writeback + * failure of it. + */ + set_bit(BTRFS_FS_STATE_LOG_CLEANUP_ERROR, + &log->fs_info->fs_state); + + /* + * Some extent buffers of the log tree may still be dirty + * and not yet written back to storage, because we may + * have updates to a log tree without syncing a log tree, + * such as during rename and link operations. So flush + * them out and wait for their writeback to complete, so + * that we properly cleanup their state and pages. 
+ */ + btrfs_write_marked_extents(log->fs_info, + &log->dirty_log_pages, + EXTENT_DIRTY | EXTENT_NEW); + btrfs_wait_tree_log_extents(log, + EXTENT_DIRTY | EXTENT_NEW); + if (trans) btrfs_abort_transaction(trans, ret); else -- cgit From f83a96e5f033fbbd21764705cb9c04234b96218e Mon Sep 17 00:00:00 2001 From: Benjamin Gaignard Date: Mon, 31 Jan 2022 15:17:08 +0100 Subject: spi: mediatek: Avoid NULL pointer crash in interrupt In some case, like after a transfer timeout, master->cur_msg pointer is NULL which led to a kernel crash when trying to use master->cur_msg->spi. mtk_spi_can_dma(), pointed by master->can_dma, doesn't use this parameter avoid the problem by setting NULL as second parameter. Fixes: a568231f46322 ("spi: mediatek: Add spi bus for Mediatek MT8173") Signed-off-by: Benjamin Gaignard Link: https://lore.kernel.org/r/20220131141708.888710-1-benjamin.gaignard@collabora.com Signed-off-by: Mark Brown --- drivers/spi/spi-mt65xx.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/spi/spi-mt65xx.c b/drivers/spi/spi-mt65xx.c index a15de10ee286..753bd313e6fd 100644 --- a/drivers/spi/spi-mt65xx.c +++ b/drivers/spi/spi-mt65xx.c @@ -624,7 +624,7 @@ static irqreturn_t mtk_spi_interrupt(int irq, void *dev_id) else mdata->state = MTK_SPI_IDLE; - if (!master->can_dma(master, master->cur_msg->spi, trans)) { + if (!master->can_dma(master, NULL, trans)) { if (trans->rx_buf) { cnt = mdata->xfer_len / 4; ioread32_rep(mdata->base + SPI_RX_DATA_REG, -- cgit From 46f47807738441e354873546dde0b000106c068a Mon Sep 17 00:00:00 2001 From: Yongzhi Liu Date: Sun, 23 Jan 2022 23:20:35 -0800 Subject: drm/bridge: Add missing pm_runtime_put_sync pm_runtime_get_sync() will increase the rumtime PM counter even when it returns an error. Thus a pairing decrement is needed to prevent refcount leak. Fix this by replacing this API with pm_runtime_resume_and_get(), which will not change the runtime PM counter on error. Besides, a matching decrement is needed on the error handling path to keep the counter balanced. 
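The shape of the fix is the usual runtime-PM pattern for setup paths: take the reference with pm_runtime_resume_and_get(), which does not bump the usage counter on failure, and make every later failure branch drop the reference that was successfully taken. A reduced sketch of that shape (simplified; not the nwl-dsi code itself):

#include <linux/pm_runtime.h>
#include <linux/clk.h>

static int example_resume_and_enable(struct device *dev, struct clk *clk)
{
	int ret;

	ret = pm_runtime_resume_and_get(dev);	/* counter untouched on error */
	if (ret < 0)
		return ret;

	ret = clk_prepare_enable(clk);
	if (ret)
		goto err_put;			/* undo the get we did take */

	return 0;

err_put:
	pm_runtime_put_sync(dev);
	return ret;
}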
Signed-off-by: Yongzhi Liu Reviewed-by: Laurent Pinchart Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/1643008835-73961-1-git-send-email-lyz_cs@pku.edu.cn --- drivers/gpu/drm/bridge/nwl-dsi.c | 18 ++++++++++++------ 1 file changed, 12 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index 9282e61dfbf0..30aacd939dc3 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -862,18 +862,19 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, memcpy(&dsi->mode, adjusted_mode, sizeof(dsi->mode)); drm_mode_debug_printmodeline(adjusted_mode); - pm_runtime_get_sync(dev); + if (pm_runtime_resume_and_get(dev) < 0) + return; if (clk_prepare_enable(dsi->lcdif_clk) < 0) - return; + goto runtime_put; if (clk_prepare_enable(dsi->core_clk) < 0) - return; + goto runtime_put; /* Step 1 from DSI reset-out instructions */ ret = reset_control_deassert(dsi->rst_pclk); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert PCLK: %d\n", ret); - return; + goto runtime_put; } /* Step 2 from DSI reset-out instructions */ @@ -883,13 +884,18 @@ nwl_dsi_bridge_mode_set(struct drm_bridge *bridge, ret = reset_control_deassert(dsi->rst_esc); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert ESC: %d\n", ret); - return; + goto runtime_put; } ret = reset_control_deassert(dsi->rst_byte); if (ret < 0) { DRM_DEV_ERROR(dev, "Failed to deassert BYTE: %d\n", ret); - return; + goto runtime_put; } + + return; + +runtime_put: + pm_runtime_put_sync(dev); } static void -- cgit From b54240ad494300ff0994c4539a531727874381f4 Mon Sep 17 00:00:00 2001 From: Vijayanand Jitta Date: Mon, 31 Jan 2022 12:42:35 +0530 Subject: iommu: Fix potential use-after-free during probe Kasan has reported the following use after free on dev->iommu. when a device probe fails and it is in process of freeing dev->iommu in dev_iommu_free function, a deferred_probe_work_func runs in parallel and tries to access dev->iommu->fwspec in of_iommu_configure path thus causing use after free. 
BUG: KASAN: use-after-free in of_iommu_configure+0xb4/0x4a4 Read of size 8 at addr ffffff87a2f1acb8 by task kworker/u16:2/153 Workqueue: events_unbound deferred_probe_work_func Call trace: dump_backtrace+0x0/0x33c show_stack+0x18/0x24 dump_stack_lvl+0x16c/0x1e0 print_address_description+0x84/0x39c __kasan_report+0x184/0x308 kasan_report+0x50/0x78 __asan_load8+0xc0/0xc4 of_iommu_configure+0xb4/0x4a4 of_dma_configure_id+0x2fc/0x4d4 platform_dma_configure+0x40/0x5c really_probe+0x1b4/0xb74 driver_probe_device+0x11c/0x228 __device_attach_driver+0x14c/0x304 bus_for_each_drv+0x124/0x1b0 __device_attach+0x25c/0x334 device_initial_probe+0x24/0x34 bus_probe_device+0x78/0x134 deferred_probe_work_func+0x130/0x1a8 process_one_work+0x4c8/0x970 worker_thread+0x5c8/0xaec kthread+0x1f8/0x220 ret_from_fork+0x10/0x18 Allocated by task 1: ____kasan_kmalloc+0xd4/0x114 __kasan_kmalloc+0x10/0x1c kmem_cache_alloc_trace+0xe4/0x3d4 __iommu_probe_device+0x90/0x394 probe_iommu_group+0x70/0x9c bus_for_each_dev+0x11c/0x19c bus_iommu_probe+0xb8/0x7d4 bus_set_iommu+0xcc/0x13c arm_smmu_bus_init+0x44/0x130 [arm_smmu] arm_smmu_device_probe+0xb88/0xc54 [arm_smmu] platform_drv_probe+0xe4/0x13c really_probe+0x2c8/0xb74 driver_probe_device+0x11c/0x228 device_driver_attach+0xf0/0x16c __driver_attach+0x80/0x320 bus_for_each_dev+0x11c/0x19c driver_attach+0x38/0x48 bus_add_driver+0x1dc/0x3a4 driver_register+0x18c/0x244 __platform_driver_register+0x88/0x9c init_module+0x64/0xff4 [arm_smmu] do_one_initcall+0x17c/0x2f0 do_init_module+0xe8/0x378 load_module+0x3f80/0x4a40 __se_sys_finit_module+0x1a0/0x1e4 __arm64_sys_finit_module+0x44/0x58 el0_svc_common+0x100/0x264 do_el0_svc+0x38/0xa4 el0_svc+0x20/0x30 el0_sync_handler+0x68/0xac el0_sync+0x160/0x180 Freed by task 1: kasan_set_track+0x4c/0x84 kasan_set_free_info+0x28/0x4c ____kasan_slab_free+0x120/0x15c __kasan_slab_free+0x18/0x28 slab_free_freelist_hook+0x204/0x2fc kfree+0xfc/0x3a4 __iommu_probe_device+0x284/0x394 probe_iommu_group+0x70/0x9c bus_for_each_dev+0x11c/0x19c bus_iommu_probe+0xb8/0x7d4 bus_set_iommu+0xcc/0x13c arm_smmu_bus_init+0x44/0x130 [arm_smmu] arm_smmu_device_probe+0xb88/0xc54 [arm_smmu] platform_drv_probe+0xe4/0x13c really_probe+0x2c8/0xb74 driver_probe_device+0x11c/0x228 device_driver_attach+0xf0/0x16c __driver_attach+0x80/0x320 bus_for_each_dev+0x11c/0x19c driver_attach+0x38/0x48 bus_add_driver+0x1dc/0x3a4 driver_register+0x18c/0x244 __platform_driver_register+0x88/0x9c init_module+0x64/0xff4 [arm_smmu] do_one_initcall+0x17c/0x2f0 do_init_module+0xe8/0x378 load_module+0x3f80/0x4a40 __se_sys_finit_module+0x1a0/0x1e4 __arm64_sys_finit_module+0x44/0x58 el0_svc_common+0x100/0x264 do_el0_svc+0x38/0xa4 el0_svc+0x20/0x30 el0_sync_handler+0x68/0xac el0_sync+0x160/0x180 Fix this by setting dev->iommu to NULL first and then freeing dev_iommu structure in dev_iommu_free function. 
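The essence of the change is to unpublish before freeing: clear the pointer other contexts use to reach the object, then free it, so a late reader that checks for NULL sees nothing instead of freed memory (the readers must still check for NULL and be ordered against the free for this to be fully safe). A reduced sketch of that detach-then-free order (illustrative structures, not the iommu ones):

#include <linux/slab.h>

struct payload {
	void *inner;
};

/* A racing reader that does "if (*holder) use(*holder)" now observes NULL
 * rather than a pointer into freed memory. */
static void payload_free(struct payload **holder)
{
	struct payload *p = *holder;

	*holder = NULL;		/* 1. unpublish the pointer first       */
	kfree(p->inner);	/* 2. then tear down what it points to  */
	kfree(p);		/* 3. and free the object itself last   */
}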
Suggested-by: Robin Murphy Signed-off-by: Vijayanand Jitta Link: https://lore.kernel.org/r/1643613155-20215-1-git-send-email-quic_vjitta@quicinc.com Signed-off-by: Joerg Roedel --- drivers/iommu/iommu.c | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 8b86406b7162..3632bf8b4031 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -207,9 +207,14 @@ static struct dev_iommu *dev_iommu_get(struct device *dev) static void dev_iommu_free(struct device *dev) { - iommu_fwspec_free(dev); - kfree(dev->iommu); + struct dev_iommu *param = dev->iommu; + dev->iommu = NULL; + if (param->fwspec) { + fwnode_handle_put(param->fwspec->iommu_fwnode); + kfree(param->fwspec); + } + kfree(param); } static int __iommu_probe_device(struct device *dev, struct list_head *group_list) -- cgit From 30209b93177a75843673de92771716c941c20ef5 Mon Sep 17 00:00:00 2001 From: John Garry Date: Fri, 28 Jan 2022 18:44:33 +0800 Subject: iommu: Fix some W=1 warnings The code is mostly free of W=1 warning, so fix the following: drivers/iommu/iommu.c:996: warning: expecting prototype for iommu_group_for_each_dev(). Prototype was for __iommu_group_for_each_dev() instead drivers/iommu/iommu.c:3048: warning: Function parameter or member 'drvdata' not described in 'iommu_sva_bind_device' drivers/iommu/ioasid.c:354: warning: Function parameter or member 'ioasid' not described in 'ioasid_get' drivers/iommu/omap-iommu.c:1098: warning: expecting prototype for omap_iommu_suspend_prepare(). Prototype was for omap_iommu_prepare() instead Signed-off-by: John Garry Reviewed-by: Robin Murphy Link: https://lore.kernel.org/r/1643366673-26803-1-git-send-email-john.garry@huawei.com Signed-off-by: Joerg Roedel --- drivers/iommu/ioasid.c | 1 + drivers/iommu/iommu.c | 24 ++++++++++++------------ drivers/iommu/omap-iommu.c | 2 +- 3 files changed, 14 insertions(+), 13 deletions(-) diff --git a/drivers/iommu/ioasid.c b/drivers/iommu/ioasid.c index 50ee27bbd04e..06fee7416816 100644 --- a/drivers/iommu/ioasid.c +++ b/drivers/iommu/ioasid.c @@ -349,6 +349,7 @@ EXPORT_SYMBOL_GPL(ioasid_alloc); /** * ioasid_get - obtain a reference to the IOASID + * @ioasid: the ID to get */ void ioasid_get(ioasid_t ioasid) { diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c index 3632bf8b4031..107dcf5938d6 100644 --- a/drivers/iommu/iommu.c +++ b/drivers/iommu/iommu.c @@ -985,17 +985,6 @@ static int iommu_group_device_count(struct iommu_group *group) return ret; } -/** - * iommu_group_for_each_dev - iterate over each device in the group - * @group: the group - * @data: caller opaque data to be passed to callback function - * @fn: caller supplied callback function - * - * This function is called by group users to iterate over group devices. - * Callers should hold a reference count to the group during callback. - * The group->mutex is held across callbacks, which will block calls to - * iommu_group_add/remove_device. - */ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)) { @@ -1010,7 +999,17 @@ static int __iommu_group_for_each_dev(struct iommu_group *group, void *data, return ret; } - +/** + * iommu_group_for_each_dev - iterate over each device in the group + * @group: the group + * @data: caller opaque data to be passed to callback function + * @fn: caller supplied callback function + * + * This function is called by group users to iterate over group devices. 
+ * Callers should hold a reference count to the group during callback. + * The group->mutex is held across callbacks, which will block calls to + * iommu_group_add/remove_device. + */ int iommu_group_for_each_dev(struct iommu_group *group, void *data, int (*fn)(struct device *, void *)) { @@ -3037,6 +3036,7 @@ EXPORT_SYMBOL_GPL(iommu_aux_get_pasid); * iommu_sva_bind_device() - Bind a process address space to a device * @dev: the device * @mm: the mm to bind, caller must hold a reference to it + * @drvdata: opaque data pointer to pass to bind callback * * Create a bond between device and address space, allowing the device to access * the mm using the returned PASID. If a bond already exists between @device and diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c index 91749654fd49..980e4af3f06b 100644 --- a/drivers/iommu/omap-iommu.c +++ b/drivers/iommu/omap-iommu.c @@ -1085,7 +1085,7 @@ static __maybe_unused int omap_iommu_runtime_resume(struct device *dev) } /** - * omap_iommu_suspend_prepare - prepare() dev_pm_ops implementation + * omap_iommu_prepare - prepare() dev_pm_ops implementation * @dev: iommu device * * This function performs the necessary checks to determine if the IOMMU -- cgit From 99e675d473eb8cf2deac1376a0f840222fc1adcf Mon Sep 17 00:00:00 2001 From: Guoqing Jiang Date: Fri, 28 Jan 2022 11:10:02 +0800 Subject: iommu/vt-d: Fix potential memory leak in intel_setup_irq_remapping() After commit e3beca48a45b ("irqdomain/treewide: Keep firmware node unconditionally allocated"). For tear down scenario, fn is only freed after fail to allocate ir_domain, though it also should be freed in case dmar_enable_qi returns error. Besides free fn, irq_domain and ir_msi_domain need to be removed as well if intel_setup_irq_remapping fails to enable queued invalidation. Improve the rewinding path by add out_free_ir_domain and out_free_fwnode lables per Baolu's suggestion. 
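The rewinding path follows the usual goto-unwind ladder: each failure point jumps to a label that releases only what was set up before it, in reverse order of acquisition. Here is a small standalone C sketch of the pattern, using placeholder heap allocations rather than the driver's irq domains and fwnode:

    #include <stdlib.h>

    static int setup_three_resources(void)
    {
        void *a, *b, *c;

        a = malloc(32);
        if (!a)
            return -1;

        b = malloc(32);
        if (!b)
            goto out_free_a;

        c = malloc(32);
        if (!c)
            goto out_free_b;

        /* success path: in real code ownership is handed off here */
        free(c);
        free(b);
        free(a);
        return 0;

    out_free_b:
        free(b);
    out_free_a:
        free(a);
        return -1;
    }

    int main(void)
    {
        return setup_three_resources() ? 1 : 0;
    }

Each additional resource only needs one more label above the existing ones, which is what the added out_free_ir_domain and out_free_fwnode labels do for the failing irq domain allocation and dmar_enable_qi() cases here.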
Fixes: e3beca48a45b ("irqdomain/treewide: Keep firmware node unconditionally allocated") Suggested-by: Lu Baolu Signed-off-by: Guoqing Jiang Link: https://lore.kernel.org/r/20220119063640.16864-1-guoqing.jiang@linux.dev Signed-off-by: Lu Baolu Link: https://lore.kernel.org/r/20220128031002.2219155-3-baolu.lu@linux.intel.com Signed-off-by: Joerg Roedel --- drivers/iommu/intel/irq_remapping.c | 13 ++++++++++--- 1 file changed, 10 insertions(+), 3 deletions(-) diff --git a/drivers/iommu/intel/irq_remapping.c b/drivers/iommu/intel/irq_remapping.c index f912fe45bea2..a67319597884 100644 --- a/drivers/iommu/intel/irq_remapping.c +++ b/drivers/iommu/intel/irq_remapping.c @@ -569,9 +569,8 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) fn, &intel_ir_domain_ops, iommu); if (!iommu->ir_domain) { - irq_domain_free_fwnode(fn); pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id); - goto out_free_bitmap; + goto out_free_fwnode; } iommu->ir_msi_domain = arch_create_remap_msi_irq_domain(iommu->ir_domain, @@ -595,7 +594,7 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) if (dmar_enable_qi(iommu)) { pr_err("Failed to enable queued invalidation\n"); - goto out_free_bitmap; + goto out_free_ir_domain; } } @@ -619,6 +618,14 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu) return 0; +out_free_ir_domain: + if (iommu->ir_msi_domain) + irq_domain_remove(iommu->ir_msi_domain); + iommu->ir_msi_domain = NULL; + irq_domain_remove(iommu->ir_domain); + iommu->ir_domain = NULL; +out_free_fwnode: + irq_domain_free_fwnode(fn); out_free_bitmap: bitmap_free(bitmap); out_free_pages: -- cgit From 9987151a90567785beebcbd5c8ac58d05f254137 Mon Sep 17 00:00:00 2001 From: Christophe JAILLET Date: Sat, 29 Jan 2022 16:06:24 +0100 Subject: drm/bridge: lt9611: Fix an error handling path in lt9611_probe() If lt9611_audio_init() fails, some resources still need to be released before returning an error code. Add the missing goto the error handling path. Fixes: 23278bf54afe ("drm/bridge: Introduce LT9611 DSI to HDMI bridge") Signed-off-by: Christophe JAILLET Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/9c20eb74d42f6d4128e58e3e46aa320482472b77.1643468761.git.christophe.jaillet@wanadoo.fr Reviewed-by: Robert Foss --- drivers/gpu/drm/bridge/lontium-lt9611.c | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/drivers/gpu/drm/bridge/lontium-lt9611.c b/drivers/gpu/drm/bridge/lontium-lt9611.c index feb128a4557d..63df2e8a8abc 100644 --- a/drivers/gpu/drm/bridge/lontium-lt9611.c +++ b/drivers/gpu/drm/bridge/lontium-lt9611.c @@ -1164,7 +1164,11 @@ static int lt9611_probe(struct i2c_client *client, lt9611_enable_hpd_interrupts(lt9611); - return lt9611_audio_init(dev, lt9611); + ret = lt9611_audio_init(dev, lt9611); + if (ret) + goto err_remove_bridge; + + return 0; err_remove_bridge: drm_bridge_remove(<9611->bridge); -- cgit From c26b85ea16365079be8d206b20556a60a0c69ad4 Mon Sep 17 00:00:00 2001 From: Ajish Koshy Date: Mon, 24 Jan 2022 13:52:55 +0530 Subject: scsi: pm80xx: Fix double completion for SATA devices Current code handles completions for SATA devices in mpi_sata_completion() and mpi_sata_event(). However, at the time when any SATA event happens, for almost all the event types, the command is still in the target. It is therefore incorrect to complete the task in sata_event(). There are some events for which we get sata_completions, some need recovery procedure and others abort. 
All the tasks must be completed via sata_completion() path. Removed the task done related code from sata_events(). For tasks where we don't get completions, let top layer call abort() to abort the command post timeout. Link: https://lore.kernel.org/r/20220124082255.86223-1-Ajish.Koshy@microchip.com Acked-by: Jack Wang Co-developed-by: Viswas G Signed-off-by: Viswas G Signed-off-by: Ajish Koshy Signed-off-by: Martin K. Petersen --- drivers/scsi/pm8001/pm8001_hwi.c | 18 ------------------ drivers/scsi/pm8001/pm80xx_hwi.c | 26 -------------------------- 2 files changed, 44 deletions(-) diff --git a/drivers/scsi/pm8001/pm8001_hwi.c b/drivers/scsi/pm8001/pm8001_hwi.c index c814e5071712..9ec310b795c3 100644 --- a/drivers/scsi/pm8001/pm8001_hwi.c +++ b/drivers/scsi/pm8001/pm8001_hwi.c @@ -2692,7 +2692,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) u32 tag = le32_to_cpu(psataPayload->tag); u32 port_id = le32_to_cpu(psataPayload->port_id); u32 dev_id = le32_to_cpu(psataPayload->device_id); - unsigned long flags; if (event) pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); @@ -2724,8 +2723,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; - if (pm8001_dev) - atomic_dec(&pm8001_dev->running_req); break; case IO_XFER_ERROR_BREAK: pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); @@ -2767,7 +2764,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_QUEUE_FULL; - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); return; } break; @@ -2853,20 +2849,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, void *piomb) ts->stat = SAS_OPEN_TO; break; } - spin_lock_irqsave(&t->task_state_lock, flags); - t->task_state_flags &= ~SAS_TASK_STATE_PENDING; - t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; - t->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", - t, event, ts->resp, ts->stat); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - } else { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); - } } /*See the comments for mpi_ssp_completion */ diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index 2530d1365556..c350d2017aa4 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -2821,7 +2821,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, u32 tag = le32_to_cpu(psataPayload->tag); u32 port_id = le32_to_cpu(psataPayload->port_id); u32 dev_id = le32_to_cpu(psataPayload->device_id); - unsigned long flags; if (event) pm8001_dbg(pm8001_ha, FAIL, "SATA EVENT 0x%x\n", event); @@ -2854,8 +2853,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; - if (pm8001_dev) - atomic_dec(&pm8001_dev->running_req); break; case IO_XFER_ERROR_BREAK: pm8001_dbg(pm8001_ha, IO, "IO_XFER_ERROR_BREAK\n"); @@ -2904,11 +2901,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, IO_OPEN_CNX_ERROR_IT_NEXUS_LOSS); ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_QUEUE_FULL; - spin_unlock_irqrestore(&circularQ->oq_lock, - 
circularQ->lock_flags); - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); - spin_lock_irqsave(&circularQ->oq_lock, - circularQ->lock_flags); return; } break; @@ -3008,24 +3000,6 @@ static void mpi_sata_event(struct pm8001_hba_info *pm8001_ha, ts->stat = SAS_OPEN_TO; break; } - spin_lock_irqsave(&t->task_state_lock, flags); - t->task_state_flags &= ~SAS_TASK_STATE_PENDING; - t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; - t->task_state_flags |= SAS_TASK_STATE_DONE; - if (unlikely((t->task_state_flags & SAS_TASK_STATE_ABORTED))) { - spin_unlock_irqrestore(&t->task_state_lock, flags); - pm8001_dbg(pm8001_ha, FAIL, - "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", - t, event, ts->resp, ts->stat); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); - } else { - spin_unlock_irqrestore(&t->task_state_lock, flags); - spin_unlock_irqrestore(&circularQ->oq_lock, - circularQ->lock_flags); - pm8001_ccb_task_free_done(pm8001_ha, t, ccb, tag); - spin_lock_irqsave(&circularQ->oq_lock, - circularQ->lock_flags); - } } /*See the comments for mpi_ssp_completion */ -- cgit From 936bd03405fc83ba039d42bc93ffd4b88418f1d3 Mon Sep 17 00:00:00 2001 From: John Meneghini Date: Mon, 24 Jan 2022 09:51:10 -0500 Subject: scsi: bnx2fc: Make bnx2fc_recv_frame() mp safe Running tests with a debug kernel shows that bnx2fc_recv_frame() is modifying the per_cpu lport stats counters in a non-mpsafe way. Just boot a debug kernel and run the bnx2fc driver with the hardware enabled. [ 1391.699147] BUG: using smp_processor_id() in preemptible [00000000] code: bnx2fc_ [ 1391.699160] caller is bnx2fc_recv_frame+0xbf9/0x1760 [bnx2fc] [ 1391.699174] CPU: 2 PID: 4355 Comm: bnx2fc_l2_threa Kdump: loaded Tainted: G B [ 1391.699180] Hardware name: HP ProLiant DL120 G7, BIOS J01 07/01/2013 [ 1391.699183] Call Trace: [ 1391.699188] dump_stack_lvl+0x57/0x7d [ 1391.699198] check_preemption_disabled+0xc8/0xd0 [ 1391.699205] bnx2fc_recv_frame+0xbf9/0x1760 [bnx2fc] [ 1391.699215] ? do_raw_spin_trylock+0xb5/0x180 [ 1391.699221] ? bnx2fc_npiv_create_vports.isra.0+0x4e0/0x4e0 [bnx2fc] [ 1391.699229] ? bnx2fc_l2_rcv_thread+0xb7/0x3a0 [bnx2fc] [ 1391.699240] bnx2fc_l2_rcv_thread+0x1af/0x3a0 [bnx2fc] [ 1391.699250] ? bnx2fc_ulp_init+0xc0/0xc0 [bnx2fc] [ 1391.699258] kthread+0x364/0x420 [ 1391.699263] ? _raw_spin_unlock_irq+0x24/0x50 [ 1391.699268] ? set_kthread_struct+0x100/0x100 [ 1391.699273] ret_from_fork+0x22/0x30 Restore the old get_cpu/put_cpu code with some modifications to reduce the size of the critical section. Link: https://lore.kernel.org/r/20220124145110.442335-1-jmeneghi@redhat.com Fixes: d576a5e80cd0 ("bnx2fc: Improve stats update mechanism") Tested-by: Guangwu Zhang Acked-by: Saurav Kashyap Signed-off-by: John Meneghini Signed-off-by: Martin K. 
Petersen --- drivers/scsi/bnx2fc/bnx2fc_fcoe.c | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c index 9be273c320e2..a826456c6075 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c +++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c @@ -508,7 +508,8 @@ static int bnx2fc_l2_rcv_thread(void *arg) static void bnx2fc_recv_frame(struct sk_buff *skb) { - u32 fr_len; + u64 crc_err; + u32 fr_len, fr_crc; struct fc_lport *lport; struct fcoe_rcv_info *fr; struct fc_stats *stats; @@ -542,6 +543,11 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) skb_pull(skb, sizeof(struct fcoe_hdr)); fr_len = skb->len - sizeof(struct fcoe_crc_eof); + stats = per_cpu_ptr(lport->stats, get_cpu()); + stats->RxFrames++; + stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + put_cpu(); + fp = (struct fc_frame *)skb; fc_frame_init(fp); fr_dev(fp) = lport; @@ -624,16 +630,15 @@ static void bnx2fc_recv_frame(struct sk_buff *skb) return; } - stats = per_cpu_ptr(lport->stats, smp_processor_id()); - stats->RxFrames++; - stats->RxWords += fr_len / FCOE_WORD_TO_BYTE; + fr_crc = le32_to_cpu(fr_crc(fp)); - if (le32_to_cpu(fr_crc(fp)) != - ~crc32(~0, skb->data, fr_len)) { - if (stats->InvalidCRCCount < 5) + if (unlikely(fr_crc != ~crc32(~0, skb->data, fr_len))) { + stats = per_cpu_ptr(lport->stats, get_cpu()); + crc_err = (stats->InvalidCRCCount++); + put_cpu(); + if (crc_err < 5) printk(KERN_WARNING PFX "dropping frame with " "CRC error\n"); - stats->InvalidCRCCount++; kfree_skb(skb); return; } -- cgit From ec049891b2dc16591813eacaddc476b3d27c8c14 Mon Sep 17 00:00:00 2001 From: Vincenzo Frascino Date: Mon, 31 Jan 2022 11:34:05 +0000 Subject: kselftest: Fix vdso_test_abi return status vdso_test_abi contains a batch of tests that verify the validity of the vDSO ABI. When a vDSO symbol is not found the relevant test is skipped reporting KSFT_SKIP. All the tests return values are then added in a single variable which is checked to verify failures. This approach can have side effects which result in reporting the wrong kselftest exit status. Fix vdso_test_abi verifying the return code of each test separately. Cc: Shuah Khan Cc: Andy Lutomirski Cc: Thomas Gleixner Reported-by: Cristian Marussi Signed-off-by: Vincenzo Frascino Signed-off-by: Shuah Khan --- tools/testing/selftests/vDSO/vdso_test_abi.c | 135 ++++++++++++--------------- 1 file changed, 62 insertions(+), 73 deletions(-) diff --git a/tools/testing/selftests/vDSO/vdso_test_abi.c b/tools/testing/selftests/vDSO/vdso_test_abi.c index 3d603f1394af..883ca85424bc 100644 --- a/tools/testing/selftests/vDSO/vdso_test_abi.c +++ b/tools/testing/selftests/vDSO/vdso_test_abi.c @@ -33,110 +33,114 @@ typedef long (*vdso_clock_gettime_t)(clockid_t clk_id, struct timespec *ts); typedef long (*vdso_clock_getres_t)(clockid_t clk_id, struct timespec *ts); typedef time_t (*vdso_time_t)(time_t *t); -static int vdso_test_gettimeofday(void) +#define VDSO_TEST_PASS_MSG() "\n%s(): PASS\n", __func__ +#define VDSO_TEST_FAIL_MSG(x) "\n%s(): %s FAIL\n", __func__, x +#define VDSO_TEST_SKIP_MSG(x) "\n%s(): SKIP: Could not find %s\n", __func__, x + +static void vdso_test_gettimeofday(void) { /* Find gettimeofday. 
*/ vdso_gettimeofday_t vdso_gettimeofday = (vdso_gettimeofday_t)vdso_sym(version, name[0]); if (!vdso_gettimeofday) { - printf("Could not find %s\n", name[0]); - return KSFT_SKIP; + ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[0])); + return; } struct timeval tv; long ret = vdso_gettimeofday(&tv, 0); if (ret == 0) { - printf("The time is %lld.%06lld\n", - (long long)tv.tv_sec, (long long)tv.tv_usec); + ksft_print_msg("The time is %lld.%06lld\n", + (long long)tv.tv_sec, (long long)tv.tv_usec); + ksft_test_result_pass(VDSO_TEST_PASS_MSG()); } else { - printf("%s failed\n", name[0]); - return KSFT_FAIL; + ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[0])); } - - return KSFT_PASS; } -static int vdso_test_clock_gettime(clockid_t clk_id) +static void vdso_test_clock_gettime(clockid_t clk_id) { /* Find clock_gettime. */ vdso_clock_gettime_t vdso_clock_gettime = (vdso_clock_gettime_t)vdso_sym(version, name[1]); if (!vdso_clock_gettime) { - printf("Could not find %s\n", name[1]); - return KSFT_SKIP; + ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[1])); + return; } struct timespec ts; long ret = vdso_clock_gettime(clk_id, &ts); if (ret == 0) { - printf("The time is %lld.%06lld\n", - (long long)ts.tv_sec, (long long)ts.tv_nsec); + ksft_print_msg("The time is %lld.%06lld\n", + (long long)ts.tv_sec, (long long)ts.tv_nsec); + ksft_test_result_pass(VDSO_TEST_PASS_MSG()); } else { - printf("%s failed\n", name[1]); - return KSFT_FAIL; + ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[1])); } - - return KSFT_PASS; } -static int vdso_test_time(void) +static void vdso_test_time(void) { /* Find time. */ vdso_time_t vdso_time = (vdso_time_t)vdso_sym(version, name[2]); if (!vdso_time) { - printf("Could not find %s\n", name[2]); - return KSFT_SKIP; + ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[2])); + return; } long ret = vdso_time(NULL); if (ret > 0) { - printf("The time in hours since January 1, 1970 is %lld\n", + ksft_print_msg("The time in hours since January 1, 1970 is %lld\n", (long long)(ret / 3600)); + ksft_test_result_pass(VDSO_TEST_PASS_MSG()); } else { - printf("%s failed\n", name[2]); - return KSFT_FAIL; + ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[2])); } - - return KSFT_PASS; } -static int vdso_test_clock_getres(clockid_t clk_id) +static void vdso_test_clock_getres(clockid_t clk_id) { + int clock_getres_fail = 0; + /* Find clock_getres. 
*/ vdso_clock_getres_t vdso_clock_getres = (vdso_clock_getres_t)vdso_sym(version, name[3]); if (!vdso_clock_getres) { - printf("Could not find %s\n", name[3]); - return KSFT_SKIP; + ksft_test_result_skip(VDSO_TEST_SKIP_MSG(name[3])); + return; } struct timespec ts, sys_ts; long ret = vdso_clock_getres(clk_id, &ts); if (ret == 0) { - printf("The resolution is %lld %lld\n", - (long long)ts.tv_sec, (long long)ts.tv_nsec); + ksft_print_msg("The vdso resolution is %lld %lld\n", + (long long)ts.tv_sec, (long long)ts.tv_nsec); } else { - printf("%s failed\n", name[3]); - return KSFT_FAIL; + clock_getres_fail++; } ret = syscall(SYS_clock_getres, clk_id, &sys_ts); - if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec)) { - printf("%s failed\n", name[3]); - return KSFT_FAIL; - } + ksft_print_msg("The syscall resolution is %lld %lld\n", + (long long)sys_ts.tv_sec, (long long)sys_ts.tv_nsec); - return KSFT_PASS; + if ((sys_ts.tv_sec != ts.tv_sec) || (sys_ts.tv_nsec != ts.tv_nsec)) + clock_getres_fail++; + + if (clock_getres_fail > 0) { + ksft_test_result_fail(VDSO_TEST_FAIL_MSG(name[3])); + } else { + ksft_test_result_pass(VDSO_TEST_PASS_MSG()); + } } const char *vdso_clock_name[12] = { @@ -158,36 +162,23 @@ const char *vdso_clock_name[12] = { * This function calls vdso_test_clock_gettime and vdso_test_clock_getres * with different values for clock_id. */ -static inline int vdso_test_clock(clockid_t clock_id) +static inline void vdso_test_clock(clockid_t clock_id) { - int ret0, ret1; - - ret0 = vdso_test_clock_gettime(clock_id); - /* A skipped test is considered passed */ - if (ret0 == KSFT_SKIP) - ret0 = KSFT_PASS; - - ret1 = vdso_test_clock_getres(clock_id); - /* A skipped test is considered passed */ - if (ret1 == KSFT_SKIP) - ret1 = KSFT_PASS; + ksft_print_msg("\nclock_id: %s\n", vdso_clock_name[clock_id]); - ret0 += ret1; + vdso_test_clock_gettime(clock_id); - printf("clock_id: %s", vdso_clock_name[clock_id]); - - if (ret0 > 0) - printf(" [FAIL]\n"); - else - printf(" [PASS]\n"); - - return ret0; + vdso_test_clock_getres(clock_id); } +#define VDSO_TEST_PLAN 16 + int main(int argc, char **argv) { unsigned long sysinfo_ehdr = getauxval(AT_SYSINFO_EHDR); - int ret; + + ksft_print_header(); + ksft_set_plan(VDSO_TEST_PLAN); if (!sysinfo_ehdr) { printf("AT_SYSINFO_EHDR is not present!\n"); @@ -201,44 +192,42 @@ int main(int argc, char **argv) vdso_init_from_sysinfo_ehdr(getauxval(AT_SYSINFO_EHDR)); - ret = vdso_test_gettimeofday(); + vdso_test_gettimeofday(); #if _POSIX_TIMERS > 0 #ifdef CLOCK_REALTIME - ret += vdso_test_clock(CLOCK_REALTIME); + vdso_test_clock(CLOCK_REALTIME); #endif #ifdef CLOCK_BOOTTIME - ret += vdso_test_clock(CLOCK_BOOTTIME); + vdso_test_clock(CLOCK_BOOTTIME); #endif #ifdef CLOCK_TAI - ret += vdso_test_clock(CLOCK_TAI); + vdso_test_clock(CLOCK_TAI); #endif #ifdef CLOCK_REALTIME_COARSE - ret += vdso_test_clock(CLOCK_REALTIME_COARSE); + vdso_test_clock(CLOCK_REALTIME_COARSE); #endif #ifdef CLOCK_MONOTONIC - ret += vdso_test_clock(CLOCK_MONOTONIC); + vdso_test_clock(CLOCK_MONOTONIC); #endif #ifdef CLOCK_MONOTONIC_RAW - ret += vdso_test_clock(CLOCK_MONOTONIC_RAW); + vdso_test_clock(CLOCK_MONOTONIC_RAW); #endif #ifdef CLOCK_MONOTONIC_COARSE - ret += vdso_test_clock(CLOCK_MONOTONIC_COARSE); + vdso_test_clock(CLOCK_MONOTONIC_COARSE); #endif #endif - ret += vdso_test_time(); - - if (ret > 0) - return KSFT_FAIL; + vdso_test_time(); - return KSFT_PASS; + ksft_print_cnts(); + return ksft_get_fail_cnt() == 0 ? 
KSFT_PASS : KSFT_FAIL; } -- cgit From edb854a3680bacc9ef9b91ec0c5ff6105886f6f3 Mon Sep 17 00:00:00 2001 From: Ming Lei Date: Thu, 27 Jan 2022 23:37:33 +0800 Subject: scsi: core: Reallocate device's budget map on queue depth change We currently use ->cmd_per_lun as initial queue depth for setting up the budget_map. Martin Wilck reported that it is common for the queue_depth to be subsequently updated in slave_configure() based on detected hardware characteristics. As a result, for some drivers, the static host template settings for cmd_per_lun and can_queue won't actually get used in practice. And if the default values are used to allocate the budget_map, memory may be consumed unnecessarily. Fix the issue by reallocating the budget_map after ->slave_configure() returns. At that time the device queue_depth should accurately reflect what the hardware needs. Link: https://lore.kernel.org/r/20220127153733.409132-1-ming.lei@redhat.com Cc: Bart Van Assche Reported-by: Martin Wilck Suggested-by: Martin Wilck Tested-by: Martin Wilck Reviewed-by: Martin Wilck Reviewed-by: Bart Van Assche Signed-off-by: Ming Lei Signed-off-by: Martin K. Petersen --- drivers/scsi/scsi_scan.c | 55 +++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 50 insertions(+), 5 deletions(-) diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c index 3520b9384428..f4e6c68ac99e 100644 --- a/drivers/scsi/scsi_scan.c +++ b/drivers/scsi/scsi_scan.c @@ -214,6 +214,48 @@ static void scsi_unlock_floptical(struct scsi_device *sdev, SCSI_TIMEOUT, 3, NULL); } +static int scsi_realloc_sdev_budget_map(struct scsi_device *sdev, + unsigned int depth) +{ + int new_shift = sbitmap_calculate_shift(depth); + bool need_alloc = !sdev->budget_map.map; + bool need_free = false; + int ret; + struct sbitmap sb_backup; + + /* + * realloc if new shift is calculated, which is caused by setting + * up one new default queue depth after calling ->slave_configure + */ + if (!need_alloc && new_shift != sdev->budget_map.shift) + need_alloc = need_free = true; + + if (!need_alloc) + return 0; + + /* + * Request queue has to be frozen for reallocating budget map, + * and here disk isn't added yet, so freezing is pretty fast + */ + if (need_free) { + blk_mq_freeze_queue(sdev->request_queue); + sb_backup = sdev->budget_map; + } + ret = sbitmap_init_node(&sdev->budget_map, + scsi_device_max_queue_depth(sdev), + new_shift, GFP_KERNEL, + sdev->request_queue->node, false, true); + if (need_free) { + if (ret) + sdev->budget_map = sb_backup; + else + sbitmap_free(&sb_backup); + ret = 0; + blk_mq_unfreeze_queue(sdev->request_queue); + } + return ret; +} + /** * scsi_alloc_sdev - allocate and setup a scsi_Device * @starget: which target to allocate a &scsi_device for @@ -306,11 +348,7 @@ static struct scsi_device *scsi_alloc_sdev(struct scsi_target *starget, * default device queue depth to figure out sbitmap shift * since we use this queue depth most of times. */ - if (sbitmap_init_node(&sdev->budget_map, - scsi_device_max_queue_depth(sdev), - sbitmap_calculate_shift(depth), - GFP_KERNEL, sdev->request_queue->node, - false, true)) { + if (scsi_realloc_sdev_budget_map(sdev, depth)) { put_device(&starget->dev); kfree(sdev); goto out; @@ -1017,6 +1055,13 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result, } return SCSI_SCAN_NO_RESPONSE; } + + /* + * The queue_depth is often changed in ->slave_configure. + * Set up budget map again since memory consumption of + * the map depends on actual queue depth. 
+ */ + scsi_realloc_sdev_budget_map(sdev, sdev->queue_depth); } if (sdev->scsi_level >= SCSI_3) -- cgit From 04662bac0067e2fd7f243d6abaa4d779bce14114 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sun, 30 Jan 2022 14:38:18 -0800 Subject: ACPI: require CRC32 to build ACPI core now requires crc32() but the kernel build can fail when CRC32 is not set/enabled, so select it in the ACPI Kconfig entry. Fixes this build error: ia64-linux-ld: drivers/acpi/scan.o: in function `acpi_store_pld_crc': include/acpi/platform/aclinuxex.h:62: undefined reference to `crc32_le' Fixes: 882c982dada4 ("acpi: Store CRC-32 hash of the _PLD in struct acpi_device") Signed-off-by: Randy Dunlap Reported-by: Guenter Roeck Reviewed-by: Guenter Roeck Tested-by: Guenter Roeck Signed-off-by: Rafael J. Wysocki --- drivers/acpi/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ba45541b1f1f..273741dedfd2 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -11,6 +11,7 @@ menuconfig ACPI depends on ARCH_SUPPORTS_ACPI select PNP select NLS + select CRC32 default y if X86 help Advanced Configuration and Power Interface (ACPI) support for -- cgit From 2af104290da5e4858e8caefa068827d7392c6a09 Mon Sep 17 00:00:00 2001 From: Tomohito Esaki Date: Fri, 28 Jan 2022 15:08:34 +0900 Subject: drm: introduce fb_modifiers_not_supported flag in mode_config If only linear modifier is advertised, since there are many drivers that only linear supported, the DRM core should handle this rather than open-coding in every driver. However, there are legacy drivers such as radeon that do not support modifiers but infer the actual layout of the underlying buffer. Therefore, a new flag fb_modifiers_not_supported is introduced for these legacy drivers, and allow_fb_modifiers is replaced with this new flag. v3: - change the order as follows: 1. add fb_modifiers_not_supported flag 2. add default modifiers 3. 
remove allow_fb_modifiers flag - add a conditional disable in amdgpu_dm_plane_init() v4: - modify kernel docs v5: - modify kernel docs Signed-off-by: Tomohito Esaki Acked-by: Harry Wentland Reviewed-by: Andy Shevchenko Reviewed-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220128060836.11216-2-etom@igel.co.jp --- drivers/gpu/drm/amd/amdgpu/amdgpu_display.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/dce_v10_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/dce_v11_0.c | 2 ++ drivers/gpu/drm/amd/amdgpu/dce_v6_0.c | 1 + drivers/gpu/drm/amd/amdgpu/dce_v8_0.c | 2 ++ drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 3 +++ drivers/gpu/drm/drm_framebuffer.c | 6 +++--- drivers/gpu/drm/drm_ioctl.c | 2 +- drivers/gpu/drm/nouveau/nouveau_display.c | 6 ++++-- drivers/gpu/drm/radeon/radeon_display.c | 2 ++ include/drm/drm_mode_config.h | 10 ++++++++++ 11 files changed, 33 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index 9a273420a67a..4c847f48f276 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -956,7 +956,7 @@ static int amdgpu_display_verify_sizes(struct amdgpu_framebuffer *rfb) int ret; unsigned int i, block_width, block_height, block_size_log2; - if (!rfb->base.dev->mode_config.allow_fb_modifiers) + if (rfb->base.dev->mode_config.fb_modifiers_not_supported) return 0; for (i = 0; i < format_info->num_planes; ++i) { @@ -1143,7 +1143,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, if (ret) return ret; - if (!dev->mode_config.allow_fb_modifiers) { + if (dev->mode_config.fb_modifiers_not_supported) { drm_WARN_ONCE(dev, adev->family >= AMDGPU_FAMILY_AI, "GFX9+ requires FB check based on format modifier\n"); ret = check_tiling_flags_gfx6(rfb); @@ -1151,7 +1151,7 @@ int amdgpu_display_framebuffer_init(struct drm_device *dev, return ret; } - if (dev->mode_config.allow_fb_modifiers && + if (!dev->mode_config.fb_modifiers_not_supported && !(rfb->base.flags & DRM_MODE_FB_MODIFIERS)) { ret = convert_tiling_flags_to_modifier(rfb); if (ret) { diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c index d1570a462a51..fb61c0814115 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v10_0.c @@ -2798,6 +2798,8 @@ static int dce_v10_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.preferred_depth = 24; adev_to_drm(adev)->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c index 18a7b3bd633b..17942a11366d 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v11_0.c @@ -2916,6 +2916,8 @@ static int dce_v11_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.preferred_depth = 24; adev_to_drm(adev)->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c index c7803dc2b2d5..2ec99ec8e1a3 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v6_0.c @@ -2674,6 +2674,7 @@ static int 
dce_v6_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.max_height = 16384; adev_to_drm(adev)->mode_config.preferred_depth = 24; adev_to_drm(adev)->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); diff --git a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c index 8318ee8339f1..de11fbe5aba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/dce_v8_0.c @@ -2695,6 +2695,8 @@ static int dce_v8_0_sw_init(void *handle) adev_to_drm(adev)->mode_config.preferred_depth = 24; adev_to_drm(adev)->mode_config.prefer_shadow = 1; + adev_to_drm(adev)->mode_config.fb_modifiers_not_supported = true; + adev_to_drm(adev)->mode_config.fb_base = adev->gmc.aper_base; r = amdgpu_display_modeset_create_props(adev); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index 39812daeb7c5..e8a994559b65 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -7828,6 +7828,9 @@ static int amdgpu_dm_plane_init(struct amdgpu_display_manager *dm, if (res) return res; + if (modifiers == NULL) + adev_to_drm(dm->adev)->mode_config.fb_modifiers_not_supported = true; + res = drm_universal_plane_init(adev_to_drm(dm->adev), plane, possible_crtcs, &dm_plane_funcs, formats, num_formats, modifiers, plane->type, NULL); diff --git a/drivers/gpu/drm/drm_framebuffer.c b/drivers/gpu/drm/drm_framebuffer.c index 07f5abc875e9..4562a8b86579 100644 --- a/drivers/gpu/drm/drm_framebuffer.c +++ b/drivers/gpu/drm/drm_framebuffer.c @@ -309,7 +309,7 @@ drm_internal_framebuffer_create(struct drm_device *dev, } if (r->flags & DRM_MODE_FB_MODIFIERS && - !dev->mode_config.allow_fb_modifiers) { + dev->mode_config.fb_modifiers_not_supported) { DRM_DEBUG_KMS("driver does not support fb modifiers\n"); return ERR_PTR(-EINVAL); } @@ -594,7 +594,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev, r->pixel_format = fb->format->format; r->flags = 0; - if (dev->mode_config.allow_fb_modifiers) + if (!dev->mode_config.fb_modifiers_not_supported) r->flags |= DRM_MODE_FB_MODIFIERS; for (i = 0; i < ARRAY_SIZE(r->handles); i++) { @@ -607,7 +607,7 @@ int drm_mode_getfb2_ioctl(struct drm_device *dev, for (i = 0; i < fb->format->num_planes; i++) { r->pitches[i] = fb->pitches[i]; r->offsets[i] = fb->offsets[i]; - if (dev->mode_config.allow_fb_modifiers) + if (!dev->mode_config.fb_modifiers_not_supported) r->modifier[i] = fb->modifier; } diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c index 8b8744dcf691..51fcf1298023 100644 --- a/drivers/gpu/drm/drm_ioctl.c +++ b/drivers/gpu/drm/drm_ioctl.c @@ -297,7 +297,7 @@ static int drm_getcap(struct drm_device *dev, void *data, struct drm_file *file_ req->value = 64; break; case DRM_CAP_ADDFB2_MODIFIERS: - req->value = dev->mode_config.allow_fb_modifiers; + req->value = !dev->mode_config.fb_modifiers_not_supported; break; case DRM_CAP_CRTC_IN_VBLANK_EVENT: req->value = 1; diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c index 2b460835a438..2cd0932b3d68 100644 --- a/drivers/gpu/drm/nouveau/nouveau_display.c +++ b/drivers/gpu/drm/nouveau/nouveau_display.c @@ -708,10 +708,12 @@ nouveau_display_create(struct drm_device *dev) &disp->disp); if (ret == 0) { nouveau_display_create_properties(dev); - if 
(disp->disp.object.oclass < NV50_DISP) + if (disp->disp.object.oclass < NV50_DISP) { + dev->mode_config.fb_modifiers_not_supported = true; ret = nv04_display_create(dev); - else + } else { ret = nv50_display_create(dev); + } } } else { ret = 0; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 573154268d43..b9a07677a71e 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -1596,6 +1596,8 @@ int radeon_modeset_init(struct radeon_device *rdev) rdev->ddev->mode_config.preferred_depth = 24; rdev->ddev->mode_config.prefer_shadow = 1; + rdev->ddev->mode_config.fb_modifiers_not_supported = true; + rdev->ddev->mode_config.fb_base = rdev->mc.aper_base; ret = radeon_modeset_create_props(rdev); diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 91ca575a78de..4a93dac91cf9 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -933,6 +933,16 @@ struct drm_mode_config { */ bool allow_fb_modifiers; + /** + * @fb_modifiers_not_supported: + * + * When this flag is set, the DRM device will not expose modifier + * support to userspace. This is only used by legacy drivers that infer + * the buffer layout through heuristics without using modifiers. New + * drivers shall not set fhis flag. + */ + bool fb_modifiers_not_supported; + /** * @normalize_zpos: * -- cgit From 8be576837b6e62b2ad0de2f9ba31cef618fa2891 Mon Sep 17 00:00:00 2001 From: Tomohito Esaki Date: Fri, 28 Jan 2022 15:08:35 +0900 Subject: drm: add support modifiers for drivers whose planes only support linear layout The LINEAR modifier is advertised as default if a driver doesn't specify modifiers. v2: - rebase to the latest master branch (5.16.0+) + "drm/plane: Make format_mod_supported truly optional" patch [1] [1] https://patchwork.freedesktop.org/patch/467940/?series=98255&rev=3 v3: - change the order as follows: 1. add fb_modifiers_not_supported flag 2. add default modifiers 3. 
remove allow_fb_modifiers flag v5: - change default_modifiers array from non-static to static - remove terminator in default_modifiers array - use ARRAY_SIZE to get the format_modifier_count - update sanity check in plane init func to use the fb_modifiers_not_supported - modify kernel docs Signed-off-by: Tomohito Esaki Reviewed-by: Andy Shevchenko Reviewed-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220128060836.11216-3-etom@igel.co.jp --- drivers/gpu/drm/drm_plane.c | 23 +++++++++++++---------- include/drm/drm_plane.h | 3 +++ 2 files changed, 16 insertions(+), 10 deletions(-) diff --git a/drivers/gpu/drm/drm_plane.c b/drivers/gpu/drm/drm_plane.c index deeec60a3315..bf0daa8d9bbd 100644 --- a/drivers/gpu/drm/drm_plane.c +++ b/drivers/gpu/drm/drm_plane.c @@ -237,6 +237,9 @@ static int __drm_universal_plane_init(struct drm_device *dev, const char *name, va_list ap) { struct drm_mode_config *config = &dev->mode_config; + static const uint64_t default_modifiers[] = { + DRM_FORMAT_MOD_LINEAR, + }; unsigned int format_modifier_count = 0; int ret; @@ -277,16 +280,16 @@ static int __drm_universal_plane_init(struct drm_device *dev, while (*temp_modifiers++ != DRM_FORMAT_MOD_INVALID) format_modifier_count++; + } else { + if (!dev->mode_config.fb_modifiers_not_supported) { + format_modifiers = default_modifiers; + format_modifier_count = ARRAY_SIZE(default_modifiers); + } } /* autoset the cap and check for consistency across all planes */ - if (format_modifier_count) { - drm_WARN_ON(dev, !config->allow_fb_modifiers && - !list_empty(&config->plane_list)); - config->allow_fb_modifiers = true; - } else { - drm_WARN_ON(dev, config->allow_fb_modifiers); - } + drm_WARN_ON(dev, config->fb_modifiers_not_supported && + format_modifier_count); plane->modifier_count = format_modifier_count; plane->modifiers = kmalloc_array(format_modifier_count, @@ -341,7 +344,7 @@ static int __drm_universal_plane_init(struct drm_device *dev, drm_object_attach_property(&plane->base, config->prop_src_h, 0); } - if (config->allow_fb_modifiers) + if (format_modifier_count) create_in_format_blob(dev, plane); return 0; @@ -368,8 +371,8 @@ static int __drm_universal_plane_init(struct drm_device *dev, * drm_universal_plane_init() to let the DRM managed resource infrastructure * take care of cleanup and deallocation. * - * Drivers supporting modifiers must set @format_modifiers on all their planes, - * even those that only support DRM_FORMAT_MOD_LINEAR. + * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier support may set + * @format_modifiers to NULL. The plane will advertise the linear modifier. * * Returns: * Zero on success, error code on failure. diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h index 06759badf99f..2628c7cde2da 100644 --- a/include/drm/drm_plane.h +++ b/include/drm/drm_plane.h @@ -803,6 +803,9 @@ void *__drmm_universal_plane_alloc(struct drm_device *dev, * * The @drm_plane_funcs.destroy hook must be NULL. * + * Drivers that only support the DRM_FORMAT_MOD_LINEAR modifier support may set + * @format_modifiers to NULL. The plane will advertise the linear modifier. + * * Returns: * Pointer to new plane, or ERR_PTR on failure. */ -- cgit From 3d082157a24216ca084082ce421a37d14ecfcfad Mon Sep 17 00:00:00 2001 From: Tomohito Esaki Date: Fri, 28 Jan 2022 15:08:36 +0900 Subject: drm: remove allow_fb_modifiers The allow_fb_modifiers flag is unnecessary since it has been replaced with fb_modifiers_not_supported flag. 
v3: - change the order as follows: 1. add fb_modifiers_not_supported flag 2. add default modifiers 3. remove allow_fb_modifiers flag v5: - keep a sanity check in plane init func Signed-off-by: Tomohito Esaki Reviewed-by: Andy Shevchenko Reviewed-by: Laurent Pinchart Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220128060836.11216-4-etom@igel.co.jp --- drivers/gpu/drm/selftests/test-drm_framebuffer.c | 1 - include/drm/drm_mode_config.h | 16 ---------------- 2 files changed, 17 deletions(-) diff --git a/drivers/gpu/drm/selftests/test-drm_framebuffer.c b/drivers/gpu/drm/selftests/test-drm_framebuffer.c index 61b44d3a6a61..f6d66285c5fc 100644 --- a/drivers/gpu/drm/selftests/test-drm_framebuffer.c +++ b/drivers/gpu/drm/selftests/test-drm_framebuffer.c @@ -323,7 +323,6 @@ static struct drm_device mock_drm_device = { .max_width = MAX_WIDTH, .min_height = MIN_HEIGHT, .max_height = MAX_HEIGHT, - .allow_fb_modifiers = true, .funcs = &mock_config_funcs, }, }; diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h index 4a93dac91cf9..6b5e01295348 100644 --- a/include/drm/drm_mode_config.h +++ b/include/drm/drm_mode_config.h @@ -917,22 +917,6 @@ struct drm_mode_config { */ bool async_page_flip; - /** - * @allow_fb_modifiers: - * - * Whether the driver supports fb modifiers in the ADDFB2.1 ioctl call. - * Note that drivers should not set this directly, it is automatically - * set in drm_universal_plane_init(). - * - * IMPORTANT: - * - * If this is set the driver must fill out the full implicit modifier - * information in their &drm_mode_config_funcs.fb_create hook for legacy - * userspace which does not set modifiers. Otherwise the GETFB2 ioctl is - * broken for modifier aware userspace. - */ - bool allow_fb_modifiers; - /** * @fb_modifiers_not_supported: * -- cgit From d80976d9ffd9d7f89a26134a299b236910477f3b Mon Sep 17 00:00:00 2001 From: Daniel Vetter Date: Tue, 30 Nov 2021 16:27:55 +0100 Subject: dma-resv: some doc polish for iterators MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Hammer it a bit more in that iterators can be restarted and when that matters, plus suggest to prefer the locked version whenver. Also delete the two leftover kerneldoc for static functions plus sprinkle some more links while at it. v2: Keep some comments (Christian) Reviewed-by: Christian König Signed-off-by: Daniel Vetter Cc: Sumit Semwal Cc: "Christian König" Cc: linux-media@vger.kernel.org Cc: linaro-mm-sig@lists.linaro.org Link: https://patchwork.freedesktop.org/patch/msgid/20211130152756.1388106-1-daniel.vetter@ffwll.ch --- drivers/dma-buf/dma-resv.c | 29 +++++++++++++++-------------- include/linux/dma-resv.h | 13 ++++++++++++- 2 files changed, 27 insertions(+), 15 deletions(-) diff --git a/drivers/dma-buf/dma-resv.c b/drivers/dma-buf/dma-resv.c index 6dd9a40b55d4..ee31f15d633a 100644 --- a/drivers/dma-buf/dma-resv.c +++ b/drivers/dma-buf/dma-resv.c @@ -323,12 +323,8 @@ void dma_resv_add_excl_fence(struct dma_resv *obj, struct dma_fence *fence) } EXPORT_SYMBOL(dma_resv_add_excl_fence); -/** - * dma_resv_iter_restart_unlocked - restart the unlocked iterator - * @cursor: The dma_resv_iter object to restart - * - * Restart the unlocked iteration by initializing the cursor object. - */ +/* Restart the iterator by initializing all the necessary fields, but not the + * relation to the dma_resv object. 
*/ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) { cursor->seq = read_seqcount_begin(&cursor->obj->seq); @@ -344,14 +340,7 @@ static void dma_resv_iter_restart_unlocked(struct dma_resv_iter *cursor) cursor->is_restarted = true; } -/** - * dma_resv_iter_walk_unlocked - walk over fences in a dma_resv obj - * @cursor: cursor to record the current position - * - * Return all the fences in the dma_resv object which are not yet signaled. - * The returned fence has an extra local reference so will stay alive. - * If a concurrent modify is detected the whole iteration is started over again. - */ +/* Walk to the next not signaled fence and grab a reference to it */ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) { struct dma_resv *obj = cursor->obj; @@ -387,6 +376,12 @@ static void dma_resv_iter_walk_unlocked(struct dma_resv_iter *cursor) * dma_resv_iter_first_unlocked - first fence in an unlocked dma_resv obj. * @cursor: the cursor with the current position * + * Subsequent fences are iterated with dma_resv_iter_next_unlocked(). + * + * Beware that the iterator can be restarted. Code which accumulates statistics + * or similar needs to check for this with dma_resv_iter_is_restarted(). For + * this reason prefer the locked dma_resv_iter_first() whenver possible. + * * Returns the first fence from an unlocked dma_resv obj. */ struct dma_fence *dma_resv_iter_first_unlocked(struct dma_resv_iter *cursor) @@ -406,6 +401,10 @@ EXPORT_SYMBOL(dma_resv_iter_first_unlocked); * dma_resv_iter_next_unlocked - next fence in an unlocked dma_resv obj. * @cursor: the cursor with the current position * + * Beware that the iterator can be restarted. Code which accumulates statistics + * or similar needs to check for this with dma_resv_iter_is_restarted(). For + * this reason prefer the locked dma_resv_iter_next() whenver possible. + * * Returns the next fence from an unlocked dma_resv obj. */ struct dma_fence *dma_resv_iter_next_unlocked(struct dma_resv_iter *cursor) @@ -431,6 +430,8 @@ EXPORT_SYMBOL(dma_resv_iter_next_unlocked); * dma_resv_iter_first - first fence from a locked dma_resv object * @cursor: cursor to record the current position * + * Subsequent fences are iterated with dma_resv_iter_next_unlocked(). + * * Return the first fence in the dma_resv object while holding the * &dma_resv.lock. */ diff --git a/include/linux/dma-resv.h b/include/linux/dma-resv.h index a715df97b31a..afdfdfac729f 100644 --- a/include/linux/dma-resv.h +++ b/include/linux/dma-resv.h @@ -153,6 +153,13 @@ struct dma_resv { * struct dma_resv_iter - current position into the dma_resv fences * * Don't touch this directly in the driver, use the accessor function instead. + * + * IMPORTANT + * + * When using the lockless iterators like dma_resv_iter_next_unlocked() or + * dma_resv_for_each_fence_unlocked() beware that the iterator can be restarted. + * Code which accumulates statistics or similar needs to check for this with + * dma_resv_iter_is_restarted(). */ struct dma_resv_iter { /** @obj: The dma_resv object we iterate over */ @@ -243,7 +250,11 @@ static inline bool dma_resv_iter_is_restarted(struct dma_resv_iter *cursor) * &dma_resv.lock and using RCU instead. The cursor needs to be initialized * with dma_resv_iter_begin() and cleaned up with dma_resv_iter_end(). Inside * the iterator a reference to the dma_fence is held and the RCU lock dropped. - * When the dma_resv is modified the iteration starts over again. 
+ * + * Beware that the iterator can be restarted when the struct dma_resv for + * @cursor is modified. Code which accumulates statistics or similar needs to + * check for this with dma_resv_iter_is_restarted(). For this reason prefer the + * lock iterator dma_resv_for_each_fence() whenever possible. */ #define dma_resv_for_each_fence_unlocked(cursor, fence) \ for (fence = dma_resv_iter_first_unlocked(cursor); \ -- cgit From f588a1bbfce781042196e68f8e200f08b3d9e8c4 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Tue, 11 Jan 2022 14:00:26 -0600 Subject: drm/amd: Warn users about potential s0ix problems On some OEM setups users can configure the BIOS for S3 or S2idle. When configured to S3 users can still choose 's2idle' in the kernel by using `/sys/power/mem_sleep`. Before commit 6dc8265f9803 ("drm/amdgpu: always reset the asic in suspend (v2)"), the GPU would crash. Now when configured this way, the system should resume but will use more power. As such, adjust the `amdpu_acpi_is_s0ix function` to warn users about potential power consumption issues during their first attempt at suspending. Reported-by: Bjoren Dasse Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1824 Reviewed-by: Alex Deucher Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 ++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 24 +++++++++++++++++++----- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 845c92dc73f6..65d2799f5a87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1407,12 +1407,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); -bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); void amdgpu_acpi_detect(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } -static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline void amdgpu_acpi_detect(void) { } static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, @@ -1421,6 +1419,12 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state) { return 0; } #endif +#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) +bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); +#else +static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } +#endif + int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo, struct amdgpu_bo_va_mapping **mapping); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 4811b0faafd9..b19d40751802 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1031,6 +1031,7 @@ void amdgpu_acpi_detect(void) } } +#if IS_ENABLED(CONFIG_SUSPEND) /** * amdgpu_acpi_is_s0ix_active * @@ -1040,11 +1041,24 @@ void amdgpu_acpi_detect(void) */ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { -#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND) - if (acpi_gbl_FADT.flags & 
ACPI_FADT_LOW_POWER_S0) { - if (adev->flags & AMD_IS_APU) - return pm_suspend_target_state == PM_SUSPEND_TO_IDLE; + if (!(adev->flags & AMD_IS_APU) || + (pm_suspend_target_state != PM_SUSPEND_TO_IDLE)) + return false; + + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { + dev_warn_once(adev->dev, + "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" + "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); + return false; } -#endif + +#if !IS_ENABLED(CONFIG_AMD_PMC) + dev_warn_once(adev->dev, + "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); return false; +#else + return true; +#endif /* CONFIG_AMD_PMC */ } + +#endif /* CONFIG_SUSPEND */ -- cgit From 29d650f7e3ab55283b89c9f5883d0c256ce478b5 Mon Sep 17 00:00:00 2001 From: "Darrick J. Wong" Date: Mon, 24 Jan 2022 15:48:31 -0800 Subject: xfs: reject crazy array sizes being fed to XFS_IOC_GETBMAP* Syzbot tripped over the following complaint from the kernel: WARNING: CPU: 2 PID: 15402 at mm/util.c:597 kvmalloc_node+0x11e/0x125 mm/util.c:597 While trying to run XFS_IOC_GETBMAP against the following structure: struct getbmap fubar = { .bmv_count = 0x22dae649, }; Obviously, this is a crazy huge value since the next thing that the ioctl would do is allocate 37GB of memory. This is enough to make kvmalloc mad, but isn't large enough to trip the validation functions. In other words, I'm fussing with checks that were **already sufficient** because that's easier than dealing with 644 internal bug reports. Yes, that's right, six hundred and forty-four. Signed-off-by: Darrick J. Wong Reviewed-by: Allison Henderson Reviewed-by: Catherine Hoang --- fs/xfs/xfs_ioctl.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c index 03a6198c97f6..2515fe8299e1 100644 --- a/fs/xfs/xfs_ioctl.c +++ b/fs/xfs/xfs_ioctl.c @@ -1464,7 +1464,7 @@ xfs_ioc_getbmap( if (bmx.bmv_count < 2) return -EINVAL; - if (bmx.bmv_count > ULONG_MAX / recsize) + if (bmx.bmv_count >= INT_MAX / recsize) return -ENOMEM; buf = kvcalloc(bmx.bmv_count, sizeof(*buf), GFP_KERNEL); -- cgit From 3d2504663c41104b4359a15f35670cfa82de1bbf Mon Sep 17 00:00:00 2001 From: Jedrzej Jagielski Date: Tue, 14 Dec 2021 10:08:22 +0000 Subject: i40e: Fix reset bw limit when DCB enabled with 1 TC There was an AQ error I40E_AQ_RC_EINVAL when trying to reset bw limit as part of bw allocation setup. This was caused by trying to reset bw limit with DCB enabled. Bw limit should not be reset when DCB is enabled. The code was relying on the pf->flags to check if DCB is enabled but if only 1 TC is available this flag will not be set even though DCB is enabled. Add a check for number of TC and if it is 1 don't try to reset bw limit even if pf->flags shows DCB as disabled. 
Fixes: fa38e30ac73f ("i40e: Fix for Tx timeouts when interface is brought up if DCB is enabled") Suggested-by: Alexander Lobakin # Flatten the condition Signed-off-by: Sylwester Dziedziuch Signed-off-by: Jedrzej Jagielski Reviewed-by: Alexander Lobakin Tested-by: Imam Hassan Reza Biswas Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e_main.c | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index f70c478dafdb..5cb4dc69fe87 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -5372,7 +5372,15 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, /* There is no need to reset BW when mqprio mode is on. */ if (pf->flags & I40E_FLAG_TC_MQPRIO) return 0; - if (!vsi->mqprio_qopt.qopt.hw && !(pf->flags & I40E_FLAG_DCB_ENABLED)) { + + if (!vsi->mqprio_qopt.qopt.hw) { + if (pf->flags & I40E_FLAG_DCB_ENABLED) + goto skip_reset; + + if (IS_ENABLED(CONFIG_I40E_DCB) && + i40e_dcb_hw_get_num_tc(&pf->hw) == 1) + goto skip_reset; + ret = i40e_set_bw_limit(vsi, vsi->seid, 0); if (ret) dev_info(&pf->pdev->dev, @@ -5380,6 +5388,8 @@ static int i40e_vsi_configure_bw_alloc(struct i40e_vsi *vsi, u8 enabled_tc, vsi->seid); return ret; } + +skip_reset: memset(&bw_data, 0, sizeof(bw_data)); bw_data.tc_valid_bits = enabled_tc; for (i = 0; i < I40E_MAX_TRAFFIC_CLASS; i++) -- cgit From 0aed75fd30dacd31144188f7ddd5d571db7511c5 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 27 Jan 2022 21:12:50 +0800 Subject: scsi: pm8001: Fix warning for undescribed param in process_one_iomb() make W=1 complains of an undescribed function parameter: drivers/scsi/pm8001/pm80xx_hwi.c:3938: warning: Function parameter or member 'circularQ' not described in 'process_one_iomb' Fix it. Link: https://lore.kernel.org/r/1643289172-165636-2-git-send-email-john.garry@huawei.com Reported-by: Damien Le Moal Reviewed-by: Damien Le Moal Acked-by: Jack Wang Signed-off-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/pm8001/pm80xx_hwi.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index c350d2017aa4..b7ab643ef014 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -3905,6 +3905,7 @@ static int ssp_coalesced_comp_resp(struct pm8001_hba_info *pm8001_ha, /** * process_one_iomb - process one outbound Queue memory block * @pm8001_ha: our hba card information + * @circularQ: outbound circular queue * @piomb: IO message buffer */ static void process_one_iomb(struct pm8001_hba_info *pm8001_ha, -- cgit From 61f162aa4381845acbdc7f2be4dfb694d027c018 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 27 Jan 2022 21:12:51 +0800 Subject: scsi: pm8001: Fix use-after-free for aborted TMF sas_task Currently a use-after-free may occur if a TMF sas_task is aborted before we handle the IO completion in mpi_ssp_completion(). The abort occurs due to timeout. When the timeout occurs, the SAS_TASK_STATE_ABORTED flag is set and the sas_task is freed in pm8001_exec_internal_tmf_task(). However, if the I/O completion occurs later, the I/O completion still thinks that the sas_task is available. Fix this by clearing the ccb->task if the TMF times out - the I/O completion handler does nothing if this pointer is cleared. 
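Sketched in isolation, the race has two sides: the timeout path detaches and frees the task, and a completion that arrives afterwards must treat the cleared back-pointer as "nothing to do". The following standalone toy version only illustrates that shape; the names are made up, it is single-threaded, and it omits the locking the real driver holds around these paths.

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_task {
        int id;
    };

    struct toy_ccb {
        struct toy_task *task;      /* back-pointer the completion path uses */
    };

    /* timeout path: the task is about to go away, so detach it first */
    static void toy_timeout(struct toy_ccb *ccb)
    {
        struct toy_task *task = ccb->task;

        ccb->task = NULL;
        free(task);
    }

    /* late completion: bail out if the task was already torn down */
    static void toy_completion(struct toy_ccb *ccb)
    {
        if (!ccb->task) {
            printf("completion after timeout: nothing to do\n");
            return;
        }
        printf("completing task %d\n", ccb->task->id);
    }

    int main(void)
    {
        struct toy_ccb ccb = {
            .task = calloc(1, sizeof(struct toy_task)),
        };

        toy_timeout(&ccb);      /* timeout wins the race in this toy run */
        toy_completion(&ccb);   /* sees NULL and does nothing */
        return 0;
    }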
Link: https://lore.kernel.org/r/1643289172-165636-3-git-send-email-john.garry@huawei.com Reviewed-by: Damien Le Moal Acked-by: Jack Wang Signed-off-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/pm8001/pm8001_sas.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/drivers/scsi/pm8001/pm8001_sas.c b/drivers/scsi/pm8001/pm8001_sas.c index 160ee8b228c9..32edda3e55c6 100644 --- a/drivers/scsi/pm8001/pm8001_sas.c +++ b/drivers/scsi/pm8001/pm8001_sas.c @@ -769,8 +769,13 @@ static int pm8001_exec_internal_tmf_task(struct domain_device *dev, res = -TMF_RESP_FUNC_FAILED; /* Even TMF timed out, return direct. */ if (task->task_state_flags & SAS_TASK_STATE_ABORTED) { + struct pm8001_ccb_info *ccb = task->lldd_task; + pm8001_dbg(pm8001_ha, FAIL, "TMF task[%x]timeout.\n", tmf->tmf); + + if (ccb) + ccb->task = NULL; goto ex_err; } -- cgit From df7abcaa1246e2537ab4016077b5443bb3c09378 Mon Sep 17 00:00:00 2001 From: John Garry Date: Thu, 27 Jan 2022 21:12:52 +0800 Subject: scsi: pm8001: Fix use-after-free for aborted SSP/STP sas_task Currently a use-after-free may occur if a sas_task is aborted by the upper layer before we handle the I/O completion in mpi_ssp_completion() or mpi_sata_completion(). In this case, the following are the two steps in handling those I/O completions: - Call complete() to inform the upper layer handler of completion of the I/O. - Release driver resources associated with the sas_task in pm8001_ccb_task_free() call. When complete() is called, the upper layer may free the sas_task. As such, we should not touch the associated sas_task afterwards, but we do so in the pm8001_ccb_task_free() call. Fix by swapping the complete() and pm8001_ccb_task_free() calls ordering. Link: https://lore.kernel.org/r/1643289172-165636-4-git-send-email-john.garry@huawei.com Reviewed-by: Damien Le Moal Acked-by: Jack Wang Signed-off-by: John Garry Signed-off-by: Martin K. Petersen --- drivers/scsi/pm8001/pm80xx_hwi.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/scsi/pm8001/pm80xx_hwi.c b/drivers/scsi/pm8001/pm80xx_hwi.c index b7ab643ef014..9d20f8009b89 100644 --- a/drivers/scsi/pm8001/pm80xx_hwi.c +++ b/drivers/scsi/pm8001/pm80xx_hwi.c @@ -2185,9 +2185,9 @@ mpi_ssp_completion(struct pm8001_hba_info *pm8001_ha, void *piomb) pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); @@ -2794,9 +2794,9 @@ mpi_sata_completion(struct pm8001_hba_info *pm8001_ha, pm8001_dbg(pm8001_ha, FAIL, "task 0x%p done with io_status 0x%x resp 0x%x stat 0x%x but aborted by upper layer!\n", t, status, ts->resp, ts->stat); + pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); if (t->slow_task) complete(&t->slow_task->completion); - pm8001_ccb_task_free(pm8001_ha, t, ccb, tag); } else { spin_unlock_irqrestore(&t->task_state_lock, flags); spin_unlock_irqrestore(&circularQ->oq_lock, -- cgit From 1b777d4d9e383d2744fc9b3a09af6ec1893c8b1a Mon Sep 17 00:00:00 2001 From: Nick Lopez Date: Sat, 22 Jan 2022 01:19:06 -0700 Subject: drm/nouveau: fix off by one in BIOS boundary checking Bounds checking when parsing init scripts embedded in the BIOS reject access to the last byte. 
This causes driver initialization to fail on Apple eMac's with GeForce 2 MX GPUs, leaving the system with no working console. This is probably only seen on OpenFirmware machines like PowerPC Macs because the BIOS image provided by OF is only the used parts of the ROM, not a power-of-two blocks read from PCI directly so PCs always have empty bytes at the end that are never accessed. Signed-off-by: Nick Lopez Fixes: 4d4e9907ff572 ("drm/nouveau/bios: guard against out-of-bounds accesses to image") Cc: # v4.10+ Reviewed-by: Ilia Mirkin Reviewed-by: Karol Herbst Signed-off-by: Karol Herbst Link: https://patchwork.freedesktop.org/patch/msgid/20220122081906.2633061-1-github@glowingmonkey.org --- drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c index d0f52d59fc2f..64e423dddd9e 100644 --- a/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c +++ b/drivers/gpu/drm/nouveau/nvkm/subdev/bios/base.c @@ -38,7 +38,7 @@ nvbios_addr(struct nvkm_bios *bios, u32 *addr, u8 size) *addr += bios->imaged_addr; } - if (unlikely(*addr + size >= bios->size)) { + if (unlikely(*addr + size > bios->size)) { nvkm_error(&bios->subdev, "OOB %d %08x %08x\n", size, p, *addr); return false; } -- cgit From c763ec4c10f78678d6d4415646237f07109a5a5f Mon Sep 17 00:00:00 2001 From: John Garry Date: Mon, 31 Jan 2022 19:13:27 +0800 Subject: scsi: hisi_sas: Fix setting of hisi_sas_slot.is_internal The hisi_sas_slot.is_internal member is not set properly for ATA commands which the driver sends directly. A TMF struct pointer is normally used as a test to set this, but it is NULL for those commands. It's not ideal, but pass an empty TMF struct to set that member properly. Link: https://lore.kernel.org/r/1643627607-138785-1-git-send-email-john.garry@huawei.com Fixes: dc313f6b125b ("scsi: hisi_sas: Factor out task prep and delivery code") Reported-by: Xiang Chen Signed-off-by: John Garry Signed-off-by: Martin K. 
Petersen --- drivers/scsi/hisi_sas/hisi_sas_main.c | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/drivers/scsi/hisi_sas/hisi_sas_main.c b/drivers/scsi/hisi_sas/hisi_sas_main.c index 2f53a2ee024a..ebf5ec38891b 100644 --- a/drivers/scsi/hisi_sas/hisi_sas_main.c +++ b/drivers/scsi/hisi_sas/hisi_sas_main.c @@ -400,8 +400,7 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, struct hisi_sas_slot *slot, struct hisi_sas_dq *dq, struct hisi_sas_device *sas_dev, - struct hisi_sas_internal_abort *abort, - struct hisi_sas_tmf_task *tmf) + struct hisi_sas_internal_abort *abort) { struct hisi_sas_cmd_hdr *cmd_hdr_base; int dlvry_queue_slot, dlvry_queue; @@ -427,8 +426,6 @@ void hisi_sas_task_deliver(struct hisi_hba *hisi_hba, cmd_hdr_base = hisi_hba->cmd_hdr[dlvry_queue]; slot->cmd_hdr = &cmd_hdr_base[dlvry_queue_slot]; - slot->tmf = tmf; - slot->is_internal = tmf; task->lldd_task = slot; memset(slot->cmd_hdr, 0, sizeof(struct hisi_sas_cmd_hdr)); @@ -587,7 +584,7 @@ static int hisi_sas_task_exec(struct sas_task *task, gfp_t gfp_flags, slot->is_internal = tmf; /* protect task_prep and start_delivery sequence */ - hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL, tmf); + hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, NULL); return 0; @@ -1380,12 +1377,13 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) struct hisi_hba *hisi_hba = dev_to_hisi_hba(device); struct device *dev = hisi_hba->dev; int s = sizeof(struct host_to_dev_fis); + struct hisi_sas_tmf_task tmf = {}; ata_for_each_link(link, ap, EDGE) { int pmp = sata_srst_pmp(link); hisi_sas_fill_ata_reset_cmd(link->device, 1, pmp, fis); - rc = hisi_sas_exec_internal_tmf_task(device, fis, s, NULL); + rc = hisi_sas_exec_internal_tmf_task(device, fis, s, &tmf); if (rc != TMF_RESP_FUNC_COMPLETE) break; } @@ -1396,7 +1394,7 @@ static int hisi_sas_softreset_ata_disk(struct domain_device *device) hisi_sas_fill_ata_reset_cmd(link->device, 0, pmp, fis); rc = hisi_sas_exec_internal_tmf_task(device, fis, - s, NULL); + s, &tmf); if (rc != TMF_RESP_FUNC_COMPLETE) dev_err(dev, "ata disk %016llx de-reset failed\n", SAS_ADDR(device->sas_addr)); @@ -2067,7 +2065,7 @@ hisi_sas_internal_abort_task_exec(struct hisi_hba *hisi_hba, int device_id, slot->port = port; slot->is_internal = true; - hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort, NULL); + hisi_sas_task_deliver(hisi_hba, slot, dq, sas_dev, abort); return 0; -- cgit From 6533e558c6505e94c3e0ed4281ed5e31ec985f4d Mon Sep 17 00:00:00 2001 From: Karen Sornek Date: Wed, 12 Jan 2022 10:19:47 +0100 Subject: i40e: Fix reset path while removing the driver Fix the crash in kernel while dereferencing the NULL pointer, when the driver is unloaded and simultaneously the VSI rings are being stopped. The hardware requires 50msec in order to finish RX queues disable. For this purpose the driver spins in mdelay function for the operation to be completed. For example changing number of queues which requires reset would fail in the following call stack: 1) i40e_prep_for_reset 2) i40e_pf_quiesce_all_vsi 3) i40e_quiesce_vsi 4) i40e_vsi_close 5) i40e_down 6) i40e_vsi_stop_rings 7) i40e_vsi_control_rx -> disable requires the delay of 50msecs 8) continue back in i40e_down function where i40e_clean_tx_ring(vsi->tx_rings[i]) is going to crash When the driver was spinning vsi_release called i40e_vsi_free_arrays where the vsi->tx_rings resources were freed and the pointer was set to NULL. 
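Conceptually, the fix that follows serializes remove against the reset path with one extra state bit: remove first claims the reset-pending bit, then raises an "in remove" flag, and every reset/rebuild entry point returns early once that flag is set. A minimal sketch of the pattern, using generic bitops on a stand-in state word rather than the real i40e structures, could look like this:

  #include <linux/bitops.h>

  enum { STATE_RESET_PENDING, STATE_IN_REMOVE };

  static unsigned long pf_state;                  /* stand-in for pf->state */

  static void prep_for_reset(void)
  {
          if (test_bit(STATE_IN_REMOVE, &pf_state))
                  return;                         /* driver is going away */
          /* ... quiesce VSIs and stop rings ... */
  }

  static void driver_remove(void)
  {
          /* Claim the reset path, then mark removal so any later reset
           * request bails out in prep_for_reset(). */
          while (test_and_set_bit(STATE_RESET_PENDING, &pf_state))
                  ;                               /* the real code sleeps 1-2 ms here */
          set_bit(STATE_IN_REMOVE, &pf_state);
          /* ... tear down rings and VSIs safely ... */
  }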
Fixes: 5b6d4a7f20b0 ("i40e: Fix crash during removing i40e driver") Signed-off-by: Slawomir Laba Signed-off-by: Sylwester Dziedziuch Signed-off-by: Karen Sornek Tested-by: Gurucharan G Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/i40e/i40e.h | 1 + drivers/net/ethernet/intel/i40e/i40e_main.c | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h index 2e02cc68cd3f..80c5cecaf2b5 100644 --- a/drivers/net/ethernet/intel/i40e/i40e.h +++ b/drivers/net/ethernet/intel/i40e/i40e.h @@ -144,6 +144,7 @@ enum i40e_state_t { __I40E_VIRTCHNL_OP_PENDING, __I40E_RECOVERY_MODE, __I40E_VF_RESETS_DISABLED, /* disable resets during i40e_remove */ + __I40E_IN_REMOVE, __I40E_VFS_RELEASING, /* This must be last as it determines the size of the BITMAP */ __I40E_STATE_SIZE__, diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c index 5cb4dc69fe87..0c4b7dfb3b35 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_main.c +++ b/drivers/net/ethernet/intel/i40e/i40e_main.c @@ -10863,6 +10863,9 @@ static void i40e_reset_and_rebuild(struct i40e_pf *pf, bool reinit, bool lock_acquired) { int ret; + + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return; /* Now we wait for GRST to settle out. * We don't have to delete the VEBs or VSIs from the hw switch * because the reset will make them disappear. @@ -12222,6 +12225,8 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count) vsi->req_queue_pairs = queue_count; i40e_prep_for_reset(pf); + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return pf->alloc_rss_size; pf->alloc_rss_size = new_rss_size; @@ -13048,6 +13053,10 @@ static int i40e_xdp_setup(struct i40e_vsi *vsi, struct bpf_prog *prog, if (need_reset) i40e_prep_for_reset(pf); + /* VSI shall be deleted in a moment, just return EINVAL */ + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return -EINVAL; + old_prog = xchg(&vsi->xdp_prog, prog); if (need_reset) { @@ -15938,8 +15947,13 @@ static void i40e_remove(struct pci_dev *pdev) i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0); - while (test_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) + /* Grab __I40E_RESET_RECOVERY_PENDING and set __I40E_IN_REMOVE + * flags, once they are set, i40e_rebuild should not be called as + * i40e_prep_for_reset always returns early. + */ + while (test_and_set_bit(__I40E_RESET_RECOVERY_PENDING, pf->state)) usleep_range(1000, 2000); + set_bit(__I40E_IN_REMOVE, pf->state); if (pf->flags & I40E_FLAG_SRIOV_ENABLED) { set_bit(__I40E_VF_RESETS_DISABLED, pf->state); @@ -16138,6 +16152,9 @@ static void i40e_pci_error_reset_done(struct pci_dev *pdev) { struct i40e_pf *pf = pci_get_drvdata(pdev); + if (test_bit(__I40E_IN_REMOVE, pf->state)) + return; + i40e_reset_and_rebuild(pf, false, false); } -- cgit From 3ec5586b4699cfb75cdfa09425e11d121db40773 Mon Sep 17 00:00:00 2001 From: Evan Quan Date: Mon, 24 Jan 2022 13:40:35 +0800 Subject: drm/amd/pm: correct the MGpuFanBoost support for Beige Goby The existing way cannot handle Beige Goby well as a different PPTable data structure(PPTable_beige_goby_t instead of PPTable_t) is used there. 
Signed-off-by: Evan Quan Acked-by: Alex Deucher Signed-off-by: Alex Deucher Cc: stable@vger.kernel.org --- drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c index 777f717c37ae..a4207293158c 100644 --- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c +++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c @@ -3696,14 +3696,14 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu, static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu) { - struct smu_table_context *table_context = &smu->smu_table; - PPTable_t *smc_pptable = table_context->driver_pptable; + uint16_t *mgpu_fan_boost_limit_rpm; + GET_PPTABLE_MEMBER(MGpuFanBoostLimitRpm, &mgpu_fan_boost_limit_rpm); /* * Skip the MGpuFanBoost setting for those ASICs * which do not support it */ - if (!smc_pptable->MGpuFanBoostLimitRpm) + if (*mgpu_fan_boost_limit_rpm == 0) return 0; return smu_cmn_send_smc_msg_with_param(smu, -- cgit From a6ed2035878e5ad2e43ed175d8812ac9399d6c40 Mon Sep 17 00:00:00 2001 From: Mario Limonciello Date: Tue, 11 Jan 2022 14:00:26 -0600 Subject: drm/amd: Warn users about potential s0ix problems On some OEM setups users can configure the BIOS for S3 or S2idle. When configured to S3 users can still choose 's2idle' in the kernel by using `/sys/power/mem_sleep`. Before commit 6dc8265f9803 ("drm/amdgpu: always reset the asic in suspend (v2)"), the GPU would crash. Now when configured this way, the system should resume but will use more power. As such, adjust the `amdpu_acpi_is_s0ix function` to warn users about potential power consumption issues during their first attempt at suspending. 
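Reading the diff below is easier with the intended decision order spelled out: dGPUs and non-s2idle suspend targets never report s0ix; an APU targeting s2idle whose BIOS does not advertise low-power S0 idle gets a one-time warning and falls back; and a kernel built without the AMD PMC driver likewise warns and falls back. The fragment below is only a plain-C restatement of that ordering with simplified names, not the driver code itself:

  /* Plain restatement of the s0ix decision (names simplified). */
  static bool s0ix_active(bool is_apu, bool target_is_s2idle,
                          bool bios_advertises_low_power_s0,
                          bool amd_pmc_built_in)
  {
          if (!is_apu || !target_is_s2idle)
                  return false;           /* s0ix only applies to APUs in s2idle */

          if (!bios_advertises_low_power_s0) {
                  /* warn once: BIOS is set up for S3, power use will be higher */
                  return false;
          }

          if (!amd_pmc_built_in) {
                  /* warn once: kernel lacks CONFIG_AMD_PMC support */
                  return false;
          }

          return true;
  }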
Reported-by: Bjoren Dasse Link: https://gitlab.freedesktop.org/drm/amd/-/issues/1824 Reviewed-by: Alex Deucher Signed-off-by: Mario Limonciello Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 8 ++++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 24 +++++++++++++++++++----- 2 files changed, 25 insertions(+), 7 deletions(-) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index d8b854fcbffa..5c466d8972f5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -1408,12 +1408,10 @@ int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_sta int amdgpu_acpi_pcie_notify_device_ready(struct amdgpu_device *adev); void amdgpu_acpi_get_backlight_caps(struct amdgpu_dm_backlight_caps *caps); -bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); void amdgpu_acpi_detect(void); #else static inline int amdgpu_acpi_init(struct amdgpu_device *adev) { return 0; } static inline void amdgpu_acpi_fini(struct amdgpu_device *adev) { } -static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } static inline void amdgpu_acpi_detect(void) { } static inline bool amdgpu_acpi_is_power_shift_control_supported(void) { return false; } static inline int amdgpu_acpi_power_shift_control(struct amdgpu_device *adev, @@ -1422,6 +1420,12 @@ static inline int amdgpu_acpi_smart_shift_update(struct drm_device *dev, enum amdgpu_ss ss_state) { return 0; } #endif +#if defined(CONFIG_ACPI) && defined(CONFIG_SUSPEND) +bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev); +#else +static inline bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { return false; } +#endif + int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser, uint64_t addr, struct amdgpu_bo **bo, struct amdgpu_bo_va_mapping **mapping); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c index 4811b0faafd9..b19d40751802 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c @@ -1031,6 +1031,7 @@ void amdgpu_acpi_detect(void) } } +#if IS_ENABLED(CONFIG_SUSPEND) /** * amdgpu_acpi_is_s0ix_active * @@ -1040,11 +1041,24 @@ void amdgpu_acpi_detect(void) */ bool amdgpu_acpi_is_s0ix_active(struct amdgpu_device *adev) { -#if IS_ENABLED(CONFIG_AMD_PMC) && IS_ENABLED(CONFIG_SUSPEND) - if (acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0) { - if (adev->flags & AMD_IS_APU) - return pm_suspend_target_state == PM_SUSPEND_TO_IDLE; + if (!(adev->flags & AMD_IS_APU) || + (pm_suspend_target_state != PM_SUSPEND_TO_IDLE)) + return false; + + if (!(acpi_gbl_FADT.flags & ACPI_FADT_LOW_POWER_S0)) { + dev_warn_once(adev->dev, + "Power consumption will be higher as BIOS has not been configured for suspend-to-idle.\n" + "To use suspend-to-idle change the sleep mode in BIOS setup.\n"); + return false; } -#endif + +#if !IS_ENABLED(CONFIG_AMD_PMC) + dev_warn_once(adev->dev, + "Power consumption will be higher as the kernel has not been compiled with CONFIG_AMD_PMC.\n"); return false; +#else + return true; +#endif /* CONFIG_AMD_PMC */ } + +#endif /* CONFIG_SUSPEND */ -- cgit From 4223f86512877b04c932e7203648b37eec931731 Mon Sep 17 00:00:00 2001 From: Arınç ÜNAL Date: Sat, 29 Jan 2022 09:27:04 +0300 Subject: net: dsa: mt7530: make NET_DSA_MT7530 select MEDIATEK_GE_PHY MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make MediaTek MT753x DSA driver enable MediaTek Gigabit PHYs driver to properly 
control MT7530 and MT7531 switch PHYs. A noticeable change is that the behaviour of switchport interfaces going up-down-up-down is no longer there. Fixes: b8f126a8d543 ("net-next: dsa: add dsa support for Mediatek MT7530 switch") Signed-off-by: Arınç ÜNAL Reviewed-by: Andrew Lunn Reviewed-by: Florian Fainelli Link: https://lore.kernel.org/r/20220129062703.595-1-arinc.unal@arinc9.com Signed-off-by: Jakub Kicinski --- drivers/net/dsa/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/net/dsa/Kconfig b/drivers/net/dsa/Kconfig index 7b1457a6e327..c0c91440340a 100644 --- a/drivers/net/dsa/Kconfig +++ b/drivers/net/dsa/Kconfig @@ -36,6 +36,7 @@ config NET_DSA_LANTIQ_GSWIP config NET_DSA_MT7530 tristate "MediaTek MT753x and MT7621 Ethernet switch support" select NET_DSA_TAG_MTK + select MEDIATEK_GE_PHY help This enables support for the MediaTek MT7530, MT7531, and MT7621 Ethernet switch chips. -- cgit From 7af037c39b600bac2c716dd1228e8ddbe149573f Mon Sep 17 00:00:00 2001 From: Camel Guo Date: Mon, 31 Jan 2022 09:38:40 +0100 Subject: net: stmmac: dump gmac4 DMA registers correctly Unlike gmac100, gmac1000, gmac4 has 27 DMA registers and they are located at DMA_CHAN_BASE_ADDR (0x1100). In order for ethtool to dump gmac4 DMA registers correctly, this commit checks if a net_device has gmac4 and uses different logic to dump its DMA registers. This fixes the following KASAN warning, which can normally be triggered by a command similar like "ethtool -d eth0": BUG: KASAN: vmalloc-out-of-bounds in dwmac4_dump_dma_regs+0x6d4/0xb30 Write of size 4 at addr ffffffc010177100 by task ethtool/1839 kasan_report+0x200/0x21c __asan_report_store4_noabort+0x34/0x60 dwmac4_dump_dma_regs+0x6d4/0xb30 stmmac_ethtool_gregs+0x110/0x204 ethtool_get_regs+0x200/0x4b0 dev_ethtool+0x1dac/0x3800 dev_ioctl+0x7c0/0xb50 sock_ioctl+0x298/0x6c4 ... Fixes: fbf68229ffe7 ("net: stmmac: unify registers dumps methods") Signed-off-by: Camel Guo Link: https://lore.kernel.org/r/20220131083841.3346801-1-camel.guo@axis.com Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h | 1 + drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c | 19 +++++++++++++++++-- 2 files changed, 18 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h index 1914ad698cab..acd70b9a3173 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac_dma.h @@ -150,6 +150,7 @@ #define NUM_DWMAC100_DMA_REGS 9 #define NUM_DWMAC1000_DMA_REGS 23 +#define NUM_DWMAC4_DMA_REGS 27 void dwmac_enable_dma_transmission(void __iomem *ioaddr); void dwmac_enable_dma_irq(void __iomem *ioaddr, u32 chan, bool rx, bool tx); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c index 164dff5ec32e..abfb3cd5958d 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c @@ -21,10 +21,18 @@ #include "dwxgmac2.h" #define REG_SPACE_SIZE 0x1060 +#define GMAC4_REG_SPACE_SIZE 0x116C #define MAC100_ETHTOOL_NAME "st_mac100" #define GMAC_ETHTOOL_NAME "st_gmac" #define XGMAC_ETHTOOL_NAME "st_xgmac" +/* Same as DMA_CHAN_BASE_ADDR defined in dwmac4_dma.h + * + * It is here because dwmac_dma.h and dwmac4_dam.h can not be included at the + * same time due to the conflicting macro names. 
+ */ +#define GMAC4_DMA_CHAN_BASE_ADDR 0x00001100 + #define ETHTOOL_DMA_OFFSET 55 struct stmmac_stats { @@ -434,6 +442,8 @@ static int stmmac_ethtool_get_regs_len(struct net_device *dev) if (priv->plat->has_xgmac) return XGMAC_REGSIZE * 4; + else if (priv->plat->has_gmac4) + return GMAC4_REG_SPACE_SIZE; return REG_SPACE_SIZE; } @@ -446,8 +456,13 @@ static void stmmac_ethtool_gregs(struct net_device *dev, stmmac_dump_mac_regs(priv, priv->hw, reg_space); stmmac_dump_dma_regs(priv, priv->ioaddr, reg_space); - if (!priv->plat->has_xgmac) { - /* Copy DMA registers to where ethtool expects them */ + /* Copy DMA registers to where ethtool expects them */ + if (priv->plat->has_gmac4) { + /* GMAC4 dumps its DMA registers at its DMA_CHAN_BASE_ADDR */ + memcpy(®_space[ETHTOOL_DMA_OFFSET], + ®_space[GMAC4_DMA_CHAN_BASE_ADDR / 4], + NUM_DWMAC4_DMA_REGS * 4); + } else if (!priv->plat->has_xgmac) { memcpy(®_space[ETHTOOL_DMA_OFFSET], ®_space[DMA_BUS_MODE / 4], NUM_DWMAC1000_DMA_REGS * 4); -- cgit From 9cef24c8b76c1f6effe499d2f131807c90f7ce9a Mon Sep 17 00:00:00 2001 From: Lior Nahmanson Date: Sun, 30 Jan 2022 13:29:01 +0200 Subject: net: macsec: Fix offload support for NETDEV_UNREGISTER event Current macsec netdev notify handler handles NETDEV_UNREGISTER event by releasing relevant SW resources only, this causes resources leak in case of macsec HW offload, as the underlay driver was not notified to clean it's macsec offload resources. Fix by calling the underlay driver to clean it's relevant resources by moving offload handling from macsec_dellink() to macsec_common_dellink() when handling NETDEV_UNREGISTER event. Fixes: 3cf3227a21d1 ("net: macsec: hardware offloading infrastructure") Signed-off-by: Lior Nahmanson Reviewed-by: Raed Salem Signed-off-by: Raed Salem Reviewed-by: Antoine Tenart Link: https://lore.kernel.org/r/1643542141-28956-1-git-send-email-raeds@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/macsec.c | 24 ++++++++++++------------ 1 file changed, 12 insertions(+), 12 deletions(-) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 16aa3a478e9e..33ff33c05aab 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -3870,6 +3870,18 @@ static void macsec_common_dellink(struct net_device *dev, struct list_head *head struct macsec_dev *macsec = macsec_priv(dev); struct net_device *real_dev = macsec->real_dev; + /* If h/w offloading is available, propagate to the device */ + if (macsec_is_offloaded(macsec)) { + const struct macsec_ops *ops; + struct macsec_context ctx; + + ops = macsec_get_ops(netdev_priv(dev), &ctx); + if (ops) { + ctx.secy = &macsec->secy; + macsec_offload(ops->mdo_del_secy, &ctx); + } + } + unregister_netdevice_queue(dev, head); list_del_rcu(&macsec->secys); macsec_del_dev(macsec); @@ -3884,18 +3896,6 @@ static void macsec_dellink(struct net_device *dev, struct list_head *head) struct net_device *real_dev = macsec->real_dev; struct macsec_rxh_data *rxd = macsec_data_rtnl(real_dev); - /* If h/w offloading is available, propagate to the device */ - if (macsec_is_offloaded(macsec)) { - const struct macsec_ops *ops; - struct macsec_context ctx; - - ops = macsec_get_ops(netdev_priv(dev), &ctx); - if (ops) { - ctx.secy = &macsec->secy; - macsec_offload(ops->mdo_del_secy, &ctx); - } - } - macsec_common_dellink(dev, head); if (list_empty(&rxd->secys)) { -- cgit From ff4865b3c8cd746ef72f59bdd485848b4cebd43d Mon Sep 17 00:00:00 2001 From: "Rafael J. 
Wysocki" Date: Wed, 26 Jan 2022 20:48:49 +0100 Subject: ALSA: Replace acpi_bus_get_device() Replace acpi_bus_get_device() that is going to be dropped with acpi_fetch_acpi_dev(). No intentional functional impact. Signed-off-by: Rafael J. Wysocki Link: https://lore.kernel.org/r/2828205.e9J7NaK4W3@kreacher Signed-off-by: Takashi Iwai --- sound/hda/intel-sdw-acpi.c | 7 +++---- sound/soc/soc-acpi.c | 7 ++----- 2 files changed, 5 insertions(+), 9 deletions(-) diff --git a/sound/hda/intel-sdw-acpi.c b/sound/hda/intel-sdw-acpi.c index b7758dbe2371..5cb92f7ccbca 100644 --- a/sound/hda/intel-sdw-acpi.c +++ b/sound/hda/intel-sdw-acpi.c @@ -50,11 +50,11 @@ static bool is_link_enabled(struct fwnode_handle *fw_node, int i) static int sdw_intel_scan_controller(struct sdw_intel_acpi_info *info) { - struct acpi_device *adev; + struct acpi_device *adev = acpi_fetch_acpi_dev(info->handle); int ret, i; u8 count; - if (acpi_bus_get_device(info->handle, &adev)) + if (!adev) return -EINVAL; /* Found controller, find links supported */ @@ -119,7 +119,6 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level, void *cdata, void **return_value) { struct sdw_intel_acpi_info *info = cdata; - struct acpi_device *adev; acpi_status status; u64 adr; @@ -127,7 +126,7 @@ static acpi_status sdw_intel_acpi_cb(acpi_handle handle, u32 level, if (ACPI_FAILURE(status)) return AE_OK; /* keep going */ - if (acpi_bus_get_device(handle, &adev)) { + if (!acpi_fetch_acpi_dev(handle)) { pr_err("%s: Couldn't find ACPI handle\n", __func__); return AE_NOT_FOUND; } diff --git a/sound/soc/soc-acpi.c b/sound/soc/soc-acpi.c index cbd7ea48837b..142476f1396f 100644 --- a/sound/soc/soc-acpi.c +++ b/sound/soc/soc-acpi.c @@ -55,16 +55,13 @@ EXPORT_SYMBOL_GPL(snd_soc_acpi_find_machine); static acpi_status snd_soc_acpi_find_package(acpi_handle handle, u32 level, void *context, void **ret) { - struct acpi_device *adev; + struct acpi_device *adev = acpi_fetch_acpi_dev(handle); acpi_status status; struct snd_soc_acpi_package_context *pkg_ctx = context; pkg_ctx->data_valid = false; - if (acpi_bus_get_device(handle, &adev)) - return AE_OK; - - if (adev->status.present && adev->status.functional) { + if (adev && adev->status.present && adev->status.functional) { struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *myobj = NULL; -- cgit From 4ee02e20893d2f9e951c7888f2284fa608ddaa35 Mon Sep 17 00:00:00 2001 From: Jonas Hahnfeld Date: Mon, 31 Jan 2022 19:35:16 +0100 Subject: ALSA: usb-audio: Correct quirk for VF0770 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This device provides both audio and video. The original quirk added in commit 48827e1d6af5 ("ALSA: usb-audio: Add quirk for VF0770") used USB_DEVICE to match the vendor and product ID. Depending on module order, if snd-usb-audio was asked first, it would match the entire device and uvcvideo wouldn't get to see it. Change the matching to USB_AUDIO_DEVICE to restore uvcvideo matching in all cases. 
Fixes: 48827e1d6af5 ("ALSA: usb-audio: Add quirk for VF0770") Reported-by: Jukka Heikintalo Tested-by: Jukka Heikintalo Reported-by: Paweł Susicki Tested-by: Paweł Susicki Cc: # 5.4, 5.10, 5.14, 5.15 Signed-off-by: Jonas Hahnfeld Link: https://lore.kernel.org/r/20220131183516.61191-1-hahnjo@hahnjo.de Signed-off-by: Takashi Iwai --- sound/usb/quirks-table.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index b1522e43173e..0ea39565e623 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -84,7 +84,7 @@ * combination. */ { - USB_DEVICE(0x041e, 0x4095), + USB_AUDIO_DEVICE(0x041e, 0x4095), .driver_info = (unsigned long) &(const struct snd_usb_audio_quirk) { .ifnum = QUIRK_ANY_INTERFACE, .type = QUIRK_COMPOSITE, -- cgit From 50317b636e7184d15126e2dfc83db0963a38d31e Mon Sep 17 00:00:00 2001 From: Thomas Bogendoerfer Date: Mon, 31 Jan 2022 11:07:02 +0100 Subject: MIPS: octeon: Fix missed PTR->PTR_WD conversion Fixes: fa62f39dc7e2 ("MIPS: Fix build error due to PTR used in more places") Signed-off-by: Thomas Bogendoerfer --- arch/mips/cavium-octeon/octeon-memcpy.S | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/mips/cavium-octeon/octeon-memcpy.S b/arch/mips/cavium-octeon/octeon-memcpy.S index 0a515cde1c18..25860fba6218 100644 --- a/arch/mips/cavium-octeon/octeon-memcpy.S +++ b/arch/mips/cavium-octeon/octeon-memcpy.S @@ -74,7 +74,7 @@ #define EXC(inst_reg,addr,handler) \ 9: inst_reg, addr; \ .section __ex_table,"a"; \ - PTR 9b, handler; \ + PTR_WD 9b, handler; \ .previous /* -- cgit From 2161ba070999a709f975910b6b9ad6b51cd6f120 Mon Sep 17 00:00:00 2001 From: Randy Dunlap Date: Sat, 29 Jan 2022 12:58:19 -0800 Subject: MIPS: KVM: fix vz.c kernel-doc notation Fix all kernel-doc warnings in mips/kvm/vz.c as reported by the kernel test robot: arch/mips/kvm/vz.c:471: warning: Function parameter or member 'out_compare' not described in '_kvm_vz_save_htimer' arch/mips/kvm/vz.c:471: warning: Function parameter or member 'out_cause' not described in '_kvm_vz_save_htimer' arch/mips/kvm/vz.c:471: warning: Excess function parameter 'compare' description in '_kvm_vz_save_htimer' arch/mips/kvm/vz.c:471: warning: Excess function parameter 'cause' description in '_kvm_vz_save_htimer' arch/mips/kvm/vz.c:1551: warning: No description found for return value of 'kvm_trap_vz_handle_cop_unusable' arch/mips/kvm/vz.c:1552: warning: expecting prototype for kvm_trap_vz_handle_cop_unusuable(). Prototype was for kvm_trap_vz_handle_cop_unusable() instead arch/mips/kvm/vz.c:1597: warning: No description found for return value of 'kvm_trap_vz_handle_msa_disabled' Fixes: c992a4f6a9b0 ("KVM: MIPS: Implement VZ support") Fixes: f4474d50c7d4 ("KVM: MIPS/VZ: Support hardware guest timer") Signed-off-by: Randy Dunlap Reported-by: kernel test robot Cc: Thomas Bogendoerfer Cc: linux-mips@vger.kernel.org Cc: Huacai Chen Cc: Aleksandar Markovic Cc: James Hogan Cc: kvm@vger.kernel.org Signed-off-by: Thomas Bogendoerfer --- arch/mips/kvm/vz.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c index 4adca5abbc72..c706f5890a05 100644 --- a/arch/mips/kvm/vz.c +++ b/arch/mips/kvm/vz.c @@ -458,8 +458,8 @@ void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) /** * _kvm_vz_save_htimer() - Switch to software emulation of guest timer. * @vcpu: Virtual CPU. - * @compare: Pointer to write compare value to. - * @cause: Pointer to write cause value to. 
+ * @out_compare: Pointer to write compare value to. + * @out_cause: Pointer to write cause value to. * * Save VZ guest timer state and switch to software emulation of guest CP0 * timer. The hard timer must already be in use, so preemption should be @@ -1541,11 +1541,14 @@ static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu) } /** - * kvm_trap_vz_handle_cop_unusuable() - Guest used unusable coprocessor. + * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor. * @vcpu: Virtual CPU context. * * Handle when the guest attempts to use a coprocessor which hasn't been allowed * by the root context. + * + * Return: value indicating whether to resume the host or the guest + * (RESUME_HOST or RESUME_GUEST) */ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu) { @@ -1592,6 +1595,9 @@ static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu) * * Handle when the guest attempts to use MSA when it is disabled in the root * context. + * + * Return: value indicating whether to resume the host or the guest + * (RESUME_HOST or RESUME_GUEST) */ static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu) { -- cgit From 92c4cfaee6872038563c5b6f2e8e613f9d84d47d Mon Sep 17 00:00:00 2001 From: Jordy Zomer Date: Sat, 29 Jan 2022 16:06:04 +0100 Subject: dma-buf: heaps: Fix potential spectre v1 gadget It appears like nr could be a Spectre v1 gadget as it's supplied by a user and used as an array index. Prevent the contents of kernel memory from being leaked to userspace via speculative execution by using array_index_nospec. Signed-off-by: Jordy Zomer Fixes: c02a81fba74f ("dma-buf: Add dma-buf heaps framework") Cc: # v5.6+ Acked-by: John Stultz Signed-off-by: Sumit Semwal [sumits: added fixes and cc: stable tags] Link: https://patchwork.freedesktop.org/patch/msgid/20220129150604.3461652-1-jordy@pwning.systems --- drivers/dma-buf/dma-heap.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/drivers/dma-buf/dma-heap.c b/drivers/dma-buf/dma-heap.c index 56bf5ad01ad5..8f5848aa144f 100644 --- a/drivers/dma-buf/dma-heap.c +++ b/drivers/dma-buf/dma-heap.c @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -135,6 +136,7 @@ static long dma_heap_ioctl(struct file *file, unsigned int ucmd, if (nr >= ARRAY_SIZE(dma_heap_ioctl_cmds)) return -EINVAL; + nr = array_index_nospec(nr, ARRAY_SIZE(dma_heap_ioctl_cmds)); /* Get the kernel ioctl cmd that matches */ kcmd = dma_heap_ioctl_cmds[nr]; -- cgit From 7d73c602154df56802a9e75ac212505fc1e9a2b6 Mon Sep 17 00:00:00 2001 From: Umesh Nerlige Ramappa Date: Mon, 24 Jan 2022 18:01:24 -0800 Subject: drm/i915/pmu: Fix KMD and GuC race on accessing busyness GuC updates shared memory and KMD reads it. Since this is not synchronized, we run into a race where the value read is inconsistent. Sometimes the inconsistency is in reading the upper MSB bytes of the last_switch_in value. 2 types of cases are seen - upper 8 bits are zero and upper 24 bits are zero. Since these are non-zero values, it is not trivial to determine validity of these values. Instead we read the values multiple times until they are consistent. In test runs, 3 attempts results in consistent values. The upper bound is set to 6 attempts and may need to be tuned as per any new occurences. Since the duration that gt is parked can vary, the patch also updates the gt timestamp on unpark before starting the worker. 
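The core of the change below is a bounded "read until two consecutive snapshots agree" loop over the record that GuC updates behind the driver's back. A stripped-down sketch of that loop, using a stand-in structure instead of the real guc_engine_usage_record, is shown here; the authoritative version is in the diff further down.

  struct usage_record {                   /* stand-in for the GuC-shared record */
          unsigned int last_switch_in;
          unsigned int context_id;
          unsigned int total_runtime;
  };

  static void read_consistent(const volatile struct usage_record *rec,
                              unsigned int *in, unsigned int *id,
                              unsigned int *total)
  {
          int i = 0;

          do {
                  *in    = rec->last_switch_in;
                  *id    = rec->context_id;
                  *total = rec->total_runtime;

                  /* Re-read each field: if nothing moved in between, treat
                   * the snapshot as consistent and stop retrying. */
                  if (rec->last_switch_in == *in &&
                      rec->context_id == *id &&
                      rec->total_runtime == *total)
                          break;
          } while (++i < 6);              /* bound chosen empirically upstream */
  }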
v2: - Initialize i - Use READ_ONCE to access engine record Fixes: 77cdd054dd2c ("drm/i915/pmu: Connect engine busyness stats from GuC to pmu") Signed-off-by: Umesh Nerlige Ramappa Reviewed-by: Alan Previn Signed-off-by: John Harrison Link: https://patchwork.freedesktop.org/patch/msgid/20220125020124.788679-2-umesh.nerlige.ramappa@intel.com (cherry picked from commit 512712a824de9b856a4e61343e3e4390eba2c391) Signed-off-by: Tvrtko Ursulin --- drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c | 58 +++++++++++++++++++++-- 1 file changed, 54 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c index fcec2cb833af..154ad726e266 100644 --- a/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c +++ b/drivers/gpu/drm/i915/gt/uc/intel_guc_submission.c @@ -1113,6 +1113,19 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) if (new_start == lower_32_bits(*prev_start)) return; + /* + * When gt is unparked, we update the gt timestamp and start the ping + * worker that updates the gt_stamp every POLL_TIME_CLKS. As long as gt + * is unparked, all switched in contexts will have a start time that is + * within +/- POLL_TIME_CLKS of the most recent gt_stamp. + * + * If neither gt_stamp nor new_start has rolled over, then the + * gt_stamp_hi does not need to be adjusted, however if one of them has + * rolled over, we need to adjust gt_stamp_hi accordingly. + * + * The below conditions address the cases of new_start rollover and + * gt_stamp_last rollover respectively. + */ if (new_start < gt_stamp_last && (new_start - gt_stamp_last) <= POLL_TIME_CLKS) gt_stamp_hi++; @@ -1124,17 +1137,45 @@ __extend_last_switch(struct intel_guc *guc, u64 *prev_start, u32 new_start) *prev_start = ((u64)gt_stamp_hi << 32) | new_start; } -static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) +/* + * GuC updates shared memory and KMD reads it. Since this is not synchronized, + * we run into a race where the value read is inconsistent. Sometimes the + * inconsistency is in reading the upper MSB bytes of the last_in value when + * this race occurs. 2 types of cases are seen - upper 8 bits are zero and upper + * 24 bits are zero. Since these are non-zero values, it is non-trivial to + * determine validity of these values. Instead we read the values multiple times + * until they are consistent. In test runs, 3 attempts results in consistent + * values. The upper bound is set to 6 attempts and may need to be tuned as per + * any new occurences. 
+ */ +static void __get_engine_usage_record(struct intel_engine_cs *engine, + u32 *last_in, u32 *id, u32 *total) { struct guc_engine_usage_record *rec = intel_guc_engine_usage(engine); + int i = 0; + + do { + *last_in = READ_ONCE(rec->last_switch_in_stamp); + *id = READ_ONCE(rec->current_context_index); + *total = READ_ONCE(rec->total_runtime); + + if (READ_ONCE(rec->last_switch_in_stamp) == *last_in && + READ_ONCE(rec->current_context_index) == *id && + READ_ONCE(rec->total_runtime) == *total) + break; + } while (++i < 6); +} + +static void guc_update_engine_gt_clks(struct intel_engine_cs *engine) +{ struct intel_engine_guc_stats *stats = &engine->stats.guc; struct intel_guc *guc = &engine->gt->uc.guc; - u32 last_switch = rec->last_switch_in_stamp; - u32 ctx_id = rec->current_context_index; - u32 total = rec->total_runtime; + u32 last_switch, ctx_id, total; lockdep_assert_held(&guc->timestamp.lock); + __get_engine_usage_record(engine, &last_switch, &ctx_id, &total); + stats->running = ctx_id != ~0U && last_switch; if (stats->running) __extend_last_switch(guc, &stats->start_gt_clk, last_switch); @@ -1236,6 +1277,10 @@ static ktime_t guc_engine_busyness(struct intel_engine_cs *engine, ktime_t *now) if (!in_reset && intel_gt_pm_get_if_awake(gt)) { stats_saved = *stats; gt_stamp_saved = guc->timestamp.gt_stamp; + /* + * Update gt_clks, then gt timestamp to simplify the 'gt_stamp - + * start_gt_clk' calculation below for active engines. + */ guc_update_engine_gt_clks(engine); guc_update_pm_timestamp(guc, now); intel_gt_pm_put_async(gt); @@ -1364,10 +1409,15 @@ void intel_guc_busyness_park(struct intel_gt *gt) void intel_guc_busyness_unpark(struct intel_gt *gt) { struct intel_guc *guc = >->uc.guc; + unsigned long flags; + ktime_t unused; if (!guc_submission_initialized(guc)) return; + spin_lock_irqsave(&guc->timestamp.lock, flags); + guc_update_pm_timestamp(guc, &unused); + spin_unlock_irqrestore(&guc->timestamp.lock, flags); mod_delayed_work(system_highpri_wq, &guc->timestamp.work, guc->timestamp.ping_delay); } -- cgit From 751a9d69b19702af35b0fedfb8ff362027c1cf0c Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 26 Jan 2022 10:15:38 +0200 Subject: drm/i915: Fix oops due to missing stack depot MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We call __save_depot_stack() unconditionally so the stack depot must always be initialized or else we'll oops on platforms without runtime pm support. Presumably we've not seen this in CI due to stack_depot_init() already getting called via drm_mm_init()+CONFIG_DRM_DEBUG_MM. 
Cc: Vlastimil Babka Cc: Dmitry Vyukov Cc: Marco Elver # stackdepot Cc: Chris Wilson Cc: Imre Deak Fixes: 2dba5eb1c73b ("lib/stackdepot: allow optional init and stack_table allocation by kvmalloc()") Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220126081539.23227-1-ville.syrjala@linux.intel.com Acked-by: Vlastimil Babka Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/intel_runtime_pm.c | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 53f1ccb78849..64c2708efc9e 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -68,9 +68,7 @@ static noinline depot_stack_handle_t __save_depot_stack(void) static void init_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) { spin_lock_init(&rpm->debug.lock); - - if (rpm->available) - stack_depot_init(); + stack_depot_init(); } static noinline depot_stack_handle_t -- cgit From c50df701d49e78bea6410b4b111c7be71e2a7c2b Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Wed, 26 Jan 2022 10:15:39 +0200 Subject: drm/i915: Enable rpm wakeref tracking whether runtime pm is enabled or not MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Don't see why we should skip the wakeref tracking when the platform doesn't support runtime pm. We still want all the code to be 100% leak free so let's track this unconditionally. Cc: Vlastimil Babka Cc: Dmitry Vyukov Cc: Marco Elver # stackdepot Cc: Chris Wilson Cc: Imre Deak Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220126081539.23227-2-ville.syrjala@linux.intel.com Reviewed-by: Imre Deak --- drivers/gpu/drm/i915/intel_runtime_pm.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c index 64c2708efc9e..3293ac71bcf8 100644 --- a/drivers/gpu/drm/i915/intel_runtime_pm.c +++ b/drivers/gpu/drm/i915/intel_runtime_pm.c @@ -77,9 +77,6 @@ track_intel_runtime_pm_wakeref(struct intel_runtime_pm *rpm) depot_stack_handle_t stack, *stacks; unsigned long flags; - if (!rpm->available) - return -1; - stack = __save_depot_stack(); if (!stack) return -1; -- cgit From 8023d3bef18bafe54708faca0c4206e1a36ca155 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:41 +0200 Subject: drm/i915: Nuke intel_dp_set_m_n() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I want to make a clean split betwen the CPU vs. PCH transcoder programming. To that end eliminate intel_dp_set_m_n() and just call the individual CPU/PCH transcoder functions directly. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-2-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_ddi.c | 4 +- drivers/gpu/drm/i915/display/intel_display.c | 56 ++++++++-------------- drivers/gpu/drm/i915/display/intel_display.h | 6 +-- drivers/gpu/drm/i915/display/intel_display_types.h | 19 -------- drivers/gpu/drm/i915/display/intel_dp_mst.c | 4 +- drivers/gpu/drm/i915/display/intel_drrs.c | 5 +- 6 files changed, 32 insertions(+), 62 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index ca8becb07e45..41342a1333cf 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2510,7 +2510,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { intel_ddi_set_dp_msa(crtc_state, conn_state); - intel_dp_set_m_n(crtc_state, M1_N1); + intel_cpu_transcoder_set_m_n(crtc_state, + &crtc_state->dp_m_n, + &crtc_state->dp_m2_n2); } } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 7728795ee26d..af2b095a805b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -118,9 +118,8 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); -static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, - const struct intel_link_m_n *m_n, - const struct intel_link_m_n *m2_n2); +static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n); static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); @@ -1835,8 +1834,15 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - if (intel_crtc_has_dp_encoder(new_crtc_state)) - intel_dp_set_m_n(new_crtc_state, M1_N1); + if (intel_crtc_has_dp_encoder(new_crtc_state)) { + if (new_crtc_state->has_pch_encoder) + intel_pch_transcoder_set_m_n(new_crtc_state, + &new_crtc_state->dp_m_n); + else + intel_cpu_transcoder_set_m_n(new_crtc_state, + &new_crtc_state->dp_m_n, + &new_crtc_state->dp_m2_n2); + } intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); @@ -2450,7 +2456,9 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, return; if (intel_crtc_has_dp_encoder(new_crtc_state)) - intel_dp_set_m_n(new_crtc_state, M1_N1); + intel_cpu_transcoder_set_m_n(new_crtc_state, + &new_crtc_state->dp_m_n, + &new_crtc_state->dp_m2_n2); intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); @@ -2502,7 +2510,9 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, return; if (intel_crtc_has_dp_encoder(new_crtc_state)) - intel_dp_set_m_n(new_crtc_state, M1_N1); + intel_cpu_transcoder_set_m_n(new_crtc_state, + &new_crtc_state->dp_m_n, + &new_crtc_state->dp_m2_n2); intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); @@ -3149,9 +3159,9 @@ static bool transcoder_has_m2_n2(struct drm_i915_private 
*dev_priv, return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv); } -static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, - const struct intel_link_m_n *m_n, - const struct intel_link_m_n *m2_n2) +void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n, + const struct intel_link_m_n *m2_n2) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); @@ -3179,32 +3189,6 @@ static void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_sta } } -void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, enum link_m_n_set m_n) -{ - const struct intel_link_m_n *dp_m_n, *dp_m2_n2 = NULL; - struct drm_i915_private *i915 = to_i915(crtc_state->uapi.crtc->dev); - - if (m_n == M1_N1) { - dp_m_n = &crtc_state->dp_m_n; - dp_m2_n2 = &crtc_state->dp_m2_n2; - } else if (m_n == M2_N2) { - - /* - * M2_N2 registers are not supported. Hence m2_n2 divider value - * needs to be programmed into M1_N1. - */ - dp_m_n = &crtc_state->dp_m2_n2; - } else { - drm_err(&i915->drm, "Unsupported divider value\n"); - return; - } - - if (crtc_state->has_pch_encoder) - intel_pch_transcoder_set_m_n(crtc_state, &crtc_state->dp_m_n); - else - intel_cpu_transcoder_set_m_n(crtc_state, dp_m_n, dp_m2_n2); -} - static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index a241007f5c82..5c3bd1b4d9c9 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -27,7 +27,6 @@ #include -enum link_m_n_set; enum drm_scaling_filter; struct dpll; struct drm_connector; @@ -607,8 +606,9 @@ void intel_display_prepare_reset(struct drm_i915_private *dev_priv); void intel_display_finish_reset(struct drm_i915_private *dev_priv); void intel_dp_get_m_n(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); -void intel_dp_set_m_n(const struct intel_crtc_state *crtc_state, - enum link_m_n_set m_n); +void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n, + const struct intel_link_m_n *m2_n2); void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); void i9xx_crtc_clock_get(struct intel_crtc *crtc, diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index e83cb799427b..15b13939d572 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -1445,25 +1445,6 @@ struct intel_hdmi { }; struct intel_dp_mst_encoder; -/* - * enum link_m_n_set: - * When platform provides two set of M_N registers for dp, we can - * program them and switch between them incase of DRRS. - * But When only one such register is provided, we have to program the - * required divider value on that registers itself based on the DRRS state. 
- * - * M1_N1 : Program dp_m_n on M1_N1 registers - * dp_m2_n2 on M2_N2 registers (If supported) - * - * M2_N2 : Program dp_m2_n2 on M1_N1 registers - * M2_N2 registers are not supported - */ - -enum link_m_n_set { - /* Sets the m1_n1 and m2_n2 */ - M1_N1 = 0, - M2_N2 -}; struct intel_dp_compliance_data { unsigned long edid; diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index b8bc7d397c81..7031bd786822 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -523,7 +523,9 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_set_dp_msa(pipe_config, conn_state); - intel_dp_set_m_n(pipe_config, M1_N1); + intel_cpu_transcoder_set_m_n(pipe_config, + &pipe_config->dp_m_n, + &pipe_config->dp_m2_n2); } static void intel_mst_enable_dp(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index 0cacdb174fd0..c978badbc82f 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -115,8 +115,9 @@ static void intel_drrs_set_refresh_rate_m_n(const struct intel_crtc_state *crtc_state, enum drrs_refresh_rate_type refresh_type) { - intel_dp_set_m_n(crtc_state, - refresh_type == DRRS_LOW_RR ? M2_N2 : M1_N1); + intel_cpu_transcoder_set_m_n(crtc_state, refresh_type == DRRS_LOW_RR ? + &crtc_state->dp_m2_n2 : &crtc_state->dp_m_n, + NULL); } static void intel_drrs_set_state(struct drm_i915_private *dev_priv, -- cgit From 6149cb68a5be127909ee39f4d40b8f5ba0d047cf Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:42 +0200 Subject: drm/i915: Nuke intel_dp_get_m_n() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As with intel_dp_set_m_n() let's get rid of the wrapper and just call the relevant PCH vs. CPU transcoder functions directly. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-3-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/g4x_dp.c | 14 +++++++++++++- drivers/gpu/drm/i915/display/intel_ddi.c | 9 +++++++-- drivers/gpu/drm/i915/display/intel_display.c | 23 ++++++----------------- drivers/gpu/drm/i915/display/intel_display.h | 8 ++++++-- 4 files changed, 32 insertions(+), 22 deletions(-) diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index f37677df6ebf..771bff714772 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -333,6 +333,18 @@ static bool intel_dp_get_hw_state(struct intel_encoder *encoder, return ret; } +static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + if (crtc_state->has_pch_encoder) + intel_pch_transcoder_get_m_n(crtc, &crtc_state->dp_m_n); + else + intel_cpu_transcoder_get_m_n(crtc, crtc_state->cpu_transcoder, + &crtc_state->dp_m_n, + &crtc_state->dp_m2_n2); +} + static void intel_dp_get_config(struct intel_encoder *encoder, struct intel_crtc_state *pipe_config) { @@ -384,7 +396,7 @@ static void intel_dp_get_config(struct intel_encoder *encoder, pipe_config->lane_count = ((tmp & DP_PORT_WIDTH_MASK) >> DP_PORT_WIDTH_SHIFT) + 1; - intel_dp_get_m_n(crtc, pipe_config); + g4x_dp_get_m_n(pipe_config); if (port == PORT_A) { if ((intel_de_read(dev_priv, DP_A) & DP_PLL_FREQ_MASK) == DP_PLL_FREQ_162MHZ) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 41342a1333cf..2eb868eaab8f 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3360,7 +3360,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->output_types |= BIT(INTEL_OUTPUT_DP); pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; - intel_dp_get_m_n(crtc, pipe_config); + + intel_cpu_transcoder_get_m_n(crtc, cpu_transcoder, + &pipe_config->dp_m_n, + &pipe_config->dp_m2_n2); if (DISPLAY_VER(dev_priv) >= 11) { i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config); @@ -3397,7 +3400,9 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->mst_master_transcoder = REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp); - intel_dp_get_m_n(crtc, pipe_config); + intel_cpu_transcoder_get_m_n(crtc, cpu_transcoder, + &pipe_config->dp_m_n, + &pipe_config->dp_m2_n2); pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index af2b095a805b..2e194fac3761 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3856,8 +3856,8 @@ static void intel_get_m_n(struct drm_i915_private *i915, m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; } -static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n) +void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) { struct drm_device *dev = crtc->base.dev; struct drm_i915_private *dev_priv = to_i915(dev); @@ -3868,10 +3868,10 @@ static void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); } -static void 
intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, - enum transcoder transcoder, - struct intel_link_m_n *m_n, - struct intel_link_m_n *m2_n2) +void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, + enum transcoder transcoder, + struct intel_link_m_n *m_n, + struct intel_link_m_n *m2_n2) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -3893,17 +3893,6 @@ static void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, } } -void intel_dp_get_m_n(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - if (pipe_config->has_pch_encoder) - intel_pch_transcoder_get_m_n(crtc, &pipe_config->dp_m_n); - else - intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, - &pipe_config->dp_m_n, - &pipe_config->dp_m2_n2); -} - void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 5c3bd1b4d9c9..ac05ee47c6a7 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -604,11 +604,15 @@ bool intel_fuzzy_clock_check(int clock1, int clock2); void intel_display_prepare_reset(struct drm_i915_private *dev_priv); void intel_display_finish_reset(struct drm_i915_private *dev_priv); -void intel_dp_get_m_n(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, const struct intel_link_m_n *m_n, const struct intel_link_m_n *m2_n2); +void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, + enum transcoder cpu_transcoder, + struct intel_link_m_n *m_n, + struct intel_link_m_n *m2_n2); +void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, + struct intel_link_m_n *m_n); void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); void i9xx_crtc_clock_get(struct intel_crtc *crtc, -- cgit From cc954cfa6fe47579aa8eceaed00677feda0a95b6 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:43 +0200 Subject: drm/i915: Nuke ilk_get_fdi_m_n_config() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Get rid of the entirely pointless ilk_get_fdi_m_n_config() wrapper and just call the CPU transcoder function directly. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-4-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 7 ------- drivers/gpu/drm/i915/display/intel_display.h | 2 -- drivers/gpu/drm/i915/display/intel_pch_display.c | 6 ++++-- 3 files changed, 4 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 2e194fac3761..1f4896a1f79c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3893,13 +3893,6 @@ void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, } } -void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config) -{ - intel_cpu_transcoder_get_m_n(crtc, pipe_config->cpu_transcoder, - &pipe_config->fdi_m_n, NULL); -} - static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state, u32 pos, u32 size) { diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index ac05ee47c6a7..2747a7f2c6cd 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -613,8 +613,6 @@ void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m2_n2); void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n); -void ilk_get_fdi_m_n_config(struct intel_crtc *crtc, - struct intel_crtc_state *pipe_config); void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index 7ef2d40997b2..b464633b551b 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -386,7 +386,8 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state) crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - ilk_get_fdi_m_n_config(crtc, crtc_state); + intel_cpu_transcoder_get_m_n(crtc, crtc_state->cpu_transcoder, + &crtc_state->fdi_m_n, NULL); if (HAS_PCH_IBX(dev_priv)) { /* @@ -509,7 +510,8 @@ void lpt_pch_get_config(struct intel_crtc_state *crtc_state) crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - ilk_get_fdi_m_n_config(crtc, crtc_state); + intel_cpu_transcoder_get_m_n(crtc, crtc_state->cpu_transcoder, + &crtc_state->fdi_m_n, NULL); crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); } -- cgit From be0c94ee215043c0a5cdbffc5c45b5073054e125 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:44 +0200 Subject: drm/i915: Split intel_cpu_transcoder_set_m_n() into M1/N1 vs. M2/N2 variants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make things a bit more explicit by splitting intel_cpu_transcoder_set_m_n() into separate variants for M1/N1 vs. M2/N2. Makes the DRRS M/N programming at least more obvious. Note that for the MST and DRRS cases we don't need to call the M2/N2 variant at all since the transcoders that support those do not have the M2/N2 registers. Same could be said for i9xx_crtc_enable() but I want to do a higher level code sharing between that valleyview_crtc_enable() later in which case we do need the M2/N2 variant. 
This is also why I keep the transcoder_has_m2_n2() in intel_cpu_transcoder_set_m2_n2() so the caller doesn't have necessarily care what the chosen transcoder supports. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-5-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_ddi.c | 7 +-- drivers/gpu/drm/i915/display/intel_display.c | 75 ++++++++++++++++------------ drivers/gpu/drm/i915/display/intel_display.h | 7 +-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 5 +- drivers/gpu/drm/i915/display/intel_drrs.c | 5 +- 5 files changed, 54 insertions(+), 45 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 2eb868eaab8f..dead4b72719f 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2510,9 +2510,10 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { intel_ddi_set_dp_msa(crtc_state, conn_state); - intel_cpu_transcoder_set_m_n(crtc_state, - &crtc_state->dp_m_n, - &crtc_state->dp_m2_n2); + intel_cpu_transcoder_set_m1_n1(crtc_state, + &crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(crtc_state, + &crtc_state->dp_m2_n2); } } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 1f4896a1f79c..eced29e4532c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1835,21 +1835,23 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); if (intel_crtc_has_dp_encoder(new_crtc_state)) { - if (new_crtc_state->has_pch_encoder) + if (new_crtc_state->has_pch_encoder) { intel_pch_transcoder_set_m_n(new_crtc_state, &new_crtc_state->dp_m_n); - else - intel_cpu_transcoder_set_m_n(new_crtc_state, - &new_crtc_state->dp_m_n, - &new_crtc_state->dp_m2_n2); + } else { + intel_cpu_transcoder_set_m1_n1(new_crtc_state, + &new_crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(new_crtc_state, + &new_crtc_state->dp_m2_n2); + } } intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); if (new_crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m_n(new_crtc_state, - &new_crtc_state->fdi_m_n, NULL); + intel_cpu_transcoder_set_m1_n1(new_crtc_state, + &new_crtc_state->fdi_m_n); ilk_set_pipeconf(new_crtc_state); @@ -2015,8 +2017,8 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta crtc_state->pixel_multiplier - 1); if (crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m_n(crtc_state, - &crtc_state->fdi_m_n, NULL); + intel_cpu_transcoder_set_m1_n1(crtc_state, + &crtc_state->fdi_m_n); hsw_set_frame_start_delay(crtc_state); @@ -2455,10 +2457,12 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - if (intel_crtc_has_dp_encoder(new_crtc_state)) - intel_cpu_transcoder_set_m_n(new_crtc_state, - &new_crtc_state->dp_m_n, - &new_crtc_state->dp_m2_n2); + if (intel_crtc_has_dp_encoder(new_crtc_state)) { + intel_cpu_transcoder_set_m1_n1(new_crtc_state, + &new_crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(new_crtc_state, + &new_crtc_state->dp_m2_n2); + } intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); @@ -2509,10 +2513,12 @@ static void i9xx_crtc_enable(struct 
intel_atomic_state *state, if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - if (intel_crtc_has_dp_encoder(new_crtc_state)) - intel_cpu_transcoder_set_m_n(new_crtc_state, - &new_crtc_state->dp_m_n, - &new_crtc_state->dp_m2_n2); + if (intel_crtc_has_dp_encoder(new_crtc_state)) { + intel_cpu_transcoder_set_m1_n1(new_crtc_state, + &new_crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(new_crtc_state, + &new_crtc_state->dp_m2_n2); + } intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); @@ -3159,34 +3165,37 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv); } -void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, - const struct intel_link_m_n *m_n, - const struct intel_link_m_n *m2_n2) +void intel_cpu_transcoder_set_m1_n1(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; enum transcoder transcoder = crtc_state->cpu_transcoder; - if (DISPLAY_VER(dev_priv) >= 5) { + if (DISPLAY_VER(dev_priv) >= 5) intel_set_m_n(dev_priv, m_n, PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); - /* - * M2_N2 registers are set only if DRRS is supported - * (to make sure the registers are not unnecessarily accessed). - */ - if (m2_n2 && crtc_state->has_drrs && - transcoder_has_m2_n2(dev_priv, transcoder)) { - intel_set_m_n(dev_priv, m2_n2, - PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), - PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); - } - } else { + else intel_set_m_n(dev_priv, m_n, PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); - } +} + +void intel_cpu_transcoder_set_m2_n2(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder transcoder = crtc_state->cpu_transcoder; + + if (!transcoder_has_m2_n2(dev_priv, transcoder)) + return; + + intel_set_m_n(dev_priv, m_n, + PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), + PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); } static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 2747a7f2c6cd..036e28581019 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -604,9 +604,10 @@ bool intel_fuzzy_clock_check(int clock1, int clock2); void intel_display_prepare_reset(struct drm_i915_private *dev_priv); void intel_display_finish_reset(struct drm_i915_private *dev_priv); -void intel_cpu_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, - const struct intel_link_m_n *m_n, - const struct intel_link_m_n *m2_n2); +void intel_cpu_transcoder_set_m1_n1(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n); +void intel_cpu_transcoder_set_m2_n2(const struct intel_crtc_state *crtc_state, + const struct intel_link_m_n *m_n); void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, enum transcoder cpu_transcoder, struct intel_link_m_n *m_n, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c 
index 7031bd786822..4e8d65fa6086 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -523,9 +523,8 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_set_dp_msa(pipe_config, conn_state); - intel_cpu_transcoder_set_m_n(pipe_config, - &pipe_config->dp_m_n, - &pipe_config->dp_m2_n2); + intel_cpu_transcoder_set_m1_n1(pipe_config, + &pipe_config->dp_m_n); } static void intel_mst_enable_dp(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index c978badbc82f..a911066c7809 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -115,9 +115,8 @@ static void intel_drrs_set_refresh_rate_m_n(const struct intel_crtc_state *crtc_state, enum drrs_refresh_rate_type refresh_type) { - intel_cpu_transcoder_set_m_n(crtc_state, refresh_type == DRRS_LOW_RR ? - &crtc_state->dp_m2_n2 : &crtc_state->dp_m_n, - NULL); + intel_cpu_transcoder_set_m1_n1(crtc_state, refresh_type == DRRS_LOW_RR ? + &crtc_state->dp_m2_n2 : &crtc_state->dp_m_n); } static void intel_drrs_set_state(struct drm_i915_private *dev_priv, -- cgit From 5cd0664483c1be4a71bcf4ec643f5d3c782e0319 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:45 +0200 Subject: drm/i915: Split intel_cpu_transcoder_get_m_n() into M1/N1 vs. M2/N2 variants MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As with intel_cpu_transcoder_set_m_n() let's split the readout counterpart into explicit M1/N1 vs. M2/N2 variants as well. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-6-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/g4x_dp.c | 12 +++++---- drivers/gpu/drm/i915/display/intel_ddi.c | 12 ++++----- drivers/gpu/drm/i915/display/intel_display.c | 32 ++++++++++++++---------- drivers/gpu/drm/i915/display/intel_display.h | 10 +++++--- drivers/gpu/drm/i915/display/intel_pch_display.c | 8 +++--- 5 files changed, 42 insertions(+), 32 deletions(-) diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 771bff714772..07432f6b56ac 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -337,12 +337,14 @@ static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state) { struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - if (crtc_state->has_pch_encoder) + if (crtc_state->has_pch_encoder) { intel_pch_transcoder_get_m_n(crtc, &crtc_state->dp_m_n); - else - intel_cpu_transcoder_get_m_n(crtc, crtc_state->cpu_transcoder, - &crtc_state->dp_m_n, - &crtc_state->dp_m2_n2); + } else { + intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, + &crtc_state->dp_m_n); + intel_cpu_transcoder_get_m2_n2(crtc, crtc_state->cpu_transcoder, + &crtc_state->dp_m2_n2); + } } static void intel_dp_get_config(struct intel_encoder *encoder, diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index dead4b72719f..b02b327331f8 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -3362,9 +3362,10 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->lane_count = ((temp & DDI_PORT_WIDTH_MASK) >> DDI_PORT_WIDTH_SHIFT) + 1; - intel_cpu_transcoder_get_m_n(crtc, cpu_transcoder, - &pipe_config->dp_m_n, - 
&pipe_config->dp_m2_n2); + intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, + &pipe_config->dp_m_n); + intel_cpu_transcoder_get_m2_n2(crtc, cpu_transcoder, + &pipe_config->dp_m2_n2); if (DISPLAY_VER(dev_priv) >= 11) { i915_reg_t dp_tp_ctl = dp_tp_ctl_reg(encoder, pipe_config); @@ -3401,9 +3402,8 @@ static void intel_ddi_read_func_ctl(struct intel_encoder *encoder, pipe_config->mst_master_transcoder = REG_FIELD_GET(TRANS_DDI_MST_TRANSPORT_SELECT_MASK, temp); - intel_cpu_transcoder_get_m_n(crtc, cpu_transcoder, - &pipe_config->dp_m_n, - &pipe_config->dp_m2_n2); + intel_cpu_transcoder_get_m1_n1(crtc, cpu_transcoder, + &pipe_config->dp_m_n); pipe_config->infoframes.enable |= intel_hdmi_infoframes_enabled(encoder, pipe_config); diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index eced29e4532c..cb5b89815163 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3877,29 +3877,35 @@ void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); } -void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, - enum transcoder transcoder, - struct intel_link_m_n *m_n, - struct intel_link_m_n *m2_n2) +void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, + enum transcoder transcoder, + struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - if (DISPLAY_VER(dev_priv) >= 5) { + if (DISPLAY_VER(dev_priv) >= 5) intel_get_m_n(dev_priv, m_n, PIPE_DATA_M1(transcoder), PIPE_DATA_N1(transcoder), PIPE_LINK_M1(transcoder), PIPE_LINK_N1(transcoder)); - - if (m2_n2 && transcoder_has_m2_n2(dev_priv, transcoder)) { - intel_get_m_n(dev_priv, m2_n2, - PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), - PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); - } - } else { + else intel_get_m_n(dev_priv, m_n, PIPE_DATA_M_G4X(pipe), PIPE_DATA_N_G4X(pipe), PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); - } +} + +void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, + enum transcoder transcoder, + struct intel_link_m_n *m_n) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + + if (!transcoder_has_m2_n2(dev_priv, transcoder)) + return; + + intel_get_m_n(dev_priv, m_n, + PIPE_DATA_M2(transcoder), PIPE_DATA_N2(transcoder), + PIPE_LINK_M2(transcoder), PIPE_LINK_N2(transcoder)); } static void ilk_get_pfit_pos_size(struct intel_crtc_state *crtc_state, diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 036e28581019..9a232bdef0f2 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -608,10 +608,12 @@ void intel_cpu_transcoder_set_m1_n1(const struct intel_crtc_state *crtc_state, const struct intel_link_m_n *m_n); void intel_cpu_transcoder_set_m2_n2(const struct intel_crtc_state *crtc_state, const struct intel_link_m_n *m_n); -void intel_cpu_transcoder_get_m_n(struct intel_crtc *crtc, - enum transcoder cpu_transcoder, - struct intel_link_m_n *m_n, - struct intel_link_m_n *m2_n2); +void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, + enum transcoder cpu_transcoder, + struct intel_link_m_n *m_n); +void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, + enum transcoder cpu_transcoder, + struct intel_link_m_n *m_n); void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, struct intel_link_m_n *m_n); void i9xx_crtc_clock_get(struct intel_crtc 
*crtc, diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index b464633b551b..dd010be534a2 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -386,8 +386,8 @@ void ilk_pch_get_config(struct intel_crtc_state *crtc_state) crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - intel_cpu_transcoder_get_m_n(crtc, crtc_state->cpu_transcoder, - &crtc_state->fdi_m_n, NULL); + intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, + &crtc_state->fdi_m_n); if (HAS_PCH_IBX(dev_priv)) { /* @@ -510,8 +510,8 @@ void lpt_pch_get_config(struct intel_crtc_state *crtc_state) crtc_state->fdi_lanes = ((FDI_DP_PORT_WIDTH_MASK & tmp) >> FDI_DP_PORT_WIDTH_SHIFT) + 1; - intel_cpu_transcoder_get_m_n(crtc, crtc_state->cpu_transcoder, - &crtc_state->fdi_m_n, NULL); + intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, + &crtc_state->fdi_m_n); crtc_state->hw.adjusted_mode.crtc_clock = lpt_get_iclkip(dev_priv); } -- cgit From 0adc41de818c1a051c18732db57b9ee95b30898e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:46 +0200 Subject: drm/i915: Pass crtc+cpu_transcoder to intel_cpu_transcoder_set_m_n() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Instead of passing in the whole crtc state let's pass in just the bits of state we need. This will help with the DRRS code which shouldn't really be accessing the atomic state stuff directly as it gets called outside the normal atomic flows. v2: Fix set_m1_n1 vs. set_m2_n2 fumble for i9xx (Jani) Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-7-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_ddi.c | 6 +++-- drivers/gpu/drm/i915/display/intel_display.c | 37 ++++++++++++++-------------- drivers/gpu/drm/i915/display/intel_display.h | 6 +++-- drivers/gpu/drm/i915/display/intel_dp_mst.c | 3 ++- drivers/gpu/drm/i915/display/intel_drrs.c | 5 +++- 5 files changed, 32 insertions(+), 25 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index b02b327331f8..360f62665b54 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2498,6 +2498,8 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (DISPLAY_VER(dev_priv) >= 12) tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); @@ -2510,9 +2512,9 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { intel_ddi_set_dp_msa(crtc_state, conn_state); - intel_cpu_transcoder_set_m1_n1(crtc_state, + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &crtc_state->dp_m_n); - intel_cpu_transcoder_set_m2_n2(crtc_state, + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, &crtc_state->dp_m2_n2); } } diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index cb5b89815163..8df7838e628d 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ 
-118,7 +118,7 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); -static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, +static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, const struct intel_link_m_n *m_n); static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); @@ -1816,6 +1816,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) @@ -1836,12 +1837,11 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, if (intel_crtc_has_dp_encoder(new_crtc_state)) { if (new_crtc_state->has_pch_encoder) { - intel_pch_transcoder_set_m_n(new_crtc_state, - &new_crtc_state->dp_m_n); + intel_pch_transcoder_set_m_n(crtc, &new_crtc_state->dp_m_n); } else { - intel_cpu_transcoder_set_m1_n1(new_crtc_state, + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &new_crtc_state->dp_m_n); - intel_cpu_transcoder_set_m2_n2(new_crtc_state, + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, &new_crtc_state->dp_m2_n2); } } @@ -1850,7 +1850,7 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_set_pipe_src_size(new_crtc_state); if (new_crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m1_n1(new_crtc_state, + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &new_crtc_state->fdi_m_n); ilk_set_pipeconf(new_crtc_state); @@ -2017,7 +2017,7 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta crtc_state->pixel_multiplier - 1); if (crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m1_n1(crtc_state, + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &crtc_state->fdi_m_n); hsw_set_frame_start_delay(crtc_state); @@ -2452,15 +2452,16 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; if (intel_crtc_has_dp_encoder(new_crtc_state)) { - intel_cpu_transcoder_set_m1_n1(new_crtc_state, + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &new_crtc_state->dp_m_n); - intel_cpu_transcoder_set_m2_n2(new_crtc_state, + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, &new_crtc_state->dp_m2_n2); } @@ -2508,15 +2509,16 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; if (intel_crtc_has_dp_encoder(new_crtc_state)) { - intel_cpu_transcoder_set_m1_n1(new_crtc_state, + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, &new_crtc_state->dp_m_n); - intel_cpu_transcoder_set_m2_n2(new_crtc_state, + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, 
&new_crtc_state->dp_m2_n2); } @@ -3140,10 +3142,9 @@ static void intel_set_m_n(struct drm_i915_private *i915, intel_de_write(i915, link_n_reg, m_n->link_n); } -static void intel_pch_transcoder_set_m_n(const struct intel_crtc_state *crtc_state, +static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, const struct intel_link_m_n *m_n) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -3165,13 +3166,12 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv); } -void intel_cpu_transcoder_set_m1_n1(const struct intel_crtc_state *crtc_state, +void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, + enum transcoder transcoder, const struct intel_link_m_n *m_n) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; - enum transcoder transcoder = crtc_state->cpu_transcoder; if (DISPLAY_VER(dev_priv) >= 5) intel_set_m_n(dev_priv, m_n, @@ -3183,12 +3183,11 @@ void intel_cpu_transcoder_set_m1_n1(const struct intel_crtc_state *crtc_state, PIPE_LINK_M_G4X(pipe), PIPE_LINK_N_G4X(pipe)); } -void intel_cpu_transcoder_set_m2_n2(const struct intel_crtc_state *crtc_state, +void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, + enum transcoder transcoder, const struct intel_link_m_n *m_n) { - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder transcoder = crtc_state->cpu_transcoder; if (!transcoder_has_m2_n2(dev_priv, transcoder)) return; diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index 9a232bdef0f2..e8b41b67a366 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -604,9 +604,11 @@ bool intel_fuzzy_clock_check(int clock1, int clock2); void intel_display_prepare_reset(struct drm_i915_private *dev_priv); void intel_display_finish_reset(struct drm_i915_private *dev_priv); -void intel_cpu_transcoder_set_m1_n1(const struct intel_crtc_state *crtc_state, +void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, + enum transcoder cpu_transcoder, const struct intel_link_m_n *m_n); -void intel_cpu_transcoder_set_m2_n2(const struct intel_crtc_state *crtc_state, +void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, + enum transcoder cpu_transcoder, const struct intel_link_m_n *m_n); void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, enum transcoder cpu_transcoder, diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 4e8d65fa6086..30edb9117443 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -473,6 +473,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); + struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_connector *connector = to_intel_connector(conn_state->connector); int ret; @@ -523,7 +524,7 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, intel_ddi_set_dp_msa(pipe_config, conn_state); - 
intel_cpu_transcoder_set_m1_n1(pipe_config, + intel_cpu_transcoder_set_m1_n1(crtc, pipe_config->cpu_transcoder, &pipe_config->dp_m_n); } diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index a911066c7809..53f014b4436b 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -115,7 +115,10 @@ static void intel_drrs_set_refresh_rate_m_n(const struct intel_crtc_state *crtc_state, enum drrs_refresh_rate_type refresh_type) { - intel_cpu_transcoder_set_m1_n1(crtc_state, refresh_type == DRRS_LOW_RR ? + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + + intel_cpu_transcoder_set_m1_n1(crtc, crtc_state->cpu_transcoder, + refresh_type == DRRS_LOW_RR ? &crtc_state->dp_m2_n2 : &crtc_state->dp_m_n); } -- cgit From e57c1a3bd5e8e0c7181f65ae55581f0236a8f284 Mon Sep 17 00:00:00 2001 From: Yongzhi Liu Date: Fri, 28 Jan 2022 05:41:02 -0800 Subject: drm/v3d: fix missing unlock [why] Unlock is needed on the error handling path to prevent dead lock. v3d_submit_cl_ioctl and v3d_submit_csd_ioctl is missing unlock. [how] Fix this by changing goto target on the error handling path. So changing the goto to target an error handling path that includes drm_gem_unlock reservations. Signed-off-by: Yongzhi Liu Reviewed-by: Melissa Wen Signed-off-by: Melissa Wen Link: https://patchwork.freedesktop.org/patch/msgid/1643377262-109975-1-git-send-email-lyz_cs@pku.edu.cn --- drivers/gpu/drm/v3d/v3d_gem.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index c7ed2e1cbab6..92bc0faee84f 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -798,7 +798,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, if (!render->base.perfmon) { ret = -ENOENT; - goto fail; + goto fail_perfmon; } } @@ -847,6 +847,7 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data, fail_unreserve: mutex_unlock(&v3d->sched_lock); +fail_perfmon: drm_gem_unlock_reservations(last_job->bo, last_job->bo_count, &acquire_ctx); fail: @@ -1027,7 +1028,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, args->perfmon_id); if (!job->base.perfmon) { ret = -ENOENT; - goto fail; + goto fail_perfmon; } } @@ -1056,6 +1057,7 @@ v3d_submit_csd_ioctl(struct drm_device *dev, void *data, fail_unreserve: mutex_unlock(&v3d->sched_lock); +fail_perfmon: drm_gem_unlock_reservations(clean_job->bo, clean_job->bo_count, &acquire_ctx); fail: -- cgit From a68819cc557cf0a37b7fce50d412abdb73bd69d8 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:47 +0200 Subject: drm/i915: Move PCH transcoder M/N setup into the PCH code MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Do the PCH transcoder M/N setup next to where all the other PCH transcoder stuff is programmed. Matches the spec modeset sequence better. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-8-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/g4x_dp.c | 1 + drivers/gpu/drm/i915/display/intel_display.c | 62 ++++++------------------ drivers/gpu/drm/i915/display/intel_display.h | 12 ++++- drivers/gpu/drm/i915/display/intel_pch_display.c | 24 +++++++++ drivers/gpu/drm/i915/display/intel_pch_display.h | 4 ++ 5 files changed, 55 insertions(+), 48 deletions(-) diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 07432f6b56ac..34c7640386b8 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -18,6 +18,7 @@ #include "intel_fifo_underrun.h" #include "intel_hdmi.h" #include "intel_hotplug.h" +#include "intel_pch_display.h" #include "intel_pps.h" #include "vlv_sideband.h" diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8df7838e628d..ba774c1c6c19 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -118,8 +118,6 @@ static void intel_set_transcoder_timings(const struct intel_crtc_state *crtc_state); static void intel_set_pipe_src_size(const struct intel_crtc_state *crtc_state); -static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, - const struct intel_link_m_n *m_n); static void i9xx_set_pipeconf(const struct intel_crtc_state *crtc_state); static void ilk_set_pipeconf(const struct intel_crtc_state *crtc_state); static void hsw_set_transconf(const struct intel_crtc_state *crtc_state); @@ -1835,24 +1833,19 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - if (intel_crtc_has_dp_encoder(new_crtc_state)) { - if (new_crtc_state->has_pch_encoder) { - intel_pch_transcoder_set_m_n(crtc, &new_crtc_state->dp_m_n); - } else { - intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, - &new_crtc_state->dp_m_n); - intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, - &new_crtc_state->dp_m2_n2); - } + if (new_crtc_state->has_pch_encoder) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &new_crtc_state->fdi_m_n); + } else if (intel_crtc_has_dp_encoder(new_crtc_state)) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &new_crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, + &new_crtc_state->dp_m2_n2); } intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); - if (new_crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, - &new_crtc_state->fdi_m_n); - ilk_set_pipeconf(new_crtc_state); crtc->active = true; @@ -3131,10 +3124,10 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) } } -static void intel_set_m_n(struct drm_i915_private *i915, - const struct intel_link_m_n *m_n, - i915_reg_t data_m_reg, i915_reg_t data_n_reg, - i915_reg_t link_m_reg, i915_reg_t link_n_reg) +void intel_set_m_n(struct drm_i915_private *i915, + const struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg) { intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); intel_de_write(i915, data_n_reg, m_n->data_n); @@ -3142,17 +3135,6 @@ static void intel_set_m_n(struct drm_i915_private *i915, intel_de_write(i915, link_n_reg, m_n->link_n); } 
-static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, - const struct intel_link_m_n *m_n) -{ - struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum pipe pipe = crtc->pipe; - - intel_set_m_n(dev_priv, m_n, - PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), - PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); -} - static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, enum transcoder transcoder) { @@ -3852,10 +3834,10 @@ int ilk_get_lanes_required(int target_clock, int link_bw, int bpp) return DIV_ROUND_UP(bps, link_bw * 8); } -static void intel_get_m_n(struct drm_i915_private *i915, - struct intel_link_m_n *m_n, - i915_reg_t data_m_reg, i915_reg_t data_n_reg, - i915_reg_t link_m_reg, i915_reg_t link_n_reg) +void intel_get_m_n(struct drm_i915_private *i915, + struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg) { m_n->link_m = intel_de_read(i915, link_m_reg) & DATA_LINK_M_N_MASK; m_n->link_n = intel_de_read(i915, link_n_reg) & DATA_LINK_M_N_MASK; @@ -3864,18 +3846,6 @@ static void intel_get_m_n(struct drm_i915_private *i915, m_n->tu = REG_FIELD_GET(TU_SIZE_MASK, intel_de_read(i915, data_m_reg)) + 1; } -void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n) -{ - struct drm_device *dev = crtc->base.dev; - struct drm_i915_private *dev_priv = to_i915(dev); - enum pipe pipe = crtc->pipe; - - intel_get_m_n(dev_priv, m_n, - PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), - PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); -} - void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, enum transcoder transcoder, struct intel_link_m_n *m_n) diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index e8b41b67a366..c104e578bf5d 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -27,6 +27,8 @@ #include +#include "i915_reg_defs.h" + enum drm_scaling_filter; struct dpll; struct drm_connector; @@ -604,6 +606,14 @@ bool intel_fuzzy_clock_check(int clock1, int clock2); void intel_display_prepare_reset(struct drm_i915_private *dev_priv); void intel_display_finish_reset(struct drm_i915_private *dev_priv); +void intel_set_m_n(struct drm_i915_private *i915, + const struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg); +void intel_get_m_n(struct drm_i915_private *i915, + struct intel_link_m_n *m_n, + i915_reg_t data_m_reg, i915_reg_t data_n_reg, + i915_reg_t link_m_reg, i915_reg_t link_n_reg); void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, enum transcoder cpu_transcoder, const struct intel_link_m_n *m_n); @@ -616,8 +626,6 @@ void intel_cpu_transcoder_get_m1_n1(struct intel_crtc *crtc, void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, enum transcoder cpu_transcoder, struct intel_link_m_n *m_n); -void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n); void i9xx_crtc_clock_get(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config); int intel_dotclock_calculate(int link_freq, const struct intel_link_m_n *m_n); diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index dd010be534a2..3bd96411f306 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -88,6 +88,28 @@ static void 
assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, pipe_name(pipe)); } +static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, + const struct intel_link_m_n *m_n) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_set_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), + PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); +} + +void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_get_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M1(pipe), PCH_TRANS_DATA_N1(pipe), + PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); +} + static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, enum pipe pch_transcoder) { @@ -278,6 +300,8 @@ void ilk_pch_enable(struct intel_atomic_state *state, /* set transcoder timing, panel must allow it */ assert_pps_unlocked(dev_priv, pipe); + if (intel_crtc_has_dp_encoder(crtc_state)) + intel_pch_transcoder_set_m_n(crtc, &crtc_state->dp_m_n); ilk_pch_transcoder_set_timings(crtc_state, pipe); intel_fdi_normal_train(crtc); diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h index f915fa4241d7..9a317b361a96 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.h +++ b/drivers/gpu/drm/i915/display/intel_pch_display.h @@ -9,6 +9,7 @@ struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_link_m_n; void ilk_pch_pre_enable(struct intel_atomic_state *state, struct intel_crtc *crtc); @@ -26,4 +27,7 @@ void lpt_pch_disable(struct intel_atomic_state *state, struct intel_crtc *crtc); void lpt_pch_get_config(struct intel_crtc_state *crtc_state); +void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, + struct intel_link_m_n *m_n); + #endif -- cgit From 8de5df3b07efd1a04c549e59e0d72e2b3e2c517f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:48 +0200 Subject: drm/i915: Move M/N setup to a more logical place on ddi platforms MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Let's do the cpu transcoder M/N setup next to where we program most other cpu transcoder timings/etc. 
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-9-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_ddi.c | 10 +--------- drivers/gpu/drm/i915/display/intel_display.c | 14 ++++++++++---- drivers/gpu/drm/i915/display/intel_dp_mst.c | 4 ---- 3 files changed, 11 insertions(+), 17 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 360f62665b54..354b08d6f81d 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2498,8 +2498,6 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, const struct drm_connector_state *conn_state) { struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; if (DISPLAY_VER(dev_priv) >= 12) tgl_ddi_pre_enable_dp(state, encoder, crtc_state, conn_state); @@ -2509,14 +2507,8 @@ static void intel_ddi_pre_enable_dp(struct intel_atomic_state *state, /* MST will call a setting of MSA after an allocating of Virtual Channel * from MST encoder pre_enable callback. */ - if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) { + if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST)) intel_ddi_set_dp_msa(crtc_state, conn_state); - - intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, - &crtc_state->dp_m_n); - intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, - &crtc_state->dp_m2_n2); - } } static void intel_ddi_pre_enable_hdmi(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index ba774c1c6c19..7eeea9295199 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -2003,16 +2003,22 @@ static void hsw_configure_cpu_transcoder(const struct intel_crtc_state *crtc_sta struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + if (crtc_state->has_pch_encoder) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->fdi_m_n); + } else if (intel_crtc_has_dp_encoder(crtc_state)) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, + &crtc_state->dp_m2_n2); + } + intel_set_transcoder_timings(crtc_state); if (cpu_transcoder != TRANSCODER_EDP) intel_de_write(dev_priv, PIPE_MULT(cpu_transcoder), crtc_state->pixel_multiplier - 1); - if (crtc_state->has_pch_encoder) - intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, - &crtc_state->fdi_m_n); - hsw_set_frame_start_delay(crtc_state); hsw_set_transconf(crtc_state); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 30edb9117443..6b6eab507d30 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -473,7 +473,6 @@ static void intel_mst_pre_enable_dp(struct intel_atomic_state *state, struct intel_digital_port *dig_port = intel_mst->primary; struct intel_dp *intel_dp = &dig_port->dp; struct drm_i915_private *dev_priv = to_i915(encoder->base.dev); - struct intel_crtc *crtc = to_intel_crtc(pipe_config->uapi.crtc); struct intel_connector *connector = to_intel_connector(conn_state->connector); int ret; @@ -523,9 +522,6 @@ static void intel_mst_pre_enable_dp(struct 
intel_atomic_state *state, intel_ddi_enable_pipe_clock(encoder, pipe_config); intel_ddi_set_dp_msa(pipe_config, conn_state); - - intel_cpu_transcoder_set_m1_n1(crtc, pipe_config->cpu_transcoder, - &pipe_config->dp_m_n); } static void intel_mst_enable_dp(struct intel_atomic_state *state, -- cgit From a35eca01c372dc0f1a3ad663c6a93604c603a782 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Tue, 1 Feb 2022 11:19:09 +0200 Subject: drm/i915: Extract {i9xx,ilk}_configure_cpu_transcoder() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Follow the path laid out by hsw+ and extract helpers to configure the cpu transcoder for earlier platforms as well. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-10-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 73 +++++++++++++++------------- 1 file changed, 40 insertions(+), 33 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 7eeea9295199..76c6ccfce56b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -1808,13 +1808,32 @@ static void intel_disable_primary_plane(const struct intel_crtc_state *crtc_stat plane->disable_arm(plane, crtc_state); } +static void ilk_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if (crtc_state->has_pch_encoder) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->fdi_m_n); + } else if (intel_crtc_has_dp_encoder(crtc_state)) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, + &crtc_state->dp_m2_n2); + } + + intel_set_transcoder_timings(crtc_state); + + ilk_set_pipeconf(crtc_state); +} + static void ilk_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) @@ -1833,21 +1852,10 @@ static void ilk_crtc_enable(struct intel_atomic_state *state, intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, false); intel_set_pch_fifo_underrun_reporting(dev_priv, pipe, false); - if (new_crtc_state->has_pch_encoder) { - intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, - &new_crtc_state->fdi_m_n); - } else if (intel_crtc_has_dp_encoder(new_crtc_state)) { - intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, - &new_crtc_state->dp_m_n); - intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, - &new_crtc_state->dp_m2_n2); - } + ilk_configure_cpu_transcoder(new_crtc_state); - intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); - ilk_set_pipeconf(new_crtc_state); - crtc->active = true; intel_encoders_pre_enable(state, crtc); @@ -2445,26 +2453,36 @@ static void modeset_put_crtc_power_domains(struct intel_crtc *crtc, domains); } +static void i9xx_configure_cpu_transcoder(const struct intel_crtc_state *crtc_state) +{ + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + enum transcoder cpu_transcoder = crtc_state->cpu_transcoder; + + if 
(intel_crtc_has_dp_encoder(crtc_state)) { + intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, + &crtc_state->dp_m_n); + intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, + &crtc_state->dp_m2_n2); + } + + intel_set_transcoder_timings(crtc_state); + + i9xx_set_pipeconf(crtc_state); +} + static void valleyview_crtc_enable(struct intel_atomic_state *state, struct intel_crtc *crtc) { const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - if (intel_crtc_has_dp_encoder(new_crtc_state)) { - intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, - &new_crtc_state->dp_m_n); - intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, - &new_crtc_state->dp_m2_n2); - } + i9xx_configure_cpu_transcoder(new_crtc_state); - intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); if (IS_CHERRYVIEW(dev_priv) && pipe == PIPE_B) { @@ -2472,8 +2490,6 @@ static void valleyview_crtc_enable(struct intel_atomic_state *state, intel_de_write(dev_priv, CHV_CANVAS(pipe), 0); } - i9xx_set_pipeconf(new_crtc_state); - crtc->active = true; intel_set_cpu_fifo_underrun_reporting(dev_priv, pipe, true); @@ -2508,24 +2524,15 @@ static void i9xx_crtc_enable(struct intel_atomic_state *state, const struct intel_crtc_state *new_crtc_state = intel_atomic_get_new_crtc_state(state, crtc); struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - enum transcoder cpu_transcoder = new_crtc_state->cpu_transcoder; enum pipe pipe = crtc->pipe; if (drm_WARN_ON(&dev_priv->drm, crtc->active)) return; - if (intel_crtc_has_dp_encoder(new_crtc_state)) { - intel_cpu_transcoder_set_m1_n1(crtc, cpu_transcoder, - &new_crtc_state->dp_m_n); - intel_cpu_transcoder_set_m2_n2(crtc, cpu_transcoder, - &new_crtc_state->dp_m2_n2); - } + i9xx_configure_cpu_transcoder(new_crtc_state); - intel_set_transcoder_timings(new_crtc_state); intel_set_pipe_src_size(new_crtc_state); - i9xx_set_pipeconf(new_crtc_state); - crtc->active = true; if (DISPLAY_VER(dev_priv) != 2) -- cgit From f0d4ce59f4d48622044933054a0e0cefa91ba15e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:50 +0200 Subject: drm/i915: Disable DRRS on IVB/HSW port != A MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Currently we allow DRRS on IVB PCH ports, but we're missing a few programming steps meaning it is guaranteed to not work. And on HSW DRRS is not supported on anything but port A ever as only transcoder EDP has the M2/N2 registers (though I'm not sure if HSW ever has eDP on any other port). Starting from BDW all transcoders have the dynamically reprogrammable M/N registers so DRRS could work on any port. Stop initializing DRRS on ports where it cannot possibly work. 
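Condensed, the rule being applied here is roughly the following sketch (illustrative only -- the helper name is made up, and the actual patch below open-codes the check in intel_drrs_init()):

  static bool drrs_possible_on_port(struct drm_i915_private *i915, enum port port)
  {
  	/* vlv/chv (gmch) have M2/N2 on every pipe, so any port is fine */
  	if (HAS_GMCH(i915))
  		return true;
  	/* bdw+ can reprogram M/N live on any transcoder */
  	if (DISPLAY_VER(i915) >= 8)
  		return true;
  	/*
  	 * ilk-hsw: DRRS is only wired up for eDP on port A (hsw has
  	 * M2/N2 only on the EDP transcoder, and the PCH port path is
  	 * missing the extra programming steps mentioned above)
  	 */
  	return port == PORT_A;
  }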
Cc: stable@vger.kernel.org Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-11-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_drrs.c | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index 53f014b4436b..9f673255578e 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -413,6 +413,7 @@ intel_drrs_init(struct intel_connector *connector, struct drm_display_mode *fixed_mode) { struct drm_i915_private *dev_priv = to_i915(connector->base.dev); + struct intel_encoder *encoder = connector->encoder; struct drm_display_mode *downclock_mode = NULL; INIT_DELAYED_WORK(&dev_priv->drrs.work, intel_drrs_downclock_work); @@ -424,6 +425,13 @@ intel_drrs_init(struct intel_connector *connector, return NULL; } + if ((DISPLAY_VER(dev_priv) < 8 && !HAS_GMCH(dev_priv)) && + encoder->port != PORT_A) { + drm_dbg_kms(&dev_priv->drm, + "DRRS only supported on eDP port A\n"); + return NULL; + } + if (dev_priv->vbt.drrs_type != SEAMLESS_DRRS_SUPPORT) { drm_dbg_kms(&dev_priv->drm, "VBT doesn't support DRRS\n"); return NULL; -- cgit From c3e27f4307fed7b963d8e99c18dc51682b3431e7 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:51 +0200 Subject: drm/i915: Extract can_enable_drrs() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Pull the "can we do DRRS?" check into helper in order to reduce the clutter in intel_drrs_compute_config(). Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-12-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_drrs.c | 31 +++++++++++++++++++------------ 1 file changed, 19 insertions(+), 12 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index 9f673255578e..3515f1700838 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -47,17 +47,13 @@ * requested by userspace. */ -void -intel_drrs_compute_config(struct intel_dp *intel_dp, - struct intel_crtc_state *pipe_config, - int output_bpp, bool constant_n) +static bool can_enable_drrs(struct intel_connector *connector, + const struct intel_crtc_state *pipe_config) { - struct intel_connector *intel_connector = intel_dp->attached_connector; - struct drm_i915_private *dev_priv = dp_to_i915(intel_dp); - int pixel_clock; + const struct drm_i915_private *i915 = to_i915(connector->base.dev); if (pipe_config->vrr.enable) - return; + return false; /* * DRRS and PSR can't be enable together, so giving preference to PSR @@ -66,15 +62,26 @@ intel_drrs_compute_config(struct intel_dp *intel_dp, * after intel_psr_compute_config(). 
*/ if (pipe_config->has_psr) - return; + return false; + + return connector->panel.downclock_mode && + i915->drrs.type == SEAMLESS_DRRS_SUPPORT; +} + +void +intel_drrs_compute_config(struct intel_dp *intel_dp, + struct intel_crtc_state *pipe_config, + int output_bpp, bool constant_n) +{ + struct intel_connector *connector = intel_dp->attached_connector; + int pixel_clock; - if (!intel_connector->panel.downclock_mode || - dev_priv->drrs.type != SEAMLESS_DRRS_SUPPORT) + if (!can_enable_drrs(connector, pipe_config)) return; pipe_config->has_drrs = true; - pixel_clock = intel_connector->panel.downclock_mode->clock; + pixel_clock = connector->panel.downclock_mode->clock; if (pipe_config->splitter.enable) pixel_clock /= pipe_config->splitter.link_count; -- cgit From 6d6c932daef5c5b3cd5e3692e79507d2a3306031 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:52 +0200 Subject: drm/i915: Fix transcoder_has_m2_n2() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit M2/N2 values are present for all ilk-ivb,vlv,chv (and hsw edp). Make the code reflect that. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-13-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 76c6ccfce56b..319c73d96d96 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3154,11 +3154,7 @@ static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, if (IS_HASWELL(dev_priv)) return transcoder == TRANSCODER_EDP; - /* - * Strictly speaking some registers are available before - * gen7, but we only support DRRS on gen7+ - */ - return DISPLAY_VER(dev_priv) == 7 || IS_CHERRYVIEW(dev_priv); + return IS_DISPLAY_VER(dev_priv, 5, 7) || IS_CHERRYVIEW(dev_priv); -- cgit From 1d06c820b2b7ceb38bdf0775fac495db4ad4d10e Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:53 +0200 Subject: drm/i915: Clear DP M2/N2 when not doing DRRS MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make life simpler by always programming DP M2/N2 with a consistent value. This will let us do state readout+check unconditionally. I was first going to just set M2/N2=M1/N1 but then it occurred to me that it might interfere with fastboot on account of BIOS likely leaving the registers zeroed. So let's zero out the values instead (except TU where a zero register value actually means '1'). Still not sure that's the best approach but let's go with it for now.
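For context, the M/N pairs are nothing more than bandwidth ratios derived from the mode and the link, which is why an all-zero value (with TU=1) is a harmless "no second rate" setting. A standalone sketch of the arithmetic, using made-up numbers and skipping the reduction/rounding the driver applies before writing the registers:

  #include <stdio.h>

  /*
   * data M/N: stream bandwidth vs. total link bandwidth
   * link M/N: pixel clock vs. link symbol clock
   */
  static void dump_m_n(const char *tag, int bpp, int nlanes,
  		     int pixel_khz, int link_khz)
  {
  	printf("%s: data %d/%d, link %d/%d\n", tag,
  	       bpp * pixel_khz, nlanes * 8 * link_khz,
  	       pixel_khz, link_khz);
  }

  int main(void)
  {
  	/* hypothetical 1080p eDP panel on a 4-lane HBR (2.7 GHz) link */
  	dump_m_n("M1/N1 (60 Hz)", 24, 4, 141000, 270000);
  	dump_m_n("M2/N2 (40 Hz)", 24, 4, 94000, 270000);
  	return 0;
  }

DRRS simply needs two such pairs, one per refresh rate, which is what M1/N1 vs. M2/N2 are for.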
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-14-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 15 +++++++++++---- drivers/gpu/drm/i915/display/intel_display.h | 3 +++ drivers/gpu/drm/i915/display/intel_drrs.c | 6 +++++- 3 files changed, 19 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 319c73d96d96..3483f015154b 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3137,6 +3137,13 @@ static void intel_panel_sanitize_ssc(struct drm_i915_private *dev_priv) } } +void intel_zero_m_n(struct intel_link_m_n *m_n) +{ + /* corresponds to 0 register value */ + memset(m_n, 0, sizeof(*m_n)); + m_n->tu = 1; +} + void intel_set_m_n(struct drm_i915_private *i915, const struct intel_link_m_n *m_n, i915_reg_t data_m_reg, i915_reg_t data_n_reg, @@ -3148,8 +3155,8 @@ void intel_set_m_n(struct drm_i915_private *i915, intel_de_write(i915, link_n_reg, m_n->link_n); } -static bool transcoder_has_m2_n2(struct drm_i915_private *dev_priv, - enum transcoder transcoder) +bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, + enum transcoder transcoder) { if (IS_HASWELL(dev_priv)) return transcoder == TRANSCODER_EDP; @@ -3180,7 +3187,7 @@ void intel_cpu_transcoder_set_m2_n2(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (!transcoder_has_m2_n2(dev_priv, transcoder)) + if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) return; intel_set_m_n(dev_priv, m_n, @@ -3878,7 +3885,7 @@ void intel_cpu_transcoder_get_m2_n2(struct intel_crtc *crtc, { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); - if (!transcoder_has_m2_n2(dev_priv, transcoder)) + if (!intel_cpu_transcoder_has_m2_n2(dev_priv, transcoder)) return; intel_get_m_n(dev_priv, m_n, diff --git a/drivers/gpu/drm/i915/display/intel_display.h b/drivers/gpu/drm/i915/display/intel_display.h index c104e578bf5d..457738aeee3e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.h +++ b/drivers/gpu/drm/i915/display/intel_display.h @@ -606,6 +606,7 @@ bool intel_fuzzy_clock_check(int clock1, int clock2); void intel_display_prepare_reset(struct drm_i915_private *dev_priv); void intel_display_finish_reset(struct drm_i915_private *dev_priv); +void intel_zero_m_n(struct intel_link_m_n *m_n); void intel_set_m_n(struct drm_i915_private *i915, const struct intel_link_m_n *m_n, i915_reg_t data_m_reg, i915_reg_t data_n_reg, @@ -614,6 +615,8 @@ void intel_get_m_n(struct drm_i915_private *i915, struct intel_link_m_n *m_n, i915_reg_t data_m_reg, i915_reg_t data_n_reg, i915_reg_t link_m_reg, i915_reg_t link_n_reg); +bool intel_cpu_transcoder_has_m2_n2(struct drm_i915_private *dev_priv, + enum transcoder transcoder); void intel_cpu_transcoder_set_m1_n1(struct intel_crtc *crtc, enum transcoder cpu_transcoder, const struct intel_link_m_n *m_n); diff --git a/drivers/gpu/drm/i915/display/intel_drrs.c b/drivers/gpu/drm/i915/display/intel_drrs.c index 3515f1700838..fa715b8ea310 100644 --- a/drivers/gpu/drm/i915/display/intel_drrs.c +++ b/drivers/gpu/drm/i915/display/intel_drrs.c @@ -74,10 +74,14 @@ intel_drrs_compute_config(struct intel_dp *intel_dp, int output_bpp, bool constant_n) { struct intel_connector *connector = intel_dp->attached_connector; + struct drm_i915_private *i915 = to_i915(connector->base.dev); int pixel_clock; - if 
(!can_enable_drrs(connector, pipe_config)) + if (!can_enable_drrs(connector, pipe_config)) { + if (intel_cpu_transcoder_has_m2_n2(i915, pipe_config->cpu_transcoder)) + intel_zero_m_n(&pipe_config->dp_m2_n2); return; + } pipe_config->has_drrs = true; -- cgit From 23015f6f900b8b158f6811b85de1f96769be4dc7 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:54 +0200 Subject: drm/i915: Program pch transcoder m2/n2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Program the PCH transcoder M2/N2 values appropriately. We're still missing a few things for PCH port DRRS but at least this means we can do readout/state check for dp_m2_n2 unconditionally. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-15-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/g4x_dp.c | 3 +- drivers/gpu/drm/i915/display/intel_pch_display.c | 36 ++++++++++++++++++++---- drivers/gpu/drm/i915/display/intel_pch_display.h | 6 ++-- 3 files changed, 36 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 34c7640386b8..f67bbaaad8e0 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -339,7 +339,8 @@ static void g4x_dp_get_m_n(struct intel_crtc_state *crtc_state) struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); if (crtc_state->has_pch_encoder) { - intel_pch_transcoder_get_m_n(crtc, &crtc_state->dp_m_n); + intel_pch_transcoder_get_m1_n1(crtc, &crtc_state->dp_m_n); + intel_pch_transcoder_get_m2_n2(crtc, &crtc_state->dp_m2_n2); } else { intel_cpu_transcoder_get_m1_n1(crtc, crtc_state->cpu_transcoder, &crtc_state->dp_m_n); diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.c b/drivers/gpu/drm/i915/display/intel_pch_display.c index 3bd96411f306..9192769e3337 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.c +++ b/drivers/gpu/drm/i915/display/intel_pch_display.c @@ -88,8 +88,8 @@ static void assert_pch_transcoder_disabled(struct drm_i915_private *dev_priv, pipe_name(pipe)); } -static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, - const struct intel_link_m_n *m_n) +static void intel_pch_transcoder_set_m1_n1(struct intel_crtc *crtc, + const struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -99,8 +99,19 @@ static void intel_pch_transcoder_set_m_n(struct intel_crtc *crtc, PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); } -void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n) +static void intel_pch_transcoder_set_m2_n2(struct intel_crtc *crtc, + const struct intel_link_m_n *m_n) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + intel_set_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe), + PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe)); +} + +void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) { struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); enum pipe pipe = crtc->pipe; @@ -110,6 +121,17 @@ void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, PCH_TRANS_LINK_M1(pipe), PCH_TRANS_LINK_N1(pipe)); } +void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, + struct intel_link_m_n *m_n) +{ + struct drm_i915_private *dev_priv = to_i915(crtc->base.dev); + enum pipe pipe = crtc->pipe; + + 
intel_get_m_n(dev_priv, m_n, + PCH_TRANS_DATA_M2(pipe), PCH_TRANS_DATA_N2(pipe), + PCH_TRANS_LINK_M2(pipe), PCH_TRANS_LINK_N2(pipe)); +} + static void ilk_pch_transcoder_set_timings(const struct intel_crtc_state *crtc_state, enum pipe pch_transcoder) { @@ -300,8 +322,10 @@ void ilk_pch_enable(struct intel_atomic_state *state, /* set transcoder timing, panel must allow it */ assert_pps_unlocked(dev_priv, pipe); - if (intel_crtc_has_dp_encoder(crtc_state)) - intel_pch_transcoder_set_m_n(crtc, &crtc_state->dp_m_n); + if (intel_crtc_has_dp_encoder(crtc_state)) { + intel_pch_transcoder_set_m1_n1(crtc, &crtc_state->dp_m_n); + intel_pch_transcoder_set_m2_n2(crtc, &crtc_state->dp_m2_n2); + } ilk_pch_transcoder_set_timings(crtc_state, pipe); intel_fdi_normal_train(crtc); diff --git a/drivers/gpu/drm/i915/display/intel_pch_display.h b/drivers/gpu/drm/i915/display/intel_pch_display.h index 9a317b361a96..749473d99320 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_display.h +++ b/drivers/gpu/drm/i915/display/intel_pch_display.h @@ -27,7 +27,9 @@ void lpt_pch_disable(struct intel_atomic_state *state, struct intel_crtc *crtc); void lpt_pch_get_config(struct intel_crtc_state *crtc_state); -void intel_pch_transcoder_get_m_n(struct intel_crtc *crtc, - struct intel_link_m_n *m_n); +void intel_pch_transcoder_get_m1_n1(struct intel_crtc *crtc, + struct intel_link_m_n *m_n); +void intel_pch_transcoder_get_m2_n2(struct intel_crtc *crtc, + struct intel_link_m_n *m_n); #endif -- cgit From 00dd7f953b9b1d85e97da8065cc2887a5477008f Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:55 +0200 Subject: drm/i915: Dump dp_m2_n2 always MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No point in special casing the dp_m2_n2 dumping. Just do it always. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-16-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 3483f015154b..8db219b9d760 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -5631,11 +5631,11 @@ static void intel_dump_pipe_config(const struct intel_crtc_state *pipe_config, if (intel_crtc_has_dp_encoder(pipe_config)) { intel_dump_m_n_config(pipe_config, "dp m_n", - pipe_config->lane_count, &pipe_config->dp_m_n); - if (pipe_config->has_drrs) - intel_dump_m_n_config(pipe_config, "dp m2_n2", - pipe_config->lane_count, - &pipe_config->dp_m2_n2); + pipe_config->lane_count, + &pipe_config->dp_m_n); + intel_dump_m_n_config(pipe_config, "dp m2_n2", + pipe_config->lane_count, + &pipe_config->dp_m2_n2); } drm_dbg_kms(&dev_priv->drm, -- cgit From 2efb4adf489dd29526c412c4593d12e08076c68a Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:56 +0200 Subject: drm/i915: Always check dp_m2_n2 on pre-bdw MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit No point in special casing the check of dp_m2_n2 on pre-bdw platforms. Either the transcoder has M2/N2 in which case the values should be set to something sensible, or it doesn't in which case dp_m2_n2 is always zeroed. 
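
[Editor's note, not part of the patch] A minimal sketch of the readout-side invariant this relies on, using the helpers visible earlier in this series (intel_cpu_transcoder_has_m2_n2(), intel_zero_m_n()); the m2/n2 readout helper name below is assumed for illustration only:

/*
 * Sketch only: if readout always zeroes dp_m2_n2 on transcoders without
 * M2/N2, then checking PIPE_CONF_CHECK_M_N(dp_m2_n2) unconditionally on
 * pre-bdw compares zero against zero and is safe.
 */
static void sketch_get_dp_m2_n2(struct intel_crtc_state *crtc_state)
{
	struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc);
	struct drm_i915_private *i915 = to_i915(crtc->base.dev);

	if (!intel_cpu_transcoder_has_m2_n2(i915, crtc_state->cpu_transcoder)) {
		/* no M2/N2 registers: state stays all zeroes */
		intel_zero_m_n(&crtc_state->dp_m2_n2);
		return;
	}

	/* hypothetical helper name, modelled on intel_cpu_transcoder_get_m1_n1() */
	intel_cpu_transcoder_get_m2_n2(crtc, crtc_state->cpu_transcoder,
				       &crtc_state->dp_m2_n2);
}
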
Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-17-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 8db219b9d760..d55f87891c4c 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -6483,13 +6483,12 @@ intel_pipe_config_compare(const struct intel_crtc_state *current_config, PIPE_CONF_CHECK_I(lane_count); PIPE_CONF_CHECK_X(lane_lat_optim_mask); - if (DISPLAY_VER(dev_priv) < 8) { - PIPE_CONF_CHECK_M_N(dp_m_n); - - if (current_config->has_drrs) - PIPE_CONF_CHECK_M_N(dp_m2_n2); - } else + if (DISPLAY_VER(dev_priv) >= 9 || IS_BROADWELL(dev_priv)) { PIPE_CONF_CHECK_M_N_ALT(dp_m_n, dp_m2_n2); + } else { + PIPE_CONF_CHECK_M_N(dp_m_n); + PIPE_CONF_CHECK_M_N(dp_m2_n2); + } PIPE_CONF_CHECK_X(output_types); -- cgit From 19d36cfafad0395d1b8a9db7a85d64282c42ae94 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Fri, 28 Jan 2022 12:37:57 +0200 Subject: drm/i915: Document BDW+ DRRS M/N programming requirements MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit When reprogramming M/N live on BDW+ we must write the LINK_N register last as it's the one that arms the double buffered register update for all the M/N registers. Document this so that we don't accidentally break things. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220128103757.22461-18-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/display/intel_display.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index d55f87891c4c..01e8cea0053e 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -3152,6 +3152,10 @@ void intel_set_m_n(struct drm_i915_private *i915, intel_de_write(i915, data_m_reg, TU_SIZE(m_n->tu) | m_n->data_m); intel_de_write(i915, data_n_reg, m_n->data_n); intel_de_write(i915, link_m_reg, m_n->link_m); + /* + * On BDW+ writing LINK_N arms the double buffered update + * of all the M/N registers, so it must be written last. + */ intel_de_write(i915, link_n_reg, m_n->link_n); } -- cgit From b5c84a9edcd418cd055becad6a22439e7c5e3bf8 Mon Sep 17 00:00:00 2001 From: Allen Chen Date: Fri, 14 Jan 2022 17:14:39 +0800 Subject: drm/bridge: add it6505 driver This adds support for the iTE IT6505. This device can convert DPI signal to DP output. From: Allen Chen Tested-by: Hsin-yi Wang Signed-off-by: Hermes Wu Signed-off-by: Allen Chen Reviewed-by: AngeloGioacchino Del Regno Signed-off-by: Robert Foss Link: https://patchwork.freedesktop.org/patch/msgid/20220114091502.333083-1-allen.chen@ite.com.tw --- drivers/gpu/drm/bridge/Kconfig | 8 + drivers/gpu/drm/bridge/Makefile | 1 + drivers/gpu/drm/bridge/ite-it6505.c | 3352 +++++++++++++++++++++++++++++++++++ 3 files changed, 3361 insertions(+) create mode 100644 drivers/gpu/drm/bridge/ite-it6505.c diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index fcd93f1aec90..c86f5be4dfe0 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -75,6 +75,14 @@ config DRM_DISPLAY_CONNECTOR on ARM-based platforms. 
Saying Y here when this driver is not needed will not cause any issue. +config DRM_ITE_IT6505 + tristate "ITE IT6505 DisplayPort bridge" + depends on OF + select DRM_KMS_HELPER + select EXTCON + help + ITE IT6505 DisplayPort bridge chip driver. + config DRM_LONTIUM_LT8912B tristate "Lontium LT8912B DSI/HDMI bridge" depends on OF diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile index f2c73683cfcb..425844c30495 100644 --- a/drivers/gpu/drm/bridge/Makefile +++ b/drivers/gpu/drm/bridge/Makefile @@ -4,6 +4,7 @@ obj-$(CONFIG_DRM_CHIPONE_ICN6211) += chipone-icn6211.o obj-$(CONFIG_DRM_CHRONTEL_CH7033) += chrontel-ch7033.o obj-$(CONFIG_DRM_CROS_EC_ANX7688) += cros-ec-anx7688.o obj-$(CONFIG_DRM_DISPLAY_CONNECTOR) += display-connector.o +obj-$(CONFIG_DRM_ITE_IT6505) += ite-it6505.o obj-$(CONFIG_DRM_LONTIUM_LT8912B) += lontium-lt8912b.o obj-$(CONFIG_DRM_LONTIUM_LT9611) += lontium-lt9611.o obj-$(CONFIG_DRM_LONTIUM_LT9611UXC) += lontium-lt9611uxc.o diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c new file mode 100644 index 000000000000..fb16a176822d --- /dev/null +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -0,0 +1,3352 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* + * Copyright (c) 2020, The Linux Foundation. All rights reserved. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include + +#define REG_IC_VER 0x04 + +#define REG_RESET_CTRL 0x05 +#define VIDEO_RESET BIT(0) +#define AUDIO_RESET BIT(1) +#define ALL_LOGIC_RESET BIT(2) +#define AUX_RESET BIT(3) +#define HDCP_RESET BIT(4) + +#define INT_STATUS_01 0x06 +#define INT_MASK_01 0x09 +#define INT_HPD_CHANGE 0 +#define INT_RECEIVE_HPD_IRQ 1 +#define INT_SCDT_CHANGE 2 +#define INT_HDCP_FAIL 3 +#define INT_HDCP_DONE 4 +#define BIT_OFFSET(x) (((x) - INT_STATUS_01) * BITS_PER_BYTE) +#define BIT_INT_HPD INT_HPD_CHANGE +#define BIT_INT_HPD_IRQ INT_RECEIVE_HPD_IRQ +#define BIT_INT_SCDT INT_SCDT_CHANGE +#define BIT_INT_HDCP_FAIL INT_HDCP_FAIL +#define BIT_INT_HDCP_DONE INT_HDCP_DONE + +#define INT_STATUS_02 0x07 +#define INT_MASK_02 0x0A +#define INT_AUX_CMD_FAIL 0 +#define INT_HDCP_KSV_CHECK 1 +#define INT_AUDIO_FIFO_ERROR 2 +#define BIT_INT_AUX_CMD_FAIL (BIT_OFFSET(0x07) + INT_AUX_CMD_FAIL) +#define BIT_INT_HDCP_KSV_CHECK (BIT_OFFSET(0x07) + INT_HDCP_KSV_CHECK) +#define BIT_INT_AUDIO_FIFO_ERROR (BIT_OFFSET(0x07) + INT_AUDIO_FIFO_ERROR) + +#define INT_STATUS_03 0x08 +#define INT_MASK_03 0x0B +#define INT_LINK_TRAIN_FAIL 4 +#define INT_VID_FIFO_ERROR 5 +#define INT_IO_LATCH_FIFO_OVERFLOW 7 +#define BIT_INT_LINK_TRAIN_FAIL (BIT_OFFSET(0x08) + INT_LINK_TRAIN_FAIL) +#define BIT_INT_VID_FIFO_ERROR (BIT_OFFSET(0x08) + INT_VID_FIFO_ERROR) +#define BIT_INT_IO_FIFO_OVERFLOW (BIT_OFFSET(0x08) + INT_IO_LATCH_FIFO_OVERFLOW) + +#define REG_SYSTEM_STS 0x0D +#define INT_STS BIT(0) +#define HPD_STS BIT(1) +#define VIDEO_STB BIT(2) + +#define REG_LINK_TRAIN_STS 0x0E +#define LINK_STATE_CR BIT(2) +#define LINK_STATE_EQ BIT(3) +#define LINK_STATE_NORP BIT(4) + +#define REG_BANK_SEL 0x0F +#define REG_CLK_CTRL0 0x10 +#define M_PCLK_DELAY 0x03 + +#define REG_AUX_OPT 0x11 +#define AUX_AUTO_RST BIT(0) +#define AUX_FIX_FREQ BIT(3) + +#define REG_DATA_CTRL0 0x12 +#define VIDEO_LATCH_EDGE BIT(4) +#define ENABLE_PCLK_COUNTER BIT(7) + +#define REG_PCLK_COUNTER_VALUE 0x13 + 
+#define REG_501_FIFO_CTRL 0x15 +#define RST_501_FIFO BIT(1) + +#define REG_TRAIN_CTRL0 0x16 +#define FORCE_LBR BIT(0) +#define LANE_COUNT_MASK 0x06 +#define LANE_SWAP BIT(3) +#define SPREAD_AMP_5 BIT(4) +#define FORCE_CR_DONE BIT(5) +#define FORCE_EQ_DONE BIT(6) + +#define REG_TRAIN_CTRL1 0x17 +#define AUTO_TRAIN BIT(0) +#define MANUAL_TRAIN BIT(1) +#define FORCE_RETRAIN BIT(2) + +#define REG_AUX_CTRL 0x23 +#define CLR_EDID_FIFO BIT(0) +#define AUX_USER_MODE BIT(1) +#define AUX_NO_SEGMENT_WR BIT(6) +#define AUX_EN_FIFO_READ BIT(7) + +#define REG_AUX_ADR_0_7 0x24 +#define REG_AUX_ADR_8_15 0x25 +#define REG_AUX_ADR_16_19 0x26 +#define REG_AUX_OUT_DATA0 0x27 + +#define REG_AUX_CMD_REQ 0x2B +#define AUX_BUSY BIT(5) + +#define REG_AUX_DATA_0_7 0x2C +#define REG_AUX_DATA_8_15 0x2D +#define REG_AUX_DATA_16_23 0x2E +#define REG_AUX_DATA_24_31 0x2F + +#define REG_AUX_DATA_FIFO 0x2F + +#define REG_AUX_ERROR_STS 0x9F +#define M_AUX_REQ_FAIL 0x03 + +#define REG_HDCP_CTRL1 0x38 +#define HDCP_CP_ENABLE BIT(0) + +#define REG_HDCP_TRIGGER 0x39 +#define HDCP_TRIGGER_START BIT(0) +#define HDCP_TRIGGER_CPIRQ BIT(1) +#define HDCP_TRIGGER_KSV_DONE BIT(4) +#define HDCP_TRIGGER_KSV_FAIL BIT(5) + +#define REG_HDCP_CTRL2 0x3A +#define HDCP_AN_SEL BIT(0) +#define HDCP_AN_GEN BIT(1) +#define HDCP_HW_HPDIRQ_ACT BIT(2) +#define HDCP_EN_M0_READ BIT(5) + +#define REG_M0_0_7 0x4C +#define REG_AN_0_7 0x4C +#define REG_SP_CTRL0 0x58 +#define REG_IP_CTRL1 0x59 +#define REG_IP_CTRL2 0x5A + +#define REG_LINK_DRV 0x5C +#define DRV_HS BIT(1) + +#define REG_DRV_LN_DATA_SEL 0x5D + +#define REG_AUX 0x5E + +#define REG_VID_BUS_CTRL0 0x60 +#define IN_DDR BIT(2) +#define DDR_CD (0x01 << 6) + +#define REG_VID_BUS_CTRL1 0x61 +#define TX_FIFO_RESET BIT(1) + +#define REG_INPUT_CTRL 0xA0 +#define INPUT_HSYNC_POL BIT(0) +#define INPUT_VSYNC_POL BIT(2) +#define INPUT_INTERLACED BIT(4) + +#define REG_INPUT_HTOTAL 0xA1 +#define REG_INPUT_HACTIVE_START 0xA3 +#define REG_INPUT_HACTIVE_WIDTH 0xA5 +#define REG_INPUT_HFRONT_PORCH 0xA7 +#define REG_INPUT_HSYNC_WIDTH 0xA9 +#define REG_INPUT_VTOTAL 0xAB +#define REG_INPUT_VACTIVE_START 0xAD +#define REG_INPUT_VACTIVE_WIDTH 0xAF +#define REG_INPUT_VFRONT_PORCH 0xB1 +#define REG_INPUT_VSYNC_WIDTH 0xB3 + +#define REG_AUDIO_SRC_CTRL 0xB8 +#define M_AUDIO_I2S_EN 0x0F +#define EN_I2S0 BIT(0) +#define EN_I2S1 BIT(1) +#define EN_I2S2 BIT(2) +#define EN_I2S3 BIT(3) +#define AUDIO_FIFO_RESET BIT(7) + +#define REG_AUDIO_FMT 0xB9 +#define REG_AUDIO_FIFO_SEL 0xBA + +#define REG_AUDIO_CTRL0 0xBB +#define AUDIO_FULL_PKT BIT(4) +#define AUDIO_16B_BOUND BIT(5) + +#define REG_AUDIO_CTRL1 0xBC +#define REG_AUDIO_INPUT_FREQ 0xBE + +#define REG_IEC958_STS0 0xBF +#define REG_IEC958_STS1 0xC0 +#define REG_IEC958_STS2 0xC1 +#define REG_IEC958_STS3 0xC2 +#define REG_IEC958_STS4 0xC3 + +#define REG_HPD_IRQ_TIME 0xC9 +#define REG_AUX_DEBUG_MODE 0xCA +#define REG_AUX_OPT2 0xCB +#define REG_HDCP_OPT 0xCE +#define REG_USER_DRV_PRE 0xCF + +#define REG_DATA_MUTE_CTRL 0xD3 +#define ENABLE_ENHANCED_FRAME BIT(0) +#define ENABLE_AUTO_VIDEO_FIFO_RESET BIT(1) +#define EN_VID_MUTE BIT(4) +#define EN_AUD_MUTE BIT(5) + +#define REG_TIME_STMP_CTRL 0xD4 +#define EN_ENHANCE_VID_STMP BIT(0) +#define EN_ENHANCE_AUD_STMP BIT(2) +#define M_STAMP_STEP 0x30 +#define EN_SSC_GAT BIT(6) + +#define REG_INFOFRAME_CTRL 0xE8 +#define EN_AVI_PKT BIT(0) +#define EN_AUD_PKT BIT(1) +#define EN_MPG_PKT BIT(2) +#define EN_GEN_PKT BIT(3) +#define EN_VID_TIME_STMP BIT(4) +#define EN_AUD_TIME_STMP BIT(5) +#define EN_VID_CTRL_PKT (EN_AVI_PKT | EN_VID_TIME_STMP) 
+#define EN_AUD_CTRL_PKT (EN_AUD_PKT | EN_AUD_TIME_STMP) + +#define REG_AUDIO_N_0_7 0xDE +#define REG_AUDIO_N_8_15 0xDF +#define REG_AUDIO_N_16_23 0xE0 + +#define REG_AVI_INFO_DB1 0xE9 +#define REG_AVI_INFO_DB2 0xEA +#define REG_AVI_INFO_DB3 0xEB +#define REG_AVI_INFO_DB4 0xEC +#define REG_AVI_INFO_DB5 0xED +#define REG_AVI_INFO_SUM 0xF6 + +#define REG_AUD_INFOFRAM_DB1 0xF7 +#define REG_AUD_INFOFRAM_DB2 0xF8 +#define REG_AUD_INFOFRAM_DB3 0xF9 +#define REG_AUD_INFOFRAM_DB4 0xFA +#define REG_AUD_INFOFRAM_SUM 0xFB + +/* the following six registers are in bank1 */ +#define REG_DRV_0_DB_800_MV 0x7E +#define REG_PRE_0_DB_800_MV 0x7F +#define REG_PRE_3P5_DB_800_MV 0x81 +#define REG_SSC_CTRL0 0x88 +#define REG_SSC_CTRL1 0x89 +#define REG_SSC_CTRL2 0x8A + +#define RBR DP_LINK_BW_1_62 +#define HBR DP_LINK_BW_2_7 +#define HBR2 DP_LINK_BW_5_4 +#define HBR3 DP_LINK_BW_8_1 + +#define DPCD_V_1_1 0x11 +#define MISC_VERB 0xF0 +#define MISC_VERC 0x70 +#define I2S_INPUT_FORMAT_STANDARD 0 +#define I2S_INPUT_FORMAT_32BIT 1 +#define I2S_INPUT_LEFT_JUSTIFIED 0 +#define I2S_INPUT_RIGHT_JUSTIFIED 1 +#define I2S_DATA_1T_DELAY 0 +#define I2S_DATA_NO_DELAY 1 +#define I2S_WS_LEFT_CHANNEL 0 +#define I2S_WS_RIGHT_CHANNEL 1 +#define I2S_DATA_MSB_FIRST 0 +#define I2S_DATA_LSB_FIRST 1 +#define WORD_LENGTH_16BIT 0 +#define WORD_LENGTH_18BIT 1 +#define WORD_LENGTH_20BIT 2 +#define WORD_LENGTH_24BIT 3 +#define DEBUGFS_DIR_NAME "it6505-debugfs" +#define READ_BUFFER_SIZE 200 + +/* Vendor option */ +#define HDCP_DESIRED 1 +#define MAX_LANE_COUNT 4 +#define MAX_LINK_RATE HBR +#define AUTO_TRAIN_RETRY 3 +#define MAX_HDCP_DOWN_STREAM_COUNT 10 +#define MAX_CR_LEVEL 0x03 +#define MAX_EQ_LEVEL 0x03 +#define AUX_WAIT_TIMEOUT_MS 15 +#define AUX_FIFO_MAX_SIZE 32 +#define PIXEL_CLK_DELAY 1 +#define PIXEL_CLK_INVERSE 0 +#define ADJUST_PHASE_THRESHOLD 80000 +#define DPI_PIXEL_CLK_MAX 95000 +#define HDCP_SHA1_FIFO_LEN (MAX_HDCP_DOWN_STREAM_COUNT * 5 + 10) +#define DEFAULT_PWR_ON 0 +#define DEFAULT_DRV_HOLD 0 + +#define AUDIO_SELECT I2S +#define AUDIO_TYPE LPCM +#define AUDIO_SAMPLE_RATE SAMPLE_RATE_48K +#define AUDIO_CHANNEL_COUNT 2 +#define I2S_INPUT_FORMAT I2S_INPUT_FORMAT_32BIT +#define I2S_JUSTIFIED I2S_INPUT_LEFT_JUSTIFIED +#define I2S_DATA_DELAY I2S_DATA_1T_DELAY +#define I2S_WS_CHANNEL I2S_WS_LEFT_CHANNEL +#define I2S_DATA_SEQUENCE I2S_DATA_MSB_FIRST +#define AUDIO_WORD_LENGTH WORD_LENGTH_24BIT + +enum aux_cmd_type { + CMD_AUX_NATIVE_READ = 0x0, + CMD_AUX_NATIVE_WRITE = 0x5, + CMD_AUX_I2C_EDID_READ = 0xB, +}; + +enum aux_cmd_reply { + REPLY_ACK, + REPLY_NACK, + REPLY_DEFER, +}; + +enum link_train_status { + LINK_IDLE, + LINK_BUSY, + LINK_OK, +}; + +enum hdcp_state { + HDCP_AUTH_IDLE, + HDCP_AUTH_GOING, + HDCP_AUTH_DONE, +}; + +struct it6505_platform_data { + struct regulator *pwr18; + struct regulator *ovdd; + struct gpio_desc *gpiod_reset; +}; + +enum it6505_audio_select { + I2S = 0, + SPDIF, +}; + +enum it6505_audio_sample_rate { + SAMPLE_RATE_24K = 0x6, + SAMPLE_RATE_32K = 0x3, + SAMPLE_RATE_48K = 0x2, + SAMPLE_RATE_96K = 0xA, + SAMPLE_RATE_192K = 0xE, + SAMPLE_RATE_44_1K = 0x0, + SAMPLE_RATE_88_2K = 0x8, + SAMPLE_RATE_176_4K = 0xC, +}; + +enum it6505_audio_type { + LPCM = 0, + NLPCM, + DSS, +}; + +struct it6505_audio_data { + enum it6505_audio_select select; + enum it6505_audio_sample_rate sample_rate; + enum it6505_audio_type type; + u8 word_length; + u8 channel_count; + u8 i2s_input_format; + u8 i2s_justified; + u8 i2s_data_delay; + u8 i2s_ws_channel; + u8 i2s_data_sequence; +}; + +struct it6505_audio_sample_rate_map { + enum 
it6505_audio_sample_rate rate; + int sample_rate_value; +}; + +struct it6505_drm_dp_link { + unsigned char revision; + unsigned int rate; + unsigned int num_lanes; + unsigned long capabilities; +}; + +struct debugfs_entries { + char *name; + const struct file_operations *fops; +}; + +struct it6505 { + struct drm_dp_aux aux; + struct drm_bridge bridge; + struct i2c_client *client; + struct it6505_drm_dp_link link; + struct it6505_platform_data pdata; + /* + * Mutex protects extcon and interrupt functions from interfering + * each other. + */ + struct mutex extcon_lock; + struct mutex mode_lock; /* used to bridge_detect */ + struct mutex aux_lock; /* used to aux data transfers */ + struct regmap *regmap; + struct drm_display_mode source_output_mode; + struct drm_display_mode video_info; + struct notifier_block event_nb; + struct extcon_dev *extcon; + struct work_struct extcon_wq; + enum drm_connector_status connector_status; + enum link_train_status link_state; + struct work_struct link_works; + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + u8 lane_count; + u8 link_rate_bw_code; + u8 sink_count; + bool step_train; + bool branch_device; + bool enable_ssc; + bool lane_swap_disabled; + bool lane_swap; + bool powered; + bool hpd_state; + u32 afe_setting; + enum hdcp_state hdcp_status; + struct delayed_work hdcp_work; + struct work_struct hdcp_wait_ksv_list; + struct completion wait_edid_complete; + u8 auto_train_retry; + bool hdcp_desired; + bool is_repeater; + u8 hdcp_down_stream_count; + u8 bksvs[DRM_HDCP_KSV_LEN]; + u8 sha1_input[HDCP_SHA1_FIFO_LEN]; + bool enable_enhanced_frame; + hdmi_codec_plugged_cb plugged_cb; + struct device *codec_dev; + struct delayed_work delayed_audio; + struct it6505_audio_data audio; + struct dentry *debugfs; + + /* it6505 driver hold option */ + bool enable_drv_hold; +}; + +struct it6505_step_train_para { + u8 voltage_swing[MAX_LANE_COUNT]; + u8 pre_emphasis[MAX_LANE_COUNT]; +}; + +/* + * Vendor option afe settings for different platforms + * 0: without FPC cable + * 1: with FPC cable + */ + +static const u8 afe_setting_table[][3] = { + {0x82, 0x00, 0x45}, + {0x93, 0x2A, 0x85} +}; + +static const struct it6505_audio_sample_rate_map audio_sample_rate_map[] = { + {SAMPLE_RATE_24K, 24000}, + {SAMPLE_RATE_32K, 32000}, + {SAMPLE_RATE_48K, 48000}, + {SAMPLE_RATE_96K, 96000}, + {SAMPLE_RATE_192K, 192000}, + {SAMPLE_RATE_44_1K, 44100}, + {SAMPLE_RATE_88_2K, 88200}, + {SAMPLE_RATE_176_4K, 176400}, +}; + +static const struct regmap_range it6505_bridge_volatile_ranges[] = { + { .range_min = 0, .range_max = 0xFF }, +}; + +static const struct regmap_access_table it6505_bridge_volatile_table = { + .yes_ranges = it6505_bridge_volatile_ranges, + .n_yes_ranges = ARRAY_SIZE(it6505_bridge_volatile_ranges), +}; + +static const struct regmap_config it6505_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .volatile_table = &it6505_bridge_volatile_table, + .cache_type = REGCACHE_NONE, +}; + +static int it6505_read(struct it6505 *it6505, unsigned int reg_addr) +{ + unsigned int value; + int err; + struct device *dev = &it6505->client->dev; + + err = regmap_read(it6505->regmap, reg_addr, &value); + if (err < 0) { + dev_err(dev, "read failed reg[0x%x] err: %d", reg_addr, err); + return err; + } + + return value; +} + +static int it6505_write(struct it6505 *it6505, unsigned int reg_addr, + unsigned int reg_val) +{ + int err; + struct device *dev = &it6505->client->dev; + + err = regmap_write(it6505->regmap, reg_addr, reg_val); + + if (err < 0) { + dev_err(dev, "write failed reg[0x%x] = 0x%x err = 
%d", + reg_addr, reg_val, err); + return err; + } + + return 0; +} + +static int it6505_set_bits(struct it6505 *it6505, unsigned int reg, + unsigned int mask, unsigned int value) +{ + int err; + struct device *dev = &it6505->client->dev; + + err = regmap_update_bits(it6505->regmap, reg, mask, value); + if (err < 0) { + dev_err(dev, "write reg[0x%x] = 0x%x mask = 0x%x failed err %d", + reg, value, mask, err); + return err; + } + + return 0; +} + +static void it6505_debug_print(struct it6505 *it6505, unsigned int reg, + const char *prefix) +{ + struct device *dev = &it6505->client->dev; + int val; + + if (likely(!(__drm_debug & DRM_UT_DRIVER))) + return; + + val = it6505_read(it6505, reg); + if (val < 0) + DRM_DEV_DEBUG_DRIVER(dev, "%s reg[%02x] read error (%d)", + prefix, reg, val); + else + DRM_DEV_DEBUG_DRIVER(dev, "%s reg[%02x] = 0x%02x", prefix, reg, + val); +} + +static int it6505_dpcd_read(struct it6505 *it6505, unsigned long offset) +{ + u8 value; + int ret; + struct device *dev = &it6505->client->dev; + + ret = drm_dp_dpcd_readb(&it6505->aux, offset, &value); + if (ret < 0) { + dev_err(dev, "DPCD read failed [0x%lx] ret: %d", offset, ret); + return ret; + } + return value; +} + +static int it6505_dpcd_write(struct it6505 *it6505, unsigned long offset, + u8 datain) +{ + int ret; + struct device *dev = &it6505->client->dev; + + ret = drm_dp_dpcd_writeb(&it6505->aux, offset, datain); + if (ret < 0) { + dev_err(dev, "DPCD write failed [0x%lx] ret: %d", offset, ret); + return ret; + } + return 0; +} + +static int it6505_get_dpcd(struct it6505 *it6505, int offset, u8 *dpcd, int num) +{ + int ret; + struct device *dev = &it6505->client->dev; + + ret = drm_dp_dpcd_read(&it6505->aux, offset, dpcd, num); + + if (ret < 0) + return ret; + + DRM_DEV_DEBUG_DRIVER(dev, "ret = %d DPCD[0x%x] = 0x%*ph", ret, offset, + num, dpcd); + + return 0; +} + +static void it6505_dump(struct it6505 *it6505) +{ + unsigned int i, j; + u8 regs[16]; + struct device *dev = &it6505->client->dev; + + for (i = 0; i <= 0xff; i += 16) { + for (j = 0; j < 16; j++) + regs[j] = it6505_read(it6505, i + j); + + DRM_DEV_DEBUG_DRIVER(dev, "[0x%02x] = %16ph", i, regs); + } +} + +static bool it6505_get_sink_hpd_status(struct it6505 *it6505) +{ + int reg_0d; + + reg_0d = it6505_read(it6505, REG_SYSTEM_STS); + + if (reg_0d < 0) + return false; + + return reg_0d & HPD_STS; +} + +static int it6505_read_word(struct it6505 *it6505, unsigned int reg) +{ + int val0, val1; + + val0 = it6505_read(it6505, reg); + if (val0 < 0) + return val0; + + val1 = it6505_read(it6505, reg + 1); + if (val1 < 0) + return val1; + + return (val1 << 8) | val0; +} + +static void it6505_calc_video_info(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int hsync_pol, vsync_pol, interlaced; + int htotal, hdes, hdew, hfph, hsyncw; + int vtotal, vdes, vdew, vfph, vsyncw; + int rddata, i, pclk, sum = 0; + + usleep_range(10000, 15000); + rddata = it6505_read(it6505, REG_INPUT_CTRL); + hsync_pol = rddata & INPUT_HSYNC_POL; + vsync_pol = (rddata & INPUT_VSYNC_POL) >> 2; + interlaced = (rddata & INPUT_INTERLACED) >> 4; + + htotal = it6505_read_word(it6505, REG_INPUT_HTOTAL) & 0x1FFF; + hdes = it6505_read_word(it6505, REG_INPUT_HACTIVE_START) & 0x1FFF; + hdew = it6505_read_word(it6505, REG_INPUT_HACTIVE_WIDTH) & 0x1FFF; + hfph = it6505_read_word(it6505, REG_INPUT_HFRONT_PORCH) & 0x1FFF; + hsyncw = it6505_read_word(it6505, REG_INPUT_HSYNC_WIDTH) & 0x1FFF; + + vtotal = it6505_read_word(it6505, REG_INPUT_VTOTAL) & 0xFFF; + vdes = 
it6505_read_word(it6505, REG_INPUT_VACTIVE_START) & 0xFFF; + vdew = it6505_read_word(it6505, REG_INPUT_VACTIVE_WIDTH) & 0xFFF; + vfph = it6505_read_word(it6505, REG_INPUT_VFRONT_PORCH) & 0xFFF; + vsyncw = it6505_read_word(it6505, REG_INPUT_VSYNC_WIDTH) & 0xFFF; + + DRM_DEV_DEBUG_DRIVER(dev, "hsync_pol:%d, vsync_pol:%d, interlaced:%d", + hsync_pol, vsync_pol, interlaced); + DRM_DEV_DEBUG_DRIVER(dev, "hactive_start:%d, vactive_start:%d", + hdes, vdes); + + for (i = 0; i < 10; i++) { + it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER, + ENABLE_PCLK_COUNTER); + usleep_range(10000, 15000); + it6505_set_bits(it6505, REG_DATA_CTRL0, ENABLE_PCLK_COUNTER, + 0x00); + rddata = it6505_read_word(it6505, REG_PCLK_COUNTER_VALUE) & + 0xFFF; + + sum += rddata; + } + + if (sum == 0) { + DRM_DEV_DEBUG_DRIVER(dev, "calc video timing error"); + return; + } + + sum /= 10; + pclk = 13500 * 2048 / sum; + it6505->video_info.clock = pclk; + it6505->video_info.hdisplay = hdew; + it6505->video_info.hsync_start = hdew + hfph; + it6505->video_info.hsync_end = hdew + hfph + hsyncw; + it6505->video_info.htotal = htotal; + it6505->video_info.vdisplay = vdew; + it6505->video_info.vsync_start = vdew + vfph; + it6505->video_info.vsync_end = vdew + vfph + vsyncw; + it6505->video_info.vtotal = vtotal; + + DRM_DEV_DEBUG_DRIVER(dev, DRM_MODE_FMT, + DRM_MODE_ARG(&it6505->video_info)); +} + +static int it6505_drm_dp_link_probe(struct drm_dp_aux *aux, + struct it6505_drm_dp_link *link) +{ + u8 values[3]; + int err; + + memset(link, 0, sizeof(*link)); + + err = drm_dp_dpcd_read(aux, DP_DPCD_REV, values, sizeof(values)); + if (err < 0) + return err; + + link->revision = values[0]; + link->rate = drm_dp_bw_code_to_link_rate(values[1]); + link->num_lanes = values[2] & DP_MAX_LANE_COUNT_MASK; + + if (values[2] & DP_ENHANCED_FRAME_CAP) + link->capabilities = DP_ENHANCED_FRAME_CAP; + + return 0; +} + +static int it6505_drm_dp_link_power_up(struct drm_dp_aux *aux, + struct it6505_drm_dp_link *link) +{ + u8 value; + int err; + + /* DP_SET_POWER register is only available on DPCD v1.1 and later */ + if (link->revision < DPCD_V_1_1) + return 0; + + err = drm_dp_dpcd_readb(aux, DP_SET_POWER, &value); + if (err < 0) + return err; + + value &= ~DP_SET_POWER_MASK; + value |= DP_SET_POWER_D0; + + err = drm_dp_dpcd_writeb(aux, DP_SET_POWER, value); + if (err < 0) + return err; + + /* + * According to the DP 1.1 specification, a "Sink Device must exit the + * power saving state within 1 ms" (Section 2.5.3.1, Table 5-52, "Sink + * Control Field" (register 0x600). 
+ */ + usleep_range(1000, 2000); + + return 0; +} + +static void it6505_clear_int(struct it6505 *it6505) +{ + it6505_write(it6505, INT_STATUS_01, 0xFF); + it6505_write(it6505, INT_STATUS_02, 0xFF); + it6505_write(it6505, INT_STATUS_03, 0xFF); +} + +static void it6505_int_mask_enable(struct it6505 *it6505) +{ + it6505_write(it6505, INT_MASK_01, BIT(INT_HPD_CHANGE) | + BIT(INT_RECEIVE_HPD_IRQ) | BIT(INT_SCDT_CHANGE) | + BIT(INT_HDCP_FAIL) | BIT(INT_HDCP_DONE)); + + it6505_write(it6505, INT_MASK_02, BIT(INT_AUX_CMD_FAIL) | + BIT(INT_HDCP_KSV_CHECK) | BIT(INT_AUDIO_FIFO_ERROR)); + + it6505_write(it6505, INT_MASK_03, BIT(INT_LINK_TRAIN_FAIL) | + BIT(INT_VID_FIFO_ERROR) | BIT(INT_IO_LATCH_FIFO_OVERFLOW)); +} + +static void it6505_int_mask_disable(struct it6505 *it6505) +{ + it6505_write(it6505, INT_MASK_01, 0x00); + it6505_write(it6505, INT_MASK_02, 0x00); + it6505_write(it6505, INT_MASK_03, 0x00); +} + +static void it6505_lane_termination_on(struct it6505 *it6505) +{ + int regcf; + + regcf = it6505_read(it6505, REG_USER_DRV_PRE); + + if (regcf == MISC_VERB) + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x00); + + if (regcf == MISC_VERC) { + if (it6505->lane_swap) { + switch (it6505->lane_count) { + case 1: + case 2: + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, + 0x0C, 0x08); + break; + default: + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, + 0x0C, 0x0C); + break; + } + } else { + switch (it6505->lane_count) { + case 1: + case 2: + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, + 0x0C, 0x04); + break; + default: + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, + 0x0C, 0x0C); + break; + } + } + } +} + +static void it6505_lane_termination_off(struct it6505 *it6505) +{ + int regcf; + + regcf = it6505_read(it6505, REG_USER_DRV_PRE); + + if (regcf == MISC_VERB) + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x80); + + if (regcf == MISC_VERC) + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x0C, 0x00); +} + +static void it6505_lane_power_on(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_LINK_DRV, 0xF1, + (it6505->lane_swap ? 
+ GENMASK(7, 8 - it6505->lane_count) : + GENMASK(3 + it6505->lane_count, 4)) | + 0x01); +} + +static void it6505_lane_power_off(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_LINK_DRV, 0xF0, 0x00); +} + +static void it6505_lane_off(struct it6505 *it6505) +{ + it6505_lane_power_off(it6505); + it6505_lane_termination_off(it6505); +} + +static void it6505_aux_termination_on(struct it6505 *it6505) +{ + int regcf; + + regcf = it6505_read(it6505, REG_USER_DRV_PRE); + + if (regcf == MISC_VERB) + it6505_lane_termination_on(it6505); + + if (regcf == MISC_VERC) + it6505_set_bits(it6505, REG_DRV_LN_DATA_SEL, 0x80, 0x80); +} + +static void it6505_aux_power_on(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_AUX, 0x02, 0x02); +} + +static void it6505_aux_on(struct it6505 *it6505) +{ + it6505_aux_power_on(it6505); + it6505_aux_termination_on(it6505); +} + +static void it6505_aux_reset(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_RESET_CTRL, AUX_RESET, AUX_RESET); + it6505_set_bits(it6505, REG_RESET_CTRL, AUX_RESET, 0x00); +} + +static void it6505_reset_logic(struct it6505 *it6505) +{ + regmap_write(it6505->regmap, REG_RESET_CTRL, ALL_LOGIC_RESET); + usleep_range(1000, 1500); +} + +static bool it6505_aux_op_finished(struct it6505 *it6505) +{ + int reg2b = it6505_read(it6505, REG_AUX_CMD_REQ); + + if (reg2b < 0) + return false; + + return (reg2b & AUX_BUSY) == 0; +} + +static int it6505_aux_wait(struct it6505 *it6505) +{ + int status; + unsigned long timeout; + struct device *dev = &it6505->client->dev; + + timeout = jiffies + msecs_to_jiffies(AUX_WAIT_TIMEOUT_MS) + 1; + + while (!it6505_aux_op_finished(it6505)) { + if (time_after(jiffies, timeout)) { + dev_err(dev, "Timed out waiting AUX to finish"); + return -ETIMEDOUT; + } + usleep_range(1000, 2000); + } + + status = it6505_read(it6505, REG_AUX_ERROR_STS); + if (status < 0) { + dev_err(dev, "Failed to read AUX channel: %d", status); + return status; + } + + return 0; +} + +static ssize_t it6505_aux_operation(struct it6505 *it6505, + enum aux_cmd_type cmd, + unsigned int address, u8 *buffer, + size_t size, enum aux_cmd_reply *reply) +{ + int i, ret; + bool aux_write_check = false; + + if (!it6505_get_sink_hpd_status(it6505)) + return -EIO; + + /* set AUX user mode */ + it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, AUX_USER_MODE); + +aux_op_start: + if (cmd == CMD_AUX_I2C_EDID_READ) { + /* AUX EDID FIFO has max length of AUX_FIFO_MAX_SIZE bytes. */ + size = min_t(size_t, size, AUX_FIFO_MAX_SIZE); + /* Enable AUX FIFO read back and clear FIFO */ + it6505_set_bits(it6505, REG_AUX_CTRL, + AUX_EN_FIFO_READ | CLR_EDID_FIFO, + AUX_EN_FIFO_READ | CLR_EDID_FIFO); + + it6505_set_bits(it6505, REG_AUX_CTRL, + AUX_EN_FIFO_READ | CLR_EDID_FIFO, + AUX_EN_FIFO_READ); + } else { + /* The DP AUX transmit buffer has 4 bytes. 
*/ + size = min_t(size_t, size, 4); + it6505_set_bits(it6505, REG_AUX_CTRL, AUX_NO_SEGMENT_WR, + AUX_NO_SEGMENT_WR); + } + + /* Start Address[7:0] */ + it6505_write(it6505, REG_AUX_ADR_0_7, (address >> 0) & 0xFF); + /* Start Address[15:8] */ + it6505_write(it6505, REG_AUX_ADR_8_15, (address >> 8) & 0xFF); + /* WriteNum[3:0]+StartAdr[19:16] */ + it6505_write(it6505, REG_AUX_ADR_16_19, + ((address >> 16) & 0x0F) | ((size - 1) << 4)); + + if (cmd == CMD_AUX_NATIVE_WRITE) + regmap_bulk_write(it6505->regmap, REG_AUX_OUT_DATA0, buffer, + size); + + /* Aux Fire */ + it6505_write(it6505, REG_AUX_CMD_REQ, cmd); + + ret = it6505_aux_wait(it6505); + if (ret < 0) + goto aux_op_err; + + ret = it6505_read(it6505, REG_AUX_ERROR_STS); + if (ret < 0) + goto aux_op_err; + + switch ((ret >> 6) & 0x3) { + case 0: + *reply = REPLY_ACK; + break; + case 1: + *reply = REPLY_DEFER; + ret = -EAGAIN; + goto aux_op_err; + case 2: + *reply = REPLY_NACK; + ret = -EIO; + goto aux_op_err; + case 3: + ret = -ETIMEDOUT; + goto aux_op_err; + } + + /* Read back Native Write data */ + if (cmd == CMD_AUX_NATIVE_WRITE) { + aux_write_check = true; + cmd = CMD_AUX_NATIVE_READ; + goto aux_op_start; + } + + if (cmd == CMD_AUX_I2C_EDID_READ) { + for (i = 0; i < size; i++) { + ret = it6505_read(it6505, REG_AUX_DATA_FIFO); + if (ret < 0) + goto aux_op_err; + buffer[i] = ret; + } + } else { + for (i = 0; i < size; i++) { + ret = it6505_read(it6505, REG_AUX_DATA_0_7 + i); + if (ret < 0) + goto aux_op_err; + + if (aux_write_check && buffer[size - 1 - i] != ret) { + ret = -EINVAL; + goto aux_op_err; + } + + buffer[size - 1 - i] = ret; + } + } + + ret = i; + +aux_op_err: + if (cmd == CMD_AUX_I2C_EDID_READ) { + /* clear AUX FIFO */ + it6505_set_bits(it6505, REG_AUX_CTRL, + AUX_EN_FIFO_READ | CLR_EDID_FIFO, + AUX_EN_FIFO_READ | CLR_EDID_FIFO); + it6505_set_bits(it6505, REG_AUX_CTRL, + AUX_EN_FIFO_READ | CLR_EDID_FIFO, 0x00); + } + + /* Leave AUX user mode */ + it6505_set_bits(it6505, REG_AUX_CTRL, AUX_USER_MODE, 0); + + return ret; +} + +static ssize_t it6505_aux_do_transfer(struct it6505 *it6505, + enum aux_cmd_type cmd, + unsigned int address, u8 *buffer, + size_t size, enum aux_cmd_reply *reply) +{ + int i, ret_size, ret = 0, request_size; + + mutex_lock(&it6505->aux_lock); + for (i = 0; i < size; i += 4) { + request_size = min((int)size - i, 4); + ret_size = it6505_aux_operation(it6505, cmd, address + i, + buffer + i, request_size, + reply); + if (ret_size < 0) { + ret = ret_size; + goto aux_op_err; + } + + ret += ret_size; + } + +aux_op_err: + mutex_unlock(&it6505->aux_lock); + return ret; +} + +static ssize_t it6505_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct it6505 *it6505 = container_of(aux, struct it6505, aux); + u8 cmd; + bool is_i2c = !(msg->request & DP_AUX_NATIVE_WRITE); + int ret; + enum aux_cmd_reply reply; + + /* IT6505 doesn't support arbitrary I2C read / write. 
*/ + if (is_i2c) + return -EINVAL; + + switch (msg->request) { + case DP_AUX_NATIVE_READ: + cmd = CMD_AUX_NATIVE_READ; + break; + case DP_AUX_NATIVE_WRITE: + cmd = CMD_AUX_NATIVE_WRITE; + break; + default: + return -EINVAL; + } + + ret = it6505_aux_do_transfer(it6505, cmd, msg->address, msg->buffer, + msg->size, &reply); + if (ret < 0) + return ret; + + switch (reply) { + case REPLY_ACK: + msg->reply = DP_AUX_NATIVE_REPLY_ACK; + break; + case REPLY_NACK: + msg->reply = DP_AUX_NATIVE_REPLY_NACK; + break; + case REPLY_DEFER: + msg->reply = DP_AUX_NATIVE_REPLY_DEFER; + break; + } + + return ret; +} + +static int it6505_get_edid_block(void *data, u8 *buf, unsigned int block, + size_t len) +{ + struct it6505 *it6505 = data; + struct device *dev = &it6505->client->dev; + enum aux_cmd_reply reply; + int offset, ret, aux_retry = 100; + + it6505_aux_reset(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "block number = %d", block); + + for (offset = 0; offset < EDID_LENGTH;) { + ret = it6505_aux_do_transfer(it6505, CMD_AUX_I2C_EDID_READ, + block * EDID_LENGTH + offset, + buf + offset, 8, &reply); + + if (ret < 0 && ret != -EAGAIN) + return ret; + + switch (reply) { + case REPLY_ACK: + DRM_DEV_DEBUG_DRIVER(dev, "[0x%02x]: %8ph", offset, + buf + offset); + offset += 8; + aux_retry = 100; + break; + case REPLY_NACK: + return -EIO; + case REPLY_DEFER: + msleep(20); + if (!(--aux_retry)) + return -EIO; + } + } + + return 0; +} + +static void it6505_variable_config(struct it6505 *it6505) +{ + it6505->link_rate_bw_code = HBR; + it6505->lane_count = MAX_LANE_COUNT; + it6505->link_state = LINK_IDLE; + it6505->hdcp_desired = HDCP_DESIRED; + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + it6505->audio.select = AUDIO_SELECT; + it6505->audio.sample_rate = AUDIO_SAMPLE_RATE; + it6505->audio.channel_count = AUDIO_CHANNEL_COUNT; + it6505->audio.type = AUDIO_TYPE; + it6505->audio.i2s_input_format = I2S_INPUT_FORMAT; + it6505->audio.i2s_justified = I2S_JUSTIFIED; + it6505->audio.i2s_data_delay = I2S_DATA_DELAY; + it6505->audio.i2s_ws_channel = I2S_WS_CHANNEL; + it6505->audio.i2s_data_sequence = I2S_DATA_SEQUENCE; + it6505->audio.word_length = AUDIO_WORD_LENGTH; + memset(it6505->sha1_input, 0, sizeof(it6505->sha1_input)); + memset(it6505->bksvs, 0, sizeof(it6505->bksvs)); +} + +static int it6505_send_video_infoframe(struct it6505 *it6505, + struct hdmi_avi_infoframe *frame) +{ + u8 buffer[HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE]; + int err; + struct device *dev = &it6505->client->dev; + + err = hdmi_avi_infoframe_pack(frame, buffer, sizeof(buffer)); + if (err < 0) { + dev_err(dev, "Failed to pack AVI infoframe: %d", err); + return err; + } + + err = it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AVI_PKT, 0x00); + if (err) + return err; + + err = regmap_bulk_write(it6505->regmap, REG_AVI_INFO_DB1, + buffer + HDMI_INFOFRAME_HEADER_SIZE, + frame->length); + if (err) + return err; + + err = it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AVI_PKT, + EN_AVI_PKT); + if (err) + return err; + + return 0; +} + +static void it6505_get_extcon_property(struct it6505 *it6505) +{ + int err; + union extcon_property_value property; + struct device *dev = &it6505->client->dev; + + if (it6505->extcon && !it6505->lane_swap_disabled) { + err = extcon_get_property(it6505->extcon, EXTCON_DISP_DP, + EXTCON_PROP_USB_TYPEC_POLARITY, + &property); + if (err) { + dev_err(dev, "get property fail!"); + return; + } + it6505->lane_swap = property.intval; + } +} + +static void it6505_clk_phase_adjustment(struct it6505 *it6505, + const struct 
drm_display_mode *mode) +{ + int clock = mode->clock; + + it6505_set_bits(it6505, REG_CLK_CTRL0, M_PCLK_DELAY, + clock < ADJUST_PHASE_THRESHOLD ? PIXEL_CLK_DELAY : 0); + it6505_set_bits(it6505, REG_DATA_CTRL0, VIDEO_LATCH_EDGE, + PIXEL_CLK_INVERSE << 4); +} + +static void it6505_link_reset_step_train(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_TRAIN_CTRL0, + FORCE_CR_DONE | FORCE_EQ_DONE, 0x00); + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); +} + +static void it6505_init(struct it6505 *it6505) +{ + it6505_write(it6505, REG_AUX_OPT, AUX_AUTO_RST | AUX_FIX_FREQ); + it6505_write(it6505, REG_AUX_CTRL, AUX_NO_SEGMENT_WR); + it6505_write(it6505, REG_HDCP_CTRL2, HDCP_AN_SEL | HDCP_HW_HPDIRQ_ACT); + it6505_write(it6505, REG_VID_BUS_CTRL0, IN_DDR | DDR_CD); + it6505_write(it6505, REG_VID_BUS_CTRL1, 0x01); + it6505_write(it6505, REG_AUDIO_CTRL0, AUDIO_16B_BOUND); + + /* chip internal setting, don't modify */ + it6505_write(it6505, REG_HPD_IRQ_TIME, 0xF5); + it6505_write(it6505, REG_AUX_DEBUG_MODE, 0x4D); + it6505_write(it6505, REG_AUX_OPT2, 0x17); + it6505_write(it6505, REG_HDCP_OPT, 0x60); + it6505_write(it6505, REG_DATA_MUTE_CTRL, + EN_VID_MUTE | EN_AUD_MUTE | ENABLE_AUTO_VIDEO_FIFO_RESET); + it6505_write(it6505, REG_TIME_STMP_CTRL, + EN_SSC_GAT | EN_ENHANCE_VID_STMP | EN_ENHANCE_AUD_STMP); + it6505_write(it6505, REG_INFOFRAME_CTRL, 0x00); + it6505_write(it6505, REG_BANK_SEL, 0x01); + it6505_write(it6505, REG_DRV_0_DB_800_MV, + afe_setting_table[it6505->afe_setting][0]); + it6505_write(it6505, REG_PRE_0_DB_800_MV, + afe_setting_table[it6505->afe_setting][1]); + it6505_write(it6505, REG_PRE_3P5_DB_800_MV, + afe_setting_table[it6505->afe_setting][2]); + it6505_write(it6505, REG_SSC_CTRL0, 0x9E); + it6505_write(it6505, REG_SSC_CTRL1, 0x1C); + it6505_write(it6505, REG_SSC_CTRL2, 0x42); + it6505_write(it6505, REG_BANK_SEL, 0x00); +} + +static void it6505_video_disable(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE); + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET); +} + +static void it6505_video_reset(struct it6505 *it6505) +{ + it6505_link_reset_step_train(it6505); + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, EN_VID_MUTE); + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_VID_CTRL_PKT, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, VIDEO_RESET); + it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, RST_501_FIFO); + it6505_set_bits(it6505, REG_501_FIFO_CTRL, RST_501_FIFO, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, VIDEO_RESET, 0x00); +} + +static void it6505_update_video_parameter(struct it6505 *it6505, + const struct drm_display_mode *mode) +{ + it6505_clk_phase_adjustment(it6505, mode); + it6505_video_disable(it6505); +} + +static bool it6505_audio_input(struct it6505 *it6505) +{ + int reg05, regbe; + + reg05 = it6505_read(it6505, REG_RESET_CTRL); + it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, 0x00); + usleep_range(3000, 4000); + regbe = it6505_read(it6505, REG_AUDIO_INPUT_FREQ); + it6505_write(it6505, REG_RESET_CTRL, reg05); + + return regbe != 0xFF; +} + +static void it6505_setup_audio_channel_status(struct it6505 *it6505) +{ + enum it6505_audio_sample_rate sample_rate = it6505->audio.sample_rate; + u8 audio_word_length_map[] = { 0x02, 0x04, 0x03, 0x0B }; + + /* Channel Status */ + it6505_write(it6505, REG_IEC958_STS0, it6505->audio.type << 1); + it6505_write(it6505, 
REG_IEC958_STS1, 0x00); + it6505_write(it6505, REG_IEC958_STS2, 0x00); + it6505_write(it6505, REG_IEC958_STS3, sample_rate); + it6505_write(it6505, REG_IEC958_STS4, (~sample_rate << 4) | + audio_word_length_map[it6505->audio.word_length]); +} + +static void it6505_setup_audio_format(struct it6505 *it6505) +{ + /* I2S MODE */ + it6505_write(it6505, REG_AUDIO_FMT, + (it6505->audio.word_length << 5) | + (it6505->audio.i2s_data_sequence << 4) | + (it6505->audio.i2s_ws_channel << 3) | + (it6505->audio.i2s_data_delay << 2) | + (it6505->audio.i2s_justified << 1) | + it6505->audio.i2s_input_format); + if (it6505->audio.select == SPDIF) { + it6505_write(it6505, REG_AUDIO_FIFO_SEL, 0x00); + /* 0x30 = 128*FS */ + it6505_set_bits(it6505, REG_AUX_OPT, 0xF0, 0x30); + } else { + it6505_write(it6505, REG_AUDIO_FIFO_SEL, 0xE4); + } + + it6505_write(it6505, REG_AUDIO_CTRL0, 0x20); + it6505_write(it6505, REG_AUDIO_CTRL1, 0x00); +} + +static void it6505_enable_audio_source(struct it6505 *it6505) +{ + unsigned int audio_source_count; + + audio_source_count = BIT(DIV_ROUND_UP(it6505->audio.channel_count, 2)) + - 1; + + audio_source_count |= it6505->audio.select << 4; + + it6505_write(it6505, REG_AUDIO_SRC_CTRL, audio_source_count); +} + +static void it6505_enable_audio_infoframe(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + u8 audio_info_ca[] = { 0x00, 0x00, 0x01, 0x03, 0x07, 0x0B, 0x0F, 0x1F }; + + DRM_DEV_DEBUG_DRIVER(dev, "infoframe channel_allocation:0x%02x", + audio_info_ca[it6505->audio.channel_count - 1]); + + it6505_write(it6505, REG_AUD_INFOFRAM_DB1, it6505->audio.channel_count + - 1); + it6505_write(it6505, REG_AUD_INFOFRAM_DB2, 0x00); + it6505_write(it6505, REG_AUD_INFOFRAM_DB3, + audio_info_ca[it6505->audio.channel_count - 1]); + it6505_write(it6505, REG_AUD_INFOFRAM_DB4, 0x00); + it6505_write(it6505, REG_AUD_INFOFRAM_SUM, 0x00); + + /* Enable Audio InfoFrame */ + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AUD_CTRL_PKT, + EN_AUD_CTRL_PKT); +} + +static void it6505_disable_audio(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_AUD_MUTE, EN_AUD_MUTE); + it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, M_AUDIO_I2S_EN, 0x00); + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, EN_AUD_CTRL_PKT, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, AUDIO_RESET); +} + +static void it6505_enable_audio(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int regbe; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + it6505_disable_audio(it6505); + + it6505_setup_audio_channel_status(it6505); + it6505_setup_audio_format(it6505); + it6505_enable_audio_source(it6505); + it6505_enable_audio_infoframe(it6505); + + it6505_write(it6505, REG_AUDIO_N_0_7, 0x00); + it6505_write(it6505, REG_AUDIO_N_8_15, 0x80); + it6505_write(it6505, REG_AUDIO_N_16_23, 0x00); + + it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, AUDIO_FIFO_RESET, + AUDIO_FIFO_RESET); + it6505_set_bits(it6505, REG_AUDIO_SRC_CTRL, AUDIO_FIFO_RESET, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, AUDIO_RESET, 0x00); + regbe = it6505_read(it6505, REG_AUDIO_INPUT_FREQ); + DRM_DEV_DEBUG_DRIVER(dev, "regbe:0x%02x audio input fs: %d.%d kHz", + regbe, 6750 / regbe, (6750 % regbe) * 10 / regbe); + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_AUD_MUTE, 0x00); +} + +static bool it6505_use_step_train_check(struct it6505 *it6505) +{ + if (it6505->link.revision >= 0x12) + return it6505->dpcd[DP_TRAINING_AUX_RD_INTERVAL] >= 0x01; + + return true; +} + +static void it6505_parse_link_capabilities(struct it6505 
*it6505) +{ + struct device *dev = &it6505->client->dev; + struct it6505_drm_dp_link *link = &it6505->link; + int bcaps; + + if (it6505->dpcd[0] == 0) { + it6505_aux_on(it6505); + it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd, + ARRAY_SIZE(it6505->dpcd)); + } + + DRM_DEV_DEBUG_DRIVER(dev, "DPCD Rev.: %d.%d", + link->revision >> 4, link->revision & 0x0F); + + DRM_DEV_DEBUG_DRIVER(dev, "Sink max link rate: %d.%02d Gbps per lane", + link->rate / 100000, link->rate / 1000 % 100); + + it6505->link_rate_bw_code = drm_dp_link_rate_to_bw_code(link->rate); + DRM_DEV_DEBUG_DRIVER(dev, "link rate bw code:0x%02x", + it6505->link_rate_bw_code); + it6505->link_rate_bw_code = min_t(int, it6505->link_rate_bw_code, + MAX_LINK_RATE); + + it6505->lane_count = link->num_lanes; + DRM_DEV_DEBUG_DRIVER(dev, "Sink support %d lanes training", + it6505->lane_count); + it6505->lane_count = min_t(int, it6505->lane_count, MAX_LANE_COUNT); + + it6505->branch_device = drm_dp_is_branch(it6505->dpcd); + DRM_DEV_DEBUG_DRIVER(dev, "Sink %sbranch device", + it6505->branch_device ? "" : "Not "); + + it6505->enable_enhanced_frame = link->capabilities; + DRM_DEV_DEBUG_DRIVER(dev, "Sink %sSupport Enhanced Framing", + it6505->enable_enhanced_frame ? "" : "Not "); + + it6505->enable_ssc = (it6505->dpcd[DP_MAX_DOWNSPREAD] & + DP_MAX_DOWNSPREAD_0_5); + DRM_DEV_DEBUG_DRIVER(dev, "Maximum Down-Spread: %s, %ssupport SSC!", + it6505->enable_ssc ? "0.5" : "0", + it6505->enable_ssc ? "" : "Not "); + + it6505->step_train = it6505_use_step_train_check(it6505); + if (it6505->step_train) + DRM_DEV_DEBUG_DRIVER(dev, "auto train fail, will step train"); + + bcaps = it6505_dpcd_read(it6505, DP_AUX_HDCP_BCAPS); + DRM_DEV_DEBUG_DRIVER(dev, "bcaps:0x%02x", bcaps); + if (bcaps & DP_BCAPS_HDCP_CAPABLE) { + it6505->is_repeater = (bcaps & DP_BCAPS_REPEATER_PRESENT); + DRM_DEV_DEBUG_DRIVER(dev, "Support HDCP! Downstream is %s!", + it6505->is_repeater ? "repeater" : + "receiver"); + } else { + DRM_DEV_DEBUG_DRIVER(dev, "Sink not support HDCP!"); + it6505->hdcp_desired = false; + } + DRM_DEV_DEBUG_DRIVER(dev, "HDCP %s", + it6505->hdcp_desired ? "desired" : "undesired"); +} + +static void it6505_setup_ssc(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_TRAIN_CTRL0, SPREAD_AMP_5, + it6505->enable_ssc ? SPREAD_AMP_5 : 0x00); + if (it6505->enable_ssc) { + it6505_write(it6505, REG_BANK_SEL, 0x01); + it6505_write(it6505, REG_SSC_CTRL0, 0x9E); + it6505_write(it6505, REG_SSC_CTRL1, 0x1C); + it6505_write(it6505, REG_SSC_CTRL2, 0x42); + it6505_write(it6505, REG_BANK_SEL, 0x00); + it6505_write(it6505, REG_SP_CTRL0, 0x07); + it6505_write(it6505, REG_IP_CTRL1, 0x29); + it6505_write(it6505, REG_IP_CTRL2, 0x03); + /* Stamp Interrupt Step */ + it6505_set_bits(it6505, REG_TIME_STMP_CTRL, M_STAMP_STEP, + 0x10); + it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL, + DP_SPREAD_AMP_0_5); + } else { + it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL, 0x00); + it6505_set_bits(it6505, REG_TIME_STMP_CTRL, M_STAMP_STEP, + 0x00); + } +} + +static inline void it6505_link_rate_setup(struct it6505 *it6505) +{ + it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_LBR, + (it6505->link_rate_bw_code == RBR) ? FORCE_LBR : 0x00); + it6505_set_bits(it6505, REG_LINK_DRV, DRV_HS, + (it6505->link_rate_bw_code == RBR) ? 0x00 : DRV_HS); +} + +static void it6505_lane_count_setup(struct it6505 *it6505) +{ + it6505_get_extcon_property(it6505); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, LANE_SWAP, + it6505->lane_swap ? 
LANE_SWAP : 0x00); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, LANE_COUNT_MASK, + (it6505->lane_count - 1) << 1); +} + +static void it6505_link_training_setup(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + if (it6505->enable_enhanced_frame) + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, + ENABLE_ENHANCED_FRAME, ENABLE_ENHANCED_FRAME); + + it6505_link_rate_setup(it6505); + it6505_lane_count_setup(it6505); + it6505_setup_ssc(it6505); + DRM_DEV_DEBUG_DRIVER(dev, + "%s, %d lanes, %sable ssc, %sable enhanced frame", + it6505->link_rate_bw_code != RBR ? "HBR" : "RBR", + it6505->lane_count, + it6505->enable_ssc ? "en" : "dis", + it6505->enable_enhanced_frame ? "en" : "dis"); +} + +static bool it6505_link_start_auto_train(struct it6505 *it6505) +{ + int timeout = 500, link_training_state; + bool state = false; + + mutex_lock(&it6505->aux_lock); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, + FORCE_CR_DONE | FORCE_EQ_DONE, 0x00); + it6505_write(it6505, REG_TRAIN_CTRL1, FORCE_RETRAIN); + it6505_write(it6505, REG_TRAIN_CTRL1, AUTO_TRAIN); + + while (timeout > 0) { + usleep_range(1000, 2000); + link_training_state = it6505_read(it6505, REG_LINK_TRAIN_STS); + + if (link_training_state > 0 && + (link_training_state & LINK_STATE_NORP)) { + state = true; + goto unlock; + } + + timeout--; + } +unlock: + mutex_unlock(&it6505->aux_lock); + + return state; +} + +static int it6505_drm_dp_link_configure(struct it6505 *it6505) +{ + u8 values[2]; + int err; + struct drm_dp_aux *aux = &it6505->aux; + + values[0] = it6505->link_rate_bw_code; + values[1] = it6505->lane_count; + + if (it6505->enable_enhanced_frame) + values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + + err = drm_dp_dpcd_write(aux, DP_LINK_BW_SET, values, sizeof(values)); + if (err < 0) + return err; + + return 0; +} + +static bool it6505_check_voltage_swing_max(u8 lane_voltage_swing_pre_emphasis) +{ + return ((lane_voltage_swing_pre_emphasis & 0x03) == MAX_CR_LEVEL); +} + +static bool it6505_check_pre_emphasis_max(u8 lane_voltage_swing_pre_emphasis) +{ + return ((lane_voltage_swing_pre_emphasis & 0x03) == MAX_EQ_LEVEL); +} + +static bool it6505_check_max_voltage_swing_reached(u8 *lane_voltage_swing, + u8 lane_count) +{ + u8 i; + + for (i = 0; i < lane_count; i++) { + if (lane_voltage_swing[i] & DP_TRAIN_MAX_SWING_REACHED) + return true; + } + + return false; +} + +static bool +step_train_lane_voltage_para_set(struct it6505 *it6505, + struct it6505_step_train_para + *lane_voltage_pre_emphasis, + u8 *lane_voltage_pre_emphasis_set) +{ + u8 *voltage_swing = lane_voltage_pre_emphasis->voltage_swing; + u8 *pre_emphasis = lane_voltage_pre_emphasis->pre_emphasis; + u8 i; + + for (i = 0; i < it6505->lane_count; i++) { + voltage_swing[i] &= 0x03; + lane_voltage_pre_emphasis_set[i] = voltage_swing[i]; + if (it6505_check_voltage_swing_max(voltage_swing[i])) + lane_voltage_pre_emphasis_set[i] |= + DP_TRAIN_MAX_SWING_REACHED; + + pre_emphasis[i] &= 0x03; + lane_voltage_pre_emphasis_set[i] |= pre_emphasis[i] + << DP_TRAIN_PRE_EMPHASIS_SHIFT; + if (it6505_check_pre_emphasis_max(pre_emphasis[i])) + lane_voltage_pre_emphasis_set[i] |= + DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + it6505_dpcd_write(it6505, DP_TRAINING_LANE0_SET + i, + lane_voltage_pre_emphasis_set[i]); + + if (lane_voltage_pre_emphasis_set[i] != + it6505_dpcd_read(it6505, DP_TRAINING_LANE0_SET + i)) + return false; + } + + return true; +} + +static bool +it6505_step_cr_train(struct it6505 *it6505, + struct it6505_step_train_para *lane_voltage_pre_emphasis) +{ + u8 loop_count = 0, i = 0, j; 
+ u8 link_status[DP_LINK_STATUS_SIZE] = { 0 }; + u8 lane_level_config[MAX_LANE_COUNT] = { 0 }; + int pre_emphasis_adjust = -1, voltage_swing_adjust = -1; + const struct drm_dp_aux *aux = &it6505->aux; + + it6505_dpcd_write(it6505, DP_DOWNSPREAD_CTRL, + it6505->enable_ssc ? DP_SPREAD_AMP_0_5 : 0x00); + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_1); + + while (loop_count < 5 && i < 10) { + i++; + if (!step_train_lane_voltage_para_set(it6505, + lane_voltage_pre_emphasis, + lane_level_config)) + continue; + drm_dp_link_train_clock_recovery_delay(aux, it6505->dpcd); + drm_dp_dpcd_read_link_status(&it6505->aux, link_status); + + if (drm_dp_clock_recovery_ok(link_status, it6505->lane_count)) { + it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_CR_DONE, + FORCE_CR_DONE); + return true; + } + DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "cr not done"); + + if (it6505_check_max_voltage_swing_reached(lane_level_config, + it6505->lane_count)) + goto cr_train_fail; + + for (j = 0; j < it6505->lane_count; j++) { + lane_voltage_pre_emphasis->voltage_swing[j] = + drm_dp_get_adjust_request_voltage(link_status, + j) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + lane_voltage_pre_emphasis->pre_emphasis[j] = + drm_dp_get_adjust_request_pre_emphasis(link_status, + j) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + if (voltage_swing_adjust == + lane_voltage_pre_emphasis->voltage_swing[j] && + pre_emphasis_adjust == + lane_voltage_pre_emphasis->pre_emphasis[j]) { + loop_count++; + continue; + } + + voltage_swing_adjust = + lane_voltage_pre_emphasis->voltage_swing[j]; + pre_emphasis_adjust = + lane_voltage_pre_emphasis->pre_emphasis[j]; + loop_count = 0; + + if (voltage_swing_adjust + pre_emphasis_adjust > + MAX_EQ_LEVEL) + lane_voltage_pre_emphasis->voltage_swing[j] = + MAX_EQ_LEVEL - + lane_voltage_pre_emphasis + ->pre_emphasis[j]; + } + } + +cr_train_fail: + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + + return false; +} + +static bool +it6505_step_eq_train(struct it6505 *it6505, + struct it6505_step_train_para *lane_voltage_pre_emphasis) +{ + u8 loop_count = 0, i, link_status[DP_LINK_STATUS_SIZE] = { 0 }; + u8 lane_level_config[MAX_LANE_COUNT] = { 0 }; + const struct drm_dp_aux *aux = &it6505->aux; + + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_2); + + while (loop_count < 6) { + loop_count++; + + if (!step_train_lane_voltage_para_set(it6505, + lane_voltage_pre_emphasis, + lane_level_config)) + continue; + + drm_dp_link_train_channel_eq_delay(aux, it6505->dpcd); + drm_dp_dpcd_read_link_status(&it6505->aux, link_status); + + if (!drm_dp_clock_recovery_ok(link_status, it6505->lane_count)) + goto eq_train_fail; + + if (drm_dp_channel_eq_ok(link_status, it6505->lane_count)) { + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_EQ_DONE, + FORCE_EQ_DONE); + return true; + } + DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "eq not done"); + + for (i = 0; i < it6505->lane_count; i++) { + lane_voltage_pre_emphasis->voltage_swing[i] = + drm_dp_get_adjust_request_voltage(link_status, + i) >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + lane_voltage_pre_emphasis->pre_emphasis[i] = + drm_dp_get_adjust_request_pre_emphasis(link_status, + i) >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + + if (lane_voltage_pre_emphasis->voltage_swing[i] + + lane_voltage_pre_emphasis->pre_emphasis[i] > + MAX_EQ_LEVEL) + lane_voltage_pre_emphasis->voltage_swing[i] = + 0x03 - lane_voltage_pre_emphasis + 
->pre_emphasis[i]; + } + } + +eq_train_fail: + it6505_dpcd_write(it6505, DP_TRAINING_PATTERN_SET, + DP_TRAINING_PATTERN_DISABLE); + return false; +} + +static bool it6505_link_start_step_train(struct it6505 *it6505) +{ + int err; + struct it6505_step_train_para lane_voltage_pre_emphasis = { + .voltage_swing = { 0 }, + .pre_emphasis = { 0 }, + }; + + DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start"); + err = it6505_drm_dp_link_configure(it6505); + + if (err < 0) + return false; + if (!it6505_step_cr_train(it6505, &lane_voltage_pre_emphasis)) + return false; + if (!it6505_step_eq_train(it6505, &lane_voltage_pre_emphasis)) + return false; + return true; +} + +static bool it6505_get_video_status(struct it6505 *it6505) +{ + int reg_0d; + + reg_0d = it6505_read(it6505, REG_SYSTEM_STS); + + if (reg_0d < 0) + return false; + + return reg_0d & VIDEO_STB; +} + +static void it6505_reset_hdcp(struct it6505 *it6505) +{ + it6505->hdcp_status = HDCP_AUTH_IDLE; + /* Disable CP_Desired */ + it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, 0x00); + it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, HDCP_RESET); +} + +static void it6505_start_hdcp(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + it6505_reset_hdcp(it6505); + queue_delayed_work(system_wq, &it6505->hdcp_work, + msecs_to_jiffies(2400)); +} + +static void it6505_stop_hdcp(struct it6505 *it6505) +{ + it6505_reset_hdcp(it6505); + cancel_delayed_work(&it6505->hdcp_work); +} + +static bool it6505_hdcp_is_ksv_valid(u8 *ksv) +{ + int i, ones = 0; + + /* KSV has 20 1's and 20 0's */ + for (i = 0; i < DRM_HDCP_KSV_LEN; i++) + ones += hweight8(ksv[i]); + if (ones != 20) + return false; + return true; +} + +static void it6505_hdcp_part1_auth(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + u8 hdcp_bcaps; + + it6505_set_bits(it6505, REG_RESET_CTRL, HDCP_RESET, 0x00); + /* Disable CP_Desired */ + it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, 0x00); + + usleep_range(1000, 1500); + hdcp_bcaps = it6505_dpcd_read(it6505, DP_AUX_HDCP_BCAPS); + DRM_DEV_DEBUG_DRIVER(dev, "DPCD[0x68028]: 0x%02x", + hdcp_bcaps); + + if (!hdcp_bcaps) + return; + + /* clear the repeater List Chk Done and fail bit */ + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL, + 0x00); + + /* Enable An Generator */ + it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_AN_GEN, HDCP_AN_GEN); + /* delay1ms(10);*/ + usleep_range(10000, 15000); + /* Stop An Generator */ + it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_AN_GEN, 0x00); + + it6505_set_bits(it6505, REG_HDCP_CTRL1, HDCP_CP_ENABLE, HDCP_CP_ENABLE); + + it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_START, + HDCP_TRIGGER_START); + + it6505->hdcp_status = HDCP_AUTH_GOING; +} + +static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input, + unsigned int size, u8 *output_av) +{ + struct shash_desc *desc; + struct crypto_shash *tfm; + int err; + struct device *dev = &it6505->client->dev; + + tfm = crypto_alloc_shash("sha1", 0, 0); + if (IS_ERR(tfm)) { + dev_err(dev, "crypto_alloc_shash sha1 failed"); + return PTR_ERR(tfm); + } + desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); + if (!desc) { + crypto_free_shash(tfm); + return -ENOMEM; + } + + desc->tfm = tfm; + err = crypto_shash_digest(desc, sha1_input, size, output_av); + if (err) + dev_err(dev, "crypto_shash_digest sha1 failed"); + + crypto_free_shash(tfm); + kfree(desc); + return err; +} + +static int 
it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input) +{ + struct device *dev = &it6505->client->dev; + u8 binfo[2]; + int down_stream_count, i, err, msg_count = 0; + + err = it6505_get_dpcd(it6505, DP_AUX_HDCP_BINFO, binfo, + ARRAY_SIZE(binfo)); + + if (err < 0) { + dev_err(dev, "Read binfo value Fail"); + return err; + } + + down_stream_count = binfo[0] & 0x7F; + DRM_DEV_DEBUG_DRIVER(dev, "binfo:0x%*ph", (int)ARRAY_SIZE(binfo), + binfo); + + if ((binfo[0] & BIT(7)) || (binfo[1] & BIT(3))) { + dev_err(dev, "HDCP max cascade device exceed"); + return 0; + } + + if (!down_stream_count || + down_stream_count > MAX_HDCP_DOWN_STREAM_COUNT) { + dev_err(dev, "HDCP down stream count Error %d", + down_stream_count); + return 0; + } + + for (i = 0; i < down_stream_count; i++) { + err = it6505_get_dpcd(it6505, DP_AUX_HDCP_KSV_FIFO + + (i % 3) * DRM_HDCP_KSV_LEN, + sha1_input + msg_count, + DRM_HDCP_KSV_LEN); + + if (err < 0) + return err; + + msg_count += 5; + } + + it6505->hdcp_down_stream_count = down_stream_count; + sha1_input[msg_count++] = binfo[0]; + sha1_input[msg_count++] = binfo[1]; + + it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_EN_M0_READ, + HDCP_EN_M0_READ); + + err = regmap_bulk_read(it6505->regmap, REG_M0_0_7, + sha1_input + msg_count, 8); + + it6505_set_bits(it6505, REG_HDCP_CTRL2, HDCP_EN_M0_READ, 0x00); + + if (err < 0) { + dev_err(dev, " Warning, Read M value Fail"); + return err; + } + + msg_count += 8; + + return msg_count; +} + +static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + u8 av[5][4], bv[5][4]; + int i, err; + + i = it6505_setup_sha1_input(it6505, it6505->sha1_input); + if (i <= 0) { + dev_err(dev, "SHA-1 Input length error %d", i); + return false; + } + + it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av); + + err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv, + sizeof(bv)); + + if (err < 0) { + dev_err(dev, "Read V' value Fail"); + return false; + } + + for (i = 0; i < 5; i++) + if (bv[i][3] != av[i][0] || bv[i][2] != av[i][1] || + bv[i][1] != av[i][2] || bv[i][0] != av[i][3]) + return false; + + DRM_DEV_DEBUG_DRIVER(dev, "V' all match!!"); + return true; +} + +static void it6505_hdcp_wait_ksv_list(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, + hdcp_wait_ksv_list); + struct device *dev = &it6505->client->dev; + unsigned int timeout = 5000; + u8 bstatus = 0; + bool ksv_list_check; + + timeout /= 20; + while (timeout > 0) { + if (!it6505_get_sink_hpd_status(it6505)) + return; + + bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS); + + if (bstatus & DP_BSTATUS_READY) + break; + + msleep(20); + timeout--; + } + + if (timeout == 0) { + DRM_DEV_DEBUG_DRIVER(dev, "timeout and ksv list wait failed"); + goto timeout; + } + + ksv_list_check = it6505_hdcp_part2_ksvlist_check(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "ksv list ready, ksv list check %s", + ksv_list_check ? 
"pass" : "fail"); + if (ksv_list_check) { + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_DONE, HDCP_TRIGGER_KSV_DONE); + return; + } +timeout: + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL, + HDCP_TRIGGER_KSV_DONE | HDCP_TRIGGER_KSV_FAIL); +} + +static void it6505_hdcp_work(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, + hdcp_work.work); + struct device *dev = &it6505->client->dev; + int ret; + u8 link_status[DP_LINK_STATUS_SIZE] = { 0 }; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + + if (!it6505_get_sink_hpd_status(it6505)) + return; + + ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status); + DRM_DEV_DEBUG_DRIVER(dev, "ret: %d link_status: %*ph", ret, + (int)sizeof(link_status), link_status); + + if (ret < 0 || !drm_dp_channel_eq_ok(link_status, it6505->lane_count) || + !it6505_get_video_status(it6505)) { + DRM_DEV_DEBUG_DRIVER(dev, "link train not done or no video"); + return; + } + + ret = it6505_get_dpcd(it6505, DP_AUX_HDCP_BKSV, it6505->bksvs, + ARRAY_SIZE(it6505->bksvs)); + if (ret < 0) { + dev_err(dev, "fail to get bksv ret: %d", ret); + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_FAIL, HDCP_TRIGGER_KSV_FAIL); + } + + DRM_DEV_DEBUG_DRIVER(dev, "bksv = 0x%*ph", + (int)ARRAY_SIZE(it6505->bksvs), it6505->bksvs); + + if (!it6505_hdcp_is_ksv_valid(it6505->bksvs)) { + dev_err(dev, "Display Port bksv not valid"); + it6505_set_bits(it6505, REG_HDCP_TRIGGER, + HDCP_TRIGGER_KSV_FAIL, HDCP_TRIGGER_KSV_FAIL); + } + + it6505_hdcp_part1_auth(it6505); +} + +static void it6505_show_hdcp_info(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int i; + u8 *sha1 = it6505->sha1_input; + + DRM_DEV_DEBUG_DRIVER(dev, "hdcp_status: %d is_repeater: %d", + it6505->hdcp_status, it6505->is_repeater); + DRM_DEV_DEBUG_DRIVER(dev, "bksv = 0x%*ph", + (int)ARRAY_SIZE(it6505->bksvs), it6505->bksvs); + + if (it6505->is_repeater) { + DRM_DEV_DEBUG_DRIVER(dev, "hdcp_down_stream_count: %d", + it6505->hdcp_down_stream_count); + DRM_DEV_DEBUG_DRIVER(dev, "sha1_input: 0x%*ph", + (int)ARRAY_SIZE(it6505->sha1_input), + it6505->sha1_input); + for (i = 0; i < it6505->hdcp_down_stream_count; i++) { + DRM_DEV_DEBUG_DRIVER(dev, "KSV_%d = 0x%*ph", i, + DRM_HDCP_KSV_LEN, sha1); + sha1 += DRM_HDCP_KSV_LEN; + } + DRM_DEV_DEBUG_DRIVER(dev, "binfo: 0x%2ph M0: 0x%8ph", + sha1, sha1 + 2); + } +} + +static void it6505_stop_link_train(struct it6505 *it6505) +{ + it6505->link_state = LINK_IDLE; + cancel_work_sync(&it6505->link_works); + it6505_write(it6505, REG_TRAIN_CTRL1, FORCE_RETRAIN); +} + +static void it6505_link_train_ok(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + it6505->link_state = LINK_OK; + /* disalbe mute enable avi info frame */ + it6505_set_bits(it6505, REG_DATA_MUTE_CTRL, EN_VID_MUTE, 0x00); + it6505_set_bits(it6505, REG_INFOFRAME_CTRL, + EN_VID_CTRL_PKT, EN_VID_CTRL_PKT); + + if (it6505_audio_input(it6505)) { + DRM_DEV_DEBUG_DRIVER(dev, "Enable audio!"); + it6505_enable_audio(it6505); + } + + if (it6505->hdcp_desired) + it6505_start_hdcp(it6505); +} + +static void it6505_link_step_train_process(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int ret, i, step_retry = 3; + + DRM_DEV_DEBUG_DRIVER(dev, "Start step train"); + + if (it6505->sink_count == 0) { + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d, force eq", + it6505->sink_count); + it6505_set_bits(it6505, REG_TRAIN_CTRL0, FORCE_EQ_DONE, + FORCE_EQ_DONE); + return; 
+ } + + if (!it6505->step_train) { + DRM_DEV_DEBUG_DRIVER(dev, "not support step train"); + return; + } + + /* step training start here */ + for (i = 0; i < step_retry; i++) { + it6505_link_reset_step_train(it6505); + ret = it6505_link_start_step_train(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "step train %s, retry:%d times", + ret ? "pass" : "failed", i + 1); + if (ret) { + it6505_link_train_ok(it6505); + return; + } + } + + DRM_DEV_DEBUG_DRIVER(dev, "training fail"); + it6505->link_state = LINK_IDLE; + it6505_video_reset(it6505); +} + +static void it6505_link_training_work(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, link_works); + struct device *dev = &it6505->client->dev; + int ret; + + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d", + it6505->sink_count); + + if (!it6505_get_sink_hpd_status(it6505)) + return; + + it6505_link_training_setup(it6505); + it6505_reset_hdcp(it6505); + it6505_aux_reset(it6505); + + if (it6505->auto_train_retry < 1) { + it6505_link_step_train_process(it6505); + return; + } + + ret = it6505_link_start_auto_train(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "auto train %s, auto_train_retry: %d", + ret ? "pass" : "failed", it6505->auto_train_retry); + it6505->auto_train_retry--; + + if (ret) { + it6505_link_train_ok(it6505); + return; + } + + it6505_dump(it6505); +} + +static void it6505_plugged_status_to_codec(struct it6505 *it6505) +{ + enum drm_connector_status status = it6505->connector_status; + + if (it6505->plugged_cb && it6505->codec_dev) + it6505->plugged_cb(it6505->codec_dev, + status == connector_status_connected); +} + +static int it6505_process_hpd_irq(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + int ret, dpcd_sink_count, dp_irq_vector, bstatus; + u8 link_status[DP_LINK_STATUS_SIZE]; + + if (!it6505_get_sink_hpd_status(it6505)) { + DRM_DEV_DEBUG_DRIVER(dev, "HPD_IRQ HPD low"); + it6505->sink_count = 0; + return 0; + } + + ret = it6505_dpcd_read(it6505, DP_SINK_COUNT); + if (ret < 0) + return ret; + + dpcd_sink_count = DP_GET_SINK_COUNT(ret); + DRM_DEV_DEBUG_DRIVER(dev, "dpcd_sink_count: %d it6505->sink_count:%d", + dpcd_sink_count, it6505->sink_count); + + if (it6505->branch_device && dpcd_sink_count != it6505->sink_count) { + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + it6505->sink_count = dpcd_sink_count; + it6505_reset_logic(it6505); + it6505_int_mask_enable(it6505); + it6505_init(it6505); + return 0; + } + + dp_irq_vector = it6505_dpcd_read(it6505, DP_DEVICE_SERVICE_IRQ_VECTOR); + if (dp_irq_vector < 0) + return dp_irq_vector; + + DRM_DEV_DEBUG_DRIVER(dev, "dp_irq_vector = 0x%02x", dp_irq_vector); + + if (dp_irq_vector & DP_CP_IRQ) { + it6505_set_bits(it6505, REG_HDCP_TRIGGER, HDCP_TRIGGER_CPIRQ, + HDCP_TRIGGER_CPIRQ); + + bstatus = it6505_dpcd_read(it6505, DP_AUX_HDCP_BSTATUS); + if (bstatus < 0) + return bstatus; + + DRM_DEV_DEBUG_DRIVER(dev, "Bstatus = 0x%02x", bstatus); + } + + ret = drm_dp_dpcd_read_link_status(&it6505->aux, link_status); + if (ret < 0) { + dev_err(dev, "Fail to read link status ret: %d", ret); + return ret; + } + + DRM_DEV_DEBUG_DRIVER(dev, "link status = 0x%*ph", + (int)ARRAY_SIZE(link_status), link_status); + + if (!drm_dp_channel_eq_ok(link_status, it6505->lane_count)) { + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + it6505_video_reset(it6505); + } + + return 0; +} + +static void it6505_irq_hpd(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + it6505->hpd_state = it6505_get_sink_hpd_status(it6505); + DRM_DEV_DEBUG_DRIVER(dev, 
"hpd change interrupt, change to %s", + it6505->hpd_state ? "high" : "low"); + + if (it6505->bridge.dev) + drm_helper_hpd_irq_event(it6505->bridge.dev); + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count: %d", + it6505->sink_count); + + if (it6505->hpd_state) { + wait_for_completion_timeout(&it6505->wait_edid_complete, + msecs_to_jiffies(6000)); + it6505_lane_termination_on(it6505); + it6505_lane_power_on(it6505); + + /* + * for some dongle which issue HPD_irq + * when sink count change from 0->1 + * it6505 not able to receive HPD_IRQ + * if HW never go into trainig done + */ + + if (it6505->branch_device && it6505->sink_count == 0) + schedule_work(&it6505->link_works); + + if (!it6505_get_video_status(it6505)) + it6505_video_reset(it6505); + + it6505_calc_video_info(it6505); + } else { + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + + if (it6505->hdcp_desired) + it6505_stop_hdcp(it6505); + + it6505_video_disable(it6505); + it6505_disable_audio(it6505); + it6505_stop_link_train(it6505); + it6505_lane_off(it6505); + it6505_link_reset_step_train(it6505); + } +} + +static void it6505_irq_hpd_irq(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "hpd_irq interrupt"); + + if (it6505_process_hpd_irq(it6505) < 0) + DRM_DEV_DEBUG_DRIVER(dev, "process hpd_irq fail!"); +} + +static void it6505_irq_scdt(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + bool data; + + data = it6505_get_video_status(it6505); + DRM_DEV_DEBUG_DRIVER(dev, "video stable change interrupt, %s", + data ? "stable" : "unstable"); + it6505_calc_video_info(it6505); + it6505_link_reset_step_train(it6505); + + if (data) + schedule_work(&it6505->link_works); +} + +static void it6505_irq_hdcp_done(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "hdcp done interrupt"); + it6505->hdcp_status = HDCP_AUTH_DONE; + it6505_show_hdcp_info(it6505); +} + +static void it6505_irq_hdcp_fail(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "hdcp fail interrupt"); + it6505->hdcp_status = HDCP_AUTH_IDLE; + it6505_show_hdcp_info(it6505); + it6505_start_hdcp(it6505); +} + +static void it6505_irq_aux_cmd_fail(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "AUX PC Request Fail Interrupt"); +} + +static void it6505_irq_hdcp_ksv_check(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "HDCP event Interrupt"); + schedule_work(&it6505->hdcp_wait_ksv_list); +} + +static void it6505_irq_audio_fifo_error(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "audio fifo error Interrupt"); + + if (it6505_audio_input(it6505)) + it6505_enable_audio(it6505); +} + +static void it6505_irq_link_train_fail(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "link training fail interrupt"); + schedule_work(&it6505->link_works); +} + +static void it6505_irq_video_fifo_error(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "video fifo overflow interrupt"); + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + flush_work(&it6505->link_works); + it6505_stop_hdcp(it6505); + it6505_video_reset(it6505); +} + +static void it6505_irq_io_latch_fifo_overflow(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "IO 
latch fifo overflow interrupt"); + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + flush_work(&it6505->link_works); + it6505_stop_hdcp(it6505); + it6505_video_reset(it6505); +} + +static bool it6505_test_bit(unsigned int bit, const unsigned int *addr) +{ + return 1 & (addr[bit / BITS_PER_BYTE] >> (bit % BITS_PER_BYTE)); +} + +static irqreturn_t it6505_int_threaded_handler(int unused, void *data) +{ + struct it6505 *it6505 = data; + struct device *dev = &it6505->client->dev; + static const struct { + int bit; + void (*handler)(struct it6505 *it6505); + } irq_vec[] = { + { BIT_INT_HPD, it6505_irq_hpd }, + { BIT_INT_HPD_IRQ, it6505_irq_hpd_irq }, + { BIT_INT_SCDT, it6505_irq_scdt }, + { BIT_INT_HDCP_FAIL, it6505_irq_hdcp_fail }, + { BIT_INT_HDCP_DONE, it6505_irq_hdcp_done }, + { BIT_INT_AUX_CMD_FAIL, it6505_irq_aux_cmd_fail }, + { BIT_INT_HDCP_KSV_CHECK, it6505_irq_hdcp_ksv_check }, + { BIT_INT_AUDIO_FIFO_ERROR, it6505_irq_audio_fifo_error }, + { BIT_INT_LINK_TRAIN_FAIL, it6505_irq_link_train_fail }, + { BIT_INT_VID_FIFO_ERROR, it6505_irq_video_fifo_error }, + { BIT_INT_IO_FIFO_OVERFLOW, it6505_irq_io_latch_fifo_overflow }, + }; + int int_status[3], i; + + msleep(100); + mutex_lock(&it6505->extcon_lock); + + if (it6505->enable_drv_hold || !it6505->powered) + goto unlock; + + int_status[0] = it6505_read(it6505, INT_STATUS_01); + int_status[1] = it6505_read(it6505, INT_STATUS_02); + int_status[2] = it6505_read(it6505, INT_STATUS_03); + + it6505_write(it6505, INT_STATUS_01, int_status[0]); + it6505_write(it6505, INT_STATUS_02, int_status[1]); + it6505_write(it6505, INT_STATUS_03, int_status[2]); + + DRM_DEV_DEBUG_DRIVER(dev, "reg06 = 0x%02x", int_status[0]); + DRM_DEV_DEBUG_DRIVER(dev, "reg07 = 0x%02x", int_status[1]); + DRM_DEV_DEBUG_DRIVER(dev, "reg08 = 0x%02x", int_status[2]); + it6505_debug_print(it6505, REG_SYSTEM_STS, ""); + + if (it6505_test_bit(irq_vec[0].bit, (unsigned int *)int_status)) + irq_vec[0].handler(it6505); + + if (!it6505->hpd_state) + goto unlock; + + for (i = 1; i < ARRAY_SIZE(irq_vec); i++) { + if (it6505_test_bit(irq_vec[i].bit, (unsigned int *)int_status)) + irq_vec[i].handler(it6505); + } + +unlock: + mutex_unlock(&it6505->extcon_lock); + + return IRQ_HANDLED; +} + +static int it6505_poweron(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + struct it6505_platform_data *pdata = &it6505->pdata; + int err; + + DRM_DEV_DEBUG_DRIVER(dev, "it6505 start powered on"); + + if (it6505->powered) { + DRM_DEV_DEBUG_DRIVER(dev, "it6505 already powered on"); + return 0; + } + + if (pdata->pwr18) { + err = regulator_enable(pdata->pwr18); + if (err) { + DRM_DEV_DEBUG_DRIVER(dev, "Failed to enable VDD18: %d", + err); + return err; + } + } + + if (pdata->ovdd) { + /* time interval between IVDD and OVDD at least be 1ms */ + usleep_range(1000, 2000); + err = regulator_enable(pdata->ovdd); + if (err) { + regulator_disable(pdata->pwr18); + return err; + } + } + /* time interval between OVDD and SYSRSTN at least be 10ms */ + if (pdata->gpiod_reset) { + usleep_range(10000, 20000); + gpiod_set_value_cansleep(pdata->gpiod_reset, 0); + usleep_range(1000, 2000); + gpiod_set_value_cansleep(pdata->gpiod_reset, 1); + usleep_range(10000, 20000); + } + + it6505_reset_logic(it6505); + it6505_int_mask_enable(it6505); + it6505_init(it6505); + it6505_lane_off(it6505); + + it6505->powered = true; + + return 0; +} + +static int it6505_poweroff(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + struct it6505_platform_data *pdata = &it6505->pdata; + int err; + + 
DRM_DEV_DEBUG_DRIVER(dev, "it6505 start power off"); + + if (!it6505->powered) { + DRM_DEV_DEBUG_DRIVER(dev, "power had been already off"); + return 0; + } + + if (pdata->gpiod_reset) + gpiod_set_value_cansleep(pdata->gpiod_reset, 0); + + if (pdata->pwr18) { + err = regulator_disable(pdata->pwr18); + if (err) + return err; + } + + if (pdata->ovdd) { + err = regulator_disable(pdata->ovdd); + if (err) + return err; + } + + it6505->powered = false; + it6505->sink_count = 0; + + return 0; +} + +static enum drm_connector_status it6505_detect(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + enum drm_connector_status status = connector_status_disconnected; + int dp_sink_count; + + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d powered:%d", + it6505->sink_count, it6505->powered); + + mutex_lock(&it6505->mode_lock); + + if (!it6505->powered) + goto unlock; + + if (it6505->enable_drv_hold) { + status = it6505_get_sink_hpd_status(it6505) ? + connector_status_connected : + connector_status_disconnected; + goto unlock; + } + + if (it6505_get_sink_hpd_status(it6505)) { + it6505_aux_on(it6505); + it6505_drm_dp_link_probe(&it6505->aux, &it6505->link); + it6505_drm_dp_link_power_up(&it6505->aux, &it6505->link); + it6505->auto_train_retry = AUTO_TRAIN_RETRY; + + if (it6505->dpcd[0] == 0) { + it6505_get_dpcd(it6505, DP_DPCD_REV, it6505->dpcd, + ARRAY_SIZE(it6505->dpcd)); + it6505_variable_config(it6505); + it6505_parse_link_capabilities(it6505); + } + + dp_sink_count = it6505_dpcd_read(it6505, DP_SINK_COUNT); + it6505->sink_count = DP_GET_SINK_COUNT(dp_sink_count); + DRM_DEV_DEBUG_DRIVER(dev, "it6505->sink_count:%d branch:%d", + it6505->sink_count, it6505->branch_device); + + if (it6505->branch_device) { + status = (it6505->sink_count != 0) ? 
+ connector_status_connected : + connector_status_disconnected; + } else { + status = connector_status_connected; + } + } else { + it6505->sink_count = 0; + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + } + +unlock: + if (it6505->connector_status != status) { + it6505->connector_status = status; + it6505_plugged_status_to_codec(it6505); + } + + mutex_unlock(&it6505->mode_lock); + + return status; +} + +static int it6505_extcon_notifier(struct notifier_block *self, + unsigned long event, void *ptr) +{ + struct it6505 *it6505 = container_of(self, struct it6505, event_nb); + + schedule_work(&it6505->extcon_wq); + return NOTIFY_DONE; +} + +static void it6505_extcon_work(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, extcon_wq); + struct device *dev = &it6505->client->dev; + int state = extcon_get_state(it6505->extcon, EXTCON_DISP_DP); + unsigned int pwroffretry = 0; + + if (it6505->enable_drv_hold) + return; + + mutex_lock(&it6505->extcon_lock); + + DRM_DEV_DEBUG_DRIVER(dev, "EXTCON_DISP_DP = 0x%02x", state); + if (state > 0) { + DRM_DEV_DEBUG_DRIVER(dev, "start to power on"); + msleep(100); + it6505_poweron(it6505); + } else { + DRM_DEV_DEBUG_DRIVER(dev, "start to power off"); + while (it6505_poweroff(it6505) && pwroffretry++ < 5) { + DRM_DEV_DEBUG_DRIVER(dev, "power off fail %d times", + pwroffretry); + } + + drm_helper_hpd_irq_event(it6505->bridge.dev); + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + DRM_DEV_DEBUG_DRIVER(dev, "power off it6505 success!"); + } + + mutex_unlock(&it6505->extcon_lock); +} + +static int it6505_use_notifier_module(struct it6505 *it6505) +{ + int ret; + struct device *dev = &it6505->client->dev; + + it6505->event_nb.notifier_call = it6505_extcon_notifier; + INIT_WORK(&it6505->extcon_wq, it6505_extcon_work); + ret = devm_extcon_register_notifier(&it6505->client->dev, + it6505->extcon, EXTCON_DISP_DP, + &it6505->event_nb); + if (ret) { + dev_err(dev, "failed to register notifier for DP"); + return ret; + } + + schedule_work(&it6505->extcon_wq); + + return 0; +} + +static void it6505_remove_notifier_module(struct it6505 *it6505) +{ + if (it6505->extcon) { + devm_extcon_unregister_notifier(&it6505->client->dev, + it6505->extcon, EXTCON_DISP_DP, + &it6505->event_nb); + + flush_work(&it6505->extcon_wq); + } +} + +static void __maybe_unused it6505_delayed_audio(struct work_struct *work) +{ + struct it6505 *it6505 = container_of(work, struct it6505, + delayed_audio.work); + + DRM_DEV_DEBUG_DRIVER(&it6505->client->dev, "start"); + + if (!it6505->powered) + return; + + if (!it6505->enable_drv_hold) + it6505_enable_audio(it6505); +} + +static int __maybe_unused it6505_audio_setup_hw_params(struct it6505 *it6505, + struct hdmi_codec_params + *params) +{ + struct device *dev = &it6505->client->dev; + int i = 0; + + DRM_DEV_DEBUG_DRIVER(dev, "%s %d Hz, %d bit, %d channels\n", __func__, + params->sample_rate, params->sample_width, + params->cea.channels); + + if (!it6505->bridge.encoder) + return -ENODEV; + + if (params->cea.channels <= 1 || params->cea.channels > 8) { + DRM_DEV_DEBUG_DRIVER(dev, "channel number: %d not support", + it6505->audio.channel_count); + return -EINVAL; + } + + it6505->audio.channel_count = params->cea.channels; + + while (i < ARRAY_SIZE(audio_sample_rate_map) && + params->sample_rate != + audio_sample_rate_map[i].sample_rate_value) { + i++; + } + if (i == ARRAY_SIZE(audio_sample_rate_map)) { + DRM_DEV_DEBUG_DRIVER(dev, "sample rate: %d Hz not support", + params->sample_rate); + return -EINVAL; + } + 
it6505->audio.sample_rate = audio_sample_rate_map[i].rate; + + switch (params->sample_width) { + case 16: + it6505->audio.word_length = WORD_LENGTH_16BIT; + break; + case 18: + it6505->audio.word_length = WORD_LENGTH_18BIT; + break; + case 20: + it6505->audio.word_length = WORD_LENGTH_20BIT; + break; + case 24: + case 32: + it6505->audio.word_length = WORD_LENGTH_24BIT; + break; + default: + DRM_DEV_DEBUG_DRIVER(dev, "wordlength: %d bit not support", + params->sample_width); + return -EINVAL; + } + + return 0; +} + +static void __maybe_unused it6505_audio_shutdown(struct device *dev, void *data) +{ + struct it6505 *it6505 = dev_get_drvdata(dev); + + if (it6505->powered) + it6505_disable_audio(it6505); +} + +static int __maybe_unused it6505_audio_hook_plugged_cb(struct device *dev, + void *data, + hdmi_codec_plugged_cb fn, + struct device *codec_dev) +{ + struct it6505 *it6505 = data; + + it6505->plugged_cb = fn; + it6505->codec_dev = codec_dev; + it6505_plugged_status_to_codec(it6505); + + return 0; +} + +static inline struct it6505 *bridge_to_it6505(struct drm_bridge *bridge) +{ + return container_of(bridge, struct it6505, bridge); +} + +static int it6505_bridge_attach(struct drm_bridge *bridge, + enum drm_bridge_attach_flags flags) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + struct device *dev = &it6505->client->dev; + int ret; + + if (!(flags & DRM_BRIDGE_ATTACH_NO_CONNECTOR)) { + DRM_ERROR("DRM_BRIDGE_ATTACH_NO_CONNECTOR must be supplied"); + return -EINVAL; + } + + if (!bridge->encoder) { + dev_err(dev, "Parent encoder object not found"); + return -ENODEV; + } + + /* Register aux channel */ + it6505->aux.name = "DP-AUX"; + it6505->aux.dev = dev; + it6505->aux.drm_dev = bridge->dev; + it6505->aux.transfer = it6505_aux_transfer; + + ret = drm_dp_aux_register(&it6505->aux); + + if (ret < 0) { + dev_err(dev, "Failed to register aux: %d", ret); + return ret; + } + + if (it6505->extcon) { + ret = it6505_use_notifier_module(it6505); + if (ret < 0) { + dev_err(dev, "use notifier module failed"); + return ret; + } + } + + return 0; +} + +static void it6505_bridge_detach(struct drm_bridge *bridge) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + + flush_work(&it6505->link_works); + it6505_remove_notifier_module(it6505); +} + +static enum drm_mode_status +it6505_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + return MODE_NO_INTERLACE; + + if (mode->clock > DPI_PIXEL_CLK_MAX) + return MODE_CLOCK_HIGH; + + it6505->video_info.clock = mode->clock; + + return MODE_OK; +} + +static void it6505_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + struct device *dev = &it6505->client->dev; + struct drm_atomic_state *state = old_state->base.state; + struct hdmi_avi_infoframe frame; + struct drm_crtc_state *crtc_state; + struct drm_connector_state *conn_state; + struct drm_display_mode *mode; + struct drm_connector *connector; + int ret; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + + connector = drm_atomic_get_new_connector_for_encoder(state, + bridge->encoder); + + if (WARN_ON(!connector)) + return; + + conn_state = drm_atomic_get_new_connector_state(state, connector); + + if (WARN_ON(!conn_state)) + return; + + crtc_state = drm_atomic_get_new_crtc_state(state, conn_state->crtc); + + if (WARN_ON(!crtc_state)) + return; + + 
mode = &crtc_state->adjusted_mode; + + if (WARN_ON(!mode)) + return; + + ret = drm_hdmi_avi_infoframe_from_display_mode(&frame, + connector, + mode); + if (ret) + dev_err(dev, "Failed to setup AVI infoframe: %d", ret); + + it6505_update_video_parameter(it6505, mode); + + ret = it6505_send_video_infoframe(it6505, &frame); + + if (ret) + dev_err(dev, "Failed to send AVI infoframe: %d", ret); + + it6505_int_mask_enable(it6505); + it6505_video_reset(it6505); +} + +static void it6505_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_bridge_state *old_state) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + struct device *dev = &it6505->client->dev; + + DRM_DEV_DEBUG_DRIVER(dev, "start"); + + if (it6505->powered) + it6505_video_disable(it6505); +} + +static enum drm_connector_status +it6505_bridge_detect(struct drm_bridge *bridge) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + + return it6505_detect(it6505); +} + +static struct edid *it6505_bridge_get_edid(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct it6505 *it6505 = bridge_to_it6505(bridge); + struct device *dev = &it6505->client->dev; + struct edid *edid; + + edid = drm_do_get_edid(connector, it6505_get_edid_block, it6505); + + if (!edid) { + DRM_DEV_DEBUG_DRIVER(dev, "failed to get edid!"); + return NULL; + } + + return edid; +} + +static const struct drm_bridge_funcs it6505_bridge_funcs = { + .atomic_duplicate_state = drm_atomic_helper_bridge_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .attach = it6505_bridge_attach, + .detach = it6505_bridge_detach, + .mode_valid = it6505_bridge_mode_valid, + .atomic_enable = it6505_bridge_atomic_enable, + .atomic_disable = it6505_bridge_atomic_disable, + .detect = it6505_bridge_detect, + .get_edid = it6505_bridge_get_edid, +}; + +static __maybe_unused int it6505_bridge_resume(struct device *dev) +{ + struct it6505 *it6505 = dev_get_drvdata(dev); + + return it6505_poweron(it6505); +} + +static __maybe_unused int it6505_bridge_suspend(struct device *dev) +{ + struct it6505 *it6505 = dev_get_drvdata(dev); + + return it6505_poweroff(it6505); +} + +static SIMPLE_DEV_PM_OPS(it6505_bridge_pm_ops, it6505_bridge_suspend, + it6505_bridge_resume); + +static int it6505_init_pdata(struct it6505 *it6505) +{ + struct it6505_platform_data *pdata = &it6505->pdata; + struct device *dev = &it6505->client->dev; + + /* 1.0V digital core power regulator */ + pdata->pwr18 = devm_regulator_get(dev, "pwr18"); + if (IS_ERR(pdata->pwr18)) { + dev_err(dev, "pwr18 regulator not found"); + return PTR_ERR(pdata->pwr18); + } + + pdata->ovdd = devm_regulator_get(dev, "ovdd"); + if (IS_ERR(pdata->ovdd)) { + dev_err(dev, "ovdd regulator not found"); + return PTR_ERR(pdata->ovdd); + } + + pdata->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(pdata->gpiod_reset)) { + dev_err(dev, "gpiod_reset gpio not found"); + return PTR_ERR(pdata->gpiod_reset); + } + + return 0; +} + +static void it6505_parse_dt(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + u32 *afe_setting = &it6505->afe_setting; + + it6505->lane_swap_disabled = + device_property_read_bool(dev, "no-laneswap"); + + if (it6505->lane_swap_disabled) + it6505->lane_swap = false; + + if (device_property_read_u32(dev, "afe-setting", afe_setting) == 0) { + if (*afe_setting >= ARRAY_SIZE(afe_setting_table)) { + dev_err(dev, "afe setting error, use default"); + *afe_setting = 0; + } + } else { + 
*afe_setting = 0; + } + DRM_DEV_DEBUG_DRIVER(dev, "using afe_setting: %d", *afe_setting); +} + +static ssize_t receive_timing_debugfs_show(struct file *file, char __user *buf, + size_t len, loff_t *ppos) +{ + struct it6505 *it6505 = file->private_data; + struct drm_display_mode *vid = &it6505->video_info; + u8 read_buf[READ_BUFFER_SIZE]; + u8 *str = read_buf, *end = read_buf + PAGE_SIZE; + ssize_t ret, count; + + if (!it6505) + return -ENODEV; + + it6505_calc_video_info(it6505); + str += scnprintf(str, end - str, "---video timing---\n"); + str += scnprintf(str, end - str, "PCLK:%d.%03dMHz\n", + vid->clock / 1000, vid->clock % 1000); + str += scnprintf(str, end - str, "HTotal:%d\n", vid->htotal); + str += scnprintf(str, end - str, "HActive:%d\n", vid->hdisplay); + str += scnprintf(str, end - str, "HFrontPorch:%d\n", + vid->hsync_start - vid->hdisplay); + str += scnprintf(str, end - str, "HSyncWidth:%d\n", + vid->hsync_end - vid->hsync_start); + str += scnprintf(str, end - str, "HBackPorch:%d\n", + vid->htotal - vid->hsync_end); + str += scnprintf(str, end - str, "VTotal:%d\n", vid->vtotal); + str += scnprintf(str, end - str, "VActive:%d\n", vid->vdisplay); + str += scnprintf(str, end - str, "VFrontPorch:%d\n", + vid->vsync_start - vid->vdisplay); + str += scnprintf(str, end - str, "VSyncWidth:%d\n", + vid->vsync_end - vid->vsync_start); + str += scnprintf(str, end - str, "VBackPorch:%d\n", + vid->vtotal - vid->vsync_end); + + count = str - read_buf; + ret = simple_read_from_buffer(buf, len, ppos, read_buf, count); + + return ret; +} + +static int force_power_on_off_debugfs_write(void *data, u64 value) +{ + struct it6505 *it6505 = data; + + if (!it6505) + return -ENODEV; + + if (value) + it6505_poweron(it6505); + else + it6505_poweroff(it6505); + + return 0; +} + +static int enable_drv_hold_debugfs_show(void *data, u64 *buf) +{ + struct it6505 *it6505 = data; + + if (!it6505) + return -ENODEV; + + *buf = it6505->enable_drv_hold; + + return 0; +} + +static int enable_drv_hold_debugfs_write(void *data, u64 drv_hold) +{ + struct it6505 *it6505 = data; + + if (!it6505) + return -ENODEV; + + it6505->enable_drv_hold = drv_hold; + + if (it6505->enable_drv_hold) { + it6505_int_mask_disable(it6505); + } else { + it6505_clear_int(it6505); + it6505_int_mask_enable(it6505); + + if (it6505->powered) { + it6505->connector_status = + it6505_get_sink_hpd_status(it6505) ? 
+ connector_status_connected : + connector_status_disconnected; + } else { + it6505->connector_status = + connector_status_disconnected; + } + } + + return 0; +} + +static const struct file_operations receive_timing_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = receive_timing_debugfs_show, + .llseek = default_llseek, +}; + +DEFINE_DEBUGFS_ATTRIBUTE(fops_force_power, NULL, + force_power_on_off_debugfs_write, "%llu\n"); + +DEFINE_DEBUGFS_ATTRIBUTE(fops_enable_drv_hold, enable_drv_hold_debugfs_show, + enable_drv_hold_debugfs_write, "%llu\n"); + +static const struct debugfs_entries debugfs_entry[] = { + { "receive_timing", &receive_timing_fops }, + { "force_power_on_off", &fops_force_power }, + { "enable_drv_hold", &fops_enable_drv_hold }, + { NULL, NULL }, +}; + +static void debugfs_create_files(struct it6505 *it6505) +{ + int i = 0; + + while (debugfs_entry[i].name && debugfs_entry[i].fops) { + debugfs_create_file(debugfs_entry[i].name, 0644, + it6505->debugfs, it6505, + debugfs_entry[i].fops); + i++; + } +} + +static void debugfs_init(struct it6505 *it6505) +{ + struct device *dev = &it6505->client->dev; + + it6505->debugfs = debugfs_create_dir(DEBUGFS_DIR_NAME, NULL); + + if (IS_ERR(it6505->debugfs)) { + dev_err(dev, "failed to create debugfs root"); + return; + } + + debugfs_create_files(it6505); +} + +static void it6505_debugfs_remove(struct it6505 *it6505) +{ + debugfs_remove_recursive(it6505->debugfs); +} + +static void it6505_shutdown(struct i2c_client *client) +{ + struct it6505 *it6505 = dev_get_drvdata(&client->dev); + + if (it6505->powered) + it6505_lane_off(it6505); +} + +static int it6505_i2c_probe(struct i2c_client *client, + const struct i2c_device_id *id) +{ + struct it6505 *it6505; + struct device *dev = &client->dev; + struct extcon_dev *extcon; + int err, intp_irq; + + it6505 = devm_kzalloc(&client->dev, sizeof(*it6505), GFP_KERNEL); + if (!it6505) + return -ENOMEM; + + mutex_init(&it6505->extcon_lock); + mutex_init(&it6505->mode_lock); + mutex_init(&it6505->aux_lock); + + it6505->bridge.of_node = client->dev.of_node; + it6505->connector_status = connector_status_disconnected; + it6505->client = client; + i2c_set_clientdata(client, it6505); + + /* get extcon device from DTS */ + extcon = extcon_get_edev_by_phandle(dev, 0); + if (PTR_ERR(extcon) == -EPROBE_DEFER) + return -EPROBE_DEFER; + if (IS_ERR(extcon)) { + dev_err(dev, "can not get extcon device!"); + return PTR_ERR(extcon); + } + + it6505->extcon = extcon; + + it6505->regmap = devm_regmap_init_i2c(client, &it6505_regmap_config); + if (IS_ERR(it6505->regmap)) { + dev_err(dev, "regmap i2c init failed"); + err = PTR_ERR(it6505->regmap); + return err; + } + + err = it6505_init_pdata(it6505); + if (err) { + dev_err(dev, "Failed to initialize pdata: %d", err); + return err; + } + + it6505_parse_dt(it6505); + + intp_irq = client->irq; + + if (!intp_irq) { + dev_err(dev, "Failed to get INTP IRQ"); + err = -ENODEV; + return err; + } + + err = devm_request_threaded_irq(&client->dev, intp_irq, NULL, + it6505_int_threaded_handler, + IRQF_TRIGGER_LOW | IRQF_ONESHOT, + "it6505-intp", it6505); + if (err) { + dev_err(dev, "Failed to request INTP threaded IRQ: %d", err); + return err; + } + + INIT_WORK(&it6505->link_works, it6505_link_training_work); + INIT_WORK(&it6505->hdcp_wait_ksv_list, it6505_hdcp_wait_ksv_list); + INIT_DELAYED_WORK(&it6505->hdcp_work, it6505_hdcp_work); + init_completion(&it6505->wait_edid_complete); + memset(it6505->dpcd, 0, sizeof(it6505->dpcd)); + it6505->powered = false; + 
it6505->enable_drv_hold = DEFAULT_DRV_HOLD; + + if (DEFAULT_PWR_ON) + it6505_poweron(it6505); + + DRM_DEV_DEBUG_DRIVER(dev, "it6505 device name: %s", dev_name(dev)); + debugfs_init(it6505); + + it6505->bridge.funcs = &it6505_bridge_funcs; + it6505->bridge.type = DRM_MODE_CONNECTOR_DisplayPort; + it6505->bridge.ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | + DRM_BRIDGE_OP_HPD; + drm_bridge_add(&it6505->bridge); + + return 0; +} + +static int it6505_i2c_remove(struct i2c_client *client) +{ + struct it6505 *it6505 = i2c_get_clientdata(client); + + drm_bridge_remove(&it6505->bridge); + drm_dp_aux_unregister(&it6505->aux); + it6505_debugfs_remove(it6505); + it6505_poweroff(it6505); + + return 0; +} + +static const struct i2c_device_id it6505_id[] = { + { "it6505", 0 }, + { } +}; + +MODULE_DEVICE_TABLE(i2c, it6505_id); + +static const struct of_device_id it6505_of_match[] = { + { .compatible = "ite,it6505" }, + { } +}; + +static struct i2c_driver it6505_i2c_driver = { + .driver = { + .name = "it6505", + .of_match_table = it6505_of_match, + .pm = &it6505_bridge_pm_ops, + }, + .probe = it6505_i2c_probe, + .remove = it6505_i2c_remove, + .shutdown = it6505_shutdown, + .id_table = it6505_id, +}; + +module_i2c_driver(it6505_i2c_driver); + +MODULE_AUTHOR("Allen Chen "); +MODULE_DESCRIPTION("IT6505 DisplayPort Transmitter driver"); +MODULE_LICENSE("GPL v2"); -- cgit From 657b15d672f4d89cf0750793473b8963429f8ae3 Mon Sep 17 00:00:00 2001 From: Ville Syrjälä Date: Mon, 24 Jan 2022 21:31:35 +0200 Subject: drm/i915: s/GRAPHICS_VER/DISPLAY_VER/ where appropriate MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use DISPLAY_VER rather than GRAPHICS_VER to determine availability of display hardware features. Signed-off-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220124193136.2397-1-ville.syrjala@linux.intel.com Reviewed-by: Jani Nikula --- drivers/gpu/drm/i915/i915_drv.h | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index f954e3926603..512ba039a8f5 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -1465,8 +1465,8 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \ (IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv)) -#define HAS_GMBUS_IRQ(dev_priv) (GRAPHICS_VER(dev_priv) >= 4) -#define HAS_GMBUS_BURST_READ(dev_priv) (GRAPHICS_VER(dev_priv) >= 11 || \ +#define HAS_GMBUS_IRQ(dev_priv) (DISPLAY_VER(dev_priv) >= 4) +#define HAS_GMBUS_BURST_READ(dev_priv) (DISPLAY_VER(dev_priv) >= 11 || \ IS_GEMINILAKE(dev_priv) || \ IS_KABYLAKE(dev_priv)) @@ -1478,9 +1478,9 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define SUPPORTS_TV(dev_priv) (INTEL_INFO(dev_priv)->display.supports_tv) #define I915_HAS_HOTPLUG(dev_priv) (INTEL_INFO(dev_priv)->display.has_hotplug) -#define HAS_FW_BLC(dev_priv) (GRAPHICS_VER(dev_priv) > 2) +#define HAS_FW_BLC(dev_priv) (DISPLAY_VER(dev_priv) > 2) #define HAS_FBC(dev_priv) (INTEL_INFO(dev_priv)->display.fbc_mask != 0) -#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && GRAPHICS_VER(dev_priv) >= 7) +#define HAS_CUR_FBC(dev_priv) (!HAS_GMCH(dev_priv) && DISPLAY_VER(dev_priv) >= 7) #define HAS_IPS(dev_priv) (IS_HSW_ULT(dev_priv) || IS_BROADWELL(dev_priv)) @@ -1493,7 +1493,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_PSR(dev_priv) (INTEL_INFO(dev_priv)->display.has_psr) #define 
HAS_PSR_HW_TRACKING(dev_priv) \ (INTEL_INFO(dev_priv)->display.has_psr_hw_tracking) -#define HAS_PSR2_SEL_FETCH(dev_priv) (GRAPHICS_VER(dev_priv) >= 12) +#define HAS_PSR2_SEL_FETCH(dev_priv) (DISPLAY_VER(dev_priv) >= 12) #define HAS_TRANSCODER(dev_priv, trans) ((INTEL_INFO(dev_priv)->display.cpu_transcoder_mask & BIT(trans)) != 0) #define HAS_RC6(dev_priv) (INTEL_INFO(dev_priv)->has_rc6) @@ -1504,7 +1504,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_DMC(dev_priv) (INTEL_INFO(dev_priv)->display.has_dmc) -#define HAS_MSO(i915) (GRAPHICS_VER(i915) >= 12) +#define HAS_MSO(i915) (DISPLAY_VER(i915) >= 12) #define HAS_RUNTIME_PM(dev_priv) (INTEL_INFO(dev_priv)->has_runtime_pm) #define HAS_64BIT_RELOC(dev_priv) (INTEL_INFO(dev_priv)->has_64bit_reloc) @@ -1537,7 +1537,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_GMCH(dev_priv) (INTEL_INFO(dev_priv)->display.has_gmch) -#define HAS_LSPCON(dev_priv) (IS_GRAPHICS_VER(dev_priv, 9, 10)) +#define HAS_LSPCON(dev_priv) (IS_DISPLAY_VER(dev_priv, 9, 10)) /* DPF == dynamic parity feature */ #define HAS_L3_DPF(dev_priv) (INTEL_INFO(dev_priv)->has_l3_dpf) @@ -1551,7 +1551,7 @@ IS_SUBPLATFORM(const struct drm_i915_private *i915, #define HAS_DISPLAY(dev_priv) (INTEL_INFO(dev_priv)->display.pipe_mask != 0) -#define HAS_VRR(i915) (GRAPHICS_VER(i915) >= 11) +#define HAS_VRR(i915) (DISPLAY_VER(i915) >= 11) #define HAS_ASYNC_FLIPS(i915) (DISPLAY_VER(i915) >= 5) @@ -1581,7 +1581,7 @@ i915_print_iommu_status(struct drm_i915_private *i915, struct drm_printer *p); static inline bool intel_scanout_needs_vtd_wa(struct drm_i915_private *dev_priv) { - return GRAPHICS_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv); + return DISPLAY_VER(dev_priv) >= 6 && intel_vtd_active(dev_priv); } static inline bool -- cgit From fbb9b194e15a63c56c5664e76ccd0e85c6100cea Mon Sep 17 00:00:00 2001 From: Cameron Williams Date: Tue, 1 Feb 2022 10:12:51 +0000 Subject: USB: serial: ftdi_sio: add support for Brainboxes US-159/235/320 This patch adds support for the Brainboxes US-159, US-235 and US-320 USB-to-Serial devices. 
Signed-off-by: Cameron Williams Cc: stable@vger.kernel.org Signed-off-by: Johan Hovold --- drivers/usb/serial/ftdi_sio.c | 3 +++ drivers/usb/serial/ftdi_sio_ids.h | 3 +++ 2 files changed, 6 insertions(+) diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 4edebd14ef29..49c08f07c969 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -969,6 +969,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_023_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_VX_034_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_101_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_159_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_3_PID) }, @@ -977,12 +978,14 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_6_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_7_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_160_8_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_235_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_257_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_2_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_3_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_279_4_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_313_PID) }, + { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_320_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_324_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_1_PID) }, { USB_DEVICE(BRAINBOXES_VID, BRAINBOXES_US_346_2_PID) }, diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 755858ca20ba..d1a9564697a4 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -1506,6 +1506,9 @@ #define BRAINBOXES_VX_023_PID 0x1003 /* VX-023 ExpressCard 1 Port RS422/485 */ #define BRAINBOXES_VX_034_PID 0x1004 /* VX-034 ExpressCard 2 Port RS422/485 */ #define BRAINBOXES_US_101_PID 0x1011 /* US-101 1xRS232 */ +#define BRAINBOXES_US_159_PID 0x1021 /* US-159 1xRS232 */ +#define BRAINBOXES_US_235_PID 0x1017 /* US-235 1xRS232 */ +#define BRAINBOXES_US_320_PID 0x1019 /* US-320 1xRS422/485 */ #define BRAINBOXES_US_324_PID 0x1013 /* US-324 1xRS422/485 1Mbaud */ #define BRAINBOXES_US_606_1_PID 0x2001 /* US-606 6 Port RS232 Serial Port 1 and 2 */ #define BRAINBOXES_US_606_2_PID 0x2002 /* US-606 6 Port RS232 Serial Port 3 and 4 */ -- cgit From b50f8f09c622297d3cf46e332e17ba8adedec9af Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 1 Feb 2022 11:42:52 +0100 Subject: USB: serial: cp210x: add NCR Retail IO box id Add the device id for NCR's Retail IO box (CP2105) used in NCR FastLane SelfServ Checkout - R6C: https://www.ncr.com/product-catalog/ncr-fastlane-selfserv-checkout-r6c Reported-by: Scott Russell Cc: stable@vger.kernel.org Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold --- drivers/usb/serial/cp210x.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 8a60c0d56863..5172e7ac16fd 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -51,6 +51,7 @@ static void cp210x_enable_event_mode(struct usb_serial_port *port); static void cp210x_disable_event_mode(struct usb_serial_port *port); static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x0404, 0x034C) }, 
/* NCR Retail IO Box */ { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 RX-Stick */ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */
-- cgit From 6ca0c6283340d819bf9c7d8e76be33c9fbd903ab Mon Sep 17 00:00:00 2001 From: Johan Hovold Date: Tue, 1 Feb 2022 11:42:53 +0100 Subject: USB: serial: cp210x: add CPI Bulk Coin Recycler id Add the device id for the Crane Payment Innovation / Money Controls Bulk Coin Recycler: https://www.cranepi.com/en/system/files/Support/OM_BCR_EN_V1-04_0.pdf Reported-by: Scott Russell Cc: stable@vger.kernel.org Reviewed-by: Greg Kroah-Hartman Signed-off-by: Johan Hovold --- drivers/usb/serial/cp210x.c | 1 + 1 file changed, 1 insertion(+)
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 5172e7ac16fd..a27f7efcec6a 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -69,6 +69,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FDE, 0xCA05) }, /* OWL Wireless Electricity Monitor CM-160 */ + { USB_DEVICE(0x106F, 0x0003) }, /* CPI / Money Controls Bulk Coin Recycler */ { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */
-- cgit From 57dfd7b53dec740afe402135fdd1c5708ec337f0 Mon Sep 17 00:00:00 2001 From: Sean Christopherson Date: Fri, 28 Jan 2022 00:51:48 +0000 Subject: KVM: x86: Move delivery of non-APICv interrupt into vendor code Handle non-APICv interrupt delivery in vendor code, even though it means VMX and SVM will temporarily have duplicate code. SVM's AVIC has a race condition that requires KVM to fall back to legacy interrupt injection _after_ the interrupt has been logged in the vIRR, i.e. to fix the race, SVM will need to open code the full flow anyways[*]. Refactor the code so that the SVM bug can be fixed without introducing other issues, e.g. SVM would return "success" and thus invoke trace_kvm_apicv_accept_irq() even when delivery through the AVIC failed, and to opportunistically prepare for using KVM_X86_OP to fill each vendor's kvm_x86_ops struct, which will rely on the vendor function matching the kvm_x86_op pointer name. No functional change intended.
[*] https://lore.kernel.org/all/20211213104634.199141-4-mlevitsk@redhat.com Signed-off-by: Sean Christopherson Message-Id: <20220128005208.4008533-3-seanjc@google.com> Signed-off-by: Paolo Bonzini --- arch/x86/include/asm/kvm-x86-ops.h | 2 +- arch/x86/include/asm/kvm_host.h | 3 ++- arch/x86/kvm/lapic.c | 10 ++-------- arch/x86/kvm/svm/svm.c | 17 ++++++++++++++++- arch/x86/kvm/vmx/vmx.c | 17 ++++++++++++++++- 5 files changed, 37 insertions(+), 12 deletions(-) diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h index 631d5040b31e..d39e0de06be2 100644 --- a/arch/x86/include/asm/kvm-x86-ops.h +++ b/arch/x86/include/asm/kvm-x86-ops.h @@ -82,7 +82,7 @@ KVM_X86_OP_NULL(guest_apic_has_interrupt) KVM_X86_OP(load_eoi_exitmap) KVM_X86_OP(set_virtual_apic_mode) KVM_X86_OP_NULL(set_apic_access_page_addr) -KVM_X86_OP(deliver_posted_interrupt) +KVM_X86_OP(deliver_interrupt) KVM_X86_OP_NULL(sync_pir_to_irr) KVM_X86_OP(set_tss_addr) KVM_X86_OP(set_identity_map_addr) diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index efee29d5ad4f..80e285533371 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -1409,7 +1409,8 @@ struct kvm_x86_ops { void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu); - int (*deliver_posted_interrupt)(struct kvm_vcpu *vcpu, int vector); + void (*deliver_interrupt)(struct kvm_lapic *apic, int delivery_mode, + int trig_mode, int vector); int (*sync_pir_to_irr)(struct kvm_vcpu *vcpu); int (*set_tss_addr)(struct kvm *kvm, unsigned int addr); int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr); diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c index 4662469240bc..d7e6fde82d25 100644 --- a/arch/x86/kvm/lapic.c +++ b/arch/x86/kvm/lapic.c @@ -1096,14 +1096,8 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode, apic->regs + APIC_TMR); } - if (static_call(kvm_x86_deliver_posted_interrupt)(vcpu, vector)) { - kvm_lapic_set_irr(vector, apic); - kvm_make_request(KVM_REQ_EVENT, vcpu); - kvm_vcpu_kick(vcpu); - } else { - trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, - trig_mode, vector); - } + static_call(kvm_x86_deliver_interrupt)(apic, delivery_mode, + trig_mode, vector); break; case APIC_DM_REMRD: diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 9cef8e4598df..5772dd6f79a4 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3291,6 +3291,21 @@ static void svm_set_irq(struct kvm_vcpu *vcpu) SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR; } +static void svm_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode, + int trig_mode, int vector) +{ + struct kvm_vcpu *vcpu = apic->vcpu; + + if (svm_deliver_avic_intr(vcpu, vector)) { + kvm_lapic_set_irr(vector, apic); + kvm_make_request(KVM_REQ_EVENT, vcpu); + kvm_vcpu_kick(vcpu); + } else { + trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, + trig_mode, vector); + } +} + static void svm_update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr) { struct vcpu_svm *svm = to_svm(vcpu); @@ -4545,7 +4560,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = { .pmu_ops = &amd_pmu_ops, .nested_ops = &svm_nested_ops, - .deliver_posted_interrupt = svm_deliver_avic_intr, + .deliver_interrupt = svm_deliver_interrupt, .dy_apicv_has_pending_interrupt = svm_dy_apicv_has_pending_interrupt, .update_pi_irte = svm_update_pi_irte, .setup_mce = svm_setup_mce, 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index b1165bb13a5a..3c0ba5b1bbcf 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -4041,6 +4041,21 @@ static int vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu, int vector) return 0; } +static void vmx_deliver_interrupt(struct kvm_lapic *apic, int delivery_mode, + int trig_mode, int vector) +{ + struct kvm_vcpu *vcpu = apic->vcpu; + + if (vmx_deliver_posted_interrupt(vcpu, vector)) { + kvm_lapic_set_irr(vector, apic); + kvm_make_request(KVM_REQ_EVENT, vcpu); + kvm_vcpu_kick(vcpu); + } else { + trace_kvm_apicv_accept_irq(vcpu->vcpu_id, delivery_mode, + trig_mode, vector); + } +} + /* * Set up the vmcs's constant host-state fields, i.e., host-state fields that * will not change in the lifetime of the guest. @@ -7766,7 +7781,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = { .hwapic_isr_update = vmx_hwapic_isr_update, .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, .sync_pir_to_irr = vmx_sync_pir_to_irr, - .deliver_posted_interrupt = vmx_deliver_posted_interrupt, + .deliver_interrupt = vmx_deliver_interrupt, .dy_apicv_has_pending_interrupt = pi_has_pending_interrupt, .set_tss_addr = vmx_set_tss_addr, -- cgit From ee12595147ac1fbfb5bcb23837e26dd58d94b15d Mon Sep 17 00:00:00 2001 From: Dan Carpenter Date: Fri, 28 Jan 2022 22:57:01 +0300 Subject: fanotify: Fix stale file descriptor in copy_event_to_user() This code calls fd_install() which gives the userspace access to the fd. Then if copy_info_records_to_user() fails it calls put_unused_fd(fd) but that will not release it and leads to a stale entry in the file descriptor table. Generally you can't trust the fd after a call to fd_install(). The fix is to delay the fd_install() until everything else has succeeded. Fortunately it requires CAP_SYS_ADMIN to reach this code so the security impact is less. Fixes: f644bc449b37 ("fanotify: fix copy_event_to_user() fid error clean up") Link: https://lore.kernel.org/r/20220128195656.GA26981@kili Signed-off-by: Dan Carpenter Reviewed-by: Mathias Krause Signed-off-by: Jan Kara --- fs/notify/fanotify/fanotify_user.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 1026f67b1d1e..2ff6bd85ba8f 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -701,9 +701,6 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, if (fanotify_is_perm_event(event->mask)) FANOTIFY_PERM(event)->fd = fd; - if (f) - fd_install(fd, f); - if (info_mode) { ret = copy_info_records_to_user(event, info, info_mode, pidfd, buf, count); @@ -711,6 +708,9 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group, goto out_close_fd; } + if (f) + fd_install(fd, f); + return metadata.event_len; out_close_fd: -- cgit From 881cc731df6af99a21622e9be25a23b81adcd10b Mon Sep 17 00:00:00 2001 From: Jonathan McDowell Date: Mon, 31 Jan 2022 13:56:41 +0000 Subject: net: phy: Fix qca8081 with speeds lower than 2.5Gb/s A typo in qca808x_read_status means we try to set SMII mode on the port rather than SGMII when the link speed is not 2.5Gb/s. This results in no traffic due to the mismatch in configuration between the phy and the mac. v2: Only change interface mode when the link is up Fixes: 79c7bc0521545 ("net: phy: add qca8081 read_status") Cc: stable@vger.kernel.org Signed-off-by: Jonathan McDowell Reviewed-by: Russell King (Oracle) Signed-off-by: David S. 
Miller --- drivers/net/phy/at803x.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c index 5b6c0d120e09..29aa811af430 100644 --- a/drivers/net/phy/at803x.c +++ b/drivers/net/phy/at803x.c @@ -1688,19 +1688,19 @@ static int qca808x_read_status(struct phy_device *phydev) if (ret < 0) return ret; - if (phydev->link && phydev->speed == SPEED_2500) - phydev->interface = PHY_INTERFACE_MODE_2500BASEX; - else - phydev->interface = PHY_INTERFACE_MODE_SMII; - - /* generate seed as a lower random value to make PHY linked as SLAVE easily, - * except for master/slave configuration fault detected. - * the reason for not putting this code into the function link_change_notify is - * the corner case where the link partner is also the qca8081 PHY and the seed - * value is configured as the same value, the link can't be up and no link change - * occurs. - */ - if (!phydev->link) { + if (phydev->link) { + if (phydev->speed == SPEED_2500) + phydev->interface = PHY_INTERFACE_MODE_2500BASEX; + else + phydev->interface = PHY_INTERFACE_MODE_SGMII; + } else { + /* generate seed as a lower random value to make PHY linked as SLAVE easily, + * except for master/slave configuration fault detected. + * the reason for not putting this code into the function link_change_notify is + * the corner case where the link partner is also the qca8081 PHY and the seed + * value is configured as the same value, the link can't be up and no link change + * occurs. + */ if (phydev->master_slave_state == MASTER_SLAVE_STATE_ERR) { qca808x_phy_ms_seed_enable(phydev, false); } else {
-- cgit From ef9989afda73332df566852d6e9ca695c05f10ce Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 1 Feb 2022 13:29:22 +0000 Subject: kvm: add guest_state_{enter,exit}_irqoff() When transitioning to/from guest mode, it is necessary to inform lockdep, tracing, and RCU in a specific order, similar to the requirements for transitions to/from user mode. Additionally, it is necessary to perform vtime accounting for a window around running the guest, with RCU enabled, such that timer interrupts taken from the guest can be accounted as guest time. Most architectures don't handle all the necessary pieces, and have a number of common bugs, including unsafe usage of RCU during the window between guest_enter() and guest_exit(). On x86, this was dealt with across commits: 87fa7f3e98a1310e ("x86/kvm: Move context tracking where it belongs") 0642391e2139a2c1 ("x86/kvm/vmx: Add hardirq tracing to guest enter/exit") 9fc975e9efd03e57 ("x86/kvm/svm: Add hardirq tracing on guest enter/exit") 3ebccdf373c21d86 ("x86/kvm/vmx: Move guest enter/exit into .noinstr.text") 135961e0a7d555fc ("x86/kvm/svm: Move guest enter/exit into .noinstr.text") 160457140187c5fb ("KVM: x86: Defer vtime accounting 'til after IRQ handling") bc908e091b326467 ("KVM: x86: Consolidate guest enter/exit logic to common helpers") ... but those fixes are specific to x86, and as the resulting logic (while correct) is split across generic helper functions and x86-specific helper functions, it is difficult to see that the entry/exit accounting is balanced. This patch adds generic helpers which architectures can use to handle guest entry/exit consistently and correctly. The guest_{enter,exit}() helpers are split into guest_timing_{enter,exit}() to perform vtime accounting, and guest_context_{enter,exit}() to perform the necessary context tracking and RCU management.
The existing guest_{enter,exit}() helpers are left as wrappers of these. Atop this, new guest_state_enter_irqoff() and guest_state_exit_irqoff() helpers are added to handle the ordering of lockdep, tracing, and RCU management. These are intended to mirror exit_to_user_mode() and enter_from_user_mode(). Subsequent patches will migrate architectures over to the new helpers, following a sequence: guest_timing_enter_irqoff(); guest_state_enter_irqoff(); < run the vcpu > guest_state_exit_irqoff(); < take any pending IRQs > guest_timing_exit_irqoff(); This sequence handles all of the above correctly, and more clearly balances the entry and exit portions, making it easier to understand. The existing helpers are marked as deprecated, and will be removed once all architectures have been converted. There should be no functional change as a result of this patch. Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Reviewed-by: Paolo Bonzini Reviewed-by: Nicolas Saenz Julienne Message-Id: <20220201132926.3301912-2-mark.rutland@arm.com> Signed-off-by: Paolo Bonzini --- include/linux/kvm_host.h | 112 +++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 109 insertions(+), 3 deletions(-) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index f079820f52b5..b3810976a27f 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -29,7 +29,9 @@ #include #include #include +#include #include +#include #include #include #include @@ -368,8 +370,11 @@ struct kvm_vcpu { u64 last_used_slot_gen; }; -/* must be called with irqs disabled */ -static __always_inline void guest_enter_irqoff(void) +/* + * Start accounting time towards a guest. + * Must be called before entering guest context. + */ +static __always_inline void guest_timing_enter_irqoff(void) { /* * This is running in ioctl context so its safe to assume that it's the @@ -378,7 +383,18 @@ static __always_inline void guest_enter_irqoff(void) instrumentation_begin(); vtime_account_guest_enter(); instrumentation_end(); +} +/* + * Enter guest context and enter an RCU extended quiescent state. + * + * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is + * unsafe to use any code which may directly or indirectly use RCU, tracing + * (including IRQ flag tracing), or lockdep. All code in this period must be + * non-instrumentable. + */ +static __always_inline void guest_context_enter_irqoff(void) +{ /* * KVM does not hold any references to rcu protected data when it * switches CPU into a guest mode. In fact switching to a guest mode @@ -394,16 +410,79 @@ static __always_inline void guest_enter_irqoff(void) } } -static __always_inline void guest_exit_irqoff(void) +/* + * Deprecated. Architectures should move to guest_timing_enter_irqoff() and + * guest_state_enter_irqoff(). + */ +static __always_inline void guest_enter_irqoff(void) +{ + guest_timing_enter_irqoff(); + guest_context_enter_irqoff(); +} + +/** + * guest_state_enter_irqoff - Fixup state when entering a guest + * + * Entry to a guest will enable interrupts, but the kernel state is interrupts + * disabled when this is invoked. Also tell RCU about it. + * + * 1) Trace interrupts on state + * 2) Invoke context tracking if enabled to adjust RCU state + * 3) Tell lockdep that interrupts are enabled + * + * Invoked from architecture specific code before entering a guest. + * Must be called with interrupts disabled and the caller must be + * non-instrumentable. + * The caller has to invoke guest_timing_enter_irqoff() before this.
+ * + * Note: this is analogous to exit_to_user_mode(). + */ +static __always_inline void guest_state_enter_irqoff(void) +{ + instrumentation_begin(); + trace_hardirqs_on_prepare(); + lockdep_hardirqs_on_prepare(CALLER_ADDR0); + instrumentation_end(); + + guest_context_enter_irqoff(); + lockdep_hardirqs_on(CALLER_ADDR0); +} + +/* + * Exit guest context and exit an RCU extended quiescent state. + * + * Between guest_context_enter_irqoff() and guest_context_exit_irqoff() it is + * unsafe to use any code which may directly or indirectly use RCU, tracing + * (including IRQ flag tracing), or lockdep. All code in this period must be + * non-instrumentable. + */ +static __always_inline void guest_context_exit_irqoff(void) { context_tracking_guest_exit(); +} +/* + * Stop accounting time towards a guest. + * Must be called after exiting guest context. + */ +static __always_inline void guest_timing_exit_irqoff(void) +{ instrumentation_begin(); /* Flush the guest cputime we spent on the guest */ vtime_account_guest_exit(); instrumentation_end(); } +/* + * Deprecated. Architectures should move to guest_state_exit_irqoff() and + * guest_timing_exit_irqoff(). + */ +static __always_inline void guest_exit_irqoff(void) +{ + guest_context_exit_irqoff(); + guest_timing_exit_irqoff(); +} + static inline void guest_exit(void) { unsigned long flags; @@ -413,6 +492,33 @@ static inline void guest_exit(void) local_irq_restore(flags); } +/** + * guest_state_exit_irqoff - Establish state when returning from guest mode + * + * Entry from a guest disables interrupts, but guest mode is traced as + * interrupts enabled. Also with NO_HZ_FULL RCU might be idle. + * + * 1) Tell lockdep that interrupts are disabled + * 2) Invoke context tracking if enabled to reactivate RCU + * 3) Trace interrupts off state + * + * Invoked from architecture specific code after exiting a guest. + * Must be invoked with interrupts disabled and the caller must be + * non-instrumentable. + * The caller has to invoke guest_timing_exit_irqoff() after this. + * + * Note: this is analogous to enter_from_user_mode(). + */ +static __always_inline void guest_state_exit_irqoff(void) +{ + lockdep_hardirqs_off(CALLER_ADDR0); + guest_context_exit_irqoff(); + + instrumentation_begin(); + trace_hardirqs_off_finish(); + instrumentation_end(); +} + static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu) { /* -- cgit From 72e3244512b34756a7e8aa67eff45cdcb040ac4e Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 1 Feb 2022 13:29:26 +0000 Subject: kvm/mips: rework guest entry logic In kvm_arch_vcpu_ioctl_run() we use guest_enter_irqoff() and guest_exit_irqoff() directly, with interrupts masked between these. As we don't handle any timer ticks during this window, we will not account time spent within the guest as guest time, which is unfortunate. Additionally, we do not inform lockdep or tracing that interrupts will be enabled during guest execution, which caan lead to misleading traces and warnings that interrupts have been enabled for overly-long periods. 
This patch fixes these issues by using the new timing and context entry/exit helpers to ensure that interrupts are handled during guest vtime but with RCU watching, with a sequence: guest_timing_enter_irqoff(); guest_state_enter_irqoff(); < run the vcpu > guest_state_exit_irqoff(); < take any pending IRQs > guest_timing_exit_irqoff(); In addition, as guest exits during the "run the vcpu" step are handled by kvm_mips_handle_exit(), a wrapper function is added which ensures that such exits are handled with a sequence: guest_state_exit_irqoff(); < handle the exit > guest_state_enter_irqoff(); This means that exits which stop the vCPU running will have a redundant guest_state_enter_irqoff() .. guest_state_exit_irqoff() sequence, which can be addressed with future rework. Since instrumentation may make use of RCU, we must also ensure that no instrumented code is run during the EQS. I've split out the critical section into a new kvm_mips_vcpu_enter_exit() helper which is marked noinstr. Signed-off-by: Mark Rutland Cc: Aleksandar Markovic Cc: Frederic Weisbecker Cc: Huacai Chen Cc: Paolo Bonzini Cc: Paul E. McKenney Cc: Thomas Bogendoerfer Message-Id: <20220201132926.3301912-6-mark.rutland@arm.com> Signed-off-by: Paolo Bonzini --- arch/mips/kvm/mips.c | 50 ++++++++++++++++++++++++++++++++++++++++++++++---- 1 file changed, 46 insertions(+), 4 deletions(-) diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c index e59cb6246f76..a25e0b73ee70 100644 --- a/arch/mips/kvm/mips.c +++ b/arch/mips/kvm/mips.c @@ -414,6 +414,24 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, return -ENOIOCTLCMD; } +/* + * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while + * the vCPU is running. + * + * This must be noinstr as instrumentation may make use of RCU, and this is not + * safe during the EQS. + */ +static int noinstr kvm_mips_vcpu_enter_exit(struct kvm_vcpu *vcpu) +{ + int ret; + + guest_state_enter_irqoff(); + ret = kvm_mips_callbacks->vcpu_run(vcpu); + guest_state_exit_irqoff(); + + return ret; +} + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) { int r = -EINTR; @@ -434,7 +452,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) lose_fpu(1); local_irq_disable(); - guest_enter_irqoff(); + guest_timing_enter_irqoff(); trace_kvm_enter(vcpu); /* @@ -445,10 +463,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) */ smp_store_mb(vcpu->mode, IN_GUEST_MODE); - r = kvm_mips_callbacks->vcpu_run(vcpu); + r = kvm_mips_vcpu_enter_exit(vcpu); + + /* + * We must ensure that any pending interrupts are taken before + * we exit guest timing so that timer ticks are accounted as + * guest time. Transiently unmask interrupts so that any + * pending interrupts are taken. + * + * TODO: is there a barrier which ensures that pending interrupts are + * recognised? Currently this just hopes that the CPU takes any pending
+ */ + local_irq_enable(); + local_irq_disable(); trace_kvm_out(vcpu); - guest_exit_irqoff(); + guest_timing_exit_irqoff(); local_irq_enable(); out: @@ -1168,7 +1199,7 @@ static void kvm_mips_set_c0_status(void) /* * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) */ -int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) +static int __kvm_mips_handle_exit(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; u32 cause = vcpu->arch.host_cp0_cause; @@ -1357,6 +1388,17 @@ int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) return ret; } +int noinstr kvm_mips_handle_exit(struct kvm_vcpu *vcpu) +{ + int ret; + + guest_state_exit_irqoff(); + ret = __kvm_mips_handle_exit(vcpu); + guest_state_enter_irqoff(); + + return ret; +} + /* Enable FPU for guest and restore context */ void kvm_own_fpu(struct kvm_vcpu *vcpu) { -- cgit From b2d2af7e5df37ee3a9ba6b405bdbb7691a5c2dfc Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 1 Feb 2022 13:29:24 +0000 Subject: kvm/x86: rework guest entry logic For consistency and clarity, migrate x86 over to the generic helpers for guest timing and lockdep/RCU/tracing management, and remove the x86-specific helpers. Prior to this patch, the guest timing was entered in kvm_guest_enter_irqoff() (called by svm_vcpu_enter_exit() and svm_vcpu_enter_exit()), and was exited by the call to vtime_account_guest_exit() within vcpu_enter_guest(). To minimize duplication and to more clearly balance entry and exit, both entry and exit of guest timing are placed in vcpu_enter_guest(), using the new guest_timing_{enter,exit}_irqoff() helpers. When context tracking is used a small amount of additional time will be accounted towards guests; tick-based accounting is unnaffected as IRQs are disabled at this point and not enabled until after the return from the guest. This also corrects (benign) mis-balanced context tracking accounting introduced in commits: ae95f566b3d22ade ("KVM: X86: TSCDEADLINE MSR emulation fastpath") 26efe2fd92e50822 ("KVM: VMX: Handle preemption timer fastpath") Where KVM can enter a guest multiple times, calling vtime_guest_enter() without a corresponding call to vtime_account_guest_exit(), and with vtime_account_system() called when vtime_account_guest() should be used. As account_system_time() checks PF_VCPU and calls account_guest_time(), this doesn't result in any functional problem, but is unnecessarily confusing. 
Signed-off-by: Mark Rutland Acked-by: Paolo Bonzini Reviewed-by: Nicolas Saenz Julienne Cc: Borislav Petkov Cc: Dave Hansen Cc: Ingo Molnar Cc: Jim Mattson Cc: Joerg Roedel Cc: Sean Christopherson Cc: Thomas Gleixner Cc: Vitaly Kuznetsov Cc: Wanpeng Li Message-Id: <20220201132926.3301912-4-mark.rutland@arm.com> Signed-off-by: Paolo Bonzini --- arch/x86/kvm/svm/svm.c | 4 ++-- arch/x86/kvm/vmx/vmx.c | 4 ++-- arch/x86/kvm/x86.c | 4 +++- arch/x86/kvm/x86.h | 45 --------------------------------------------- 4 files changed, 7 insertions(+), 50 deletions(-) diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index 5772dd6f79a4..ea2f7f3614af 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -3630,7 +3630,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu) struct vcpu_svm *svm = to_svm(vcpu); unsigned long vmcb_pa = svm->current_vmcb->pa; - kvm_guest_enter_irqoff(); + guest_state_enter_irqoff(); if (sev_es_guest(vcpu->kvm)) { __svm_sev_es_vcpu_run(vmcb_pa); @@ -3650,7 +3650,7 @@ static noinstr void svm_vcpu_enter_exit(struct kvm_vcpu *vcpu) vmload(__sme_page_pa(sd->save_area)); } - kvm_guest_exit_irqoff(); + guest_state_exit_irqoff(); } static __no_kcsan fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 3c0ba5b1bbcf..c0c256c33d21 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -6767,7 +6767,7 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu) static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, struct vcpu_vmx *vmx) { - kvm_guest_enter_irqoff(); + guest_state_enter_irqoff(); /* L1D Flush includes CPU buffer clear to mitigate MDS */ if (static_branch_unlikely(&vmx_l1d_should_flush)) @@ -6783,7 +6783,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu, vcpu->arch.cr2 = native_read_cr2(); - kvm_guest_exit_irqoff(); + guest_state_exit_irqoff(); } static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c25a6ef0ff06..fec3dd4f0718 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -10088,6 +10088,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) set_debugreg(0, 7); } + guest_timing_enter_irqoff(); + for (;;) { /* * Assert that vCPU vs. VM APICv state is consistent. An APICv @@ -10172,7 +10174,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu) * of accounting via context tracking, but the loss of accuracy is * acceptable for all known use cases. */ - vtime_account_guest_exit(); + guest_timing_exit_irqoff(); if (lapic_in_kernel(vcpu)) { s64 delta = vcpu->arch.apic->lapic_timer.advance_expire_delta; diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h index 1ebd5a7594da..20c7a1fb90bb 100644 --- a/arch/x86/kvm/x86.h +++ b/arch/x86/kvm/x86.h @@ -10,51 +10,6 @@ void kvm_spurious_fault(void); -static __always_inline void kvm_guest_enter_irqoff(void) -{ - /* - * VMENTER enables interrupts (host state), but the kernel state is - * interrupts disabled when this is invoked. Also tell RCU about - * it. This is the same logic as for exit_to_user_mode(). - * - * This ensures that e.g. latency analysis on the host observes - * guest mode as interrupt enabled. - * - * guest_enter_irqoff() informs context tracking about the - * transition to guest mode and if enabled adjusts RCU state - * accordingly. 
- */ - instrumentation_begin(); - trace_hardirqs_on_prepare(); - lockdep_hardirqs_on_prepare(CALLER_ADDR0); - instrumentation_end(); - - guest_enter_irqoff(); - lockdep_hardirqs_on(CALLER_ADDR0); -} - -static __always_inline void kvm_guest_exit_irqoff(void) -{ - /* - * VMEXIT disables interrupts (host state), but tracing and lockdep - * have them in state 'on' as recorded before entering guest mode. - * Same as enter_from_user_mode(). - * - * context_tracking_guest_exit() restores host context and reinstates - * RCU if enabled and required. - * - * This needs to be done immediately after VM-Exit, before any code - * that might contain tracepoints or call out to the greater world, - * e.g. before x86_spec_ctrl_restore_host(). - */ - lockdep_hardirqs_off(CALLER_ADDR0); - context_tracking_guest_exit(); - - instrumentation_begin(); - trace_hardirqs_off_finish(); - instrumentation_end(); -} - #define KVM_NESTED_VMENTER_CONSISTENCY_CHECK(consistency_check) \ ({ \ bool failed = (consistency_check); \ -- cgit From b43a76f423aa304037603fd6165c4a534d2c09a7 Mon Sep 17 00:00:00 2001 From: Bernard Metzler Date: Sun, 30 Jan 2022 18:08:15 +0100 Subject: RDMA/siw: Fix broken RDMA Read Fence/Resume logic. Code unconditionally resumed fenced SQ processing after next RDMA Read completion, even if other RDMA Read responses are still outstanding, or ORQ is full. Also adds comments for better readability of fence processing, and removes orq_get_tail() helper, which is not needed anymore. Fixes: 8b6a361b8c48 ("rdma/siw: receive path") Fixes: a531975279f3 ("rdma/siw: main include file") Link: https://lore.kernel.org/r/20220130170815.1940-1-bmt@zurich.ibm.com Reported-by: Jared Holzman Signed-off-by: Bernard Metzler Signed-off-by: Jason Gunthorpe --- drivers/infiniband/sw/siw/siw.h | 7 +------ drivers/infiniband/sw/siw/siw_qp_rx.c | 20 +++++++++++--------- 2 files changed, 12 insertions(+), 15 deletions(-) diff --git a/drivers/infiniband/sw/siw/siw.h b/drivers/infiniband/sw/siw/siw.h index 368959ae9a8c..df03d84c6868 100644 --- a/drivers/infiniband/sw/siw/siw.h +++ b/drivers/infiniband/sw/siw/siw.h @@ -644,14 +644,9 @@ static inline struct siw_sqe *orq_get_current(struct siw_qp *qp) return &qp->orq[qp->orq_get % qp->attrs.orq_size]; } -static inline struct siw_sqe *orq_get_tail(struct siw_qp *qp) -{ - return &qp->orq[qp->orq_put % qp->attrs.orq_size]; -} - static inline struct siw_sqe *orq_get_free(struct siw_qp *qp) { - struct siw_sqe *orq_e = orq_get_tail(qp); + struct siw_sqe *orq_e = &qp->orq[qp->orq_put % qp->attrs.orq_size]; if (READ_ONCE(orq_e->flags) == 0) return orq_e; diff --git a/drivers/infiniband/sw/siw/siw_qp_rx.c b/drivers/infiniband/sw/siw/siw_qp_rx.c index 60116f20653c..875ea6f1b04a 100644 --- a/drivers/infiniband/sw/siw/siw_qp_rx.c +++ b/drivers/infiniband/sw/siw/siw_qp_rx.c @@ -1153,11 +1153,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) spin_lock_irqsave(&qp->orq_lock, flags); - rreq = orq_get_current(qp); - /* free current orq entry */ + rreq = orq_get_current(qp); WRITE_ONCE(rreq->flags, 0); + qp->orq_get++; + if (qp->tx_ctx.orq_fence) { if (unlikely(tx_waiting->wr_status != SIW_WR_QUEUED)) { pr_warn("siw: [QP %u]: fence resume: bad status %d\n", @@ -1165,10 +1166,12 @@ static int siw_check_tx_fence(struct siw_qp *qp) rv = -EPROTO; goto out; } - /* resume SQ processing */ + /* resume SQ processing, if possible */ if (tx_waiting->sqe.opcode == SIW_OP_READ || tx_waiting->sqe.opcode == SIW_OP_READ_LOCAL_INV) { - rreq = orq_get_tail(qp); + + /* SQ processing was stopped because of a full 
ORQ */ + rreq = orq_get_free(qp); if (unlikely(!rreq)) { pr_warn("siw: [QP %u]: no ORQE\n", qp_id(qp)); rv = -EPROTO; goto out; } @@ -1181,15 +1184,14 @@ static int siw_check_tx_fence(struct siw_qp *qp) resume_tx = 1; } else if (siw_orq_empty(qp)) { + /* + * SQ processing was stopped by fenced work request. + * Resume since all previous Read's are now completed. + */ qp->tx_ctx.orq_fence = 0; resume_tx = 1; - } else { - pr_warn("siw: [QP %u]: fence resume: orq idx: %d:%d\n", - qp_id(qp), qp->orq_get, qp->orq_put); - rv = -EPROTO; } } - qp->orq_get++; out: spin_unlock_irqrestore(&qp->orq_lock, flags); -- cgit From f3136c4ce7acf64bee43135971ca52a880572e32 Mon Sep 17 00:00:00 2001 From: Leon Romanovsky Date: Mon, 31 Jan 2022 11:45:26 +0200 Subject: RDMA/mlx4: Don't continue event handler after memory allocation failure MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A failure to allocate memory in the MLX4_DEV_EVENT_PORT_MGMT_CHANGE event handler will cause the assignment logic to be skipped, but ib_dispatch_event() will be called anyway. Fix it by returning instead of breaking after the memory allocation failure. Fixes: 00f5ce99dc6e ("mlx4: Use port management change event instead of smp_snoop") Link: https://lore.kernel.org/r/12a0e83f18cfad4b5f62654f141e240d04915e10.1643622264.git.leonro@nvidia.com Signed-off-by: Leon Romanovsky Reviewed-by: Håkon Bugge Signed-off-by: Jason Gunthorpe --- drivers/infiniband/hw/mlx4/main.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c index 1c3d97229988..93b1650eacfa 100644 --- a/drivers/infiniband/hw/mlx4/main.c +++ b/drivers/infiniband/hw/mlx4/main.c @@ -3237,7 +3237,7 @@ static void mlx4_ib_event(struct mlx4_dev *dev, void *ibdev_ptr, case MLX4_DEV_EVENT_PORT_MGMT_CHANGE: ew = kmalloc(sizeof *ew, GFP_ATOMIC); if (!ew) - break; + return; INIT_WORK(&ew->work, handle_port_mgmt_change_event); memcpy(&ew->ib_eqe, eqe, sizeof *eqe); -- cgit From 1c7f0e349aa5f8f80b1cac3d4917405332e14cdf Mon Sep 17 00:00:00 2001 From: Peter Ujfalusi Date: Tue, 1 Feb 2022 13:21:44 +0200 Subject: ALSA: hda: Skip codec shutdown in case the codec is not registered If codec->registered is not set, it means that pm_runtime is not yet enabled and codec->pcm_list_head has not been initialized. Accessing the uninitialized pcm_list_head will lead to a kernel crash during shutdown.
Reported-by: Guennadi Liakhovetski Signed-off-by: Peter Ujfalusi Tested-by: Guennadi Liakhovetski Fixes: b98444ed597d ("ALSA: hda: Suspend codec at shutdown") Link: https://lore.kernel.org/r/20220201112144.29411-1-peter.ujfalusi@linux.intel.com Signed-off-by: Takashi Iwai --- sound/pci/hda/hda_codec.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 7016b48227bf..f552785d301e 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -3000,6 +3000,10 @@ void snd_hda_codec_shutdown(struct hda_codec *codec) { struct hda_pcm *cpcm; + /* Skip the shutdown if codec is not registered */ + if (!codec->registered) + return; + list_for_each_entry(cpcm, &codec->pcm_list_head, list) snd_pcm_suspend_all(cpcm->pcm); -- cgit From 836f35f79153ce09d813c83f341dba4481996966 Mon Sep 17 00:00:00 2001 From: Mark Pearson Date: Thu, 27 Jan 2022 14:03:58 -0500 Subject: platform/x86: thinkpad_acpi: Fix incorrect use of platform profile on AMD platforms Lenovo AMD based platforms have been offering platform_profiles but they are not working correctly. This is because the mode we are using on the Intel platforms (MMC) is not available on the AMD platforms. This commit adds checking of the functional capabilities returned by the BIOS to confirm if MMC is supported or not. Profiles will not be available if the platform is not MMC capable. I'm investigating and working on an alternative for AMD platforms but that is still work-in-progress. Signed-off-by: Mark Pearson Link: https://lore.kernel.org/r/20220127190358.4078-1-markpearson@lenovo.com Reviewed-by: Hans de Goede Signed-off-by: Hans de Goede --- drivers/platform/x86/thinkpad_acpi.c | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 33f611af6e51..bd045486b933 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -10119,6 +10119,9 @@ static struct ibm_struct proxsensor_driver_data = { #define DYTC_CMD_MMC_GET 8 /* To get current MMC function and mode */ #define DYTC_CMD_RESET 0x1ff /* To reset back to default */ +#define DYTC_CMD_FUNC_CAP 3 /* To get DYTC capabilities */ +#define DYTC_FC_MMC 27 /* MMC Mode supported */ + #define DYTC_GET_FUNCTION_BIT 8 /* Bits 8-11 - function setting */ #define DYTC_GET_MODE_BIT 12 /* Bits 12-15 - mode setting */ @@ -10331,6 +10334,15 @@ static int tpacpi_dytc_profile_init(struct ibm_init_struct *iibm) if (dytc_version < 5) return -ENODEV; + /* Check what capabilities are supported. 
Currently MMC is needed */ + err = dytc_command(DYTC_CMD_FUNC_CAP, &output); + if (err) + return err; + if (!(output & BIT(DYTC_FC_MMC))) { + dbg_printk(TPACPI_DBG_INIT, " DYTC MMC mode not supported\n"); + return -ENODEV; + } + dbg_printk(TPACPI_DBG_INIT, "DYTC version %d: thermal mode available\n", dytc_version); /* -- cgit From e9cc5d48d4f463fbea19dd93253e98af3b612b7c Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Wed, 12 Feb 2020 11:04:23 -0300 Subject: tools include UAPI: Sync sound/asound.h copy with the kernel sources Picking the changes from: 55b71f6c29f2a78a ("ALSA: uapi: use C90 comment style instead of C99 style") fb6723daf89083a0 ("ALSA: pcm: comment about relation between msbits hw parameter and [S|U]32 formats") b456abe63f60ad93 ("ALSA: pcm: introduce INFO_NO_REWINDS flag") 5aec579e08e4f2be ("ALSA: uapi: Fix a C++ style comment in asound.h") Which entails no changes in the tooling side as it doesn't introduce new SNDRV_PCM_IOCTL_ ioctls. To silence this perf tools build warning: Warning: Kernel ABI header at 'tools/include/uapi/sound/asound.h' differs from latest version at 'include/uapi/sound/asound.h' diff -u tools/include/uapi/sound/asound.h include/uapi/sound/asound.h Cc: Mark Brown Cc: Pierre-Louis Bossart Cc: Takashi Iwai Cc: Takashi Sakamoto Link: https://lore.kernel.org/all/YflN0j09T+6ODHIh@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/sound/asound.h | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/tools/include/uapi/sound/asound.h b/tools/include/uapi/sound/asound.h index 5fbb79e30819..ef0cafe295b2 100644 --- a/tools/include/uapi/sound/asound.h +++ b/tools/include/uapi/sound/asound.h @@ -202,6 +202,11 @@ typedef int __bitwise snd_pcm_format_t; #define SNDRV_PCM_FORMAT_S24_BE ((__force snd_pcm_format_t) 7) /* low three bytes */ #define SNDRV_PCM_FORMAT_U24_LE ((__force snd_pcm_format_t) 8) /* low three bytes */ #define SNDRV_PCM_FORMAT_U24_BE ((__force snd_pcm_format_t) 9) /* low three bytes */ +/* + * For S32/U32 formats, 'msbits' hardware parameter is often used to deliver information about the + * available bit count in most significant bit. It's for the case of so-called 'left-justified' or + * `right-padding` sample which has less width than 32 bit. + */ #define SNDRV_PCM_FORMAT_S32_LE ((__force snd_pcm_format_t) 10) #define SNDRV_PCM_FORMAT_S32_BE ((__force snd_pcm_format_t) 11) #define SNDRV_PCM_FORMAT_U32_LE ((__force snd_pcm_format_t) 12) @@ -300,7 +305,7 @@ typedef int __bitwise snd_pcm_subformat_t; #define SNDRV_PCM_INFO_HAS_LINK_ESTIMATED_ATIME 0x04000000 /* report estimated link audio time */ #define SNDRV_PCM_INFO_HAS_LINK_SYNCHRONIZED_ATIME 0x08000000 /* report synchronized audio/system time */ #define SNDRV_PCM_INFO_EXPLICIT_SYNC 0x10000000 /* needs explicit sync of pointers and data */ - +#define SNDRV_PCM_INFO_NO_REWINDS 0x20000000 /* hardware can only support monotonic changes of appl_ptr */ #define SNDRV_PCM_INFO_DRAIN_TRIGGER 0x40000000 /* internal kernel flag - trigger in drain */ #define SNDRV_PCM_INFO_FIFO_IN_FRAMES 0x80000000 /* internal kernel flag - FIFO size is in frames */ -- cgit From 88443d3f79b8d2b5679f2a1df1482fa024be2353 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Fri, 21 May 2021 16:00:31 -0300 Subject: tools headers UAPI: Sync linux/perf_event.h with the kernel sources To pick the trivial change in: cb1c4aba055f928f ("perf: Add new macros for mem_hops field") Just comment source code alignment. 
This silences this perf build warning: Warning: Kernel ABI header at 'tools/include/uapi/linux/perf_event.h' differs from latest version at 'include/uapi/linux/perf_event.h' diff -u tools/include/uapi/linux/perf_event.h include/uapi/linux/perf_event.h Cc: Kajol Jain Cc: Michael Ellerman Link: https://lore.kernel.org/lkml/YflPKLhu2AtHmPov@kernel.org/ Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/perf_event.h | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tools/include/uapi/linux/perf_event.h b/tools/include/uapi/linux/perf_event.h index 4cd39aaccbe7..1b65042ab1db 100644 --- a/tools/include/uapi/linux/perf_event.h +++ b/tools/include/uapi/linux/perf_event.h @@ -1332,9 +1332,9 @@ union perf_mem_data_src { /* hop level */ #define PERF_MEM_HOPS_0 0x01 /* remote core, same node */ -#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */ -#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */ -#define PERF_MEM_HOPS_3 0x04 /* remote board */ +#define PERF_MEM_HOPS_1 0x02 /* remote node, same socket */ +#define PERF_MEM_HOPS_2 0x03 /* remote socket, same board */ +#define PERF_MEM_HOPS_3 0x04 /* remote board */ /* 5-7 available */ #define PERF_MEM_HOPS_SHIFT 43 -- cgit From d5381cc9f123d64bee1d1a124cd98faa5fa36ca6 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 1 Jul 2021 13:39:15 -0300 Subject: tools headers cpufeatures: Sync with the kernel sources To pick the changes from: 690a757d610e50c2 ("kvm: x86: Add CPUID support for Intel AMX") This only causes these perf files to be rebuilt: CC /tmp/build/perf/bench/mem-memcpy-x86-64-asm.o CC /tmp/build/perf/bench/mem-memset-x86-64-asm.o And addresses this perf build warning: Warning: Kernel ABI header at 'tools/arch/x86/include/asm/cpufeatures.h' differs from latest version at 'arch/x86/include/asm/cpufeatures.h' diff -u tools/arch/x86/include/asm/cpufeatures.h arch/x86/include/asm/cpufeatures.h Cc: Jing Liu Cc: Paolo Bonzini Link: https://lore.kernel.org/lkml/YflQCEO9FRLeTmlB@kernel.org/ Signed-off-by: Arnaldo Carvalho de Melo --- tools/arch/x86/include/asm/cpufeatures.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h index 18de5f76f198..6db4e2932b3d 100644 --- a/tools/arch/x86/include/asm/cpufeatures.h +++ b/tools/arch/x86/include/asm/cpufeatures.h @@ -299,7 +299,9 @@ /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */ #define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */ #define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */ +#define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */ #define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */ +#define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */ /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */ #define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */ -- cgit From 100198322b2eb7fb38750cb0fcae5cd533907410 Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Tue, 1 Feb 2022 12:53:16 -0300 Subject: perf beauty: Make the prctl arg regexp more strict to cope with PR_SET_VMA This new PR_SET_VMA value isn't in sequence with all the other prctl arguments and instead uses a big, 0x prefixed hex number: 0x53564d41 (S V M A). This makes it harder to generate a string table as it would be rather sparse, so make the regexp more stricter to avoid catching those. 
A followup patch for 'perf trace' to cope with such oddities will be needed, but then its a matter for the next merge window. The next patch will update the prctl.h file to cope with this perf build warning: Warning: Kernel ABI header at 'tools/include/uapi/linux/prctl.h' differs from latest version at 'include/uapi/linux/prctl.h' diff -u tools/include/uapi/linux/prctl.h include/uapi/linux/prctl.h Here is the output of this script: $ tools/perf/trace/beauty/prctl_option.sh static const char *prctl_options[] = { [1] = "SET_PDEATHSIG", [2] = "GET_PDEATHSIG", [3] = "GET_DUMPABLE", [4] = "SET_DUMPABLE", [5] = "GET_UNALIGN", [6] = "SET_UNALIGN", [7] = "GET_KEEPCAPS", [8] = "SET_KEEPCAPS", [9] = "GET_FPEMU", [10] = "SET_FPEMU", [11] = "GET_FPEXC", [12] = "SET_FPEXC", [13] = "GET_TIMING", [14] = "SET_TIMING", [15] = "SET_NAME", [16] = "GET_NAME", [19] = "GET_ENDIAN", [20] = "SET_ENDIAN", [21] = "GET_SECCOMP", [22] = "SET_SECCOMP", [23] = "CAPBSET_READ", [24] = "CAPBSET_DROP", [25] = "GET_TSC", [26] = "SET_TSC", [27] = "GET_SECUREBITS", [28] = "SET_SECUREBITS", [29] = "SET_TIMERSLACK", [30] = "GET_TIMERSLACK", [31] = "TASK_PERF_EVENTS_DISABLE", [32] = "TASK_PERF_EVENTS_ENABLE", [33] = "MCE_KILL", [34] = "MCE_KILL_GET", [35] = "SET_MM", [36] = "SET_CHILD_SUBREAPER", [37] = "GET_CHILD_SUBREAPER", [38] = "SET_NO_NEW_PRIVS", [39] = "GET_NO_NEW_PRIVS", [40] = "GET_TID_ADDRESS", [41] = "SET_THP_DISABLE", [42] = "GET_THP_DISABLE", [43] = "MPX_ENABLE_MANAGEMENT", [44] = "MPX_DISABLE_MANAGEMENT", [45] = "SET_FP_MODE", [46] = "GET_FP_MODE", [47] = "CAP_AMBIENT", [50] = "SVE_SET_VL", [51] = "SVE_GET_VL", [52] = "GET_SPECULATION_CTRL", [53] = "SET_SPECULATION_CTRL", [54] = "PAC_RESET_KEYS", [55] = "SET_TAGGED_ADDR_CTRL", [56] = "GET_TAGGED_ADDR_CTRL", [57] = "SET_IO_FLUSHER", [58] = "GET_IO_FLUSHER", [59] = "SET_SYSCALL_USER_DISPATCH", [60] = "PAC_SET_ENABLED_KEYS", [61] = "PAC_GET_ENABLED_KEYS", [62] = "SCHED_CORE", }; static const char *prctl_set_mm_options[] = { [1] = "START_CODE", [2] = "END_CODE", [3] = "START_DATA", [4] = "END_DATA", [5] = "START_STACK", [6] = "START_BRK", [7] = "BRK", [8] = "ARG_START", [9] = "ARG_END", [10] = "ENV_START", [11] = "ENV_END", [12] = "AUXV", [13] = "EXE_FILE", [14] = "MAP", [15] = "MAP_SIZE", }; $ Cc: Adrian Hunter Cc: Colin Cross Cc: Ian Rogers Cc: Jiri Olsa Cc: Kees Cook Cc: Namhyung Kim Cc: Suren Baghdasaryan Link: https://lore.kernel.org/lkml/YflZqY0rYQ3d1bKt@kernel.org Signed-off-by: Arnaldo Carvalho de Melo --- tools/perf/trace/beauty/prctl_option.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/perf/trace/beauty/prctl_option.sh b/tools/perf/trace/beauty/prctl_option.sh index 3109d7b05e11..3d278785fe57 100755 --- a/tools/perf/trace/beauty/prctl_option.sh +++ b/tools/perf/trace/beauty/prctl_option.sh @@ -4,7 +4,7 @@ [ $# -eq 1 ] && header_dir=$1 || header_dir=tools/include/uapi/linux/ printf "static const char *prctl_options[] = {\n" -regex='^#define[[:space:]]+PR_(\w+)[[:space:]]*([[:xdigit:]]+).*' +regex='^#define[[:space:]]{1}PR_(\w+)[[:space:]]*([[:xdigit:]]+)([[:space:]]*\/.*)?$' egrep $regex ${header_dir}/prctl.h | grep -v PR_SET_PTRACER | \ sed -r "s/$regex/\2 \1/g" | \ sort -n | xargs printf "\t[%s] = \"%s\",\n" -- cgit From fc45e6588d57b65378612fce07089276141509dc Mon Sep 17 00:00:00 2001 From: Arnaldo Carvalho de Melo Date: Thu, 11 Feb 2021 12:50:52 -0300 Subject: tools headers UAPI: Sync linux/prctl.h with the kernel sources To pick the changes in: 9a10064f5625d557 ("mm: add a field to store names for private anonymous memory") 
That don't result in any changes in tooling: $ tools/perf/trace/beauty/prctl_option.sh > before $ cp include/uapi/linux/prctl.h tools/include/uapi/linux/prctl.h $ tools/perf/trace/beauty/prctl_option.sh > after $ diff -u before after $ This actually adds a new prctl arg, but it has to be dealt with differently, as it is not in sequence with the other arguments. Just silences this perf tools build warning: Warning: Kernel ABI header at 'tools/include/uapi/linux/prctl.h' differs from latest version at 'include/uapi/linux/prctl.h' diff -u tools/include/uapi/linux/prctl.h include/uapi/linux/prctl.h Cc: Adrian Hunter Cc: Colin Cross Cc: Ian Rogers Cc: Jiri Olsa Cc: Kees Cook Cc: Namhyung Kim Cc: Suren Baghdasaryan Signed-off-by: Arnaldo Carvalho de Melo --- tools/include/uapi/linux/prctl.h | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tools/include/uapi/linux/prctl.h b/tools/include/uapi/linux/prctl.h index bb73e9a0b24f..e998764f0262 100644 --- a/tools/include/uapi/linux/prctl.h +++ b/tools/include/uapi/linux/prctl.h @@ -272,4 +272,7 @@ struct prctl_mm_map { # define PR_SCHED_CORE_SCOPE_THREAD_GROUP 1 # define PR_SCHED_CORE_SCOPE_PROCESS_GROUP 2 +#define PR_SET_VMA 0x53564d41 +# define PR_SET_VMA_ANON_NAME 0 + #endif /* _LINUX_PRCTL_H */ -- cgit From 052e04a52dcd3359ba1df25a508a3a93707a3f6e Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 27 Jan 2022 16:02:42 +0000 Subject: cifs: Transition from ->readpages() to ->readahead() Transition the cifs filesystem from using the old ->readpages() method to using the new ->readahead() method. For the moment, this removes any invocation of fscache to read data from the local cache, leaving that to another patch. Signed-off-by: David Howells cc: Steve French cc: Shyam Prasad N cc: Matthew Wilcox cc: Jeff Layton cc: linux-cifs@vger.kernel.org cc: linux-cachefs@redhat.com Reviewed-by: Rohith Surabattula Acked-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/file.c | 172 ++++++++++++--------------------------------------------- 1 file changed, 35 insertions(+), 137 deletions(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index 59334be9ed3b..be62dc29dc54 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4269,8 +4269,6 @@ cifs_readv_complete(struct work_struct *work) for (i = 0; i < rdata->nr_pages; i++) { struct page *page = rdata->pages[i]; - lru_cache_add(page); - if (rdata->result == 0 || (rdata->result == -EAGAIN && got_bytes)) { flush_dcache_page(page); @@ -4340,7 +4338,6 @@ readpages_fill_pages(struct TCP_Server_Info *server, * fill them until the writes are flushed. */ zero_user(page, 0, PAGE_SIZE); - lru_cache_add(page); flush_dcache_page(page); SetPageUptodate(page); unlock_page(page); @@ -4350,7 +4347,6 @@ readpages_fill_pages(struct TCP_Server_Info *server, continue; } else { /* no need to hold page hostage */ - lru_cache_add(page); unlock_page(page); put_page(page); rdata->pages[i] = NULL; @@ -4393,92 +4389,16 @@ cifs_readpages_copy_into_pages(struct TCP_Server_Info *server, return readpages_fill_pages(server, rdata, iter, iter->count); } -static int -readpages_get_pages(struct address_space *mapping, struct list_head *page_list, - unsigned int rsize, struct list_head *tmplist, - unsigned int *nr_pages, loff_t *offset, unsigned int *bytes) +static void cifs_readahead(struct readahead_control *ractl) { - struct page *page, *tpage; - unsigned int expected_index; int rc; - gfp_t gfp = readahead_gfp_mask(mapping); - - INIT_LIST_HEAD(tmplist); - - page = lru_to_page(page_list); - - /* - * Lock the page and put it in the cache. 
Since no one else - * should have access to this page, we're safe to simply set - * PG_locked without checking it first. - */ - __SetPageLocked(page); - rc = add_to_page_cache_locked(page, mapping, - page->index, gfp); - - /* give up if we can't stick it in the cache */ - if (rc) { - __ClearPageLocked(page); - return rc; - } - - /* move first page to the tmplist */ - *offset = (loff_t)page->index << PAGE_SHIFT; - *bytes = PAGE_SIZE; - *nr_pages = 1; - list_move_tail(&page->lru, tmplist); - - /* now try and add more pages onto the request */ - expected_index = page->index + 1; - list_for_each_entry_safe_reverse(page, tpage, page_list, lru) { - /* discontinuity ? */ - if (page->index != expected_index) - break; - - /* would this page push the read over the rsize? */ - if (*bytes + PAGE_SIZE > rsize) - break; - - __SetPageLocked(page); - rc = add_to_page_cache_locked(page, mapping, page->index, gfp); - if (rc) { - __ClearPageLocked(page); - break; - } - list_move_tail(&page->lru, tmplist); - (*bytes) += PAGE_SIZE; - expected_index++; - (*nr_pages)++; - } - return rc; -} - -static int cifs_readpages(struct file *file, struct address_space *mapping, - struct list_head *page_list, unsigned num_pages) -{ - int rc; - int err = 0; - struct list_head tmplist; - struct cifsFileInfo *open_file = file->private_data; - struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file); + struct cifsFileInfo *open_file = ractl->file->private_data; + struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file); struct TCP_Server_Info *server; pid_t pid; - unsigned int xid; + unsigned int xid, last_batch_size = 0; xid = get_xid(); - /* - * Reads as many pages as possible from fscache. Returns -ENOBUFS - * immediately if the cookie is negative - * - * After this point, every page in the list might have PG_fscache set, - * so we will need to clean that up off of every page we don't use. - */ - rc = cifs_readpages_from_fscache(mapping->host, mapping, page_list, - &num_pages); - if (rc == 0) { - free_xid(xid); - return rc; - } if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) pid = open_file->pid; @@ -4489,39 +4409,32 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, server = cifs_pick_channel(tlink_tcon(open_file->tlink)->ses); cifs_dbg(FYI, "%s: file=%p mapping=%p num_pages=%u\n", - __func__, file, mapping, num_pages); + __func__, ractl->file, ractl->mapping, readahead_count(ractl)); /* - * Start with the page at end of list and move it to private - * list. Do the same with any following pages until we hit - * the rsize limit, hit an index discontinuity, or run out of - * pages. Issue the async read and then start the loop again - * until the list is empty. - * - * Note that list order is important. The page_list is in - * the order of declining indexes. When we put the pages in - * the rdata->pages, then we want them in increasing order. + * Chop the readahead request up into rsize-sized read requests. 
*/ - while (!list_empty(page_list) && !err) { - unsigned int i, nr_pages, bytes, rsize; - loff_t offset; - struct page *page, *tpage; + while (readahead_count(ractl) - last_batch_size) { + unsigned int i, nr_pages, got, rsize; + struct page *page; struct cifs_readdata *rdata; struct cifs_credits credits_on_stack; struct cifs_credits *credits = &credits_on_stack; if (open_file->invalidHandle) { rc = cifs_reopen_file(open_file, true); - if (rc == -EAGAIN) - continue; - else if (rc) + if (rc) { + if (rc == -EAGAIN) + continue; break; + } } rc = server->ops->wait_mtu_credits(server, cifs_sb->ctx->rsize, &rsize, credits); if (rc) break; + nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl)); /* * Give up immediately if rsize is too small to read an entire @@ -4529,16 +4442,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, * reach this point however since we set ra_pages to 0 when the * rsize is smaller than a cache page. */ - if (unlikely(rsize < PAGE_SIZE)) { - add_credits_and_wake_if(server, credits, 0); - free_xid(xid); - return 0; - } - - nr_pages = 0; - err = readpages_get_pages(mapping, page_list, rsize, &tmplist, - &nr_pages, &offset, &bytes); - if (!nr_pages) { + if (unlikely(!nr_pages)) { add_credits_and_wake_if(server, credits, 0); break; } @@ -4546,36 +4450,31 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete); if (!rdata) { /* best to give up if we're out of mem */ - list_for_each_entry_safe(page, tpage, &tmplist, lru) { - list_del(&page->lru); - lru_cache_add(page); - unlock_page(page); - put_page(page); - } - rc = -ENOMEM; add_credits_and_wake_if(server, credits, 0); break; } - rdata->cfile = cifsFileInfo_get(open_file); - rdata->server = server; - rdata->mapping = mapping; - rdata->offset = offset; - rdata->bytes = bytes; - rdata->pid = pid; - rdata->pagesz = PAGE_SIZE; - rdata->tailsz = PAGE_SIZE; + got = __readahead_batch(ractl, rdata->pages, nr_pages); + if (got != nr_pages) { + pr_warn("__readahead_batch() returned %u/%u\n", + got, nr_pages); + nr_pages = got; + } + + rdata->nr_pages = nr_pages; + rdata->bytes = readahead_batch_length(ractl); + rdata->cfile = cifsFileInfo_get(open_file); + rdata->server = server; + rdata->mapping = ractl->mapping; + rdata->offset = readahead_pos(ractl); + rdata->pid = pid; + rdata->pagesz = PAGE_SIZE; + rdata->tailsz = PAGE_SIZE; rdata->read_into_pages = cifs_readpages_read_into_pages; rdata->copy_into_pages = cifs_readpages_copy_into_pages; - rdata->credits = credits_on_stack; - - list_for_each_entry_safe(page, tpage, &tmplist, lru) { - list_del(&page->lru); - rdata->pages[rdata->nr_pages++] = page; - } + rdata->credits = credits_on_stack; rc = adjust_credits(server, &rdata->credits, rdata->bytes); - if (!rc) { if (rdata->cfile->invalidHandle) rc = -EAGAIN; @@ -4587,7 +4486,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, add_credits_and_wake_if(server, &rdata->credits, 0); for (i = 0; i < rdata->nr_pages; i++) { page = rdata->pages[i]; - lru_cache_add(page); unlock_page(page); put_page(page); } @@ -4597,10 +4495,10 @@ static int cifs_readpages(struct file *file, struct address_space *mapping, } kref_put(&rdata->refcount, cifs_readdata_release); + last_batch_size = nr_pages; } free_xid(xid); - return rc; } /* @@ -4924,7 +4822,7 @@ oplock_break_done: * In the non-cached mode (mount with cache=none), we shunt off direct read and write requests * so this method should never be 
called. * - * Direct IO is not yet supported in the cached mode. + * Direct IO is not yet supported in the cached mode. */ static ssize_t cifs_direct_io(struct kiocb *iocb, struct iov_iter *iter) @@ -5006,7 +4904,7 @@ static int cifs_set_page_dirty(struct page *page) const struct address_space_operations cifs_addr_ops = { .readpage = cifs_readpage, - .readpages = cifs_readpages, + .readahead = cifs_readahead, .writepage = cifs_writepage, .writepages = cifs_writepages, .write_begin = cifs_write_begin, -- cgit From bee9f65523218e3baeeecde9295c8fbe9bc08e0a Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 27 Jan 2022 16:02:50 +0000 Subject: netfs, cachefiles: Add a method to query presence of data in the cache Add a netfs_cache_ops method by which a network filesystem can ask the cache about what data it has available and where so that it can make a multipage read more efficient. Signed-off-by: David Howells cc: linux-cachefs@redhat.com Acked-by: Jeff Layton Reviewed-by: Rohith Surabattula Signed-off-by: Steve French --- Documentation/filesystems/netfs_library.rst | 16 ++++++++ fs/cachefiles/io.c | 59 +++++++++++++++++++++++++++++ include/linux/netfs.h | 7 ++++ 3 files changed, 82 insertions(+) diff --git a/Documentation/filesystems/netfs_library.rst b/Documentation/filesystems/netfs_library.rst index 136f8da3d0e2..4f373a8ec47b 100644 --- a/Documentation/filesystems/netfs_library.rst +++ b/Documentation/filesystems/netfs_library.rst @@ -462,6 +462,10 @@ operation table looks like the following:: struct iov_iter *iter, netfs_io_terminated_t term_func, void *term_func_priv); + + int (*query_occupancy)(struct netfs_cache_resources *cres, + loff_t start, size_t len, size_t granularity, + loff_t *_data_start, size_t *_data_len); }; With a termination handler function pointer:: @@ -536,6 +540,18 @@ The methods defined in the table are: indicating whether the termination is definitely happening in the caller's context. + * ``query_occupancy()`` + + [Required] Called to find out where the next piece of data is within a + particular region of the cache. The start and length of the region to be + queried are passed in, along with the granularity to which the answer needs + to be aligned. The function passes back the start and length of the data, + if any, available within that region. Note that there may be a hole at the + front. + + It returns 0 if some data was found, -ENODATA if there was no usable data + within the region or -ENOBUFS if there is no caching on this file. + Note that these methods are passed a pointer to the cache resource structure, not the read request structure as they could be used in other situations where there isn't a read request structure as well, such as writing dirty data to the diff --git a/fs/cachefiles/io.c b/fs/cachefiles/io.c index 04eb52736990..753986ea1583 100644 --- a/fs/cachefiles/io.c +++ b/fs/cachefiles/io.c @@ -191,6 +191,64 @@ presubmission_error: return ret; } +/* + * Query the occupancy of the cache in a region, returning where the next chunk + * of data starts and how long it is. 
+ */ +static int cachefiles_query_occupancy(struct netfs_cache_resources *cres, + loff_t start, size_t len, size_t granularity, + loff_t *_data_start, size_t *_data_len) +{ + struct cachefiles_object *object; + struct file *file; + loff_t off, off2; + + *_data_start = -1; + *_data_len = 0; + + if (!fscache_wait_for_operation(cres, FSCACHE_WANT_READ)) + return -ENOBUFS; + + object = cachefiles_cres_object(cres); + file = cachefiles_cres_file(cres); + granularity = max_t(size_t, object->volume->cache->bsize, granularity); + + _enter("%pD,%li,%llx,%zx/%llx", + file, file_inode(file)->i_ino, start, len, + i_size_read(file_inode(file))); + + off = cachefiles_inject_read_error(); + if (off == 0) + off = vfs_llseek(file, start, SEEK_DATA); + if (off == -ENXIO) + return -ENODATA; /* Beyond EOF */ + if (off < 0 && off >= (loff_t)-MAX_ERRNO) + return -ENOBUFS; /* Error. */ + if (round_up(off, granularity) >= start + len) + return -ENODATA; /* No data in range */ + + off2 = cachefiles_inject_read_error(); + if (off2 == 0) + off2 = vfs_llseek(file, off, SEEK_HOLE); + if (off2 == -ENXIO) + return -ENODATA; /* Beyond EOF */ + if (off2 < 0 && off2 >= (loff_t)-MAX_ERRNO) + return -ENOBUFS; /* Error. */ + + /* Round away partial blocks */ + off = round_up(off, granularity); + off2 = round_down(off2, granularity); + if (off2 <= off) + return -ENODATA; + + *_data_start = off; + if (off2 > start + len) + *_data_len = len; + else + *_data_len = off2 - off; + return 0; +} + /* * Handle completion of a write to the cache. */ @@ -545,6 +603,7 @@ static const struct netfs_cache_ops cachefiles_netfs_cache_ops = { .write = cachefiles_write, .prepare_read = cachefiles_prepare_read, .prepare_write = cachefiles_prepare_write, + .query_occupancy = cachefiles_query_occupancy, }; /* diff --git a/include/linux/netfs.h b/include/linux/netfs.h index b46c39d98bbd..614f22213e21 100644 --- a/include/linux/netfs.h +++ b/include/linux/netfs.h @@ -244,6 +244,13 @@ struct netfs_cache_ops { int (*prepare_write)(struct netfs_cache_resources *cres, loff_t *_start, size_t *_len, loff_t i_size, bool no_space_allocated_yet); + + /* Query the occupancy of the cache in a region, returning where the + * next chunk of data starts and how long it is. + */ + int (*query_occupancy)(struct netfs_cache_resources *cres, + loff_t start, size_t len, size_t granularity, + loff_t *_data_start, size_t *_data_len); }; struct readahead_control; -- cgit From 0174ee9947bd0f24fee2794b35258960d108b7aa Mon Sep 17 00:00:00 2001 From: David Howells Date: Thu, 27 Jan 2022 16:02:58 +0000 Subject: cifs: Implement cache I/O by accessing the cache directly Move cifs to using fscache DIO API instead of the old upstream I/O API as that has been removed. This is a stopgap solution as the intention is that at sometime in the future, the cache will move to using larger blocks and won't be able to store individual pages in order to deal with the potential for data corruption due to the backing filesystem being able insert/remove bridging blocks of zeros into its extent list[1]. cifs then reads and writes cache pages synchronously and one page at a time. The preferred change would be to use the netfs lib, but the new I/O API can be used directly. It's just that as the cache now needs to track data for itself, caching blocks may exceed page size... This code is somewhat borrowed from my "fallback I/O" patchset[2]. 
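[ Editorial illustration, not part of the patch: a rough sketch of the "one page at a time" cache read this change introduces, condensed from fscache_fallback_read_page() in the diff below; error injection and the write path are omitted. ]

static int fallback_read_page(struct fscache_cookie *cookie, struct page *page)
{
	struct netfs_cache_resources cres;
	struct bio_vec bvec[1];
	struct iov_iter iter;
	int ret;

	memset(&cres, 0, sizeof(cres));
	bvec[0].bv_page = page;
	bvec[0].bv_offset = 0;
	bvec[0].bv_len = PAGE_SIZE;
	iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);

	/* Pin cache resources for a read on this cookie. */
	ret = fscache_begin_read_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	/* Synchronous single-page read; a hole in the cache counts as a miss. */
	ret = fscache_read(&cres, page_offset(page), &iter,
			   NETFS_READ_HOLE_FAIL, NULL, NULL);
	fscache_end_operation(&cres);
	return ret;
}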
Signed-off-by: David Howells cc: Steve French cc: Shyam Prasad N cc: linux-cifs@vger.kernel.org cc: linux-cachefs@redhat.com Link: https://lore.kernel.org/r/YO17ZNOcq+9PajfQ@mit.edu [1] Link: https://lore.kernel.org/r/202112100957.2oEDT20W-lkp@intel.com/ [2] Acked-by: Jeff Layton Signed-off-by: Steve French --- fs/cifs/file.c | 55 +++++++++++++++++++++--- fs/cifs/fscache.c | 126 ++++++++++++++++++++++++++++++++++++++++++++++-------- fs/cifs/fscache.h | 79 +++++++++++++++++++++------------- 3 files changed, 207 insertions(+), 53 deletions(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index be62dc29dc54..a50912674915 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4276,12 +4276,12 @@ cifs_readv_complete(struct work_struct *work) } else SetPageError(page); - unlock_page(page); - if (rdata->result == 0 || (rdata->result == -EAGAIN && got_bytes)) cifs_readpage_to_fscache(rdata->mapping->host, page); + unlock_page(page); + got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes); put_page(page); @@ -4396,7 +4396,11 @@ static void cifs_readahead(struct readahead_control *ractl) struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file); struct TCP_Server_Info *server; pid_t pid; - unsigned int xid, last_batch_size = 0; + unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0; + pgoff_t next_cached = ULONG_MAX; + bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) && + cifs_inode_cookie(ractl->mapping->host)->cache_priv; + bool check_cache = caching; xid = get_xid(); @@ -4414,12 +4418,52 @@ static void cifs_readahead(struct readahead_control *ractl) /* * Chop the readahead request up into rsize-sized read requests. */ - while (readahead_count(ractl) - last_batch_size) { - unsigned int i, nr_pages, got, rsize; + while ((nr_pages = readahead_count(ractl) - last_batch_size)) { + unsigned int i, got, rsize; struct page *page; struct cifs_readdata *rdata; struct cifs_credits credits_on_stack; struct cifs_credits *credits = &credits_on_stack; + pgoff_t index = readahead_index(ractl) + last_batch_size; + + /* + * Find out if we have anything cached in the range of + * interest, and if so, where the next chunk of cached data is. + */ + if (caching) { + if (check_cache) { + rc = cifs_fscache_query_occupancy( + ractl->mapping->host, index, nr_pages, + &next_cached, &cache_nr_pages); + if (rc < 0) + caching = false; + check_cache = false; + } + + if (index == next_cached) { + /* + * TODO: Send a whole batch of pages to be read + * by the cache. + */ + page = readahead_page(ractl); + + if (cifs_readpage_from_fscache(ractl->mapping->host, + page) < 0) { + /* + * TODO: Deal with cache read failure + * here, but for the moment, delegate + * that to readpage. 
+ */ + caching = false; + } + unlock_page(page); + next_cached++; + cache_nr_pages--; + if (cache_nr_pages == 0) + check_cache = true; + continue; + } + } if (open_file->invalidHandle) { rc = cifs_reopen_file(open_file, true); @@ -4435,6 +4479,7 @@ static void cifs_readahead(struct readahead_control *ractl) if (rc) break; nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl)); + nr_pages = min_t(size_t, nr_pages, next_cached - index); /* * Give up immediately if rsize is too small to read an entire diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c index efaac4d5ff55..33af72e0ac0c 100644 --- a/fs/cifs/fscache.c +++ b/fs/cifs/fscache.c @@ -134,37 +134,127 @@ void cifs_fscache_release_inode_cookie(struct inode *inode) } } +static inline void fscache_end_operation(struct netfs_cache_resources *cres) +{ + const struct netfs_cache_ops *ops = fscache_operation_valid(cres); + + if (ops) + ops->end_operation(cres); +} + /* - * Retrieve a page from FS-Cache + * Fallback page reading interface. */ -int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) +static int fscache_fallback_read_page(struct inode *inode, struct page *page) { - cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n", - __func__, CIFS_I(inode)->fscache, page, inode); - return -ENOBUFS; // Needs conversion to using netfslib + struct netfs_cache_resources cres; + struct fscache_cookie *cookie = cifs_inode_cookie(inode); + struct iov_iter iter; + struct bio_vec bvec[1]; + int ret; + + memset(&cres, 0, sizeof(cres)); + bvec[0].bv_page = page; + bvec[0].bv_offset = 0; + bvec[0].bv_len = PAGE_SIZE; + iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); + + ret = fscache_begin_read_operation(&cres, cookie); + if (ret < 0) + return ret; + + ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL, + NULL, NULL); + fscache_end_operation(&cres); + return ret; } /* - * Retrieve a set of pages from FS-Cache + * Fallback page writing interface. 
*/ -int __cifs_readpages_from_fscache(struct inode *inode, - struct address_space *mapping, - struct list_head *pages, - unsigned *nr_pages) +static int fscache_fallback_write_page(struct inode *inode, struct page *page, + bool no_space_allocated_yet) { - cifs_dbg(FYI, "%s: (0x%p/%u/0x%p)\n", - __func__, CIFS_I(inode)->fscache, *nr_pages, inode); - return -ENOBUFS; // Needs conversion to using netfslib + struct netfs_cache_resources cres; + struct fscache_cookie *cookie = cifs_inode_cookie(inode); + struct iov_iter iter; + struct bio_vec bvec[1]; + loff_t start = page_offset(page); + size_t len = PAGE_SIZE; + int ret; + + memset(&cres, 0, sizeof(cres)); + bvec[0].bv_page = page; + bvec[0].bv_offset = 0; + bvec[0].bv_len = PAGE_SIZE; + iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE); + + ret = fscache_begin_write_operation(&cres, cookie); + if (ret < 0) + return ret; + + ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode), + no_space_allocated_yet); + if (ret == 0) + ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL); + fscache_end_operation(&cres); + return ret; } -void __cifs_readpage_to_fscache(struct inode *inode, struct page *page) +/* + * Retrieve a page from FS-Cache + */ +int __cifs_readpage_from_fscache(struct inode *inode, struct page *page) { - struct cifsInodeInfo *cifsi = CIFS_I(inode); + int ret; - WARN_ON(!cifsi->fscache); + cifs_dbg(FYI, "%s: (fsc:%p, p:%p, i:0x%p\n", + __func__, cifs_inode_cookie(inode), page, inode); + ret = fscache_fallback_read_page(inode, page); + if (ret < 0) + return ret; + + /* Read completed synchronously */ + SetPageUptodate(page); + return 0; +} + +void __cifs_readpage_to_fscache(struct inode *inode, struct page *page) +{ cifs_dbg(FYI, "%s: (fsc: %p, p: %p, i: %p)\n", - __func__, cifsi->fscache, page, inode); + __func__, cifs_inode_cookie(inode), page, inode); + + fscache_fallback_write_page(inode, page, true); +} + +/* + * Query the cache occupancy. 
+ */ +int __cifs_fscache_query_occupancy(struct inode *inode, + pgoff_t first, unsigned int nr_pages, + pgoff_t *_data_first, + unsigned int *_data_nr_pages) +{ + struct netfs_cache_resources cres; + struct fscache_cookie *cookie = cifs_inode_cookie(inode); + loff_t start, data_start; + size_t len, data_len; + int ret; - // Needs conversion to using netfslib + ret = fscache_begin_read_operation(&cres, cookie); + if (ret < 0) + return ret; + + start = first * PAGE_SIZE; + len = nr_pages * PAGE_SIZE; + ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE, + &data_start, &data_len); + if (ret == 0) { + *_data_first = data_start / PAGE_SIZE; + *_data_nr_pages = len / PAGE_SIZE; + } + + fscache_end_operation(&cres); + return ret; } diff --git a/fs/cifs/fscache.h b/fs/cifs/fscache.h index c6ca49ac33d4..55129908e2c1 100644 --- a/fs/cifs/fscache.h +++ b/fs/cifs/fscache.h @@ -9,6 +9,7 @@ #ifndef _CIFS_FSCACHE_H #define _CIFS_FSCACHE_H +#include #include #include "cifsglob.h" @@ -58,14 +59,6 @@ void cifs_fscache_fill_coherency(struct inode *inode, } -extern int cifs_fscache_release_page(struct page *page, gfp_t gfp); -extern int __cifs_readpage_from_fscache(struct inode *, struct page *); -extern int __cifs_readpages_from_fscache(struct inode *, - struct address_space *, - struct list_head *, - unsigned *); -extern void __cifs_readpage_to_fscache(struct inode *, struct page *); - static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return CIFS_I(inode)->fscache; @@ -80,33 +73,52 @@ static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags i_size_read(inode), flags); } -static inline int cifs_readpage_from_fscache(struct inode *inode, - struct page *page) -{ - if (CIFS_I(inode)->fscache) - return __cifs_readpage_from_fscache(inode, page); +extern int __cifs_fscache_query_occupancy(struct inode *inode, + pgoff_t first, unsigned int nr_pages, + pgoff_t *_data_first, + unsigned int *_data_nr_pages); - return -ENOBUFS; +static inline int cifs_fscache_query_occupancy(struct inode *inode, + pgoff_t first, unsigned int nr_pages, + pgoff_t *_data_first, + unsigned int *_data_nr_pages) +{ + if (!cifs_inode_cookie(inode)) + return -ENOBUFS; + return __cifs_fscache_query_occupancy(inode, first, nr_pages, + _data_first, _data_nr_pages); } -static inline int cifs_readpages_from_fscache(struct inode *inode, - struct address_space *mapping, - struct list_head *pages, - unsigned *nr_pages) +extern int __cifs_readpage_from_fscache(struct inode *pinode, struct page *ppage); +extern void __cifs_readpage_to_fscache(struct inode *pinode, struct page *ppage); + + +static inline int cifs_readpage_from_fscache(struct inode *inode, + struct page *page) { - if (CIFS_I(inode)->fscache) - return __cifs_readpages_from_fscache(inode, mapping, pages, - nr_pages); + if (cifs_inode_cookie(inode)) + return __cifs_readpage_from_fscache(inode, page); return -ENOBUFS; } static inline void cifs_readpage_to_fscache(struct inode *inode, struct page *page) { - if (PageFsCache(page)) + if (cifs_inode_cookie(inode)) __cifs_readpage_to_fscache(inode, page); } +static inline int cifs_fscache_release_page(struct page *page, gfp_t gfp) +{ + if (PageFsCache(page)) { + if (current_is_kswapd() || !(gfp & __GFP_FS)) + return false; + wait_on_page_fscache(page); + fscache_note_page_release(cifs_inode_cookie(page->mapping->host)); + } + return true; +} + #else /* CONFIG_CIFS_FSCACHE */ static inline void cifs_fscache_fill_coherency(struct inode *inode, @@ -123,22 +135,29 @@ static inline void 
cifs_fscache_unuse_inode_cookie(struct inode *inode, bool upd static inline struct fscache_cookie *cifs_inode_cookie(struct inode *inode) { return NULL; } static inline void cifs_invalidate_cache(struct inode *inode, unsigned int flags) {} -static inline int -cifs_readpage_from_fscache(struct inode *inode, struct page *page) +static inline int cifs_fscache_query_occupancy(struct inode *inode, + pgoff_t first, unsigned int nr_pages, + pgoff_t *_data_first, + unsigned int *_data_nr_pages) { + *_data_first = ULONG_MAX; + *_data_nr_pages = 0; return -ENOBUFS; } -static inline int cifs_readpages_from_fscache(struct inode *inode, - struct address_space *mapping, - struct list_head *pages, - unsigned *nr_pages) +static inline int +cifs_readpage_from_fscache(struct inode *inode, struct page *page) { return -ENOBUFS; } -static inline void cifs_readpage_to_fscache(struct inode *inode, - struct page *page) {} +static inline +void cifs_readpage_to_fscache(struct inode *inode, struct page *page) {} + +static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp) +{ + return true; /* May release page */ +} #endif /* CONFIG_CIFS_FSCACHE */ -- cgit From 363c4c3811db330dee9ce27dd3cee6f590d44e4c Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Tue, 25 Jan 2022 13:54:09 -0800 Subject: drm/panel-edp: Allow querying the detected panel via sysfs Recently we added generic "edp-panel"s probed by EDID. To support panels in this way we look at the panel ID in the EDID and look up the panel in a table that has power sequence timings. If we find a panel that's not in the table we will still attempt to use it but we'll use conservative timings. While it's likely that these conservative timings will work for most nearly all panels, the performance of turning the panel off and on suffers. We'd like to be able to reliably detect the case that we're using the hardcoded timings without relying on parsing dmesg. This allows us to implement tests that ensure that no devices get shipped that are relying on the conservative timings. Let's add a new sysfs entry to panel devices. It will have one of: * UNKNOWN - We tried to detect a panel but it wasn't in our table. * HARDCODED - We're not using generic "edp-panel" probed by EDID. * A panel name - This is the name of the panel from our table. 
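
Given the three values above, a shipping-readiness test only needs to read the attribute and reject "UNKNOWN". As a rough illustration only (not part of this patch), the userspace check below does exactly that; the sysfs path is an assumption for the example, since the attribute is created on the panel device and its exact location depends on how the panel is bound.

/*
 * Hypothetical userspace check, for illustration only; PANEL_ATTR is a
 * placeholder path, adjust it to wherever the panel device ends up.
 */
#include <stdio.h>
#include <string.h>

#define PANEL_ATTR "/sys/class/drm/card0-eDP-1/device/detected_panel"

int main(void)
{
        char buf[64] = "";
        FILE *f = fopen(PANEL_ATTR, "r");

        if (!f)
                return 0;               /* attribute absent: nothing to check */
        if (fgets(buf, sizeof(buf), f))
                buf[strcspn(buf, "\n")] = '\0';
        fclose(f);

        if (strcmp(buf, "UNKNOWN") == 0) {
                fprintf(stderr, "panel not in the edp_panels table\n");
                return 1;               /* still on conservative timings */
        }
        return 0;
}

A value of "HARDCODED" would also pass such a check, since boards using a fixed compatible are expected to carry explicit timings already.
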
Signed-off-by: Douglas Anderson Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20220125135406.1.I62322abf81dbc1a1b72392a093be0c767da9bf51@changeid --- drivers/gpu/drm/panel/panel-edp.c | 39 ++++++++++++++++++++++++++++++++++----- 1 file changed, 34 insertions(+), 5 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index a394a15dc3fb..23da4040e263 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -222,6 +222,8 @@ struct panel_edp { struct gpio_desc *enable_gpio; struct gpio_desc *hpd_gpio; + const struct edp_panel_entry *detected_panel; + struct edid *edid; struct drm_display_mode override_mode; @@ -666,7 +668,6 @@ static const struct edp_panel_entry *find_edp_panel(u32 panel_id); static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) { - const struct edp_panel_entry *edp_panel; struct panel_desc *desc; u32 panel_id; char vend[4]; @@ -705,14 +706,14 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) } drm_edid_decode_panel_id(panel_id, vend, &product_id); - edp_panel = find_edp_panel(panel_id); + panel->detected_panel = find_edp_panel(panel_id); /* * We're using non-optimized timings and want it really obvious that * someone needs to add an entry to the table, so we'll do a WARN_ON * splat. */ - if (WARN_ON(!edp_panel)) { + if (WARN_ON(!panel->detected_panel)) { dev_warn(dev, "Unknown panel %s %#06x, using conservative timings\n", vend, product_id); @@ -734,12 +735,14 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) */ desc->delay.unprepare = 2000; desc->delay.enable = 200; + + panel->detected_panel = ERR_PTR(-EINVAL); } else { dev_info(dev, "Detected %s %s (%#06x)\n", - vend, edp_panel->name, product_id); + vend, panel->detected_panel->name, product_id); /* Update the delay; everything else comes from EDID */ - desc->delay = *edp_panel->delay; + desc->delay = *panel->detected_panel->delay; } ret = 0; @@ -750,6 +753,28 @@ exit: return ret; } +static ssize_t detected_panel_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct panel_edp *p = dev_get_drvdata(dev); + + if (IS_ERR(p->detected_panel)) + return sysfs_emit(buf, "UNKNOWN\n"); + else if (!p->detected_panel) + return sysfs_emit(buf, "HARDCODED\n"); + else + return sysfs_emit(buf, "%s\n", p->detected_panel->name); +} + +static const DEVICE_ATTR_RO(detected_panel); + +static void edp_panel_remove_detected_panel(void *data) +{ + struct panel_edp *p = data; + + device_remove_file(p->base.dev, &dev_attr_detected_panel); +} + static int panel_edp_probe(struct device *dev, const struct panel_desc *desc, struct drm_dp_aux *aux) { @@ -849,6 +874,10 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc, drm_panel_add(&panel->base); + err = device_create_file(dev, &dev_attr_detected_panel); + if (!err) + devm_add_action_or_reset(dev, edp_panel_remove_detected_panel, panel); + return 0; err_finished_pm_runtime: -- cgit From 46f5cbdef7d4fbb0f857a3caddec6799d0b5bb2f Mon Sep 17 00:00:00 2001 From: David Howells Date: Mon, 31 Jan 2022 17:54:43 +0000 Subject: cifs: Fix the readahead conversion to manage the batch when reading from cache Fix the readahead conversion to correctly manage the last batch skipping when reading from cache. 
This involves a readahead batch of one page or one folio, so set the batch size according to the number of constituent pages (should be 1 for a filesystem that doesn't do multipage folios yet). Signed-off-by: David Howells cc: Steve French Reviewed-by: Rohith Surabattula Reviewed-by: Shyam Prasad N cc: Jeff Layton cc: linux-cifs@vger.kernel.org Signed-off-by: Steve French --- fs/cifs/file.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fs/cifs/file.c b/fs/cifs/file.c index a50912674915..e7af802dcfa6 100644 --- a/fs/cifs/file.c +++ b/fs/cifs/file.c @@ -4446,7 +4446,7 @@ static void cifs_readahead(struct readahead_control *ractl) * by the cache. */ page = readahead_page(ractl); - + last_batch_size = 1 << thp_order(page); if (cifs_readpage_from_fscache(ractl->mapping->host, page) < 0) { /* -- cgit From 68defd528f94ed1cf11f49a75cc1875dccd781fa Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Tue, 7 Dec 2021 13:23:06 +0200 Subject: e1000e: Separate ADP board type from TGP We have the same LAN controller on different PCH's. Separate ADP board type from a TGP which will allow for specific fixes to be applied for ADP platforms. Suggested-by: Kai-Heng Feng Suggested-by: Dima Ruinskiy Signed-off-by: Sasha Neftin Tested-by: Nechama Kraus Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/e1000e/e1000.h | 4 +++- drivers/net/ethernet/intel/e1000e/ich8lan.c | 20 +++++++++++++++++ drivers/net/ethernet/intel/e1000e/netdev.c | 33 +++++++++++++++-------------- 3 files changed, 40 insertions(+), 17 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/e1000.h b/drivers/net/ethernet/intel/e1000e/e1000.h index c3def0ee7788..8d06c9d8ff8b 100644 --- a/drivers/net/ethernet/intel/e1000e/e1000.h +++ b/drivers/net/ethernet/intel/e1000e/e1000.h @@ -115,7 +115,8 @@ enum e1000_boards { board_pch_lpt, board_pch_spt, board_pch_cnp, - board_pch_tgp + board_pch_tgp, + board_pch_adp }; struct e1000_ps_page { @@ -502,6 +503,7 @@ extern const struct e1000_info e1000_pch_lpt_info; extern const struct e1000_info e1000_pch_spt_info; extern const struct e1000_info e1000_pch_cnp_info; extern const struct e1000_info e1000_pch_tgp_info; +extern const struct e1000_info e1000_pch_adp_info; extern const struct e1000_info e1000_es2_info; void e1000e_ptp_init(struct e1000_adapter *adapter); diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c index 5e4fc9b4e2ad..c908c84b86d2 100644 --- a/drivers/net/ethernet/intel/e1000e/ich8lan.c +++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c @@ -6021,3 +6021,23 @@ const struct e1000_info e1000_pch_tgp_info = { .phy_ops = &ich8_phy_ops, .nvm_ops = &spt_nvm_ops, }; + +const struct e1000_info e1000_pch_adp_info = { + .mac = e1000_pch_adp, + .flags = FLAG_IS_ICH + | FLAG_HAS_WOL + | FLAG_HAS_HW_TIMESTAMP + | FLAG_HAS_CTRLEXT_ON_LOAD + | FLAG_HAS_AMT + | FLAG_HAS_FLASH + | FLAG_HAS_JUMBO_FRAMES + | FLAG_APME_IN_WUC, + .flags2 = FLAG2_HAS_PHY_STATS + | FLAG2_HAS_EEE, + .pba = 26, + .max_hw_frame_size = 9022, + .get_variants = e1000_get_variants_ich8lan, + .mac_ops = &ich8_mac_ops, + .phy_ops = &ich8_phy_ops, + .nvm_ops = &spt_nvm_ops, +}; diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index 635a95927e93..d2de8bc4c3b7 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -52,6 +52,7 @@ static const struct e1000_info *e1000_info_tbl[] = { [board_pch_spt] = &e1000_pch_spt_info, [board_pch_cnp] = &e1000_pch_cnp_info, 
[board_pch_tgp] = &e1000_pch_tgp_info, + [board_pch_adp] = &e1000_pch_adp_info, }; struct e1000_reg_info { @@ -7898,22 +7899,22 @@ static const struct pci_device_id e1000_pci_tbl[] = { { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V14), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_LM15), board_pch_tgp }, { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_TGP_I219_V15), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_tgp }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_tgp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM23), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V23), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM16), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V16), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_LM17), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_ADP_I219_V17), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_LM22), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_RPL_I219_V22), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM18), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V18), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_LM19), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_MTP_I219_V19), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM20), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V20), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_LM21), board_pch_adp }, + { PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LNP_I219_V21), board_pch_adp }, { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ }; -- cgit From cad014b7b5a6897d8c4fad13e2888978bfb7a53f Mon Sep 17 00:00:00 2001 From: Sasha Neftin Date: Tue, 7 Dec 2021 13:23:42 +0200 Subject: e1000e: Handshake with CSME starts from ADL platforms Handshake with CSME/AMT on none provisioned platforms during S0ix flow is not supported on TGL platform and can cause to HW unit hang. Update the handshake with CSME flow to start from the ADL platform. 
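
In other words, the S0ix entry and exit flows only perform the ME handshake when the firmware is valid and the MAC is ADP or newer. A minimal sketch of that guard follows; the helper name is invented for illustration (the actual change below open-codes the test in both flows) and it assumes the driver's usual er32() macro context.

/* Sketch only: the helper name is made up for illustration. */
static bool e1000e_me_handshake_supported(struct e1000_adapter *adapter)
{
        struct e1000_hw *hw = &adapter->hw;

        /* ME firmware present and the MAC is ADP or a newer PCH */
        return (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) &&
               hw->mac.type >= e1000_pch_adp;
}

On TGP the second condition is false, so the handshake is skipped there and the hang described above is avoided.
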
Fixes: 3e55d231716e ("e1000e: Add handshake with the CSME to support S0ix") Signed-off-by: Sasha Neftin Tested-by: Nechama Kraus Signed-off-by: Tony Nguyen --- drivers/net/ethernet/intel/e1000e/netdev.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c index d2de8bc4c3b7..a42aeb555f34 100644 --- a/drivers/net/ethernet/intel/e1000e/netdev.c +++ b/drivers/net/ethernet/intel/e1000e/netdev.c @@ -6342,7 +6342,8 @@ static void e1000e_s0ix_entry_flow(struct e1000_adapter *adapter) u32 mac_data; u16 phy_data; - if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { /* Request ME configure the device for S0ix */ mac_data = er32(H2ME); mac_data |= E1000_H2ME_START_DPG; @@ -6491,7 +6492,8 @@ static void e1000e_s0ix_exit_flow(struct e1000_adapter *adapter) u16 phy_data; u32 i = 0; - if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID) { + if (er32(FWSM) & E1000_ICH_FWSM_FW_VALID && + hw->mac.type >= e1000_pch_adp) { /* Request ME unconfigure the device from S0ix */ mac_data = er32(H2ME); mac_data &= ~E1000_H2ME_START_DPG; -- cgit From 053ca37c87af65f41f5842070c68aa53c3d035f5 Mon Sep 17 00:00:00 2001 From: Bjorn Helgaas Date: Thu, 27 Jan 2022 15:49:49 -0600 Subject: PCI: j721e: Initialize pcie->cdns_pcie before using it Christian reported a NULL pointer dereference in j721e_pcie_probe() caused by 19e863828acf ("PCI: j721e: Drop redundant struct device *"), which removed struct j721e_pcie.dev since there's another copy in struct cdns_pcie.dev reachable via j721e_pcie->cdns_pcie->dev. The problem is that j721e_pcie->cdns_pcie was dereferenced before being initialized: j721e_pcie_probe pcie = devm_kzalloc() # struct j721e_pcie j721e_pcie_ctrl_init(pcie) dev = pcie->cdns_pcie->dev <-- dereference cdns_pcie switch (mode) { case PCI_MODE_RC: cdns_pcie = ... # alloc as part of pci_host_bridge pcie->cdns_pcie = cdns_pcie <-- initialize pcie->cdns_pcie Move the cdns_pcie initialization earlier so it is done before it is used. This also simplifies the error exits. 
Fixes: 19e863828acf ("PCI: j721e: Drop redundant struct device *") Link: https://lore.kernel.org/r/20220127222951.GA144828@bhelgaas Link: https://lore.kernel.org/r/20220124122132.435743-1-christian.gmeiner@gmail.com Reported-by: Christian Gmeiner Tested-by: Christian Gmeiner Signed-off-by: Bjorn Helgaas --- drivers/pci/controller/cadence/pci-j721e.c | 85 +++++++++++++++--------------- 1 file changed, 42 insertions(+), 43 deletions(-) diff --git a/drivers/pci/controller/cadence/pci-j721e.c b/drivers/pci/controller/cadence/pci-j721e.c index 489586a4cdc7..768d33f9ebc8 100644 --- a/drivers/pci/controller/cadence/pci-j721e.c +++ b/drivers/pci/controller/cadence/pci-j721e.c @@ -356,8 +356,8 @@ static int j721e_pcie_probe(struct platform_device *pdev) const struct j721e_pcie_data *data; struct cdns_pcie *cdns_pcie; struct j721e_pcie *pcie; - struct cdns_pcie_rc *rc; - struct cdns_pcie_ep *ep; + struct cdns_pcie_rc *rc = NULL; + struct cdns_pcie_ep *ep = NULL; struct gpio_desc *gpiod; void __iomem *base; struct clk *clk; @@ -376,6 +376,46 @@ static int j721e_pcie_probe(struct platform_device *pdev) if (!pcie) return -ENOMEM; + switch (mode) { + case PCI_MODE_RC: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) + return -ENODEV; + + bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); + if (!bridge) + return -ENOMEM; + + if (!data->byte_access_allowed) + bridge->ops = &cdns_ti_pcie_host_ops; + rc = pci_host_bridge_priv(bridge); + rc->quirk_retrain_flag = data->quirk_retrain_flag; + rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; + + cdns_pcie = &rc->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + break; + case PCI_MODE_EP: + if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) + return -ENODEV; + + ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); + if (!ep) + return -ENOMEM; + + ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; + + cdns_pcie = &ep->pcie; + cdns_pcie->dev = dev; + cdns_pcie->ops = &j721e_pcie_ops; + pcie->cdns_pcie = cdns_pcie; + break; + default: + dev_err(dev, "INVALID device type %d\n", mode); + return 0; + } + pcie->mode = mode; pcie->linkdown_irq_regfield = data->linkdown_irq_regfield; @@ -426,28 +466,6 @@ static int j721e_pcie_probe(struct platform_device *pdev) switch (mode) { case PCI_MODE_RC: - if (!IS_ENABLED(CONFIG_PCIE_CADENCE_HOST)) { - ret = -ENODEV; - goto err_get_sync; - } - - bridge = devm_pci_alloc_host_bridge(dev, sizeof(*rc)); - if (!bridge) { - ret = -ENOMEM; - goto err_get_sync; - } - - if (!data->byte_access_allowed) - bridge->ops = &cdns_ti_pcie_host_ops; - rc = pci_host_bridge_priv(bridge); - rc->quirk_retrain_flag = data->quirk_retrain_flag; - rc->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; - - cdns_pcie = &rc->pcie; - cdns_pcie->dev = dev; - cdns_pcie->ops = &j721e_pcie_ops; - pcie->cdns_pcie = cdns_pcie; - gpiod = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(gpiod)) { ret = PTR_ERR(gpiod); @@ -497,23 +515,6 @@ static int j721e_pcie_probe(struct platform_device *pdev) break; case PCI_MODE_EP: - if (!IS_ENABLED(CONFIG_PCIE_CADENCE_EP)) { - ret = -ENODEV; - goto err_get_sync; - } - - ep = devm_kzalloc(dev, sizeof(*ep), GFP_KERNEL); - if (!ep) { - ret = -ENOMEM; - goto err_get_sync; - } - ep->quirk_detect_quiet_flag = data->quirk_detect_quiet_flag; - - cdns_pcie = &ep->pcie; - cdns_pcie->dev = dev; - cdns_pcie->ops = &j721e_pcie_ops; - pcie->cdns_pcie = cdns_pcie; - ret = cdns_pcie_init_phy(dev, cdns_pcie); if (ret) { dev_err(dev, "Failed to init phy\n"); @@ -525,8 
+526,6 @@ static int j721e_pcie_probe(struct platform_device *pdev) goto err_pcie_setup; break; - default: - dev_err(dev, "INVALID device type %d\n", mode); } return 0; -- cgit From 24f6008564183aa120d07c03d9289519c2fe02af Mon Sep 17 00:00:00 2001 From: "Eric W. Biederman" Date: Thu, 20 Jan 2022 11:04:01 -0600 Subject: cgroup-v1: Require capabilities to set release_agent The cgroup release_agent is called with call_usermodehelper. The function call_usermodehelper starts the release_agent with a full set fo capabilities. Therefore require capabilities when setting the release_agaent. Reported-by: Tabitha Sable Tested-by: Tabitha Sable Fixes: 81a6a5cdd2c5 ("Task Control Groups: automatic userspace notification of idle cgroups") Cc: stable@vger.kernel.org # v2.6.24+ Signed-off-by: "Eric W. Biederman" Signed-off-by: Tejun Heo --- kernel/cgroup/cgroup-v1.c | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/kernel/cgroup/cgroup-v1.c b/kernel/cgroup/cgroup-v1.c index 41e0837a5a0b..0e877dbcfeea 100644 --- a/kernel/cgroup/cgroup-v1.c +++ b/kernel/cgroup/cgroup-v1.c @@ -549,6 +549,14 @@ static ssize_t cgroup_release_agent_write(struct kernfs_open_file *of, BUILD_BUG_ON(sizeof(cgrp->root->release_agent_path) < PATH_MAX); + /* + * Release agent gets called with all capabilities, + * require capabilities to set release agent. + */ + if ((of->file->f_cred->user_ns != &init_user_ns) || + !capable(CAP_SYS_ADMIN)) + return -EPERM; + cgrp = cgroup_kn_lock_live(of->kn, false); if (!cgrp) return -ENODEV; @@ -954,6 +962,12 @@ int cgroup1_parse_param(struct fs_context *fc, struct fs_parameter *param) /* Specifying two release agents is forbidden */ if (ctx->release_agent) return invalfc(fc, "release_agent respecified"); + /* + * Release agent gets called with all capabilities, + * require capabilities to set release agent. + */ + if ((fc->user_ns != &init_user_ns) || !capable(CAP_SYS_ADMIN)) + return invalfc(fc, "Setting release_agent not allowed"); ctx->release_agent = param->string; param->string = NULL; break; -- cgit From 2bf68bbdb6f5a445b26a0e8fe14af229ffcc7f9e Mon Sep 17 00:00:00 2001 From: Douglas Anderson Date: Tue, 1 Feb 2022 09:21:58 -0800 Subject: Revert "drm/panel-edp: Allow querying the detected panel via sysfs" This reverts commit 363c4c3811db330dee9ce27dd3cee6f590d44e4c. Since the point of this attribute is for a test, this should be done in debugfs, not sysfs. Let's revert and a new patch can be added later doing it in debugfs. 
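
For reference only, and purely as a sketch of the direction hinted at above (any actual follow-up may look different), a debugfs variant could reuse the same three-way report via a seq_file show routine, assuming the detected_panel bookkeeping from the reverted change is kept; the registration point is a guess.

/* Hypothetical debugfs replacement for the reverted sysfs attribute. */
static int detected_panel_show(struct seq_file *s, void *data)
{
        struct panel_edp *p = s->private;

        if (IS_ERR(p->detected_panel))
                seq_puts(s, "UNKNOWN\n");
        else if (!p->detected_panel)
                seq_puts(s, "HARDCODED\n");
        else
                seq_printf(s, "%s\n", p->detected_panel->name);

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(detected_panel);

/* e.g. from a debugfs init hook, with "root" being the panel's directory: */
debugfs_create_file("detected_panel", 0444, root, p, &detected_panel_fops);

Keeping this in debugfs leaves the stable sysfs ABI untouched while still letting tests read the detection result.
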
Signed-off-by: Douglas Anderson Reviewed-by: Javier Martinez Canillas Link: https://patchwork.freedesktop.org/patch/msgid/20220201092152.1.Ibc65ec6fa05017e9856ba9ef557310268429c3ce@changeid --- drivers/gpu/drm/panel/panel-edp.c | 39 +++++---------------------------------- 1 file changed, 5 insertions(+), 34 deletions(-) diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index 23da4040e263..a394a15dc3fb 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -222,8 +222,6 @@ struct panel_edp { struct gpio_desc *enable_gpio; struct gpio_desc *hpd_gpio; - const struct edp_panel_entry *detected_panel; - struct edid *edid; struct drm_display_mode override_mode; @@ -668,6 +666,7 @@ static const struct edp_panel_entry *find_edp_panel(u32 panel_id); static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) { + const struct edp_panel_entry *edp_panel; struct panel_desc *desc; u32 panel_id; char vend[4]; @@ -706,14 +705,14 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) } drm_edid_decode_panel_id(panel_id, vend, &product_id); - panel->detected_panel = find_edp_panel(panel_id); + edp_panel = find_edp_panel(panel_id); /* * We're using non-optimized timings and want it really obvious that * someone needs to add an entry to the table, so we'll do a WARN_ON * splat. */ - if (WARN_ON(!panel->detected_panel)) { + if (WARN_ON(!edp_panel)) { dev_warn(dev, "Unknown panel %s %#06x, using conservative timings\n", vend, product_id); @@ -735,14 +734,12 @@ static int generic_edp_panel_probe(struct device *dev, struct panel_edp *panel) */ desc->delay.unprepare = 2000; desc->delay.enable = 200; - - panel->detected_panel = ERR_PTR(-EINVAL); } else { dev_info(dev, "Detected %s %s (%#06x)\n", - vend, panel->detected_panel->name, product_id); + vend, edp_panel->name, product_id); /* Update the delay; everything else comes from EDID */ - desc->delay = *panel->detected_panel->delay; + desc->delay = *edp_panel->delay; } ret = 0; @@ -753,28 +750,6 @@ exit: return ret; } -static ssize_t detected_panel_show(struct device *dev, - struct device_attribute *attr, char *buf) -{ - struct panel_edp *p = dev_get_drvdata(dev); - - if (IS_ERR(p->detected_panel)) - return sysfs_emit(buf, "UNKNOWN\n"); - else if (!p->detected_panel) - return sysfs_emit(buf, "HARDCODED\n"); - else - return sysfs_emit(buf, "%s\n", p->detected_panel->name); -} - -static const DEVICE_ATTR_RO(detected_panel); - -static void edp_panel_remove_detected_panel(void *data) -{ - struct panel_edp *p = data; - - device_remove_file(p->base.dev, &dev_attr_detected_panel); -} - static int panel_edp_probe(struct device *dev, const struct panel_desc *desc, struct drm_dp_aux *aux) { @@ -874,10 +849,6 @@ static int panel_edp_probe(struct device *dev, const struct panel_desc *desc, drm_panel_add(&panel->base); - err = device_create_file(dev, &dev_attr_detected_panel); - if (!err) - devm_add_action_or_reset(dev, edp_panel_remove_detected_panel, panel); - return 0; err_finished_pm_runtime: -- cgit From 8cfe148a7136bc60452a5c6b7ac2d9d15c36909b Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 1 Feb 2022 13:29:23 +0000 Subject: kvm/arm64: rework guest entry logic In kvm_arch_vcpu_ioctl_run() we enter an RCU extended quiescent state (EQS) by calling guest_enter_irqoff(), and unmasked IRQs prior to exiting the EQS by calling guest_exit(). 
As the IRQ entry code will not wake RCU in this case, we may run the core IRQ code and IRQ handler without RCU watching, leading to various potential problems. Additionally, we do not inform lockdep or tracing that interrupts will be enabled during guest execution, which caan lead to misleading traces and warnings that interrupts have been enabled for overly-long periods. This patch fixes these issues by using the new timing and context entry/exit helpers to ensure that interrupts are handled during guest vtime but with RCU watching, with a sequence: guest_timing_enter_irqoff(); guest_state_enter_irqoff(); < run the vcpu > guest_state_exit_irqoff(); < take any pending IRQs > guest_timing_exit_irqoff(); Since instrumentation may make use of RCU, we must also ensure that no instrumented code is run during the EQS. I've split out the critical section into a new kvm_arm_enter_exit_vcpu() helper which is marked noinstr. Fixes: 1b3d546daf85ed2b ("arm/arm64: KVM: Properly account for guest CPU time") Reported-by: Nicolas Saenz Julienne Signed-off-by: Mark Rutland Reviewed-by: Marc Zyngier Reviewed-by: Nicolas Saenz Julienne Cc: Alexandru Elisei Cc: Catalin Marinas Cc: Frederic Weisbecker Cc: James Morse Cc: Paolo Bonzini Cc: Paul E. McKenney Cc: Suzuki K Poulose Cc: Will Deacon Message-Id: <20220201132926.3301912-3-mark.rutland@arm.com> Signed-off-by: Paolo Bonzini --- arch/arm64/kvm/arm.c | 51 +++++++++++++++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 18 deletions(-) diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c index 868109cf96b4..a069d5925f77 100644 --- a/arch/arm64/kvm/arm.c +++ b/arch/arm64/kvm/arm.c @@ -790,6 +790,24 @@ static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret) xfer_to_guest_mode_work_pending(); } +/* + * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while + * the vCPU is running. + * + * This must be noinstr as instrumentation may make use of RCU, and this is not + * safe during the EQS. + */ +static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu) +{ + int ret; + + guest_state_enter_irqoff(); + ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu); + guest_state_exit_irqoff(); + + return ret; +} + /** * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code * @vcpu: The VCPU pointer @@ -874,9 +892,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) * Enter the guest */ trace_kvm_entry(*vcpu_pc(vcpu)); - guest_enter_irqoff(); + guest_timing_enter_irqoff(); - ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu); + ret = kvm_arm_vcpu_enter_exit(vcpu); vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->stat.exits++; @@ -911,26 +929,23 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_arch_vcpu_ctxsync_fp(vcpu); /* - * We may have taken a host interrupt in HYP mode (ie - * while executing the guest). This interrupt is still - * pending, as we haven't serviced it yet! + * We must ensure that any pending interrupts are taken before + * we exit guest timing so that timer ticks are accounted as + * guest time. Transiently unmask interrupts so that any + * pending interrupts are taken. * - * We're now back in SVC mode, with interrupts - * disabled. Enabling the interrupts now will have - * the effect of taking the interrupt again, in SVC - * mode this time. + * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other + * context synchronization event) is necessary to ensure that + * pending interrupts are taken. 
*/ local_irq_enable(); + isb(); + local_irq_disable(); + + guest_timing_exit_irqoff(); + + local_irq_enable(); - /* - * We do local_irq_enable() before calling guest_exit() so - * that if a timer interrupt hits while running the guest we - * account that tick as being spent in the guest. We enable - * preemption after calling guest_exit() so that if we get - * preempted we make sure ticks after that is not counted as - * guest time. - */ - guest_exit(); trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu)); /* Exit types that need handling before we can be preempted */ -- cgit From a4b92cebc31d49b7e6ef0ce584c7f2a2e112877d Mon Sep 17 00:00:00 2001 From: Mark Brown Date: Tue, 1 Feb 2022 14:48:38 +0000 Subject: arm64: Enable Cortex-A510 erratum 2051678 by default The recently added configuration option for Cortex A510 erratum 2051678 does not have a "default y" unlike other errata fixes. This appears to simply be an oversight since the help text suggests enabling the option if unsure and there's nothing in the commit log to suggest it is intentional. Fixes: 297ae1eb23b0 ("arm64: cpufeature: List early Cortex-A510 parts as having broken dbm") Signed-off-by: Mark Brown Link: https://lore.kernel.org/r/20220201144838.20037-1-broonie@kernel.org Signed-off-by: Catalin Marinas --- arch/arm64/Kconfig | 1 + 1 file changed, 1 insertion(+) diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig index f2b5a4abef21..c7a474f71eb4 100644 --- a/arch/arm64/Kconfig +++ b/arch/arm64/Kconfig @@ -672,6 +672,7 @@ config ARM64_WORKAROUND_TRBE_OVERWRITE_FILL_MODE config ARM64_ERRATUM_2051678 bool "Cortex-A510: 2051678: disable Hardware Update of the page table dirty bit" + default y help This options adds the workaround for ARM Cortex-A510 erratum ARM64_ERRATUM_2051678. Affected Coretex-A510 might not respect the ordering rules for -- cgit From 1a2beb3d5a0b4051067ecf49ea799bee340e0e7c Mon Sep 17 00:00:00 2001 From: Christian Brauner Date: Mon, 31 Jan 2022 15:48:54 +0100 Subject: mailmap: update Christian Brauner's email address At least one of the addresses will stop functioning after February. Signed-off-by: Christian Brauner Signed-off-by: Linus Torvalds --- .mailmap | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.mailmap b/.mailmap index b76e520809d0..8cd44b0c6579 100644 --- a/.mailmap +++ b/.mailmap @@ -80,6 +80,9 @@ Chris Chiu Christian Borntraeger Christian Borntraeger Christian Borntraeger +Christian Brauner +Christian Brauner +Christian Brauner Christophe Ricard Christoph Hellwig Colin Ian King -- cgit From b7892f7d5cb2b8187c603dd8ea3a7c44059ccfc2 Mon Sep 17 00:00:00 2001 From: Jean-Philippe Brucker Date: Tue, 1 Feb 2022 09:31:20 +0000 Subject: tools: Ignore errors from `which' when searching a GCC toolchain When cross-building tools with clang, we run `which $(CROSS_COMPILE)gcc` to detect whether a GCC toolchain provides the standard libraries. It is only a helper because some distros put libraries where LLVM does not automatically find them. On other systems, LLVM detects the libc automatically and does not need this. There, it is completely fine not to have a GCC at all, but some versions of `which' display an error when the command is not found: which: no aarch64-linux-gnu-gcc in ($PATH) Since the error can safely be ignored, throw it to /dev/null. 
Fixes: cebdb7374577 ("tools: Help cross-building with clang") Reported-by: Nathan Chancellor Signed-off-by: Jean-Philippe Brucker Signed-off-by: Daniel Borkmann Tested-by: Nathan Chancellor Reviewed-by: Nathan Chancellor Link: https://lore.kernel.org/bpf/20220201093119.1713207-1-jean-philippe@linaro.org --- tools/scripts/Makefile.include | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tools/scripts/Makefile.include b/tools/scripts/Makefile.include index b0be5f40a3f1..79d102304470 100644 --- a/tools/scripts/Makefile.include +++ b/tools/scripts/Makefile.include @@ -90,7 +90,7 @@ EXTRA_WARNINGS += -Wstrict-aliasing=3 else ifneq ($(CROSS_COMPILE),) CLANG_CROSS_FLAGS := --target=$(notdir $(CROSS_COMPILE:%-=%)) -GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc)) +GCC_TOOLCHAIN_DIR := $(dir $(shell which $(CROSS_COMPILE)gcc 2>/dev/null)) ifneq ($(GCC_TOOLCHAIN_DIR),) CLANG_CROSS_FLAGS += --prefix=$(GCC_TOOLCHAIN_DIR)$(notdir $(CROSS_COMPILE)) CLANG_CROSS_FLAGS += --sysroot=$(shell $(CROSS_COMPILE)gcc -print-sysroot) -- cgit From 472c6e46f589c26057596dcba160712a5b3e02c5 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 31 Jan 2022 13:20:08 -0800 Subject: xfs: remove XFS_PREALLOC_SYNC Callers can acheive the same thing by calling xfs_log_force_inode() after making their modifications. There is no need for xfs_update_prealloc_flags() to do this. Signed-off-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_file.c | 13 +++++++------ fs/xfs/xfs_inode.h | 3 +-- fs/xfs/xfs_pnfs.c | 6 ++++-- 3 files changed, 12 insertions(+), 10 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 22ad207bedf4..ed375b3d0614 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -95,8 +95,6 @@ xfs_update_prealloc_flags( ip->i_diflags &= ~XFS_DIFLAG_PREALLOC; xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - if (flags & XFS_PREALLOC_SYNC) - xfs_trans_set_sync(tp); return xfs_trans_commit(tp); } @@ -1057,9 +1055,6 @@ xfs_file_fallocate( } } - if (file->f_flags & O_DSYNC) - flags |= XFS_PREALLOC_SYNC; - error = xfs_update_prealloc_flags(ip, flags); if (error) goto out_unlock; @@ -1082,8 +1077,14 @@ xfs_file_fallocate( * leave shifted extents past EOF and hence losing access to * the data that is contained within them. */ - if (do_file_insert) + if (do_file_insert) { error = xfs_insert_file_space(ip, offset, len); + if (error) + goto out_unlock; + } + + if (file->f_flags & O_DSYNC) + error = xfs_log_force_inode(ip); out_unlock: xfs_iunlock(ip, iolock); diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index c447bf04205a..3fc6d77f5be9 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -465,8 +465,7 @@ xfs_itruncate_extents( enum xfs_prealloc_flags { XFS_PREALLOC_SET = (1 << 1), XFS_PREALLOC_CLEAR = (1 << 2), - XFS_PREALLOC_SYNC = (1 << 3), - XFS_PREALLOC_INVISIBLE = (1 << 4), + XFS_PREALLOC_INVISIBLE = (1 << 3), }; int xfs_update_prealloc_flags(struct xfs_inode *ip, diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index d6334abbc0b3..ce6d66f20385 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -164,10 +164,12 @@ xfs_fs_map_blocks( * that the blocks allocated and handed out to the client are * guaranteed to be present even after a server crash. 
*/ - error = xfs_update_prealloc_flags(ip, - XFS_PREALLOC_SET | XFS_PREALLOC_SYNC); + error = xfs_update_prealloc_flags(ip, XFS_PREALLOC_SET); + if (!error) + error = xfs_log_force_inode(ip); if (error) goto out_unlock; + } else { xfs_iunlock(ip, lock_flags); } -- cgit From fbe7e520036583a783b13ff9744e35c2a329d9a4 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 31 Jan 2022 13:20:09 -0800 Subject: xfs: fallocate() should call file_modified() In XFS, we always update the inode change and modification time when any fallocate() operation succeeds. Furthermore, as various fallocate modes can change the file contents (extending EOF, punching holes, zeroing things, shifting extents), we should drop file privileges like suid just like we do for a regular write(). There's already a VFS helper that figures all this out for us, so use that. The net effect of this is that we no longer drop suid/sgid if the caller is root, but we also now drop file capabilities. We also move the xfs_update_prealloc_flags() function so that it now is only called by the scope that needs to set the the prealloc flag. Based on a patch from Darrick Wong. Signed-off-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_file.c | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index ed375b3d0614..7846d55cba01 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -953,6 +953,10 @@ xfs_file_fallocate( goto out_unlock; } + error = file_modified(file); + if (error) + goto out_unlock; + if (mode & FALLOC_FL_PUNCH_HOLE) { error = xfs_free_file_space(ip, offset, len); if (error) @@ -1053,11 +1057,12 @@ xfs_file_fallocate( if (error) goto out_unlock; } - } - error = xfs_update_prealloc_flags(ip, flags); - if (error) - goto out_unlock; + error = xfs_update_prealloc_flags(ip, XFS_PREALLOC_SET); + if (error) + goto out_unlock; + + } /* Change file size if needed */ if (new_size) { -- cgit From 0b02c8c0d75a738c98c35f02efb36217c170d78c Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 31 Jan 2022 13:20:09 -0800 Subject: xfs: set prealloc flag in xfs_alloc_file_space() Now that we only call xfs_update_prealloc_flags() from xfs_file_fallocate() in the case where we need to set the preallocation flag, do this in xfs_alloc_file_space() where we already have the inode joined into a transaction and get rid of the call to xfs_update_prealloc_flags() from the fallocate code. This also means that we now correctly avoid setting the XFS_DIFLAG_PREALLOC flag when xfs_is_always_cow_inode() is true, as these inodes will never have preallocated extents. Signed-off-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_bmap_util.c | 9 +++------ fs/xfs/xfs_file.c | 8 -------- 2 files changed, 3 insertions(+), 14 deletions(-) diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c index d4a387d3d0ce..eb2e387ba528 100644 --- a/fs/xfs/xfs_bmap_util.c +++ b/fs/xfs/xfs_bmap_util.c @@ -850,9 +850,6 @@ xfs_alloc_file_space( rblocks = 0; } - /* - * Allocate and setup the transaction. 
- */ error = xfs_trans_alloc_inode(ip, &M_RES(mp)->tr_write, dblocks, rblocks, false, &tp); if (error) @@ -869,9 +866,9 @@ xfs_alloc_file_space( if (error) goto error; - /* - * Complete the transaction - */ + ip->i_diflags |= XFS_DIFLAG_PREALLOC; + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + error = xfs_trans_commit(tp); xfs_iunlock(ip, XFS_ILOCK_EXCL); if (error) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 7846d55cba01..082e3ef81418 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -908,7 +908,6 @@ xfs_file_fallocate( struct inode *inode = file_inode(file); struct xfs_inode *ip = XFS_I(inode); long error; - enum xfs_prealloc_flags flags = 0; uint iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL; loff_t new_size = 0; bool do_file_insert = false; @@ -1006,8 +1005,6 @@ xfs_file_fallocate( } do_file_insert = true; } else { - flags |= XFS_PREALLOC_SET; - if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > i_size_read(inode)) { new_size = offset + len; @@ -1057,11 +1054,6 @@ xfs_file_fallocate( if (error) goto out_unlock; } - - error = xfs_update_prealloc_flags(ip, XFS_PREALLOC_SET); - if (error) - goto out_unlock; - } /* Change file size if needed */ -- cgit From b39a04636fd7454911b80e7b5ab2a66b011a8145 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 31 Jan 2022 13:20:10 -0800 Subject: xfs: move xfs_update_prealloc_flags() to xfs_pnfs.c The operations that xfs_update_prealloc_flags() perform are now unique to xfs_fs_map_blocks(), so move xfs_update_prealloc_flags() to be a static function in xfs_pnfs.c and cut out all the other functionality that is doesn't use anymore. Signed-off-by: Dave Chinner Reviewed-by: Darrick J. Wong Signed-off-by: Darrick J. Wong --- fs/xfs/xfs_file.c | 32 -------------------------------- fs/xfs/xfs_inode.h | 8 -------- fs/xfs/xfs_pnfs.c | 38 ++++++++++++++++++++++++++++++++++++-- 3 files changed, 36 insertions(+), 42 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index 082e3ef81418..cecc5dedddff 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -66,38 +66,6 @@ xfs_is_falloc_aligned( return !((pos | len) & mask); } -int -xfs_update_prealloc_flags( - struct xfs_inode *ip, - enum xfs_prealloc_flags flags) -{ - struct xfs_trans *tp; - int error; - - error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid, - 0, 0, 0, &tp); - if (error) - return error; - - xfs_ilock(ip, XFS_ILOCK_EXCL); - xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); - - if (!(flags & XFS_PREALLOC_INVISIBLE)) { - VFS_I(ip)->i_mode &= ~S_ISUID; - if (VFS_I(ip)->i_mode & S_IXGRP) - VFS_I(ip)->i_mode &= ~S_ISGID; - xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); - } - - if (flags & XFS_PREALLOC_SET) - ip->i_diflags |= XFS_DIFLAG_PREALLOC; - if (flags & XFS_PREALLOC_CLEAR) - ip->i_diflags &= ~XFS_DIFLAG_PREALLOC; - - xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); - return xfs_trans_commit(tp); -} - /* * Fsync operations on directories are much simpler than on regular files, * as there is no file data to flush, and thus also no need for explicit diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h index 3fc6d77f5be9..b7e8f14d9fca 100644 --- a/fs/xfs/xfs_inode.h +++ b/fs/xfs/xfs_inode.h @@ -462,14 +462,6 @@ xfs_itruncate_extents( } /* from xfs_file.c */ -enum xfs_prealloc_flags { - XFS_PREALLOC_SET = (1 << 1), - XFS_PREALLOC_CLEAR = (1 << 2), - XFS_PREALLOC_INVISIBLE = (1 << 3), -}; - -int xfs_update_prealloc_flags(struct xfs_inode *ip, - enum xfs_prealloc_flags flags); int xfs_break_layouts(struct inode *inode, uint *iolock, enum 
layout_break_reason reason); diff --git a/fs/xfs/xfs_pnfs.c b/fs/xfs/xfs_pnfs.c index ce6d66f20385..4abe17312c2b 100644 --- a/fs/xfs/xfs_pnfs.c +++ b/fs/xfs/xfs_pnfs.c @@ -70,6 +70,40 @@ xfs_fs_get_uuid( return 0; } +/* + * We cannot use file based VFS helpers such as file_modified() to update + * inode state as we modify the data/metadata in the inode here. Hence we have + * to open code the timestamp updates and SUID/SGID stripping. We also need + * to set the inode prealloc flag to ensure that the extents we allocate are not + * removed if the inode is reclaimed from memory before xfs_fs_block_commit() + * is from the client to indicate that data has been written and the file size + * can be extended. + */ +static int +xfs_fs_map_update_inode( + struct xfs_inode *ip) +{ + struct xfs_trans *tp; + int error; + + error = xfs_trans_alloc(ip->i_mount, &M_RES(ip->i_mount)->tr_writeid, + 0, 0, 0, &tp); + if (error) + return error; + + xfs_ilock(ip, XFS_ILOCK_EXCL); + xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL); + + VFS_I(ip)->i_mode &= ~S_ISUID; + if (VFS_I(ip)->i_mode & S_IXGRP) + VFS_I(ip)->i_mode &= ~S_ISGID; + xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG); + ip->i_diflags |= XFS_DIFLAG_PREALLOC; + + xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE); + return xfs_trans_commit(tp); +} + /* * Get a layout for the pNFS client. */ @@ -164,7 +198,7 @@ xfs_fs_map_blocks( * that the blocks allocated and handed out to the client are * guaranteed to be present even after a server crash. */ - error = xfs_update_prealloc_flags(ip, XFS_PREALLOC_SET); + error = xfs_fs_map_update_inode(ip); if (!error) error = xfs_log_force_inode(ip); if (error) @@ -257,7 +291,7 @@ xfs_fs_commit_blocks( length = end - start; if (!length) continue; - + /* * Make sure reads through the pagecache see the new data. */ -- cgit From cea267c235e1b1ec3bfc415f6bd420289bcb3bc9 Mon Sep 17 00:00:00 2001 From: Dave Chinner Date: Mon, 31 Jan 2022 13:20:10 -0800 Subject: xfs: ensure log flush at the end of a synchronous fallocate call Since we've started treating fallocate more like a file write, we should flush the log to disk if the user has asked for synchronous writes either by setting it via fcntl flags, or inode flags, or with the sync mount option. We've already got a helper for this, so use it. [The original patch by Darrick was massaged by Dave to fit this patchset] Signed-off-by: Darrick J. Wong Signed-off-by: Dave Chinner Reviewed-by: Darrick J. Wong --- fs/xfs/xfs_file.c | 32 ++++++++++++++++---------------- 1 file changed, 16 insertions(+), 16 deletions(-) diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c index cecc5dedddff..5bddb1e9e0b3 100644 --- a/fs/xfs/xfs_file.c +++ b/fs/xfs/xfs_file.c @@ -861,6 +861,21 @@ xfs_break_layouts( return error; } +/* Does this file, inode, or mount want synchronous writes? */ +static inline bool xfs_file_sync_writes(struct file *filp) +{ + struct xfs_inode *ip = XFS_I(file_inode(filp)); + + if (xfs_has_wsync(ip->i_mount)) + return true; + if (filp->f_flags & (__O_SYNC | O_DSYNC)) + return true; + if (IS_SYNC(file_inode(filp))) + return true; + + return false; +} + #define XFS_FALLOC_FL_SUPPORTED \ (FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE | \ FALLOC_FL_COLLAPSE_RANGE | FALLOC_FL_ZERO_RANGE | \ @@ -1048,7 +1063,7 @@ xfs_file_fallocate( goto out_unlock; } - if (file->f_flags & O_DSYNC) + if (xfs_file_sync_writes(file)) error = xfs_log_force_inode(ip); out_unlock: @@ -1081,21 +1096,6 @@ xfs_file_fadvise( return ret; } -/* Does this file, inode, or mount want synchronous writes? 
*/ -static inline bool xfs_file_sync_writes(struct file *filp) -{ - struct xfs_inode *ip = XFS_I(file_inode(filp)); - - if (xfs_has_wsync(ip->i_mount)) - return true; - if (filp->f_flags & (__O_SYNC | O_DSYNC)) - return true; - if (IS_SYNC(file_inode(filp))) - return true; - - return false; -} - STATIC loff_t xfs_file_remap_range( struct file *file_in, -- cgit From 6dde7acdb3dc2e0b3bcb090aac0b3699396d309f Mon Sep 17 00:00:00 2001 From: Jakub Kicinski Date: Mon, 31 Jan 2022 13:17:30 -0800 Subject: ethernet: smc911x: fix indentation in get/set EEPROM Build bot produced a smatch indentation warning, the code looks correct but it mixes spaces and tabs. Reported-by: kernel test robot Link: https://lore.kernel.org/r/20220131211730.3940875-1-kuba@kernel.org Signed-off-by: Jakub Kicinski --- drivers/net/ethernet/smsc/smc911x.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/smsc/smc911x.c b/drivers/net/ethernet/smsc/smc911x.c index dd6f69ced4ee..fc9cef9dcefc 100644 --- a/drivers/net/ethernet/smsc/smc911x.c +++ b/drivers/net/ethernet/smsc/smc911x.c @@ -1648,7 +1648,7 @@ static int smc911x_ethtool_geteeprom(struct net_device *dev, return ret; if ((ret=smc911x_ethtool_read_eeprom_byte(dev, &eebuf[i]))!=0) return ret; - } + } memcpy(data, eebuf+eeprom->offset, eeprom->len); return 0; } @@ -1667,11 +1667,11 @@ static int smc911x_ethtool_seteeprom(struct net_device *dev, return ret; /* write byte */ if ((ret=smc911x_ethtool_write_eeprom_byte(dev, *data))!=0) - return ret; + return ret; if ((ret=smc911x_ethtool_write_eeprom_cmd(dev, E2P_CMD_EPC_CMD_WRITE_, i ))!=0) return ret; - } - return 0; + } + return 0; } static int smc911x_ethtool_geteeprom_len(struct net_device *dev) -- cgit From 04c2a47ffb13c29778e2a14e414ad4cb5a5db4b5 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 31 Jan 2022 09:20:18 -0800 Subject: net: sched: fix use-after-free in tc_new_tfilter() Whenever tc_new_tfilter() jumps back to replay: label, we need to make sure @q and @chain local variables are cleared again, or risk use-after-free as in [1] For consistency, apply the same fix in tc_ctl_chain() BUG: KASAN: use-after-free in mini_qdisc_pair_swap+0x1b9/0x1f0 net/sched/sch_generic.c:1581 Write of size 8 at addr ffff8880985c4b08 by task syz-executor.4/1945 CPU: 0 PID: 1945 Comm: syz-executor.4 Not tainted 5.17.0-rc1-syzkaller-00495-gff58831fa02d #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Call Trace: __dump_stack lib/dump_stack.c:88 [inline] dump_stack_lvl+0xcd/0x134 lib/dump_stack.c:106 print_address_description.constprop.0.cold+0x8d/0x336 mm/kasan/report.c:255 __kasan_report mm/kasan/report.c:442 [inline] kasan_report.cold+0x83/0xdf mm/kasan/report.c:459 mini_qdisc_pair_swap+0x1b9/0x1f0 net/sched/sch_generic.c:1581 tcf_chain_head_change_item net/sched/cls_api.c:372 [inline] tcf_chain0_head_change.isra.0+0xb9/0x120 net/sched/cls_api.c:386 tcf_chain_tp_insert net/sched/cls_api.c:1657 [inline] tcf_chain_tp_insert_unique net/sched/cls_api.c:1707 [inline] tc_new_tfilter+0x1e67/0x2350 net/sched/cls_api.c:2086 rtnetlink_rcv_msg+0x80d/0xb80 net/core/rtnetlink.c:5583 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:705 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:725 ____sys_sendmsg+0x331/0x810 net/socket.c:2413 
___sys_sendmsg+0xf3/0x170 net/socket.c:2467 __sys_sendmmsg+0x195/0x470 net/socket.c:2553 __do_sys_sendmmsg net/socket.c:2582 [inline] __se_sys_sendmmsg net/socket.c:2579 [inline] __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae RIP: 0033:0x7f2647172059 Code: ff ff c3 66 2e 0f 1f 84 00 00 00 00 00 0f 1f 40 00 48 89 f8 48 89 f7 48 89 d6 48 89 ca 4d 89 c2 4d 89 c8 4c 8b 4c 24 08 0f 05 <48> 3d 01 f0 ff ff 73 01 c3 48 c7 c1 b8 ff ff ff f7 d8 64 89 01 48 RSP: 002b:00007f2645aa5168 EFLAGS: 00000246 ORIG_RAX: 0000000000000133 RAX: ffffffffffffffda RBX: 00007f2647285100 RCX: 00007f2647172059 RDX: 040000000000009f RSI: 00000000200002c0 RDI: 0000000000000006 RBP: 00007f26471cc08d R08: 0000000000000000 R09: 0000000000000000 R10: 9e00000000000000 R11: 0000000000000246 R12: 0000000000000000 R13: 00007fffb3f7f02f R14: 00007f2645aa5300 R15: 0000000000022000 Allocated by task 1944: kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38 kasan_set_track mm/kasan/common.c:45 [inline] set_alloc_info mm/kasan/common.c:436 [inline] ____kasan_kmalloc mm/kasan/common.c:515 [inline] ____kasan_kmalloc mm/kasan/common.c:474 [inline] __kasan_kmalloc+0xa9/0xd0 mm/kasan/common.c:524 kmalloc_node include/linux/slab.h:604 [inline] kzalloc_node include/linux/slab.h:726 [inline] qdisc_alloc+0xac/0xa10 net/sched/sch_generic.c:941 qdisc_create.constprop.0+0xce/0x10f0 net/sched/sch_api.c:1211 tc_modify_qdisc+0x4c5/0x1980 net/sched/sch_api.c:1660 rtnetlink_rcv_msg+0x413/0xb80 net/core/rtnetlink.c:5592 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:705 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:725 ____sys_sendmsg+0x331/0x810 net/socket.c:2413 ___sys_sendmsg+0xf3/0x170 net/socket.c:2467 __sys_sendmmsg+0x195/0x470 net/socket.c:2553 __do_sys_sendmmsg net/socket.c:2582 [inline] __se_sys_sendmmsg net/socket.c:2579 [inline] __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Freed by task 3609: kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38 kasan_set_track+0x21/0x30 mm/kasan/common.c:45 kasan_set_free_info+0x20/0x30 mm/kasan/generic.c:370 ____kasan_slab_free mm/kasan/common.c:366 [inline] ____kasan_slab_free+0x130/0x160 mm/kasan/common.c:328 kasan_slab_free include/linux/kasan.h:236 [inline] slab_free_hook mm/slub.c:1728 [inline] slab_free_freelist_hook+0x8b/0x1c0 mm/slub.c:1754 slab_free mm/slub.c:3509 [inline] kfree+0xcb/0x280 mm/slub.c:4562 rcu_do_batch kernel/rcu/tree.c:2527 [inline] rcu_core+0x7b8/0x1540 kernel/rcu/tree.c:2778 __do_softirq+0x29b/0x9c2 kernel/softirq.c:558 Last potentially related work creation: kasan_save_stack+0x1e/0x40 mm/kasan/common.c:38 __kasan_record_aux_stack+0xbe/0xd0 mm/kasan/generic.c:348 __call_rcu kernel/rcu/tree.c:3026 [inline] call_rcu+0xb1/0x740 kernel/rcu/tree.c:3106 qdisc_put_unlocked+0x6f/0x90 net/sched/sch_generic.c:1109 tcf_block_release+0x86/0x90 net/sched/cls_api.c:1238 tc_new_tfilter+0xc0d/0x2350 net/sched/cls_api.c:2148 rtnetlink_rcv_msg+0x80d/0xb80 net/core/rtnetlink.c:5583 netlink_rcv_skb+0x153/0x420 net/netlink/af_netlink.c:2494 netlink_unicast_kernel net/netlink/af_netlink.c:1317 [inline] 
netlink_unicast+0x539/0x7e0 net/netlink/af_netlink.c:1343 netlink_sendmsg+0x904/0xe00 net/netlink/af_netlink.c:1919 sock_sendmsg_nosec net/socket.c:705 [inline] sock_sendmsg+0xcf/0x120 net/socket.c:725 ____sys_sendmsg+0x331/0x810 net/socket.c:2413 ___sys_sendmsg+0xf3/0x170 net/socket.c:2467 __sys_sendmmsg+0x195/0x470 net/socket.c:2553 __do_sys_sendmmsg net/socket.c:2582 [inline] __se_sys_sendmmsg net/socket.c:2579 [inline] __x64_sys_sendmmsg+0x99/0x100 net/socket.c:2579 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae The buggy address belongs to the object at ffff8880985c4800 which belongs to the cache kmalloc-1k of size 1024 The buggy address is located 776 bytes inside of 1024-byte region [ffff8880985c4800, ffff8880985c4c00) The buggy address belongs to the page: page:ffffea0002617000 refcount:1 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x985c0 head:ffffea0002617000 order:3 compound_mapcount:0 compound_pincount:0 flags: 0xfff00000010200(slab|head|node=0|zone=1|lastcpupid=0x7ff) raw: 00fff00000010200 0000000000000000 dead000000000122 ffff888010c41dc0 raw: 0000000000000000 0000000000100010 00000001ffffffff 0000000000000000 page dumped because: kasan: bad access detected page_owner tracks the page as allocated page last allocated via order 3, migratetype Unmovable, gfp_mask 0x1d20c0(__GFP_IO|__GFP_FS|__GFP_NOWARN|__GFP_NORETRY|__GFP_COMP|__GFP_NOMEMALLOC|__GFP_HARDWALL), pid 1941, ts 1038999441284, free_ts 1033444432829 prep_new_page mm/page_alloc.c:2434 [inline] get_page_from_freelist+0xa72/0x2f50 mm/page_alloc.c:4165 __alloc_pages+0x1b2/0x500 mm/page_alloc.c:5389 alloc_pages+0x1aa/0x310 mm/mempolicy.c:2271 alloc_slab_page mm/slub.c:1799 [inline] allocate_slab mm/slub.c:1944 [inline] new_slab+0x28a/0x3b0 mm/slub.c:2004 ___slab_alloc+0x87c/0xe90 mm/slub.c:3018 __slab_alloc.constprop.0+0x4d/0xa0 mm/slub.c:3105 slab_alloc_node mm/slub.c:3196 [inline] slab_alloc mm/slub.c:3238 [inline] __kmalloc+0x2fb/0x340 mm/slub.c:4420 kmalloc include/linux/slab.h:586 [inline] kzalloc include/linux/slab.h:715 [inline] __register_sysctl_table+0x112/0x1090 fs/proc/proc_sysctl.c:1335 neigh_sysctl_register+0x2c8/0x5e0 net/core/neighbour.c:3787 devinet_sysctl_register+0xb1/0x230 net/ipv4/devinet.c:2618 inetdev_init+0x286/0x580 net/ipv4/devinet.c:278 inetdev_event+0xa8a/0x15d0 net/ipv4/devinet.c:1532 notifier_call_chain+0xb5/0x200 kernel/notifier.c:84 call_netdevice_notifiers_info+0xb5/0x130 net/core/dev.c:1919 call_netdevice_notifiers_extack net/core/dev.c:1931 [inline] call_netdevice_notifiers net/core/dev.c:1945 [inline] register_netdevice+0x1073/0x1500 net/core/dev.c:9698 veth_newlink+0x59c/0xa90 drivers/net/veth.c:1722 page last free stack trace: reset_page_owner include/linux/page_owner.h:24 [inline] free_pages_prepare mm/page_alloc.c:1352 [inline] free_pcp_prepare+0x374/0x870 mm/page_alloc.c:1404 free_unref_page_prepare mm/page_alloc.c:3325 [inline] free_unref_page+0x19/0x690 mm/page_alloc.c:3404 release_pages+0x748/0x1220 mm/swap.c:956 tlb_batch_pages_flush mm/mmu_gather.c:50 [inline] tlb_flush_mmu_free mm/mmu_gather.c:243 [inline] tlb_flush_mmu+0xe9/0x6b0 mm/mmu_gather.c:250 zap_pte_range mm/memory.c:1441 [inline] zap_pmd_range mm/memory.c:1490 [inline] zap_pud_range mm/memory.c:1519 [inline] zap_p4d_range mm/memory.c:1540 [inline] unmap_page_range+0x1d1d/0x2a30 mm/memory.c:1561 unmap_single_vma+0x198/0x310 mm/memory.c:1606 unmap_vmas+0x16b/0x2f0 mm/memory.c:1638 exit_mmap+0x201/0x670 mm/mmap.c:3178 
__mmput+0x122/0x4b0 kernel/fork.c:1114 mmput+0x56/0x60 kernel/fork.c:1135 exit_mm kernel/exit.c:507 [inline] do_exit+0xa3c/0x2a30 kernel/exit.c:793 do_group_exit+0xd2/0x2f0 kernel/exit.c:935 __do_sys_exit_group kernel/exit.c:946 [inline] __se_sys_exit_group kernel/exit.c:944 [inline] __x64_sys_exit_group+0x3a/0x50 kernel/exit.c:944 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x35/0xb0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae Memory state around the buggy address: ffff8880985c4a00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff8880985c4a80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb >ffff8880985c4b00: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ^ ffff8880985c4b80: fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb fb ffff8880985c4c00: fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc fc Fixes: 470502de5bdb ("net: sched: unlock rules update API") Signed-off-by: Eric Dumazet Cc: Vlad Buslov Cc: Jiri Pirko Cc: Cong Wang Reported-by: syzbot Link: https://lore.kernel.org/r/20220131172018.3704490-1-eric.dumazet@gmail.com Signed-off-by: Jakub Kicinski --- net/sched/cls_api.c | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c index d4e27c679123..5f0f346b576f 100644 --- a/net/sched/cls_api.c +++ b/net/sched/cls_api.c @@ -1945,9 +1945,9 @@ static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n, bool prio_allocate; u32 parent; u32 chain_index; - struct Qdisc *q = NULL; + struct Qdisc *q; struct tcf_chain_info chain_info; - struct tcf_chain *chain = NULL; + struct tcf_chain *chain; struct tcf_block *block; struct tcf_proto *tp; unsigned long cl; @@ -1976,6 +1976,8 @@ replay: tp = NULL; cl = 0; block = NULL; + q = NULL; + chain = NULL; flags = 0; if (prio == 0) { @@ -2798,8 +2800,8 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, struct tcmsg *t; u32 parent; u32 chain_index; - struct Qdisc *q = NULL; - struct tcf_chain *chain = NULL; + struct Qdisc *q; + struct tcf_chain *chain; struct tcf_block *block; unsigned long cl; int err; @@ -2809,6 +2811,7 @@ static int tc_ctl_chain(struct sk_buff *skb, struct nlmsghdr *n, return -EPERM; replay: + q = NULL; err = nlmsg_parse_deprecated(n, sizeof(*t), tca, TCA_MAX, rtm_tca_policy, extack); if (err < 0) -- cgit From c6f6f2444bdbe0079e41914a35081530d0409963 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 31 Jan 2022 17:21:06 -0800 Subject: rtnetlink: make sure to refresh master_dev/m_ops in __rtnl_newlink() While looking at one unrelated syzbot bug, I found the replay logic in __rtnl_newlink() to potentially trigger use-after-free. It is better to clear master_dev and m_ops inside the loop, in case we have to replay it. 
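One possible sequence (a hedged illustration, not taken from the commit itself):

  pass 1:  dev is found, master_dev = netdev_master_upper_dev_get(dev) and
           m_ops = master_dev->rtnl_link_ops are derived from it; the handler
           then drops rtnl for request_module() and jumps back to replay
  window:  the master device is unregistered and its memory freed
  pass 2:  the lookup no longer yields a master, but master_dev/m_ops still
           hold the pass-1 pointers because they were only initialised before
           the replay label, and they can still be dereferenced later in the
           function

Re-deriving both pointers at the top of every pass, as the hunk below does,
closes that window.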
Fixes: ba7d49b1f0f8 ("rtnetlink: provide api for getting and setting slave info") Signed-off-by: Eric Dumazet Cc: Jiri Pirko Link: https://lore.kernel.org/r/20220201012106.216495-1-eric.dumazet@gmail.com Signed-off-by: Jakub Kicinski --- net/core/rtnetlink.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index e476403231f0..710da8a36729 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -3275,8 +3275,8 @@ static int __rtnl_newlink(struct sk_buff *skb, struct nlmsghdr *nlh, struct nlattr *slave_attr[RTNL_SLAVE_MAX_TYPE + 1]; unsigned char name_assign_type = NET_NAME_USER; struct nlattr *linkinfo[IFLA_INFO_MAX + 1]; - const struct rtnl_link_ops *m_ops = NULL; - struct net_device *master_dev = NULL; + const struct rtnl_link_ops *m_ops; + struct net_device *master_dev; struct net *net = sock_net(skb->sk); const struct rtnl_link_ops *ops; struct nlattr *tb[IFLA_MAX + 1]; @@ -3314,6 +3314,8 @@ replay: else dev = NULL; + master_dev = NULL; + m_ops = NULL; if (dev) { master_dev = netdev_master_upper_dev_get(dev); if (master_dev) -- cgit From e42e70ad6ae2ae511a6143d2e8da929366e58bd9 Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 31 Jan 2022 18:23:58 -0800 Subject: af_packet: fix data-race in packet_setsockopt / packet_setsockopt When packet_setsockopt( PACKET_FANOUT_DATA ) reads po->fanout, no lock is held, meaning that another thread can change po->fanout. Given that po->fanout can only be set once during the socket lifetime (it is only cleared from fanout_release()), we can use READ_ONCE()/WRITE_ONCE() to document the race. BUG: KCSAN: data-race in packet_setsockopt / packet_setsockopt write to 0xffff88813ae8e300 of 8 bytes by task 14653 on cpu 0: fanout_add net/packet/af_packet.c:1791 [inline] packet_setsockopt+0x22fe/0x24a0 net/packet/af_packet.c:3931 __sys_setsockopt+0x209/0x2a0 net/socket.c:2180 __do_sys_setsockopt net/socket.c:2191 [inline] __se_sys_setsockopt net/socket.c:2188 [inline] __x64_sys_setsockopt+0x62/0x70 net/socket.c:2188 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x44/0xd0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae read to 0xffff88813ae8e300 of 8 bytes by task 14654 on cpu 1: packet_setsockopt+0x691/0x24a0 net/packet/af_packet.c:3935 __sys_setsockopt+0x209/0x2a0 net/socket.c:2180 __do_sys_setsockopt net/socket.c:2191 [inline] __se_sys_setsockopt net/socket.c:2188 [inline] __x64_sys_setsockopt+0x62/0x70 net/socket.c:2188 do_syscall_x64 arch/x86/entry/common.c:50 [inline] do_syscall_64+0x44/0xd0 arch/x86/entry/common.c:80 entry_SYSCALL_64_after_hwframe+0x44/0xae value changed: 0x0000000000000000 -> 0xffff888106f8c000 Reported by Kernel Concurrency Sanitizer on: CPU: 1 PID: 14654 Comm: syz-executor.3 Not tainted 5.16.0-syzkaller #0 Hardware name: Google Google Compute Engine/Google Compute Engine, BIOS Google 01/01/2011 Fixes: 47dceb8ecdc1 ("packet: add classic BPF fanout mode") Signed-off-by: Eric Dumazet Cc: Willem de Bruijn Reported-by: syzbot Link: https://lore.kernel.org/r/20220201022358.330621-1-eric.dumazet@gmail.com Signed-off-by: Jakub Kicinski --- net/packet/af_packet.c | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 85ea7ddb48db..ab87f22cc7ec 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1789,7 +1789,10 @@ static int fanout_add(struct sock *sk, struct fanout_args *args) err = -ENOSPC; if (refcount_read(&match->sk_ref) < 
match->max_num_members) { __dev_remove_pack(&po->prot_hook); - po->fanout = match; + + /* Paired with packet_setsockopt(PACKET_FANOUT_DATA) */ + WRITE_ONCE(po->fanout, match); + po->rollover = rollover; rollover = NULL; refcount_set(&match->sk_ref, refcount_read(&match->sk_ref) + 1); @@ -3934,7 +3937,8 @@ packet_setsockopt(struct socket *sock, int level, int optname, sockptr_t optval, } case PACKET_FANOUT_DATA: { - if (!po->fanout) + /* Paired with the WRITE_ONCE() in fanout_add() */ + if (!READ_ONCE(po->fanout)) return -EINVAL; return fanout_set_data(po, optval, optlen); -- cgit From 479f5547239d970d3833f15f54a6481fffdb91ec Mon Sep 17 00:00:00 2001 From: Eric Dumazet Date: Mon, 31 Jan 2022 22:52:54 -0800 Subject: tcp: fix mem under-charging with zerocopy sendmsg() We got reports of following warning in inet_sock_destruct() WARN_ON(sk_forward_alloc_get(sk)); Whenever we add a non zero-copy fragment to a pure zerocopy skb, we have to anticipate that whole skb->truesize will be uncharged when skb is finally freed. skb->data_len is the payload length. But the memory truesize estimated by __zerocopy_sg_from_iter() is page aligned. Fixes: 9b65b17db723 ("net: avoid double accounting for pure zerocopy skbs") Signed-off-by: Eric Dumazet Cc: Talal Ahmad Cc: Arjun Roy Cc: Willem de Bruijn Acked-by: Soheil Hassas Yeganeh Link: https://lore.kernel.org/r/20220201065254.680532-1-eric.dumazet@gmail.com Signed-off-by: Jakub Kicinski --- net/ipv4/tcp.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 78e81465f5f3..bdf108f544a4 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -1322,10 +1322,13 @@ new_segment: /* skb changing from pure zc to mixed, must charge zc */ if (unlikely(skb_zcopy_pure(skb))) { - if (!sk_wmem_schedule(sk, skb->data_len)) + u32 extra = skb->truesize - + SKB_TRUESIZE(skb_end_offset(skb)); + + if (!sk_wmem_schedule(sk, extra)) goto wait_for_space; - sk_mem_charge(sk, skb->data_len); + sk_mem_charge(sk, extra); skb_shinfo(skb)->flags &= ~SKBFL_PURE_ZEROCOPY; } -- cgit From 63e4b45c82ed1bde979da7052229a4229ce9cabf Mon Sep 17 00:00:00 2001 From: Georgi Valkov Date: Tue, 1 Feb 2022 08:16:18 +0100 Subject: ipheth: fix EOVERFLOW in ipheth_rcvbulk_callback When rx_buf is allocated we need to account for IPHETH_IP_ALIGN, which reduces the usable size by 2 bytes. Otherwise we have 1512 bytes usable instead of 1514, and if we receive more than 1512 bytes, ipheth_rcvbulk_callback is called with status -EOVERFLOW, after which the driver malfunctiones and all communication stops. 
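Worked out with the sizes stated above (taking IPHETH_BUF_SIZE as the 1514
bytes of a full Ethernet frame and IPHETH_IP_ALIGN as 2; an illustration, not
text from the patch):

  before: rx URB buffer  = IPHETH_BUF_SIZE                   = 1514 bytes
          usable payload = IPHETH_BUF_SIZE - IPHETH_IP_ALIGN = 1512 bytes
          frames of 1513..1514 bytes complete with -EOVERFLOW (-75)
  after:  rx URB buffer  = IPHETH_BUF_SIZE + IPHETH_IP_ALIGN = 1516 bytes
          usable payload = 1514 bytes, so full-size frames fit again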
Resolves ipheth 2-1:4.2: ipheth_rcvbulk_callback: urb status: -75 Fixes: f33d9e2b48a3 ("usbnet: ipheth: fix connectivity with iOS 14") Signed-off-by: Georgi Valkov Tested-by: Jan Kiszka Link: https://lore.kernel.org/all/B60B8A4B-92A0-49B3-805D-809A2433B46C@abv.bg/ Link: https://lore.kernel.org/all/24851bd2769434a5fc24730dce8e8a984c5a4505.1643699778.git.jan.kiszka@siemens.com/ Signed-off-by: Jakub Kicinski --- drivers/net/usb/ipheth.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c index cd33955df0b6..6a769df0b421 100644 --- a/drivers/net/usb/ipheth.c +++ b/drivers/net/usb/ipheth.c @@ -121,7 +121,7 @@ static int ipheth_alloc_urbs(struct ipheth_device *iphone) if (tx_buf == NULL) goto free_rx_urb; - rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE, + rx_buf = usb_alloc_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, GFP_KERNEL, &rx_urb->transfer_dma); if (rx_buf == NULL) goto free_tx_buf; @@ -146,7 +146,7 @@ error_nomem: static void ipheth_free_urbs(struct ipheth_device *iphone) { - usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->rx_buf, + usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, iphone->rx_buf, iphone->rx_urb->transfer_dma); usb_free_coherent(iphone->udev, IPHETH_BUF_SIZE, iphone->tx_buf, iphone->tx_urb->transfer_dma); @@ -317,7 +317,7 @@ static int ipheth_rx_submit(struct ipheth_device *dev, gfp_t mem_flags) usb_fill_bulk_urb(dev->rx_urb, udev, usb_rcvbulkpipe(udev, dev->bulk_in), - dev->rx_buf, IPHETH_BUF_SIZE, + dev->rx_buf, IPHETH_BUF_SIZE + IPHETH_IP_ALIGN, ipheth_rcvbulk_callback, dev); dev->rx_urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; -- cgit From d0cfa548dbde354de986911d3913897b5448faad Mon Sep 17 00:00:00 2001 From: Lior Nahmanson Date: Sun, 30 Jan 2022 13:37:52 +0200 Subject: net: macsec: Verify that send_sci is on when setting Tx sci explicitly When setting Tx sci explicit, the Rx side is expected to use this sci and not recalculate it from the packet.However, in case of Tx sci is explicit and send_sci is off, the receiver is wrongly recalculate the sci from the source MAC address which most likely be different than the explicit sci. Fix by preventing such configuration when macsec newlink is established and return EINVAL error code on such cases. 
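Condensed, the check added below accepts or rejects the netlink request as
follows (a summary of the hunk, not additional semantics):

  IFLA_MACSEC_SCI and IFLA_MACSEC_INC_SCI both given, INC_SCI == 0 -> -EINVAL
        (the receiver would otherwise derive the SCI from the source MAC
        instead of the explicitly configured one)
  both given, INC_SCI != 0                                         -> accepted,
        the SCI is carried in the SecTAG
  either attribute absent                                          -> behaviour
        unchanged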
Fixes: c09440f7dcb3 ("macsec: introduce IEEE 802.1AE driver") Signed-off-by: Lior Nahmanson Reviewed-by: Raed Salem Signed-off-by: Raed Salem Link: https://lore.kernel.org/r/1643542672-29403-1-git-send-email-raeds@nvidia.com Signed-off-by: Jakub Kicinski --- drivers/net/macsec.c | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/drivers/net/macsec.c b/drivers/net/macsec.c index 33ff33c05aab..3d0874331763 100644 --- a/drivers/net/macsec.c +++ b/drivers/net/macsec.c @@ -4018,6 +4018,15 @@ static int macsec_newlink(struct net *net, struct net_device *dev, !macsec_check_offload(macsec->offload, macsec)) return -EOPNOTSUPP; + /* send_sci must be set to true when transmit sci explicitly is set */ + if ((data && data[IFLA_MACSEC_SCI]) && + (data && data[IFLA_MACSEC_INC_SCI])) { + u8 send_sci = !!nla_get_u8(data[IFLA_MACSEC_INC_SCI]); + + if (!send_sci) + return -EINVAL; + } + if (data && data[IFLA_MACSEC_ICV_LEN]) icv_len = nla_get_u8(data[IFLA_MACSEC_ICV_LEN]); mtu = real_dev->mtu - icv_len - macsec_extra_len(true); -- cgit From 04f8c12f031fcd0ffa0c72822eb665ceb2c872e7 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 6 Jan 2022 16:40:18 +0200 Subject: net/mlx5: Bridge, take rtnl lock in init error handler The mlx5_esw_bridge_cleanup() is expected to be called with rtnl lock taken, which is true for mlx5e_rep_bridge_cleanup() function but not for error handling code in mlx5e_rep_bridge_init(). Add missing rtnl lock/unlock calls and extend both mlx5_esw_bridge_cleanup() and its dual function mlx5_esw_bridge_init() with ASSERT_RTNL() to verify the invariant from now on. Fixes: 7cd6a54a8285 ("net/mlx5: Bridge, handle FDB events") Fixes: 19e9bfa044f3 ("net/mlx5: Bridge, add offload infrastructure") Signed-off-by: Vlad Buslov Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 2 ++ drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index c6d2f8c78db7..d5cb27667005 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -509,7 +509,9 @@ err_register_swdev_blk: err_register_swdev: destroy_workqueue(br_offloads->wq); err_alloc_wq: + rtnl_lock(); mlx5_esw_bridge_cleanup(esw); + rtnl_unlock(); } void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c index f690f430f40f..05e08cec5a8c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c @@ -1574,6 +1574,8 @@ struct mlx5_esw_bridge_offloads *mlx5_esw_bridge_init(struct mlx5_eswitch *esw) { struct mlx5_esw_bridge_offloads *br_offloads; + ASSERT_RTNL(); + br_offloads = kvzalloc(sizeof(*br_offloads), GFP_KERNEL); if (!br_offloads) return ERR_PTR(-ENOMEM); @@ -1590,6 +1592,8 @@ void mlx5_esw_bridge_cleanup(struct mlx5_eswitch *esw) { struct mlx5_esw_bridge_offloads *br_offloads = esw->br_offloads; + ASSERT_RTNL(); + if (!br_offloads) return; -- cgit From 350d9a823734b5a7e767cddc3bdde5f0bcbb7ff4 Mon Sep 17 00:00:00 2001 From: Vlad Buslov Date: Thu, 6 Jan 2022 18:45:26 +0200 Subject: net/mlx5: Bridge, ensure dev_name is null-terminated Even though net_device->name is guaranteed to be null-terminated string of size<=IFNAMSIZ, the test robot complains that 
return value of netdev_name() can be larger: In file included from include/trace/define_trace.h:102, from drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h:113, from drivers/net/ethernet/mellanox/mlx5/core/esw/bridge.c:12: drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h: In function 'trace_event_raw_event_mlx5_esw_bridge_fdb_template': >> drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h:24:29: warning: 'strncpy' output may be truncated copying 16 bytes from a string of length 20 [-Wstringop-truncation] 24 | strncpy(__entry->dev_name, | ^~~~~~~~~~~~~~~~~~~~~~~~~~ 25 | netdev_name(fdb->dev), | ~~~~~~~~~~~~~~~~~~~~~~ 26 | IFNAMSIZ); | ~~~~~~~~~ This is caused by the fact that default value of IFNAMSIZ is 16, while placeholder value that is returned by netdev_name() for unnamed net devices is larger than that. The offending code is in a tracing function that is only called for mlx5 representors, so there is no straightforward way to reproduce the issue but let's fix it for correctness sake by replacing strncpy() with strscpy() to ensure that resulting string is always null-terminated. Fixes: 9724fd5d9c2a ("net/mlx5: Bridge, add tracepoints") Reported-by: kernel test robot Signed-off-by: Vlad Buslov Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h index 3401188e0a60..51ac24e6ec3c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/esw/diag/bridge_tracepoint.h @@ -21,7 +21,7 @@ DECLARE_EVENT_CLASS(mlx5_esw_bridge_fdb_template, __field(unsigned int, used) ), TP_fast_assign( - strncpy(__entry->dev_name, + strscpy(__entry->dev_name, netdev_name(fdb->dev), IFNAMSIZ); memcpy(__entry->addr, fdb->key.addr, ETH_ALEN); -- cgit From a2446bc77a16cefd27de712d28af2396d6287593 Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Tue, 4 Jan 2022 10:38:02 +0200 Subject: net/mlx5e: TC, Reject rules with drop and modify hdr action This kind of action is not supported by firmware and generates a syndrome. 
kernel: mlx5_core 0000:08:00.0: mlx5_cmd_check:777:(pid 102063): SET_FLOW_TABLE_ENTRY(0x936) op_mod(0x0) failed, status bad parameter(0x3), syndrome (0x8708c3) Fixes: d7e75a325cb2 ("net/mlx5e: Add offloading of E-Switch TC pedit (header re-write) actions") Signed-off-by: Roi Dayan Reviewed-by: Oz Shlomo Reviewed-by: Maor Dickman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 3d908a7e1406..671f76c350db 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3191,6 +3191,12 @@ actions_match_supported(struct mlx5e_priv *priv, return false; } + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && + actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { + NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); + return false; + } + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && !modify_header_match_supported(priv, &parse_attr->spec, flow_action, actions, ct_flow, ct_clear, extack)) -- cgit From 4a08a131351e375a2969b98e46df260ed04dcba7 Mon Sep 17 00:00:00 2001 From: Gal Pressman Date: Sun, 16 Jan 2022 09:07:22 +0200 Subject: net/mlx5e: Fix module EEPROM query When querying the module EEPROM, there was a misusage of the 'offset' variable vs the 'query.offset' field. Fix that by always using 'offset' and assigning its value to 'query.offset' right before the mcia register read call. While at it, the cross-pages read size adjustment was changed to be more intuitive. Fixes: e19b0a3474ab ("net/mlx5: Refactor module EEPROM query") Reported-by: Wang Yugui Signed-off-by: Gal Pressman Reviewed-by: Maxim Mikityanskiy Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/port.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/port.c b/drivers/net/ethernet/mellanox/mlx5/core/port.c index 1ef2b6a848c1..7b16a1188aab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/port.c @@ -406,23 +406,24 @@ int mlx5_query_module_eeprom(struct mlx5_core_dev *dev, switch (module_id) { case MLX5_MODULE_ID_SFP: - mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset); + mlx5_sfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); break; case MLX5_MODULE_ID_QSFP: case MLX5_MODULE_ID_QSFP_PLUS: case MLX5_MODULE_ID_QSFP28: - mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &query.offset); + mlx5_qsfp_eeprom_params_set(&query.i2c_address, &query.page, &offset); break; default: mlx5_core_err(dev, "Module ID not recognized: 0x%x\n", module_id); return -EINVAL; } - if (query.offset + size > MLX5_EEPROM_PAGE_LENGTH) + if (offset + size > MLX5_EEPROM_PAGE_LENGTH) /* Cross pages read, read until offset 256 in low page */ - size -= offset + size - MLX5_EEPROM_PAGE_LENGTH; + size = MLX5_EEPROM_PAGE_LENGTH - offset; query.size = size; + query.offset = offset; return mlx5_query_mcia(dev, &query, data); } -- cgit From 3c5193a87b0fea090aa3f769d020337662d87b5e Mon Sep 17 00:00:00 2001 From: Maher Sanalla Date: Thu, 13 Jan 2022 15:48:48 +0200 Subject: net/mlx5: Use del_timer_sync in fw reset flow of halting poll Substitute del_timer() with del_timer_sync() in fw reset polling deactivation flow, in order to prevent a race condition which occurs when del_timer() is called and timer is deactivated while 
another process is handling the timer interrupt. A situation that led to the following call trace: RIP: 0010:run_timer_softirq+0x137/0x420 recalibrate_cpu_khz+0x10/0x10 ktime_get+0x3e/0xa0 ? sched_clock_cpu+0xb/0xc0 __do_softirq+0xf5/0x2ea irq_exit_rcu+0xc1/0xf0 sysvec_apic_timer_interrupt+0x9e/0xc0 asm_sysvec_apic_timer_interrupt+0x12/0x20 Fixes: 38b9f903f22b ("net/mlx5: Handle sync reset request event") Signed-off-by: Maher Sanalla Reviewed-by: Moshe Shemesh Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c index 0b0234f9d694..84dbe46d5ede 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fw_reset.c @@ -132,7 +132,7 @@ static void mlx5_stop_sync_reset_poll(struct mlx5_core_dev *dev) { struct mlx5_fw_reset *fw_reset = dev->priv.fw_reset; - del_timer(&fw_reset->timer); + del_timer_sync(&fw_reset->timer); } static void mlx5_sync_reset_clear_reset_requested(struct mlx5_core_dev *dev, bool poll_health) -- cgit From 5623ef8a118838aae65363750dfafcba734dc8cb Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 17 Jan 2022 15:00:30 +0200 Subject: net/mlx5e: TC, Reject rules with forward and drop actions Such rules are redundant but allowed and passed to the driver. The driver does not support offloading such rules so return an error. Fixes: 03a9d11e6eeb ("net/mlx5e: Add TC drop and mirred/redirect action parsing for SRIOV offloads") Signed-off-by: Roi Dayan Reviewed-by: Oz Shlomo Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 671f76c350db..4c6e3c26c1ab 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -3191,6 +3191,12 @@ actions_match_supported(struct mlx5e_priv *priv, return false; } + if (!(~actions & + (MLX5_FLOW_CONTEXT_ACTION_FWD_DEST | MLX5_FLOW_CONTEXT_ACTION_DROP))) { + NL_SET_ERR_MSG_MOD(extack, "Rule cannot support forward+drop action"); + return false; + } + if (actions & MLX5_FLOW_CONTEXT_ACTION_MOD_HDR && actions & MLX5_FLOW_CONTEXT_ACTION_DROP) { NL_SET_ERR_MSG_MOD(extack, "Drop with modify header action is not supported"); -- cgit From 55b2ca702cfa744a9eb108915996a2294da47e71 Mon Sep 17 00:00:00 2001 From: Dima Chumak Date: Mon, 17 Jan 2022 15:32:16 +0200 Subject: net/mlx5: Fix offloading with ESWITCH_IPV4_TTL_MODIFY_ENABLE Only prio 1 is supported for nic mode when there is no ignore flow level support in firmware. But for switchdev mode, which supports fixed number of statically pre-allocated prios, this restriction is not relevant so it can be relaxed. 
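The resulting selection in mlx5_chains_get_prio_range(), summarizing the hunk
below:

  firmware supports ignore_flow_level            -> UINT_MAX (no prio limit)
  no eswitch, or eswitch not in switchdev mode   -> 1 (nic-mode restriction)
  otherwise (switchdev / FDB offloads)           -> FDB_TC_MAX_PRIO
                                                    (statically pre-allocated)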
Fixes: d671e109bd85 ("net/mlx5: Fix tc max supported prio for nic mode") Signed-off-by: Dima Chumak Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index d5e47630e284..e94233a12a32 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -121,12 +121,13 @@ u32 mlx5_chains_get_nf_ft_chain(struct mlx5_fs_chains *chains) u32 mlx5_chains_get_prio_range(struct mlx5_fs_chains *chains) { - if (!mlx5_chains_prios_supported(chains)) - return 1; - if (mlx5_chains_ignore_flow_level_supported(chains)) return UINT_MAX; + if (!chains->dev->priv.eswitch || + chains->dev->priv.eswitch->mode != MLX5_ESWITCH_OFFLOADS) + return 1; + /* We should get here only for eswitch case */ return FDB_TC_MAX_PRIO; } -- cgit From 880b517691908fb753019b9b27cd082e7617debd Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Mon, 24 Jan 2022 13:56:26 +0200 Subject: net/mlx5: Bridge, Fix devlink deadlock on net namespace deletion When changing mode to switchdev, rep bridge init registered to netdevice notifier holds the devlink lock and then takes pernet_ops_rwsem. At that time deleting a netns holds pernet_ops_rwsem and then takes the devlink lock. Example sequence is: $ ip netns add foo $ devlink dev eswitch set pci/0000:00:08.0 mode switchdev & $ ip netns del foo deleting netns trace: [ 1185.365555] ? devlink_pernet_pre_exit+0x74/0x1c0 [ 1185.368331] ? mutex_lock_io_nested+0x13f0/0x13f0 [ 1185.370984] ? xt_find_table+0x40/0x100 [ 1185.373244] ? __mutex_lock+0x24a/0x15a0 [ 1185.375494] ? net_generic+0xa0/0x1c0 [ 1185.376844] ? wait_for_completion_io+0x280/0x280 [ 1185.377767] ? devlink_pernet_pre_exit+0x74/0x1c0 [ 1185.378686] devlink_pernet_pre_exit+0x74/0x1c0 [ 1185.379579] ? devlink_nl_cmd_get_dumpit+0x3a0/0x3a0 [ 1185.380557] ? xt_find_table+0xda/0x100 [ 1185.381367] cleanup_net+0x372/0x8e0 changing mode to switchdev trace: [ 1185.411267] down_write+0x13a/0x150 [ 1185.412029] ? down_write_killable+0x180/0x180 [ 1185.413005] register_netdevice_notifier+0x1e/0x210 [ 1185.414000] mlx5e_rep_bridge_init+0x181/0x360 [mlx5_core] [ 1185.415243] mlx5e_uplink_rep_enable+0x269/0x480 [mlx5_core] [ 1185.416464] ? mlx5e_uplink_rep_disable+0x210/0x210 [mlx5_core] [ 1185.417749] mlx5e_attach_netdev+0x232/0x400 [mlx5_core] [ 1185.418906] mlx5e_netdev_attach_profile+0x15b/0x1e0 [mlx5_core] [ 1185.420172] mlx5e_netdev_change_profile+0x15a/0x1d0 [mlx5_core] [ 1185.421459] mlx5e_vport_rep_load+0x557/0x780 [mlx5_core] [ 1185.422624] ? mlx5e_stats_grp_vport_rep_num_stats+0x10/0x10 [mlx5_core] [ 1185.424006] mlx5_esw_offloads_rep_load+0xdb/0x190 [mlx5_core] [ 1185.425277] esw_offloads_enable+0xd74/0x14a0 [mlx5_core] Fix this by registering rep bridges for per net netdev notifier instead of global one, which operats on the net namespace without holding the pernet_ops_rwsem. 
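Condensing the two traces above into the lock ordering they show:

  ip netns del:                pernet_ops_rwsem -> devlink lock
                               (cleanup_net -> devlink_pernet_pre_exit)
  devlink ... mode switchdev:  devlink lock -> pernet_ops_rwsem
                               (mlx5e_rep_bridge_init -> register_netdevice_notifier)

The opposite acquisition orders form an AB-BA deadlock; switching to
register_netdevice_notifier_net(&init_net, ...) drops the second
pernet_ops_rwsem acquisition and with it the cycle.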
Fixes: 19e9bfa044f3 ("net/mlx5: Bridge, add offload infrastructure") Signed-off-by: Roi Dayan Reviewed-by: Vlad Buslov Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c index d5cb27667005..48dc121b2cb4 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bridge.c @@ -491,7 +491,7 @@ void mlx5e_rep_bridge_init(struct mlx5e_priv *priv) } br_offloads->netdev_nb.notifier_call = mlx5_esw_bridge_switchdev_port_event; - err = register_netdevice_notifier(&br_offloads->netdev_nb); + err = register_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb); if (err) { esw_warn(mdev, "Failed to register bridge offloads netdevice notifier (err=%d)\n", err); @@ -526,7 +526,7 @@ void mlx5e_rep_bridge_cleanup(struct mlx5e_priv *priv) return; cancel_delayed_work_sync(&br_offloads->update_work); - unregister_netdevice_notifier(&br_offloads->netdev_nb); + unregister_netdevice_notifier_net(&init_net, &br_offloads->netdev_nb); unregister_switchdev_blocking_notifier(&br_offloads->nb_blk); unregister_switchdev_notifier(&br_offloads->nb); destroy_workqueue(br_offloads->wq); -- cgit From b8d91145ed7cfa046cc07bcfb277465b9d45da73 Mon Sep 17 00:00:00 2001 From: Khalid Manaa Date: Wed, 26 Jan 2022 14:14:58 +0200 Subject: net/mlx5e: Fix wrong calculation of header index in HW_GRO The HW doesn't wrap the CQE.shampo.header_index field according to the headers buffer size, instead it always increases it until reaching overflow of u16 size. Thus the mlx5e_handle_rx_cqe_mpwrq_shampo handler should mask the CQE header_index field to find the actual header index in the headers buffer. 
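Note that the helper added below computes the index as

  be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1)

which equals raw_index % hd_per_wq only when hd_per_wq is a power of two, an
assumption about the headers-buffer sizing that the commit message itself does
not spell out.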
Fixes: f97d5c2a453e ("net/mlx5e: Add handle SHAMPO cqe support") Signed-off-by: Khalid Manaa Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h | 5 +++++ drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 4cdf8e5b24c2..b789af07829c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -167,6 +167,11 @@ static inline u16 mlx5e_txqsq_get_next_pi(struct mlx5e_txqsq *sq, u16 size) return pi; } +static inline u16 mlx5e_shampo_get_cqe_header_index(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) +{ + return be16_to_cpu(cqe->shampo.header_entry_index) & (rq->mpwqe.shampo->hd_per_wq - 1); +} + struct mlx5e_shampo_umr { u16 len; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index e86ccc22fb82..3a79ecd38003 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1117,7 +1117,7 @@ static void mlx5e_shampo_update_ipv6_udp_hdr(struct mlx5e_rq *rq, struct ipv6hdr static void mlx5e_shampo_update_fin_psh_flags(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct tcphdr *skb_tcp_hd) { - u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); + u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe); struct tcphdr *last_tcp_hd; void *last_hd_addr; @@ -1973,7 +1973,7 @@ mlx5e_free_rx_shampo_hd_entry(struct mlx5e_rq *rq, u16 header_index) static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe) { u16 data_bcnt = mpwrq_get_cqe_byte_cnt(cqe) - cqe->shampo.header_size; - u16 header_index = be16_to_cpu(cqe->shampo.header_entry_index); + u16 header_index = mlx5e_shampo_get_cqe_header_index(rq, cqe); u32 wqe_offset = be32_to_cpu(cqe->shampo.data_offset); u16 cstrides = mpwrq_get_cqe_consumed_strides(cqe); u32 data_offset = wqe_offset & (PAGE_SIZE - 1); -- cgit From 7957837b816f11eecb9146235bb0715478f4c81f Mon Sep 17 00:00:00 2001 From: Khalid Manaa Date: Wed, 26 Jan 2022 14:25:55 +0200 Subject: net/mlx5e: Fix broken SKB allocation in HW-GRO In case the HW doesn't perform header-data split, it will write the whole packet into the data buffer in the WQ, in this case the SHAMPO CQE handler couldn't use the header entry to build the SKB, instead it should allocate a new memory to build the SKB using the function: mlx5e_skb_from_cqe_mpwrq_nonlinear. 
Fixes: f97d5c2a453e ("net/mlx5e: Add handle SHAMPO cqe support") Signed-off-by: Khalid Manaa Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_rx.c | 26 ++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 3a79ecd38003..ee0a8f5206e3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -1871,7 +1871,7 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, return skb; } -static void +static struct sk_buff * mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct mlx5_cqe64 *cqe, u16 header_index) { @@ -1895,7 +1895,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, skb = mlx5e_build_linear_skb(rq, hdr, frag_size, rx_headroom, head_size); if (unlikely(!skb)) - return; + return NULL; /* queue up for recycling/reuse */ page_ref_inc(head->page); @@ -1907,7 +1907,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, ALIGN(head_size, sizeof(long))); if (unlikely(!skb)) { rq->stats->buff_alloc_err++; - return; + return NULL; } prefetchw(skb->data); @@ -1918,9 +1918,7 @@ mlx5e_skb_from_cqe_shampo(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, skb->tail += head_size; skb->len += head_size; } - rq->hw_gro_data->skb = skb; - NAPI_GRO_CB(skb)->count = 1; - skb_shinfo(skb)->gso_size = mpwrq_get_cqe_byte_cnt(cqe) - head_size; + return skb; } static void @@ -1980,6 +1978,7 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq u32 cqe_bcnt = mpwrq_get_cqe_byte_cnt(cqe); u16 wqe_id = be16_to_cpu(cqe->wqe_id); u32 page_idx = wqe_offset >> PAGE_SHIFT; + u16 head_size = cqe->shampo.header_size; struct sk_buff **skb = &rq->hw_gro_data->skb; bool flush = cqe->shampo.flush; bool match = cqe->shampo.match; @@ -2011,9 +2010,16 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq } if (!*skb) { - mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); + if (likely(head_size)) + *skb = mlx5e_skb_from_cqe_shampo(rq, wi, cqe, header_index); + else + *skb = mlx5e_skb_from_cqe_mpwrq_nonlinear(rq, wi, cqe_bcnt, data_offset, + page_idx); if (unlikely(!*skb)) goto free_hd_entry; + + NAPI_GRO_CB(*skb)->count = 1; + skb_shinfo(*skb)->gso_size = cqe_bcnt - head_size; } else { NAPI_GRO_CB(*skb)->count++; if (NAPI_GRO_CB(*skb)->count == 2 && @@ -2027,8 +2033,10 @@ static void mlx5e_handle_rx_cqe_mpwrq_shampo(struct mlx5e_rq *rq, struct mlx5_cq } } - di = &wi->umr.dma_info[page_idx]; - mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset); + if (likely(head_size)) { + di = &wi->umr.dma_info[page_idx]; + mlx5e_fill_skb_data(*skb, rq, di, data_bcnt, data_offset); + } mlx5e_shampo_complete_rx_cqe(rq, cqe, cqe_bcnt, *skb); if (flush) -- cgit From ec41332e02bd0acf1f24206867bb6a02f5877a62 Mon Sep 17 00:00:00 2001 From: Maor Dickman Date: Thu, 13 Jan 2022 15:11:42 +0200 Subject: net/mlx5e: Fix handling of wrong devices during bond netevent Current implementation of bond netevent handler only check if the handled netdev is VF representor and it missing a check if the VF representor is on the same phys device of the bond handling the netevent. Fix by adding the missing check and optimizing the check if the netdev is VF representor so it will not access uninitialized private data and crashes. 
BUG: kernel NULL pointer dereference, address: 000000000000036c PGD 0 P4D 0 Oops: 0000 [#1] SMP NOPTI Workqueue: eth3bond0 bond_mii_monitor [bonding] RIP: 0010:mlx5e_is_uplink_rep+0xc/0x50 [mlx5_core] RSP: 0018:ffff88812d69fd60 EFLAGS: 00010282 RAX: 0000000000000000 RBX: ffff8881cf800000 RCX: 0000000000000000 RDX: ffff88812d69fe10 RSI: 000000000000001b RDI: ffff8881cf800880 RBP: ffff8881cf800000 R08: 00000445cabccf2b R09: 0000000000000008 R10: 0000000000000004 R11: 0000000000000008 R12: ffff88812d69fe10 R13: 00000000fffffffe R14: ffff88820c0f9000 R15: 0000000000000000 FS: 0000000000000000(0000) GS:ffff88846fb00000(0000) knlGS:0000000000000000 CS: 0010 DS: 0000 ES: 0000 CR0: 0000000080050033 CR2: 000000000000036c CR3: 0000000103d80006 CR4: 0000000000370ea0 DR0: 0000000000000000 DR1: 0000000000000000 DR2: 0000000000000000 DR3: 0000000000000000 DR6: 00000000fffe0ff0 DR7: 0000000000000400 Call Trace: mlx5e_eswitch_uplink_rep+0x31/0x40 [mlx5_core] mlx5e_rep_is_lag_netdev+0x94/0xc0 [mlx5_core] mlx5e_rep_esw_bond_netevent+0xeb/0x3d0 [mlx5_core] raw_notifier_call_chain+0x41/0x60 call_netdevice_notifiers_info+0x34/0x80 netdev_lower_state_changed+0x4e/0xa0 bond_mii_monitor+0x56b/0x640 [bonding] process_one_work+0x1b9/0x390 worker_thread+0x4d/0x3d0 ? rescuer_thread+0x350/0x350 kthread+0x124/0x150 ? set_kthread_struct+0x40/0x40 ret_from_fork+0x1f/0x30 Fixes: 7e51891a237f ("net/mlx5e: Use netdev events to set/del egress acl forward-to-vport rule") Signed-off-by: Maor Dickman Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en/rep/bond.c | 32 ++++++++++------------ 1 file changed, 14 insertions(+), 18 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c index 9c076aa20306..b6f5c1bcdbcd 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/rep/bond.c @@ -183,18 +183,7 @@ void mlx5e_rep_bond_unslave(struct mlx5_eswitch *esw, static bool mlx5e_rep_is_lag_netdev(struct net_device *netdev) { - struct mlx5e_rep_priv *rpriv; - struct mlx5e_priv *priv; - - /* A given netdev is not a representor or not a slave of LAG configuration */ - if (!mlx5e_eswitch_rep(netdev) || !netif_is_lag_port(netdev)) - return false; - - priv = netdev_priv(netdev); - rpriv = priv->ppriv; - - /* Egress acl forward to vport is supported only non-uplink representor */ - return rpriv->rep->vport != MLX5_VPORT_UPLINK; + return netif_is_lag_port(netdev) && mlx5e_eswitch_vf_rep(netdev); } static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *ptr) @@ -210,9 +199,6 @@ static void mlx5e_rep_changelowerstate_event(struct net_device *netdev, void *pt u16 fwd_vport_num; int err; - if (!mlx5e_rep_is_lag_netdev(netdev)) - return; - info = ptr; lag_info = info->lower_state_info; /* This is not an event of a representor becoming active slave */ @@ -266,9 +252,6 @@ static void mlx5e_rep_changeupper_event(struct net_device *netdev, void *ptr) struct net_device *lag_dev; struct mlx5e_priv *priv; - if (!mlx5e_rep_is_lag_netdev(netdev)) - return; - priv = netdev_priv(netdev); rpriv = priv->ppriv; lag_dev = info->upper_dev; @@ -293,6 +276,19 @@ static int mlx5e_rep_esw_bond_netevent(struct notifier_block *nb, unsigned long event, void *ptr) { struct net_device *netdev = netdev_notifier_info_to_dev(ptr); + struct mlx5e_rep_priv *rpriv; + struct mlx5e_rep_bond *bond; + struct mlx5e_priv *priv; + + if (!mlx5e_rep_is_lag_netdev(netdev)) + return 
NOTIFY_DONE; + + bond = container_of(nb, struct mlx5e_rep_bond, nb); + priv = netdev_priv(netdev); + rpriv = mlx5_eswitch_get_uplink_priv(priv->mdev->priv.eswitch, REP_ETH); + /* Verify VF representor is on the same device of the bond handling the netevent. */ + if (rpriv->uplink_priv.bond != bond) + return NOTIFY_DONE; switch (event) { case NETDEV_CHANGELOWERSTATE: -- cgit From d8e5883d694bb053b19c4142a2d1f43a34f6fe2c Mon Sep 17 00:00:00 2001 From: Maor Dickman Date: Sun, 30 Jan 2022 16:00:41 +0200 Subject: net/mlx5: E-Switch, Fix uninitialized variable modact The variable modact is not initialized before used in command modify header allocation which can cause command to fail. Fix by initializing modact with zeros. Addresses-Coverity: ("Uninitialized scalar variable") Fixes: 8f1e0b97cc70 ("net/mlx5: E-Switch, Mark miss packets with new chain id mapping") Signed-off-by: Maor Dickman Reviewed-by: Roi Dayan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c index e94233a12a32..df58cba37930 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/lib/fs_chains.c @@ -212,7 +212,7 @@ static int create_chain_restore(struct fs_chain *chain) { struct mlx5_eswitch *esw = chain->chains->dev->priv.eswitch; - char modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)]; + u8 modact[MLX5_UN_SZ_BYTES(set_add_copy_action_in_auto)] = {}; struct mlx5_fs_chains *chains = chain->chains; enum mlx5e_tc_attr_to_reg chain_to_reg; struct mlx5_modify_hdr *mod_hdr; -- cgit From 736dfe4e68b868829a1e89dfef4a44c1580d4478 Mon Sep 17 00:00:00 2001 From: Maxim Mikityanskiy Date: Tue, 18 Jan 2022 13:31:54 +0200 Subject: net/mlx5e: Don't treat small ceil values as unlimited in HTB offload The hardware spec defines max_average_bw == 0 as "unlimited bandwidth". max_average_bw is calculated as `ceil / BYTES_IN_MBIT`, which can become 0 when ceil is small, leading to an undesired effect of having no bandwidth limit. This commit fixes it by rounding up small values of ceil to 1 Mbit/s. Fixes: 214baf22870c ("net/mlx5e: Support HTB offload") Signed-off-by: Maxim Mikityanskiy Reviewed-by: Tariq Toukan Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en/qos.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c index 00449df98a5e..c1e07496c89c 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/qos.c @@ -570,7 +570,8 @@ static int mlx5e_htb_convert_rate(struct mlx5e_priv *priv, u64 rate, static void mlx5e_htb_convert_ceil(struct mlx5e_priv *priv, u64 ceil, u32 *max_average_bw) { - *max_average_bw = div_u64(ceil, BYTES_IN_MBIT); + /* Hardware treats 0 as "unlimited", set at least 1. 
*/ + *max_average_bw = max_t(u32, div_u64(ceil, BYTES_IN_MBIT), 1); qos_dbg(priv->mdev, "Convert: ceil %llu -> max_average_bw %u\n", ceil, *max_average_bw); -- cgit From 5352859b3bfa0ca188b2f1d2c1436fddc781e3b6 Mon Sep 17 00:00:00 2001 From: Raed Salem Date: Thu, 2 Dec 2021 17:43:50 +0200 Subject: net/mlx5e: IPsec: Fix crypto offload for non TCP/UDP encapsulated traffic IPsec crypto offload always set the ethernet segment checksum flags with the inner L4 header checksum flag enabled for encapsulated IPsec offloaded packet regardless of the encapsulated L4 header type, and even if it doesn't exists in the first place, this breaks non TCP/UDP traffic as such. Set the inner L4 checksum flag only when the encapsulated L4 header protocol is TCP/UDP using software parser swp_inner_l4_offset field as indication. Fixes: 5cfb540ef27b ("net/mlx5e: Set IPsec WAs only in IP's non checksum partial case.") Signed-off-by: Raed Salem Reviewed-by: Maor Dickman Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h index b98db50c3418..428881e0adcb 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.h @@ -131,14 +131,17 @@ static inline bool mlx5e_ipsec_txwqe_build_eseg_csum(struct mlx5e_txqsq *sq, struct sk_buff *skb, struct mlx5_wqe_eth_seg *eseg) { - struct xfrm_offload *xo = xfrm_offload(skb); + u8 inner_ipproto; if (!mlx5e_ipsec_eseg_meta(eseg)) return false; eseg->cs_flags = MLX5_ETH_WQE_L3_CSUM; - if (xo->inner_ipproto) { - eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM | MLX5_ETH_WQE_L3_INNER_CSUM; + inner_ipproto = xfrm_offload(skb)->inner_ipproto; + if (inner_ipproto) { + eseg->cs_flags |= MLX5_ETH_WQE_L3_INNER_CSUM; + if (inner_ipproto == IPPROTO_TCP || inner_ipproto == IPPROTO_UDP) + eseg->cs_flags |= MLX5_ETH_WQE_L4_INNER_CSUM; } else if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) { eseg->cs_flags |= MLX5_ETH_WQE_L4_CSUM; sq->stats->csum_partial_inner++; -- cgit From de47db0cf7f4a9c555ad204e06baa70b50a70d08 Mon Sep 17 00:00:00 2001 From: Raed Salem Date: Thu, 2 Dec 2021 17:49:01 +0200 Subject: net/mlx5e: IPsec: Fix tunnel mode crypto offload for non TCP/UDP traffic IPsec Tunnel mode crypto offload software parser (SWP) setting in data path currently always set the inner L4 offset regardless of the encapsulated L4 header type and whether it exists in the first place, this breaks non TCP/UDP traffic as such. Set the SWP inner L4 offset only when the IPsec tunnel encapsulated L4 header protocol is TCP/UDP. While at it fix inner ip protocol read for setting MLX5_ETH_WQE_SWP_INNER_L4_UDP flag to address the case where the ip header protocol is IPv6. 
Fixes: f1267798c980 ("net/mlx5: Fix checksum issue of VXLAN and IPsec crypto offload") Signed-off-by: Raed Salem Reviewed-by: Maor Dickman Signed-off-by: Saeed Mahameed --- .../net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c index 2db9573a3fe6..b56fea142c24 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ipsec_rxtx.c @@ -157,11 +157,20 @@ static void mlx5e_ipsec_set_swp(struct sk_buff *skb, /* Tunnel mode */ if (mode == XFRM_MODE_TUNNEL) { eseg->swp_inner_l3_offset = skb_inner_network_offset(skb) / 2; - eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; if (xo->proto == IPPROTO_IPV6) eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L3_IPV6; - if (inner_ip_hdr(skb)->protocol == IPPROTO_UDP) + + switch (xo->inner_ipproto) { + case IPPROTO_UDP: eseg->swp_flags |= MLX5_ETH_WQE_SWP_INNER_L4_UDP; + fallthrough; + case IPPROTO_TCP: + /* IP | ESP | IP | [TCP | UDP] */ + eseg->swp_inner_l4_offset = skb_inner_transport_offset(skb) / 2; + break; + default: + break; + } return; } -- cgit From 5b209d1a22afabfb7d644abb10510c5713a3e569 Mon Sep 17 00:00:00 2001 From: Roi Dayan Date: Tue, 1 Feb 2022 15:27:48 +0200 Subject: net/mlx5e: Avoid implicit modify hdr for decap drop rule Currently the driver adds implicit modify hdr action for decap rules on tunnel devices if the port is an ovs port. This is also done if the action is drop and makes the modify hdr redundant and also the FW doesn't support it and will generate a syndrome. kernel: mlx5_core 0000:08:00.0: mlx5_cmd_check:777:(pid 102063): SET_FLOW_TABLE_ENTRY(0x936) op_mod(0x0) failed, status bad parameter(0x3), syndrome (0x8708c3) Fix it by adding the implicit modify hdr only for fwd actions. Fixes: b16eb3c81fe2 ("net/mlx5: Support internal port as decap route device") Fixes: 077cdda764c7 ("net/mlx5e: TC, Fix memory leak with rules with internal port") Signed-off-by: Roi Dayan Reviewed-by: Ariel Levkovich Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tc.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index 4c6e3c26c1ab..2022fa4a9598 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1414,7 +1414,8 @@ mlx5e_tc_add_fdb_flow(struct mlx5e_priv *priv, if (err) goto err_out; - if (!attr->chain && esw_attr->int_port) { + if (!attr->chain && esw_attr->int_port && + attr->action & MLX5_FLOW_CONTEXT_ACTION_FWD_DEST) { /* If decap route device is internal port, change the * source vport value in reg_c0 back to uplink just in * case the rule performs goto chain > 0. If we have a miss -- cgit From 6d5c900eb64107001e91e1f46bddc254dded8a59 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 24 Jan 2022 09:22:41 -0800 Subject: net/mlx5e: Use struct_group() for memcpy() region In preparation for FORTIFY_SOURCE performing compile-time and run-time field bounds checking for memcpy(), memmove(), and memset(), avoid intentionally writing across neighboring fields. Use struct_group() in struct vlan_ethhdr around members h_dest and h_source, so they can be referenced together. 
This will allow memcpy() and sizeof() to more easily reason about sizes, improve readability, and avoid future warnings about writing beyond the end of h_dest. "pahole" shows no size nor member offset changes to struct vlan_ethhdr. "objdump -d" shows no object code changes. Fixes: 34802a42b352 ("net/mlx5e: Do not modify the TX SKB") Signed-off-by: Kees Cook Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en_tx.c | 2 +- include/linux/if_vlan.h | 6 ++++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c index 7fd33b356cc8..ee7ecb88adc1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c @@ -208,7 +208,7 @@ static inline void mlx5e_insert_vlan(void *start, struct sk_buff *skb, u16 ihs) int cpy1_sz = 2 * ETH_ALEN; int cpy2_sz = ihs - cpy1_sz; - memcpy(vhdr, skb->data, cpy1_sz); + memcpy(&vhdr->addrs, skb->data, cpy1_sz); vhdr->h_vlan_proto = skb->vlan_proto; vhdr->h_vlan_TCI = cpu_to_be16(skb_vlan_tag_get(skb)); memcpy(&vhdr->h_vlan_encapsulated_proto, skb->data + cpy1_sz, cpy2_sz); diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 8420fe504927..2be4dd7e90a9 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -46,8 +46,10 @@ struct vlan_hdr { * @h_vlan_encapsulated_proto: packet type ID or len */ struct vlan_ethhdr { - unsigned char h_dest[ETH_ALEN]; - unsigned char h_source[ETH_ALEN]; + struct_group(addrs, + unsigned char h_dest[ETH_ALEN]; + unsigned char h_source[ETH_ALEN]; + ); __be16 h_vlan_proto; __be16 h_vlan_TCI; __be16 h_vlan_encapsulated_proto; -- cgit From ad5185735f7dab342fdd0dd41044da4c9ccfef67 Mon Sep 17 00:00:00 2001 From: Kees Cook Date: Mon, 24 Jan 2022 09:20:28 -0800 Subject: net/mlx5e: Avoid field-overflowing memcpy() In preparation for FORTIFY_SOURCE performing compile-time and run-time field bounds checking for memcpy(), memmove(), and memset(), avoid intentionally writing across neighboring fields. Use flexible arrays instead of zero-element arrays (which look like they are always overflowing) and split the cross-field memcpy() into two halves that can be appropriately bounds-checked by the compiler. We were doing: #define ETH_HLEN 14 #define VLAN_HLEN 4 ... #define MLX5E_XDP_MIN_INLINE (ETH_HLEN + VLAN_HLEN) ... struct mlx5e_tx_wqe *wqe = mlx5_wq_cyc_get_wqe(wq, pi); ... struct mlx5_wqe_eth_seg *eseg = &wqe->eth; struct mlx5_wqe_data_seg *dseg = wqe->data; ... memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE); target is wqe->eth.inline_hdr.start (which the compiler sees as being 2 bytes in size), but copying 18, intending to write across start (really vlan_tci, 2 bytes). The remaining 16 bytes get written into wqe->data[0], covering byte_count (4 bytes), lkey (4 bytes), and addr (8 bytes). 
struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; /* 0 16 */ struct mlx5_wqe_eth_seg eth; /* 16 16 */ struct mlx5_wqe_data_seg data[]; /* 32 0 */ /* size: 32, cachelines: 1, members: 3 */ /* last cacheline: 32 bytes */ }; struct mlx5_wqe_eth_seg { u8 swp_outer_l4_offset; /* 0 1 */ u8 swp_outer_l3_offset; /* 1 1 */ u8 swp_inner_l4_offset; /* 2 1 */ u8 swp_inner_l3_offset; /* 3 1 */ u8 cs_flags; /* 4 1 */ u8 swp_flags; /* 5 1 */ __be16 mss; /* 6 2 */ __be32 flow_table_metadata; /* 8 4 */ union { struct { __be16 sz; /* 12 2 */ u8 start[2]; /* 14 2 */ } inline_hdr; /* 12 4 */ struct { __be16 type; /* 12 2 */ __be16 vlan_tci; /* 14 2 */ } insert; /* 12 4 */ __be32 trailer; /* 12 4 */ }; /* 12 4 */ /* size: 16, cachelines: 1, members: 9 */ /* last cacheline: 16 bytes */ }; struct mlx5_wqe_data_seg { __be32 byte_count; /* 0 4 */ __be32 lkey; /* 4 4 */ __be64 addr; /* 8 8 */ /* size: 16, cachelines: 1, members: 3 */ /* last cacheline: 16 bytes */ }; So, split the memcpy() so the compiler can reason about the buffer sizes. "pahole" shows no size nor member offset changes to struct mlx5e_tx_wqe nor struct mlx5e_umr_wqe. "objdump -d" shows no meaningful object code changes (i.e. only source line number induced differences and optimizations). Fixes: b5503b994ed5 ("net/mlx5e: XDP TX forwarding support") Signed-off-by: Kees Cook Signed-off-by: Saeed Mahameed --- drivers/net/ethernet/mellanox/mlx5/core/en.h | 6 +++--- drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 812e6810cb3b..c14e06ca64d8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -224,7 +224,7 @@ static inline int mlx5e_get_max_num_channels(struct mlx5_core_dev *mdev) struct mlx5e_tx_wqe { struct mlx5_wqe_ctrl_seg ctrl; struct mlx5_wqe_eth_seg eth; - struct mlx5_wqe_data_seg data[0]; + struct mlx5_wqe_data_seg data[]; }; struct mlx5e_rx_wqe_ll { @@ -241,8 +241,8 @@ struct mlx5e_umr_wqe { struct mlx5_wqe_umr_ctrl_seg uctrl; struct mlx5_mkey_seg mkc; union { - struct mlx5_mtt inline_mtts[0]; - struct mlx5_klm inline_klms[0]; + DECLARE_FLEX_ARRAY(struct mlx5_mtt, inline_mtts); + DECLARE_FLEX_ARRAY(struct mlx5_klm, inline_klms); }; }; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 338d65e2c9ce..56e10c84a706 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -341,8 +341,10 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd, /* copy the inline part if required */ if (sq->min_inline_mode != MLX5_INLINE_MODE_NONE) { - memcpy(eseg->inline_hdr.start, xdptxd->data, MLX5E_XDP_MIN_INLINE); + memcpy(eseg->inline_hdr.start, xdptxd->data, sizeof(eseg->inline_hdr.start)); eseg->inline_hdr.sz = cpu_to_be16(MLX5E_XDP_MIN_INLINE); + memcpy(dseg, xdptxd->data + sizeof(eseg->inline_hdr.start), + MLX5E_XDP_MIN_INLINE - sizeof(eseg->inline_hdr.start)); dma_len -= MLX5E_XDP_MIN_INLINE; dma_addr += MLX5E_XDP_MIN_INLINE; dseg++; -- cgit From 9a8406ba1a9a2965c27e0db1d7753471d12ee9ff Mon Sep 17 00:00:00 2001 From: Liu Ying Date: Mon, 24 Jan 2022 10:40:07 +0800 Subject: phy: dphy: Correct clk_pre parameter MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The D-PHY specification (v1.2) explicitly mentions that the T-CLK-PRE parameter's 
unit is Unit Interval(UI) and the minimum value is 8. Also, kernel doc of the 'clk_pre' member of struct phy_configure_opts_mipi_dphy mentions that it should be in UI. However, the dphy core driver wrongly sets 'clk_pre' to 8000, which seems to hint that it's in picoseconds. So, let's fix the dphy core driver to correctly reflect the T-CLK-PRE parameter's minimum value according to the D-PHY specification. I'm assuming that all impacted custom drivers shall program values in TxByteClkHS cycles into hardware for the T-CLK-PRE parameter. The D-PHY specification mentions that the frequency of TxByteClkHS is exactly 1/8 the High-Speed(HS) bit rate(each HS bit consumes one UI). So, relevant custom driver code is changed to program those values as DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE), then. Note that I've only tested the patch with RM67191 DSI panel on i.MX8mq EVK. Help is needed to test with other i.MX8mq, Meson and Rockchip platforms, as I don't have the hardwares. Fixes: 2ed869990e14 ("phy: Add MIPI D-PHY configuration options") Tested-by: Liu Ying # RM67191 DSI panel on i.MX8mq EVK Reviewed-by: Andrzej Hajda Reviewed-by: Neil Armstrong # for phy-meson-axg-mipi-dphy.c Tested-by: Neil Armstrong # for phy-meson-axg-mipi-dphy.c Tested-by: Guido Günther # Librem 5 (imx8mq) with it's rather picky panel Reviewed-by: Laurent Pinchart Signed-off-by: Liu Ying Link: https://lore.kernel.org/r/20220124024007.1465018-1-victor.liu@nxp.com Signed-off-by: Vinod Koul --- drivers/gpu/drm/bridge/nwl-dsi.c | 12 +++++------- drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c | 3 ++- drivers/phy/phy-core-mipi-dphy.c | 4 ++-- drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c | 3 ++- 4 files changed, 11 insertions(+), 11 deletions(-) diff --git a/drivers/gpu/drm/bridge/nwl-dsi.c b/drivers/gpu/drm/bridge/nwl-dsi.c index a7389a0facfb..af07eeb47ca0 100644 --- a/drivers/gpu/drm/bridge/nwl-dsi.c +++ b/drivers/gpu/drm/bridge/nwl-dsi.c @@ -7,6 +7,7 @@ */ #include +#include #include #include #include @@ -196,12 +197,9 @@ static u32 ps2bc(struct nwl_dsi *dsi, unsigned long long ps) /* * ui2bc - UI time periods to byte clock cycles */ -static u32 ui2bc(struct nwl_dsi *dsi, unsigned long long ui) +static u32 ui2bc(unsigned int ui) { - u32 bpp = mipi_dsi_pixel_format_to_bpp(dsi->format); - - return DIV64_U64_ROUND_UP(ui * dsi->lanes, - dsi->mode.clock * 1000 * bpp); + return DIV_ROUND_UP(ui, BITS_PER_BYTE); } /* @@ -232,12 +230,12 @@ static int nwl_dsi_config_host(struct nwl_dsi *dsi) } /* values in byte clock cycles */ - cycles = ui2bc(dsi, cfg->clk_pre); + cycles = ui2bc(cfg->clk_pre); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_pre: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_T_PRE, cycles); cycles = ps2bc(dsi, cfg->lpx + cfg->clk_prepare + cfg->clk_zero); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_tx_gap (pre): 0x%x\n", cycles); - cycles += ui2bc(dsi, cfg->clk_pre); + cycles += ui2bc(cfg->clk_pre); DRM_DEV_DEBUG_DRIVER(dsi->dev, "cfg_t_post: 0x%x\n", cycles); nwl_dsi_write(dsi, NWL_DSI_CFG_T_POST, cycles); cycles = ps2bc(dsi, cfg->hs_exit); diff --git a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c index cd2332bf0e31..fdbd64c03e12 100644 --- a/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c +++ b/drivers/phy/amlogic/phy-meson-axg-mipi-dphy.c @@ -9,6 +9,7 @@ #include #include +#include #include #include #include @@ -250,7 +251,7 @@ static int phy_meson_axg_mipi_dphy_power_on(struct phy *phy) (DIV_ROUND_UP(priv->config.clk_zero, temp) << 16) | (DIV_ROUND_UP(priv->config.clk_prepare, temp) 
<< 24)); regmap_write(priv->regmap, MIPI_DSI_CLK_TIM1, - DIV_ROUND_UP(priv->config.clk_pre, temp)); + DIV_ROUND_UP(priv->config.clk_pre, BITS_PER_BYTE)); regmap_write(priv->regmap, MIPI_DSI_HS_TIM, DIV_ROUND_UP(priv->config.hs_exit, temp) | diff --git a/drivers/phy/phy-core-mipi-dphy.c b/drivers/phy/phy-core-mipi-dphy.c index 288c9c67aa74..ccb4045685cd 100644 --- a/drivers/phy/phy-core-mipi-dphy.c +++ b/drivers/phy/phy-core-mipi-dphy.c @@ -36,7 +36,7 @@ int phy_mipi_dphy_get_default_config(unsigned long pixel_clock, cfg->clk_miss = 0; cfg->clk_post = 60000 + 52 * ui; - cfg->clk_pre = 8000; + cfg->clk_pre = 8; cfg->clk_prepare = 38000; cfg->clk_settle = 95000; cfg->clk_term_en = 0; @@ -97,7 +97,7 @@ int phy_mipi_dphy_config_validate(struct phy_configure_opts_mipi_dphy *cfg) if (cfg->clk_post < (60000 + 52 * ui)) return -EINVAL; - if (cfg->clk_pre < 8000) + if (cfg->clk_pre < 8) return -EINVAL; if (cfg->clk_prepare < 38000 || cfg->clk_prepare > 95000) diff --git a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c index 347dc79a18c1..630e01b5c19b 100644 --- a/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c +++ b/drivers/phy/rockchip/phy-rockchip-inno-dsidphy.c @@ -5,6 +5,7 @@ * Author: Wyon Bi */ +#include #include #include #include @@ -364,7 +365,7 @@ static void inno_dsidphy_mipi_mode_enable(struct inno_dsidphy *inno) * The value of counter for HS Tclk-pre * Tclk-pre = Tpin_txbyteclkhs * value */ - clk_pre = DIV_ROUND_UP(cfg->clk_pre, t_txbyteclkhs); + clk_pre = DIV_ROUND_UP(cfg->clk_pre, BITS_PER_BYTE); /* * The value of counter for HS Tlpx Time -- cgit From 0fa0f99fc84e41057cbdd2efbfe91c6b2f47dd9d Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 1 Feb 2022 14:54:19 +0200 Subject: nvme: fix a possible use-after-free in controller reset during load Unlike .queue_rq, in .submit_async_event drivers may not check the ctrl readiness for AER submission. This may lead to a use-after-free condition that was observed with nvme-tcp. The race condition may happen in the following scenario: 1. driver executes its reset_ctrl_work 2. -> nvme_stop_ctrl - flushes ctrl async_event_work 3. ctrl sends AEN which is received by the host, which in turn schedules AEN handling 4. teardown admin queue (which releases the queue socket) 5. AEN processed, submits another AER, calling the driver to submit 6. driver attempts to send the cmd ==> use-after-free In order to fix that, add ctrl state check to validate the ctrl is actually able to accept the AER submission. This addresses the above race in controller resets because the driver during teardown should: 1. change ctrl state to RESETTING 2. flush async_event_work (as well as other async work elements) So after 1,2, any other AER command will find the ctrl state to be RESETTING and bail out without submitting the AER. Signed-off-by: Sagi Grimberg --- drivers/nvme/host/core.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 5e0bfda04bd7..961a5f8a44d2 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -4253,7 +4253,14 @@ static void nvme_async_event_work(struct work_struct *work) container_of(work, struct nvme_ctrl, async_event_work); nvme_aen_uevent(ctrl); - ctrl->ops->submit_async_event(ctrl); + + /* + * The transport drivers must guarantee AER submission here is safe by + * flushing ctrl async_event_work after changing the controller state + * from LIVE and before freeing the admin queue. 
+ */ + if (ctrl->state == NVME_CTRL_LIVE) + ctrl->ops->submit_async_event(ctrl); } static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl) -- cgit From ff9fc7ebf5c06de1ef72a69f9b1ab40af8b07f9e Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 1 Feb 2022 14:54:20 +0200 Subject: nvme-tcp: fix possible use-after-free in transport error_recovery work While nvme_tcp_submit_async_event_work is checking the ctrl and queue state before preparing the AER command and scheduling io_work, in order to fully prevent a race where this check is not reliable the error recovery work must flush async_event_work before continuing to destroy the admin queue after setting the ctrl state to RESETTING such that there is no race .submit_async_event and the error recovery handler itself changing the ctrl state. Tested-by: Chris Leech Signed-off-by: Sagi Grimberg --- drivers/nvme/host/tcp.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nvme/host/tcp.c b/drivers/nvme/host/tcp.c index 4ceb28675fdf..01e24b5703db 100644 --- a/drivers/nvme/host/tcp.c +++ b/drivers/nvme/host/tcp.c @@ -2096,6 +2096,7 @@ static void nvme_tcp_error_recovery_work(struct work_struct *work) struct nvme_ctrl *ctrl = &tcp_ctrl->ctrl; nvme_stop_keep_alive(ctrl); + flush_work(&ctrl->async_event_work); nvme_tcp_teardown_io_queues(ctrl, false); /* unquiesce to fail fast pending requests */ nvme_start_queues(ctrl); -- cgit From b6bb1722f34bbdbabed27acdceaf585d300c5fd2 Mon Sep 17 00:00:00 2001 From: Sagi Grimberg Date: Tue, 1 Feb 2022 14:54:21 +0200 Subject: nvme-rdma: fix possible use-after-free in transport error_recovery work While nvme_rdma_submit_async_event_work is checking the ctrl and queue state before preparing the AER command and scheduling io_work, in order to fully prevent a race where this check is not reliable the error recovery work must flush async_event_work before continuing to destroy the admin queue after setting the ctrl state to RESETTING such that there is no race .submit_async_event and the error recovery handler itself changing the ctrl state. Signed-off-by: Sagi Grimberg --- drivers/nvme/host/rdma.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c index 850f84d204d0..9c55e4be8a39 100644 --- a/drivers/nvme/host/rdma.c +++ b/drivers/nvme/host/rdma.c @@ -1200,6 +1200,7 @@ static void nvme_rdma_error_recovery_work(struct work_struct *work) struct nvme_rdma_ctrl, err_work); nvme_stop_keep_alive(&ctrl->ctrl); + flush_work(&ctrl->ctrl.async_event_work); nvme_rdma_teardown_io_queues(ctrl, false); nvme_start_queues(&ctrl->ctrl); nvme_rdma_teardown_admin_queue(ctrl, false); -- cgit From cd9f7f7ac5932129fe81b4c7559cfcb226ec7c5c Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Tue, 1 Feb 2022 12:53:05 +0100 Subject: drm/fb-helper: Mark screen buffers in system memory with FBINFO_VIRTFB Mark screen buffers in system memory with FBINFO_VIRTFB. Otherwise, fbdev deferred I/O marks mmap'ed areas of system memory with VM_IO. (There's an inverse relationship between the two flags.) For shadow buffers, also set the FBINFO_READS_FAST hint. 
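For illustration only (not part of the patch): any fbdev driver or emulation whose framebuffer lives in plain system memory rather than in an I/O aperture is expected to advertise that the same way. A minimal, hypothetical driver fragment:

	/* Illustrative fragment, not drm_fb_helper code: the buffer is ordinary
	 * kernel memory, so tell the fbdev core not to map it as I/O memory. */
	info->screen_buffer = vzalloc(info->screen_size);
	if (!info->screen_buffer)
		return -ENOMEM;
	info->flags |= FBINFO_VIRTFB;		/* system memory: no VM_IO on mmap */
	info->flags |= FBINFO_READS_FAST;	/* shadow buffer is cheap to read back */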
v3: * change FB_ to FBINFO_ in commit description v2: * updated commit description (Daniel) * added Fixes tag Signed-off-by: Thomas Zimmermann Fixes: d536540f304c ("drm/fb-helper: Add generic fbdev emulation .fb_probe function") Reviewed-by: Daniel Vetter Cc: dri-devel@lists.freedesktop.org Cc: # v4.19+ Link: https://patchwork.freedesktop.org/patch/msgid/20220201115305.9333-1-tzimmermann@suse.de --- drivers/gpu/drm/drm_fb_helper.c | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c index 9727a59d35fd..805c5a666490 100644 --- a/drivers/gpu/drm/drm_fb_helper.c +++ b/drivers/gpu/drm/drm_fb_helper.c @@ -2340,6 +2340,7 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->fbops = &drm_fbdev_fb_ops; fbi->screen_size = sizes->surface_height * fb->pitches[0]; fbi->fix.smem_len = fbi->screen_size; + fbi->flags = FBINFO_DEFAULT; drm_fb_helper_fill_info(fbi, fb_helper, sizes); @@ -2347,19 +2348,21 @@ static int drm_fb_helper_generic_probe(struct drm_fb_helper *fb_helper, fbi->screen_buffer = vzalloc(fbi->screen_size); if (!fbi->screen_buffer) return -ENOMEM; + fbi->flags |= FBINFO_VIRTFB | FBINFO_READS_FAST; fbi->fbdefio = &drm_fbdev_defio; - fb_deferred_io_init(fbi); } else { /* buffer is mapped for HW framebuffer */ ret = drm_client_buffer_vmap(fb_helper->buffer, &map); if (ret) return ret; - if (map.is_iomem) + if (map.is_iomem) { fbi->screen_base = map.vaddr_iomem; - else + } else { fbi->screen_buffer = map.vaddr; + fbi->flags |= FBINFO_VIRTFB; + } /* * Shamelessly leak the physical address to user-space. As -- cgit From 2c212e1baedcd782b2535a3f86bc491977677c0e Mon Sep 17 00:00:00 2001 From: Janis Schoetterl-Glausch Date: Fri, 28 Jan 2022 15:06:43 +0100 Subject: KVM: s390: Return error on SIDA memop on normal guest Refuse SIDA memops on guests which are not protected. For normal guests, the secure instruction data address designation, which determines the location we access, is not under control of KVM. Fixes: 19e122776886 (KVM: S390: protvirt: Introduce instruction data area bounce buffer) Signed-off-by: Janis Schoetterl-Glausch Cc: stable@vger.kernel.org Signed-off-by: Christian Borntraeger --- arch/s390/kvm/kvm-s390.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index 577f1ead6a51..2296b1ff1e02 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -4667,6 +4667,8 @@ static long kvm_s390_guest_sida_op(struct kvm_vcpu *vcpu, return -EINVAL; if (mop->size + mop->sida_offset > sida_size(vcpu->arch.sie_block)) return -E2BIG; + if (!kvm_s390_pv_cpu_is_protected(vcpu)) + return -EINVAL; switch (mop->op) { case KVM_S390_MEMOP_SIDA_READ: -- cgit From 70c0b80d0bbb97c072c4a9c3e8b0f68a9e22d7d2 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Tue, 25 Jan 2022 10:32:51 +0100 Subject: drm/edid: Clear EDID Deep Color Modes in drm_reset_display_info() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Even though we have the other drm_display_info fields reset, the DC modes are missing. This shouldn't be an issue since it's explicitly reset every time a new EDID is parsed. 
Suggested-by: Ville Syrjälä Signed-off-by: Maxime Ripard Reviewed-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220125093251.594772-1-maxime@cerno.tech --- drivers/gpu/drm/drm_edid.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c index a504542238ed..a7663f9a11d2 100644 --- a/drivers/gpu/drm/drm_edid.c +++ b/drivers/gpu/drm/drm_edid.c @@ -5340,6 +5340,9 @@ drm_reset_display_info(struct drm_connector *connector) info->rgb_quant_range_selectable = false; memset(&info->hdmi, 0, sizeof(info->hdmi)); + info->edid_hdmi_rgb444_dc_modes = 0; + info->edid_hdmi_ycbcr444_dc_modes = 0; + info->non_desktop = 0; memset(&info->monitor_range, 0, sizeof(info->monitor_range)); -- cgit From 34554946143df8aaeaa4ce87a1bf3ba04a8ec20b Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 27 Jan 2022 12:14:04 +0100 Subject: drm/vc4: hdmi: Simplify the connector state retrieval When we have the entire DRM state, retrieving the connector state only requires the drm_connector pointer. Fortunately for us, we have allocated it as a part of the vc4_hdmi structure, so we can retrieve get a pointer by simply accessing our field in that structure. Signed-off-by: Maxime Ripard Acked-by: Thomas Zimmermann Link: https://patchwork.freedesktop.org/patch/msgid/20220127111404.221882-1-maxime@cerno.tech --- drivers/gpu/drm/vc4/vc4_hdmi.c | 21 +++------------------ 1 file changed, 3 insertions(+), 18 deletions(-) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index e3121eb5f605..aef0e78f86bb 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1018,30 +1018,15 @@ static void vc4_hdmi_recenter_fifo(struct vc4_hdmi *vc4_hdmi) "VC4_HDMI_FIFO_CTL_RECENTER_DONE"); } -static struct drm_connector_state * -vc4_hdmi_encoder_get_connector_state(struct drm_encoder *encoder, - struct drm_atomic_state *state) -{ - struct drm_connector_state *conn_state; - struct drm_connector *connector; - unsigned int i; - - for_each_new_connector_in_state(state, connector, conn_state, i) { - if (conn_state->best_encoder == encoder) - return conn_state; - } - - return NULL; -} - static void vc4_hdmi_encoder_pre_crtc_configure(struct drm_encoder *encoder, struct drm_atomic_state *state) { + struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); + struct drm_connector *connector = &vc4_hdmi->connector; struct drm_connector_state *conn_state = - vc4_hdmi_encoder_get_connector_state(encoder, state); + drm_atomic_get_new_connector_state(state, connector); struct vc4_hdmi_connector_state *vc4_conn_state = conn_state_to_vc4_hdmi_conn_state(conn_state); - struct vc4_hdmi *vc4_hdmi = encoder_to_vc4_hdmi(encoder); struct drm_display_mode *mode = &vc4_hdmi->saved_adjusted_mode; unsigned long pixel_rate = vc4_conn_state->pixel_rate; unsigned long bvb_rate, hsm_rate; -- cgit From ea8a12e350e86aaa5fe7815db864b35fae2356f1 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Thu, 27 Jan 2022 15:30:44 +0100 Subject: dt-bindings: display: Turn lvds.yaml into a generic schema The lvds.yaml file so far was both defining the generic LVDS properties (such as data-mapping) that could be used for any LVDS sink, but also the panel-lvds binding. That last binding was to describe LVDS panels simple enough, and had a number of other bindings using it as a base to specialise it further. However, this situation makes it fairly hard to extend and reuse both the generic parts, and the panel-lvds itself. 
Let's remove the panel-lvds parts and leave only the generic LVDS properties. Reviewed-by: Laurent Pinchart Reviewed-by: Rob Herring Signed-off-by: Maxime Ripard Link: https://lore.kernel.org/r/20220127143045.310199-1-maxime@cerno.tech --- .../bindings/display/bridge/lvds-codec.yaml | 2 +- .../devicetree/bindings/display/lvds.yaml | 90 ++++++++++++++++ .../display/panel/advantech,idk-1110wr.yaml | 19 +++- .../display/panel/innolux,ee101ia-01d.yaml | 23 +++- .../devicetree/bindings/display/panel/lvds.yaml | 117 --------------------- .../display/panel/mitsubishi,aa104xd12.yaml | 19 +++- .../display/panel/mitsubishi,aa121td01.yaml | 19 +++- .../bindings/display/panel/sgd,gktw70sdae4se.yaml | 19 +++- MAINTAINERS | 2 +- 9 files changed, 181 insertions(+), 129 deletions(-) create mode 100644 Documentation/devicetree/bindings/display/lvds.yaml delete mode 100644 Documentation/devicetree/bindings/display/panel/lvds.yaml diff --git a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml index 080c59f5118b..e9617cece7cc 100644 --- a/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml +++ b/Documentation/devicetree/bindings/display/bridge/lvds-codec.yaml @@ -68,7 +68,7 @@ properties: - vesa-24 description: | The color signals mapping order. See details in - Documentation/devicetree/bindings/display/panel/lvds.yaml + Documentation/devicetree/bindings/display/lvds.yaml port@1: $ref: /schemas/graph.yaml#/properties/port diff --git a/Documentation/devicetree/bindings/display/lvds.yaml b/Documentation/devicetree/bindings/display/lvds.yaml new file mode 100644 index 000000000000..7cd2ce7e9c33 --- /dev/null +++ b/Documentation/devicetree/bindings/display/lvds.yaml @@ -0,0 +1,90 @@ +# SPDX-License-Identifier: GPL-2.0 +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/lvds.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: LVDS Display Common Properties + +maintainers: + - Laurent Pinchart + - Thierry Reding + +description: |+ + LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple + incompatible data link layers have been used over time to transmit image data + to LVDS devices. This bindings supports devices compatible with the following + specifications. + + [JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February + 1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA) + [LDI] "Open LVDS Display Interface", May 1999 (Version 0.95), National + Semiconductor + [VESA] "VESA Notebook Panel Standard", October 2007 (Version 1.0), Video + Electronics Standards Association (VESA) + + Device compatible with those specifications have been marketed under the + FPD-Link and FlatLink brands. + +properties: + data-mapping: + enum: + - jeida-18 + - jeida-24 + - vesa-24 + description: | + The color signals mapping order. + + LVDS data mappings are defined as follows. + + - "jeida-18" - 18-bit data mapping compatible with the [JEIDA], [LDI] and + [VESA] specifications. Data are transferred as follows on 3 LVDS lanes. 
+ + Slot 0 1 2 3 4 5 6 + ________________ _________________ + Clock \_______________________/ + ______ ______ ______ ______ ______ ______ ______ + DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__>< + DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__>< + DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__>< + + - "jeida-24" - 24-bit data mapping compatible with the [DSIM] and [LDI] + specifications. Data are transferred as follows on 4 LVDS lanes. + + Slot 0 1 2 3 4 5 6 + ________________ _________________ + Clock \_______________________/ + ______ ______ ______ ______ ______ ______ ______ + DATA0 ><__G2__><__R7__><__R6__><__R5__><__R4__><__R3__><__R2__>< + DATA1 ><__B3__><__B2__><__G7__><__G6__><__G5__><__G4__><__G3__>< + DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B7__><__B6__><__B5__><__B4__>< + DATA3 ><_CTL3_><__B1__><__B0__><__G1__><__G0__><__R1__><__R0__>< + + - "vesa-24" - 24-bit data mapping compatible with the [VESA] specification. + Data are transferred as follows on 4 LVDS lanes. + + Slot 0 1 2 3 4 5 6 + ________________ _________________ + Clock \_______________________/ + ______ ______ ______ ______ ______ ______ ______ + DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__>< + DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__>< + DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__>< + DATA3 ><_CTL3_><__B7__><__B6__><__G7__><__G6__><__R7__><__R6__>< + + Control signals are mapped as follows. + + CTL0: HSync + CTL1: VSync + CTL2: Data Enable + CTL3: 0 + + data-mirror: + type: boolean + description: + If set, reverse the bit order described in the data mappings below on all + data lanes, transmitting bits for slots 6 to 0 instead of 0 to 6. + +additionalProperties: true + +... diff --git a/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml index 93878c2cd370..3a8c2c11f9bd 100644 --- a/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml +++ b/Documentation/devicetree/bindings/display/panel/advantech,idk-1110wr.yaml @@ -11,13 +11,23 @@ maintainers: - Thierry Reding allOf: - - $ref: lvds.yaml# + - $ref: panel-common.yaml# + - $ref: /schemas/display/lvds.yaml/# + +select: + properties: + compatible: + contains: + const: advantech,idk-1110wr + + required: + - compatible properties: compatible: items: - const: advantech,idk-1110wr - - {} # panel-lvds, but not listed here to avoid false select + - const: panel-lvds data-mapping: const: jeida-24 @@ -35,6 +45,11 @@ additionalProperties: false required: - compatible + - data-mapping + - width-mm + - height-mm + - panel-timing + - port examples: - |+ diff --git a/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.yaml b/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.yaml index a69681e724cb..566e11f6bfc0 100644 --- a/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.yaml +++ b/Documentation/devicetree/bindings/display/panel/innolux,ee101ia-01d.yaml @@ -11,15 +11,26 @@ maintainers: - Thierry Reding allOf: - - $ref: lvds.yaml# + - $ref: panel-common.yaml# + - $ref: /schemas/display/lvds.yaml/# + +select: + properties: + compatible: + contains: + const: innolux,ee101ia-01d + + required: + - compatible properties: compatible: items: - const: innolux,ee101ia-01d - - {} # panel-lvds, but not listed here to avoid false select + - const: panel-lvds backlight: true + data-mapping: true enable-gpios: true 
power-supply: true width-mm: true @@ -27,5 +38,13 @@ properties: panel-timing: true port: true +required: + - compatible + - data-mapping + - width-mm + - height-mm + - panel-timing + - port + additionalProperties: false ... diff --git a/Documentation/devicetree/bindings/display/panel/lvds.yaml b/Documentation/devicetree/bindings/display/panel/lvds.yaml deleted file mode 100644 index 49460c9dceea..000000000000 --- a/Documentation/devicetree/bindings/display/panel/lvds.yaml +++ /dev/null @@ -1,117 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -%YAML 1.2 ---- -$id: http://devicetree.org/schemas/display/panel/lvds.yaml# -$schema: http://devicetree.org/meta-schemas/core.yaml# - -title: LVDS Display Panel - -maintainers: - - Laurent Pinchart - - Thierry Reding - -description: |+ - LVDS is a physical layer specification defined in ANSI/TIA/EIA-644-A. Multiple - incompatible data link layers have been used over time to transmit image data - to LVDS panels. This bindings supports display panels compatible with the - following specifications. - - [JEIDA] "Digital Interface Standards for Monitor", JEIDA-59-1999, February - 1999 (Version 1.0), Japan Electronic Industry Development Association (JEIDA) - [LDI] "Open LVDS Display Interface", May 1999 (Version 0.95), National - Semiconductor - [VESA] "VESA Notebook Panel Standard", October 2007 (Version 1.0), Video - Electronics Standards Association (VESA) - - Device compatible with those specifications have been marketed under the - FPD-Link and FlatLink brands. - -allOf: - - $ref: panel-common.yaml# - -properties: - compatible: - contains: - const: panel-lvds - description: - Shall contain "panel-lvds" in addition to a mandatory panel-specific - compatible string defined in individual panel bindings. The "panel-lvds" - value shall never be used on its own. - - data-mapping: - enum: - - jeida-18 - - jeida-24 - - vesa-24 - description: | - The color signals mapping order. - - LVDS data mappings are defined as follows. - - - "jeida-18" - 18-bit data mapping compatible with the [JEIDA], [LDI] and - [VESA] specifications. Data are transferred as follows on 3 LVDS lanes. - - Slot 0 1 2 3 4 5 6 - ________________ _________________ - Clock \_______________________/ - ______ ______ ______ ______ ______ ______ ______ - DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__>< - DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__>< - DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__>< - - - "jeida-24" - 24-bit data mapping compatible with the [DSIM] and [LDI] - specifications. Data are transferred as follows on 4 LVDS lanes. - - Slot 0 1 2 3 4 5 6 - ________________ _________________ - Clock \_______________________/ - ______ ______ ______ ______ ______ ______ ______ - DATA0 ><__G2__><__R7__><__R6__><__R5__><__R4__><__R3__><__R2__>< - DATA1 ><__B3__><__B2__><__G7__><__G6__><__G5__><__G4__><__G3__>< - DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B7__><__B6__><__B5__><__B4__>< - DATA3 ><_CTL3_><__B1__><__B0__><__G1__><__G0__><__R1__><__R0__>< - - - "vesa-24" - 24-bit data mapping compatible with the [VESA] specification. - Data are transferred as follows on 4 LVDS lanes. 
- - Slot 0 1 2 3 4 5 6 - ________________ _________________ - Clock \_______________________/ - ______ ______ ______ ______ ______ ______ ______ - DATA0 ><__G0__><__R5__><__R4__><__R3__><__R2__><__R1__><__R0__>< - DATA1 ><__B1__><__B0__><__G5__><__G4__><__G3__><__G2__><__G1__>< - DATA2 ><_CTL2_><_CTL1_><_CTL0_><__B5__><__B4__><__B3__><__B2__>< - DATA3 ><_CTL3_><__B7__><__B6__><__G7__><__G6__><__R7__><__R6__>< - - Control signals are mapped as follows. - - CTL0: HSync - CTL1: VSync - CTL2: Data Enable - CTL3: 0 - - data-mirror: - type: boolean - description: - If set, reverse the bit order described in the data mappings below on all - data lanes, transmitting bits for slots 6 to 0 instead of 0 to 6. - - port: true - ports: true - -required: - - compatible - - data-mapping - - width-mm - - height-mm - - panel-timing - -oneOf: - - required: - - port - - required: - - ports - -additionalProperties: true - -... diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml index b5e7ee230fa6..5cf3c588f46d 100644 --- a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml +++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa104xd12.yaml @@ -11,13 +11,23 @@ maintainers: - Thierry Reding allOf: - - $ref: lvds.yaml# + - $ref: panel-common.yaml# + - $ref: /schemas/display/lvds.yaml/# + +select: + properties: + compatible: + contains: + const: mitsubishi,aa104xd12 + + required: + - compatible properties: compatible: items: - const: mitsubishi,aa104xd12 - - {} # panel-lvds, but not listed here to avoid false select + - const: panel-lvds vcc-supply: description: Reference to the regulator powering the panel VCC pins. @@ -39,6 +49,11 @@ additionalProperties: false required: - compatible - vcc-supply + - data-mapping + - width-mm + - height-mm + - panel-timing + - port examples: - |+ diff --git a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml index 977c50a85b67..54750cc5440d 100644 --- a/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml +++ b/Documentation/devicetree/bindings/display/panel/mitsubishi,aa121td01.yaml @@ -11,13 +11,23 @@ maintainers: - Thierry Reding allOf: - - $ref: lvds.yaml# + - $ref: panel-common.yaml# + - $ref: /schemas/display/lvds.yaml/# + +select: + properties: + compatible: + contains: + const: mitsubishi,aa121td01 + + required: + - compatible properties: compatible: items: - const: mitsubishi,aa121td01 - - {} # panel-lvds, but not listed here to avoid false select + - const: panel-lvds vcc-supply: description: Reference to the regulator powering the panel VCC pins. 
@@ -39,6 +49,11 @@ additionalProperties: false required: - compatible - vcc-supply + - data-mapping + - width-mm + - height-mm + - panel-timing + - port examples: - |+ diff --git a/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml b/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml index e63a570ae59d..44e02decdf3a 100644 --- a/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml +++ b/Documentation/devicetree/bindings/display/panel/sgd,gktw70sdae4se.yaml @@ -11,13 +11,23 @@ maintainers: - Thierry Reding allOf: - - $ref: lvds.yaml# + - $ref: panel-common.yaml# + - $ref: /schemas/display/lvds.yaml/# + +select: + properties: + compatible: + contains: + const: sgd,gktw70sdae4se + + required: + - compatible properties: compatible: items: - const: sgd,gktw70sdae4se - - {} # panel-lvds, but not listed here to avoid false select + - const: panel-lvds data-mapping: const: jeida-18 @@ -35,6 +45,11 @@ additionalProperties: false required: - compatible + - port + - data-mapping + - width-mm + - height-mm + - panel-timing examples: - |+ diff --git a/MAINTAINERS b/MAINTAINERS index d03ad8da1f36..d11f91f77647 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6018,7 +6018,7 @@ L: dri-devel@lists.freedesktop.org T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/panel/panel-lvds.c -F: Documentation/devicetree/bindings/display/panel/lvds.yaml +F: Documentation/devicetree/bindings/display/lvds.yaml DRM DRIVER FOR MANTIX MLAF057WE51 PANELS M: Guido Günther -- cgit From 1b3cf0133fbdeac863510fc8899efdbed91c15c0 Mon Sep 17 00:00:00 2001 From: Maxime Ripard Date: Tue, 11 Jan 2022 12:06:35 +0100 Subject: dt-bindings: panel: Introduce a panel-lvds binding Following the previous patch, let's introduce a generic panel-lvds binding that documents the panels that don't have any particular constraint documented. Reviewed-by: Rob Herring Signed-off-by: Maxime Ripard Link: https://lore.kernel.org/r/20220111110635.804371-2-maxime@cerno.tech --- .../bindings/display/panel/panel-lvds.yaml | 57 ++++++++++++++++++++++ MAINTAINERS | 1 + 2 files changed, 58 insertions(+) create mode 100644 Documentation/devicetree/bindings/display/panel/panel-lvds.yaml diff --git a/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml new file mode 100644 index 000000000000..fcc50db6a812 --- /dev/null +++ b/Documentation/devicetree/bindings/display/panel/panel-lvds.yaml @@ -0,0 +1,57 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/display/panel/panel-lvds.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Generic LVDS Display Panel Device Tree Bindings + +maintainers: + - Lad Prabhakar + - Thierry Reding + +allOf: + - $ref: panel-common.yaml# + - $ref: /schemas/display/lvds.yaml/# + +select: + properties: + compatible: + contains: + const: panel-lvds + + not: + properties: + compatible: + contains: + enum: + - advantech,idk-1110wr + - advantech,idk-2121wr + - innolux,ee101ia-01d + - mitsubishi,aa104xd12 + - mitsubishi,aa121td01 + - sgd,gktw70sdae4se + + required: + - compatible + +properties: + compatible: + items: + - enum: + - auo,b101ew05 + - tbs,a711-panel + + - const: panel-lvds + +unevaluatedProperties: false + +required: + - compatible + - data-mapping + - width-mm + - height-mm + - panel-timing + - port + +... 
diff --git a/MAINTAINERS b/MAINTAINERS index d11f91f77647..0d914702950f 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6019,6 +6019,7 @@ T: git git://anongit.freedesktop.org/drm/drm-misc S: Maintained F: drivers/gpu/drm/panel/panel-lvds.c F: Documentation/devicetree/bindings/display/lvds.yaml +F: Documentation/devicetree/bindings/display/panel/panel-lvds.yaml DRM DRIVER FOR MANTIX MLAF057WE51 PANELS M: Guido Günther -- cgit From eba1e44beef88aa722f07755f79f604cd5d92290 Mon Sep 17 00:00:00 2001 From: Marc Zyngier Date: Wed, 2 Feb 2022 10:34:54 +0000 Subject: irqchip/gic-v3-its: Skip HP notifier when no ITS is registered We have some systems out there that have both LPI support and an ITS, but that don't expose the ITS in their firmware tables (either because it is broken or because they run under a hypervisor that hides it...). Is such a configuration, we still register the HP notifier to free the allocated tables if needed, resulting in a warning as there is no memory to free (nothing was allocated the first place). Fix it by keying the HP notifier on the presence of at least one sucessfully probed ITS. Fixes: d23bc2bc1d63 ("irqchip/gic-v3-its: Postpone LPI pending table freeing and memreserve") Reported-by: Steev Klimaszewski Tested-by: Steev Klimaszewski Signed-off-by: Marc Zyngier Cc: Valentin Schneider Link: https://lore.kernel.org/r/20220202103454.2480465-1-maz@kernel.org --- drivers/irqchip/irq-gic-v3-its.c | 3 +++ 1 file changed, 3 insertions(+) diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c index 9e93ff2b6375..cd772973114a 100644 --- a/drivers/irqchip/irq-gic-v3-its.c +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -5517,6 +5517,9 @@ int __init its_lpi_memreserve_init(void) if (!efi_enabled(EFI_CONFIG_TABLES)) return 0; + if (list_empty(&its_nodes)) + return 0; + gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "irqchip/arm/gicv3/memreserve:online", -- cgit From 321a8be37e1a93cefeae990107533142c8515933 Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Sun, 30 Jan 2022 21:56:33 +0800 Subject: dt-bindings: update riscv plic compatible string Add the compatible string "thead,c900-plic" to the riscv plic bindings to support allwinner d1 SOC which contains c906 core. Signed-off-by: Guo Ren Cc: Anup Patel Cc: Heiko Stuebner Cc: Rob Herring Cc: Rob Herring Cc: Palmer Dabbelt Cc: Samuel Holland Reviewed-by: Rob Herring Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220130135634.1213301-2-guoren@kernel.org --- .../interrupt-controller/sifive,plic-1.0.0.yaml | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml index 0dfa6b26e099..27092c6a86c4 100644 --- a/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml +++ b/Documentation/devicetree/bindings/interrupt-controller/sifive,plic-1.0.0.yaml @@ -35,6 +35,10 @@ description: contains a specific memory layout, which is documented in chapter 8 of the SiFive U5 Coreplex Series Manual . + The thead,c900-plic is different from sifive,plic-1.0.0 in opensbi, the + T-HEAD PLIC implementation requires setting a delegation bit to allow access + from S-mode. So add thead,c900-plic to distinguish them. 
+ maintainers: - Sagar Kadam - Paul Walmsley @@ -42,12 +46,17 @@ maintainers: properties: compatible: - items: - - enum: - - sifive,fu540-c000-plic - - starfive,jh7100-plic - - canaan,k210-plic - - const: sifive,plic-1.0.0 + oneOf: + - items: + - enum: + - sifive,fu540-c000-plic + - starfive,jh7100-plic + - canaan,k210-plic + - const: sifive,plic-1.0.0 + - items: + - enum: + - allwinner,sun20i-d1-plic + - const: thead,c900-plic reg: maxItems: 1 -- cgit From 1d4df649cbb4b26d19bea38ecff4b65b10a1bbca Mon Sep 17 00:00:00 2001 From: Guo Ren Date: Sun, 30 Jan 2022 21:56:34 +0800 Subject: irqchip/sifive-plic: Add missing thead,c900-plic match string The thead,c900-plic has been used in opensbi to distinguish PLIC [1]. Although PLICs have the same behaviors in Linux, they are different hardware with some custom initializing in firmware(opensbi). Qute opensbi patch commit-msg by Samuel: The T-HEAD PLIC implementation requires setting a delegation bit to allow access from S-mode. Now that the T-HEAD PLIC has its own compatible string, set this bit automatically from the PLIC driver, instead of reaching into the PLIC's MMIO space from another driver. [1]: https://github.com/riscv-software-src/opensbi/commit/78c2b19218bd62653b9fb31623a42ced45f38ea6 Signed-off-by: Guo Ren Cc: Anup Patel Cc: Marc Zyngier Cc: Palmer Dabbelt Cc: Samuel Holland Cc: Thomas Gleixner Tested-by: Samuel Holland Signed-off-by: Marc Zyngier Link: https://lore.kernel.org/r/20220130135634.1213301-3-guoren@kernel.org --- drivers/irqchip/irq-sifive-plic.c | 1 + 1 file changed, 1 insertion(+) diff --git a/drivers/irqchip/irq-sifive-plic.c b/drivers/irqchip/irq-sifive-plic.c index 259065d271ef..09cc98266d30 100644 --- a/drivers/irqchip/irq-sifive-plic.c +++ b/drivers/irqchip/irq-sifive-plic.c @@ -398,3 +398,4 @@ out_free_priv: IRQCHIP_DECLARE(sifive_plic, "sifive,plic-1.0.0", plic_init); IRQCHIP_DECLARE(riscv_plic0, "riscv,plic0", plic_init); /* for legacy systems */ +IRQCHIP_DECLARE(thead_c900_plic, "thead,c900-plic", plic_init); /* for firmware driver */ -- cgit From 2cba05451a6d0c703bb74f1a250691404f27c4f1 Mon Sep 17 00:00:00 2001 From: Geert Uytterhoeven Date: Mon, 31 Jan 2022 11:35:53 +0100 Subject: gpio: aggregator: Fix calling into sleeping GPIO controllers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit If the parent GPIO controller is a sleeping controller (e.g. a GPIO controller connected to I2C), getting or setting a GPIO triggers a might_sleep() warning. This happens because the GPIO Aggregator takes the can_sleep flag into account only for its internal locking, not for calling into the parent GPIO controller. Fix this by using the gpiod_[gs]et*_cansleep() APIs when calling into a sleeping GPIO controller. Reported-by: Mikko Salomäki Fixes: 828546e24280f721 ("gpio: Add GPIO Aggregator") Signed-off-by: Geert Uytterhoeven Reviewed-by: Andy Shevchenko Signed-off-by: Bartosz Golaszewski --- drivers/gpio/gpio-aggregator.c | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/drivers/gpio/gpio-aggregator.c b/drivers/gpio/gpio-aggregator.c index 869dc952cf45..0cb2664085cf 100644 --- a/drivers/gpio/gpio-aggregator.c +++ b/drivers/gpio/gpio-aggregator.c @@ -278,7 +278,8 @@ static int gpio_fwd_get(struct gpio_chip *chip, unsigned int offset) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - return gpiod_get_value(fwd->descs[offset]); + return chip->can_sleep ? 
gpiod_get_value_cansleep(fwd->descs[offset]) + : gpiod_get_value(fwd->descs[offset]); } static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -293,7 +294,10 @@ static int gpio_fwd_get_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, for_each_set_bit(i, mask, fwd->chip.ngpio) descs[j++] = fwd->descs[i]; - error = gpiod_get_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + error = gpiod_get_array_value_cansleep(j, descs, NULL, values); + else + error = gpiod_get_array_value(j, descs, NULL, values); if (error) return error; @@ -328,7 +332,10 @@ static void gpio_fwd_set(struct gpio_chip *chip, unsigned int offset, int value) { struct gpiochip_fwd *fwd = gpiochip_get_data(chip); - gpiod_set_value(fwd->descs[offset], value); + if (chip->can_sleep) + gpiod_set_value_cansleep(fwd->descs[offset], value); + else + gpiod_set_value(fwd->descs[offset], value); } static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, @@ -343,7 +350,10 @@ static void gpio_fwd_set_multiple(struct gpiochip_fwd *fwd, unsigned long *mask, descs[j++] = fwd->descs[i]; } - gpiod_set_array_value(j, descs, NULL, values); + if (fwd->chip.can_sleep) + gpiod_set_array_value_cansleep(j, descs, NULL, values); + else + gpiod_set_array_value(j, descs, NULL, values); } static void gpio_fwd_set_multiple_locked(struct gpio_chip *chip, -- cgit From a01994f5e5c79d3a35e5e8cf4252c7f2147323c3 Mon Sep 17 00:00:00 2001 From: Peter Zijlstra Date: Thu, 27 Jan 2022 12:32:51 +0100 Subject: x86/perf: Default set FREEZE_ON_SMI for all Kyle reported that rr[0] has started to malfunction on Comet Lake and later CPUs due to EFI starting to make use of CPL3 [1] and the PMU event filtering not distinguishing between regular CPL3 and SMM CPL3. Since this is a privilege violation, default disable SMM visibility where possible. Administrators wanting to observe SMM cycles can easily change this using the sysfs attribute while regular users don't have access to this file. [0] https://rr-project.org/ [1] See the Intel white paper "Trustworthy SMM on the Intel vPro Platform" at https://bugzilla.kernel.org/attachment.cgi?id=300300, particularly the end of page 5. Reported-by: Kyle Huey Suggested-by: Andrew Cooper Signed-off-by: Peter Zijlstra (Intel) Cc: stable@kernel.org Link: https://lkml.kernel.org/r/YfKChjX61OW4CkYm@hirez.programming.kicks-ass.net --- arch/x86/events/intel/core.c | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c index c91434056c29..a3c7ca876aeb 100644 --- a/arch/x86/events/intel/core.c +++ b/arch/x86/events/intel/core.c @@ -4703,6 +4703,19 @@ static __initconst const struct x86_pmu intel_pmu = { .lbr_read = intel_pmu_lbr_read_64, .lbr_save = intel_pmu_lbr_save, .lbr_restore = intel_pmu_lbr_restore, + + /* + * SMM has access to all 4 rings and while traditionally SMM code only + * ran in CPL0, 2021-era firmware is starting to make use of CPL3 in SMM. + * + * Since the EVENTSEL.{USR,OS} CPL filtering makes no distinction + * between SMM or not, this results in what should be pure userspace + * counters including SMM data. + * + * This is a clear privilege issue, therefore globally disable + * counting SMM by default. 
+ */ + .attr_freeze_on_smi = 1, }; static __init void intel_clovertown_quirk(void) -- cgit From 3c25fc97f5590060464cabfa25710970ecddbc96 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 31 Jan 2022 11:34:05 +0100 Subject: perf: Copy perf_event_attr::sig_data on modification The intent has always been that perf_event_attr::sig_data should also be modifiable along with PERF_EVENT_IOC_MODIFY_ATTRIBUTES, because it is observable by user space if SIGTRAP on events is requested. Currently only PERF_TYPE_BREAKPOINT is modifiable, and explicitly copies relevant breakpoint-related attributes in hw_breakpoint_copy_attr(). This misses copying perf_event_attr::sig_data. Since sig_data is not specific to PERF_TYPE_BREAKPOINT, introduce a helper to copy generic event-type-independent attributes on modification. Fixes: 97ba62b27867 ("perf: Add support for SIGTRAP on perf events") Reported-by: Dmitry Vyukov Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Dmitry Vyukov Link: https://lore.kernel.org/r/20220131103407.1971678-1-elver@google.com --- kernel/events/core.c | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/kernel/events/core.c b/kernel/events/core.c index 76c754e45d01..57c7197838db 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -3238,6 +3238,15 @@ static int perf_event_modify_breakpoint(struct perf_event *bp, return err; } +/* + * Copy event-type-independent attributes that may be modified. + */ +static void perf_event_modify_copy_attr(struct perf_event_attr *to, + const struct perf_event_attr *from) +{ + to->sig_data = from->sig_data; +} + static int perf_event_modify_attr(struct perf_event *event, struct perf_event_attr *attr) { @@ -3260,10 +3269,17 @@ static int perf_event_modify_attr(struct perf_event *event, WARN_ON_ONCE(event->ctx->parent_ctx); mutex_lock(&event->child_mutex); + /* + * Event-type-independent attributes must be copied before event-type + * modification, which will validate that final attributes match the + * source attributes after all relevant attributes have been copied. + */ + perf_event_modify_copy_attr(&event->attr, attr); err = func(event, attr); if (err) goto out; list_for_each_entry(child, &event->child_list, child_list) { + perf_event_modify_copy_attr(&child->attr, attr); err = func(child, attr); if (err) goto out; -- cgit From 95d29fa104523b1756323f7003294b1711c27808 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 31 Jan 2022 11:34:06 +0100 Subject: selftests/perf_events: Test modification of perf_event_attr::sig_data Test that PERF_EVENT_IOC_MODIFY_ATTRIBUTES correctly modifies perf_event_attr::sig_data as well. Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Dmitry Vyukov Link: https://lore.kernel.org/r/20220131103407.1971678-2-elver@google.com --- tools/testing/selftests/perf_events/sigtrap_threads.c | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/tools/testing/selftests/perf_events/sigtrap_threads.c b/tools/testing/selftests/perf_events/sigtrap_threads.c index 8e83cf91513a..6d849dc2bee0 100644 --- a/tools/testing/selftests/perf_events/sigtrap_threads.c +++ b/tools/testing/selftests/perf_events/sigtrap_threads.c @@ -44,9 +44,10 @@ static struct { } ctx; /* Unique value to check si_perf_data is correctly set from perf_event_attr::sig_data. 
*/ -#define TEST_SIG_DATA(addr) (~(unsigned long)(addr)) +#define TEST_SIG_DATA(addr, id) (~(unsigned long)(addr) + id) -static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr) +static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr, + unsigned long id) { struct perf_event_attr attr = { .type = PERF_TYPE_BREAKPOINT, @@ -60,7 +61,7 @@ static struct perf_event_attr make_event_attr(bool enabled, volatile void *addr) .inherit_thread = 1, /* ... but only cloned with CLONE_THREAD. */ .remove_on_exec = 1, /* Required by sigtrap. */ .sigtrap = 1, /* Request synchronous SIGTRAP on event. */ - .sig_data = TEST_SIG_DATA(addr), + .sig_data = TEST_SIG_DATA(addr, id), }; return attr; } @@ -110,7 +111,7 @@ FIXTURE(sigtrap_threads) FIXTURE_SETUP(sigtrap_threads) { - struct perf_event_attr attr = make_event_attr(false, &ctx.iterate_on); + struct perf_event_attr attr = make_event_attr(false, &ctx.iterate_on, 0); struct sigaction action = {}; int i; @@ -165,7 +166,7 @@ TEST_F(sigtrap_threads, enable_event) EXPECT_EQ(ctx.tids_want_signal, 0); EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on); EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT); - EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on)); + EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0)); /* Check enabled for parent. */ ctx.iterate_on = 0; @@ -175,7 +176,7 @@ TEST_F(sigtrap_threads, enable_event) /* Test that modification propagates to all inherited events. */ TEST_F(sigtrap_threads, modify_and_enable_event) { - struct perf_event_attr new_attr = make_event_attr(true, &ctx.iterate_on); + struct perf_event_attr new_attr = make_event_attr(true, &ctx.iterate_on, 42); EXPECT_EQ(ioctl(self->fd, PERF_EVENT_IOC_MODIFY_ATTRIBUTES, &new_attr), 0); run_test_threads(_metadata, self); @@ -184,7 +185,7 @@ TEST_F(sigtrap_threads, modify_and_enable_event) EXPECT_EQ(ctx.tids_want_signal, 0); EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on); EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT); - EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on)); + EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 42)); /* Check enabled for parent. */ ctx.iterate_on = 0; @@ -204,7 +205,7 @@ TEST_F(sigtrap_threads, signal_stress) EXPECT_EQ(ctx.tids_want_signal, 0); EXPECT_EQ(ctx.first_siginfo.si_addr, &ctx.iterate_on); EXPECT_EQ(ctx.first_siginfo.si_perf_type, PERF_TYPE_BREAKPOINT); - EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on)); + EXPECT_EQ(ctx.first_siginfo.si_perf_data, TEST_SIG_DATA(&ctx.iterate_on, 0)); } TEST_HARNESS_MAIN -- cgit From ddecd22878601a606d160680fa85802b75d92eb6 Mon Sep 17 00:00:00 2001 From: Marco Elver Date: Mon, 31 Jan 2022 11:34:07 +0100 Subject: perf: uapi: Document perf_event_attr::sig_data truncation on 32 bit architectures Due to the alignment requirements of siginfo_t, as described in 3ddb3fd8cdb0 ("signal, perf: Fix siginfo_t by avoiding u64 on 32-bit architectures"), siginfo_t::si_perf_data is limited to an unsigned long. However, perf_event_attr::sig_data is an u64, to avoid having to deal with compat conversions. Due to being an u64, it may not immediately be clear to users that sig_data is truncated on 32 bit architectures. Add a comment to explicitly point this out, and hopefully help some users save time by not having to deduce themselves what's happening. 
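To make the truncation concrete, a minimal user-space sketch (illustrative only, not from the kernel tree; the 64-bit constant is an arbitrary example):

	#include <signal.h>
	#include <stdio.h>

	/* SIGTRAP handler: siginfo_t::si_perf_data is an unsigned long, so on a
	 * 32-bit architecture only the low 32 bits of attr.sig_data arrive here. */
	static void sigtrap_handler(int sig, siginfo_t *info, void *ucontext)
	{
		unsigned long data = info->si_perf_data;

		/* 64-bit: 0x1122334455667788, 32-bit: 0x55667788 */
		printf("si_perf_data = 0x%lx\n", data);
	}

	/* ... and when opening the event:
	 *	attr.sigtrap  = 1;
	 *	attr.sig_data = 0x1122334455667788ULL;
	 */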
Reported-by: Dmitry Vyukov Signed-off-by: Marco Elver Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Dmitry Vyukov Link: https://lore.kernel.org/r/20220131103407.1971678-3-elver@google.com --- include/uapi/linux/perf_event.h | 2 ++ 1 file changed, 2 insertions(+) diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 1b65042ab1db..82858b697c05 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -465,6 +465,8 @@ struct perf_event_attr { /* * User provided data if sigtrap=1, passed back to user via * siginfo_t::si_perf_data, e.g. to permit user to identify the event. + * Note, siginfo_t::si_perf_data is long-sized, and sig_data will be + * truncated accordingly on 32 bit architectures. */ __u64 sig_data; }; -- cgit From 1d9093457b243061a9bba23543c38726e864a643 Mon Sep 17 00:00:00 2001 From: Tristan Hume Date: Thu, 27 Jan 2022 17:08:06 -0500 Subject: perf/x86/intel/pt: Fix crash with stop filters in single-range mode Add a check for !buf->single before calling pt_buffer_region_size in a place where a missing check can cause a kernel crash. Fixes a bug introduced by commit 670638477aed ("perf/x86/intel/pt: Opportunistically use single range output mode"), which added a support for PT single-range output mode. Since that commit if a PT stop filter range is hit while tracing, the kernel will crash because of a null pointer dereference in pt_handle_status due to calling pt_buffer_region_size without a ToPA configured. The commit which introduced single-range mode guarded almost all uses of the ToPA buffer variables with checks of the buf->single variable, but missed the case where tracing was stopped by the PT hardware, which happens when execution hits a configured stop filter. Tested that hitting a stop filter while PT recording successfully records a trace with this patch but crashes without this patch. Fixes: 670638477aed ("perf/x86/intel/pt: Opportunistically use single range output mode") Signed-off-by: Tristan Hume Signed-off-by: Peter Zijlstra (Intel) Reviewed-by: Adrian Hunter Cc: stable@kernel.org Link: https://lkml.kernel.org/r/20220127220806.73664-1-tristan@thume.ca --- arch/x86/events/intel/pt.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/arch/x86/events/intel/pt.c b/arch/x86/events/intel/pt.c index 7f406c14715f..2d33bba9a144 100644 --- a/arch/x86/events/intel/pt.c +++ b/arch/x86/events/intel/pt.c @@ -897,8 +897,9 @@ static void pt_handle_status(struct pt *pt) * means we are already losing data; need to let the decoder * know. */ - if (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || - buf->output_off == pt_buffer_region_size(buf)) { + if (!buf->single && + (!intel_pt_validate_hw_cap(PT_CAP_topa_multiple_entries) || + buf->output_off == pt_buffer_region_size(buf))) { perf_aux_output_flag(&pt->handle, PERF_AUX_FLAG_TRUNCATED); advance++; -- cgit From 6455317e4d0d8395e8e4a2fd1ec8d6502267dd02 Mon Sep 17 00:00:00 2001 From: Mark Rutland Date: Tue, 1 Feb 2022 13:29:25 +0000 Subject: kvm/riscv: rework guest entry logic In kvm_arch_vcpu_ioctl_run() we enter an RCU extended quiescent state (EQS) by calling guest_enter_irqoff(), and unmask IRQs prior to exiting the EQS by calling guest_exit(). As the IRQ entry code will not wake RCU in this case, we may run the core IRQ code and IRQ handler without RCU watching, leading to various potential problems. 
Additionally, we do not inform lockdep or tracing that interrupts will be enabled during guest execution, which caan lead to misleading traces and warnings that interrupts have been enabled for overly-long periods. This patch fixes these issues by using the new timing and context entry/exit helpers to ensure that interrupts are handled during guest vtime but with RCU watching, with a sequence: guest_timing_enter_irqoff(); guest_state_enter_irqoff(); < run the vcpu > guest_state_exit_irqoff(); < take any pending IRQs > guest_timing_exit_irqoff(); Since instrumentation may make use of RCU, we must also ensure that no instrumented code is run during the EQS. I've split out the critical section into a new kvm_riscv_enter_exit_vcpu() helper which is marked noinstr. Fixes: 99cdc6c18c2d815e ("RISC-V: Add initial skeletal KVM support") Signed-off-by: Mark Rutland Cc: Albert Ou Cc: Anup Patel Cc: Atish Patra Cc: Frederic Weisbecker Cc: Palmer Dabbelt Cc: Paolo Bonzini Cc: Paul E. McKenney Cc: Paul Walmsley Tested-by: Anup Patel Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu.c | 44 +++++++++++++++++++++++++++----------------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index 0c5239e05721..f64f62057378 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -699,6 +699,20 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu) csr_write(CSR_HVIP, csr->hvip); } +/* + * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while + * the vCPU is running. + * + * This must be noinstr as instrumentation may make use of RCU, and this is not + * safe during the EQS. + */ +static void noinstr kvm_riscv_vcpu_enter_exit(struct kvm_vcpu *vcpu) +{ + guest_state_enter_irqoff(); + __kvm_riscv_switch_to(&vcpu->arch); + guest_state_exit_irqoff(); +} + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) { int ret; @@ -790,9 +804,9 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) continue; } - guest_enter_irqoff(); + guest_timing_enter_irqoff(); - __kvm_riscv_switch_to(&vcpu->arch); + kvm_riscv_vcpu_enter_exit(vcpu); vcpu->mode = OUTSIDE_GUEST_MODE; vcpu->stat.exits++; @@ -812,25 +826,21 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) kvm_riscv_vcpu_sync_interrupts(vcpu); /* - * We may have taken a host interrupt in VS/VU-mode (i.e. - * while executing the guest). This interrupt is still - * pending, as we haven't serviced it yet! + * We must ensure that any pending interrupts are taken before + * we exit guest timing so that timer ticks are accounted as + * guest time. Transiently unmask interrupts so that any + * pending interrupts are taken. * - * We're now back in HS-mode with interrupts disabled - * so enabling the interrupts now will have the effect - * of taking the interrupt again, in HS-mode this time. + * There's no barrier which ensures that pending interrupts are + * recognised, so we just hope that the CPU takes any pending + * interrupts between the enable and disable. */ local_irq_enable(); + local_irq_disable(); - /* - * We do local_irq_enable() before calling guest_exit() so - * that if a timer interrupt hits while running the guest - * we account that tick as being spent in the guest. We - * enable preemption after calling guest_exit() so that if - * we get preempted we make sure ticks after that is not - * counted as guest time. 
- */ - guest_exit(); + guest_timing_exit_irqoff(); + + local_irq_enable(); preempt_enable(); -- cgit From de1d7b6a51dab546160d252e47baa54adf104d4a Mon Sep 17 00:00:00 2001 From: Mayuresh Chitale Date: Mon, 31 Jan 2022 16:33:07 +0530 Subject: RISC-V: KVM: make CY, TM, and IR counters accessible in VU mode Those applications that run in VU mode and access the time CSR cause a virtual instruction trap as Guest kernel currently does not initialize the scounteren CSR. To fix this, we should make CY, TM, and IR counters accessibile by default in VU mode (similar to OpenSBI). Fixes: a33c72faf2d73 ("RISC-V: KVM: Implement VCPU create, init and destroy functions") Cc: stable@vger.kernel.org Signed-off-by: Mayuresh Chitale Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index f64f62057378..624166004e36 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -90,6 +90,7 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) { struct kvm_cpu_context *cntx; + struct kvm_vcpu_csr *reset_csr = &vcpu->arch.guest_reset_csr; /* Mark this VCPU never ran */ vcpu->arch.ran_atleast_once = false; @@ -106,6 +107,9 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) cntx->hstatus |= HSTATUS_SPVP; cntx->hstatus |= HSTATUS_SPV; + /* By default, make CY, TM, and IR counters accessible in VU mode */ + reset_csr->scounteren = 0x7; + /* Setup VCPU timer */ kvm_riscv_vcpu_timer_init(vcpu); -- cgit From 403271548a840dd4f884088d6333e09f899be5ff Mon Sep 17 00:00:00 2001 From: Anup Patel Date: Mon, 31 Jan 2022 22:12:32 +0530 Subject: RISC-V: KVM: Fix SBI implementation version The SBI implementation version returned by KVM RISC-V should be the Host Linux version code. Fixes: c62a76859723 ("RISC-V: KVM: Add SBI v0.2 base extension") Signed-off-by: Anup Patel Reviewed-by: Atish Patra Signed-off-by: Anup Patel --- arch/riscv/kvm/vcpu_sbi_base.c | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/arch/riscv/kvm/vcpu_sbi_base.c b/arch/riscv/kvm/vcpu_sbi_base.c index 4ecf377f483b..48f431091cdb 100644 --- a/arch/riscv/kvm/vcpu_sbi_base.c +++ b/arch/riscv/kvm/vcpu_sbi_base.c @@ -9,6 +9,7 @@ #include #include #include +#include #include #include #include @@ -32,7 +33,7 @@ static int kvm_sbi_ext_base_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, *out_val = KVM_SBI_IMPID; break; case SBI_EXT_BASE_GET_IMP_VERSION: - *out_val = 0; + *out_val = LINUX_VERSION_CODE; break; case SBI_EXT_BASE_PROBE_EXT: if ((cp->a0 >= SBI_EXT_EXPERIMENTAL_START && -- cgit From 1148836fd3226c20de841084aba24184d4fbbe77 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 2 Feb 2022 14:55:29 +0100 Subject: Revert "fbdev: Garbage collect fbdev scrolling acceleration, part 1 (from TODO list)" This reverts commit b3ec8cdf457e5e63d396fe1346cc788cf7c1b578. Revert the second (of 2) commits which disabled scrolling acceleration in fbcon/fbdev. It introduced a regression for fbdev-supported graphic cards because of the performance penalty by doing screen scrolling by software instead of using the existing graphic card 2D hardware acceleration. Console scrolling acceleration was disabled by dropping code which checked at runtime the driver hardware capabilities for the BINFO_HWACCEL_COPYAREA or FBINFO_HWACCEL_FILLRECT flags and if set, it enabled scrollmode SCROLL_MOVE which uses hardware acceleration to move screen contents. 
After dropping those checks scrollmode was hard-wired to SCROLL_REDRAW instead, which forces all graphic cards to redraw every character at the new screen position when scrolling. This change effectively disabled all hardware-based scrolling acceleration for ALL drivers, because now all kind of 2D hardware acceleration (bitblt, fillrect) in the drivers isn't used any longer. The original commit message mentions that only 3 DRM drivers (nouveau, omapdrm and gma500) used hardware acceleration in the past and thus code for checking and using scrolling acceleration is obsolete. This statement is NOT TRUE, because beside the DRM drivers there are around 35 other fbdev drivers which depend on fbdev/fbcon and still provide hardware acceleration for fbdev/fbcon. The original commit message also states that syzbot found lots of bugs in fbcon and thus it's "often the solution to just delete code and remove features". This is true, and the bugs - which actually affected all users of fbcon, including DRM - were fixed, or code was dropped like e.g. the support for software scrollback in vgacon (commit 973c096f6a85). So to further analyze which bugs were found by syzbot, I've looked through all patches in drivers/video which were tagged with syzbot or syzkaller back to year 2005. The vast majority fixed the reported issues on a higher level, e.g. when screen is to be resized, or when font size is to be changed. The few ones which touched driver code fixed a real driver bug, e.g. by adding a check. But NONE of those patches touched code of either the SCROLL_MOVE or the SCROLL_REDRAW case. That means, there was no real reason why SCROLL_MOVE had to be ripped-out and just SCROLL_REDRAW had to be used instead. The only reason I can imagine so far was that SCROLL_MOVE wasn't used by DRM and as such it was assumed that it could go away. That argument completely missed the fact that SCROLL_MOVE is still heavily used by fbdev (non-DRM) drivers. Some people mention that using memcpy() instead of the hardware acceleration is pretty much the same speed. But that's not true, at least not for older graphic cards and machines where we see speed decreases by factor 10 and more and thus this change leads to console responsiveness way worse than before. That's why the original commit is to be reverted. By reverting we reintroduce hardware-based scrolling acceleration and fix the performance regression for fbdev drivers. There isn't any impact on DRM when reverting those patches. 
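A rough back-of-the-envelope illustration of the cost difference (example numbers, not a benchmark):

	1280x1024 display with an 8x16 font  ->  160 x 64 character cells
	SCROLL_REDRAW: roughly 160 x 63, about 10,000 glyphs re-rendered by the CPU per scrolled line
	SCROLL_MOVE:   a single fb_copyarea() blit handled by the card's 2D engine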
Signed-off-by: Helge Deller Acked-by: Geert Uytterhoeven Acked-by: Sven Schnelle Cc: stable@vger.kernel.org # v5.16+ Signed-off-by: Helge Deller Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220202135531.92183-2-deller@gmx.de --- Documentation/gpu/todo.rst | 13 +- drivers/video/fbdev/core/bitblit.c | 16 + drivers/video/fbdev/core/fbcon.c | 509 ++++++++++++++++++++++++++++++-- drivers/video/fbdev/core/fbcon.h | 59 ++++ drivers/video/fbdev/core/fbcon_ccw.c | 28 +- drivers/video/fbdev/core/fbcon_cw.c | 28 +- drivers/video/fbdev/core/fbcon_rotate.h | 9 + drivers/video/fbdev/core/fbcon_ud.c | 37 ++- drivers/video/fbdev/core/tileblit.c | 16 + drivers/video/fbdev/skeletonfb.c | 12 +- include/linux/fb.h | 2 +- 11 files changed, 678 insertions(+), 51 deletions(-) diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index da138dd39883..29506815d24a 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -303,19 +303,16 @@ Level: Advanced Garbage collect fbdev scrolling acceleration -------------------------------------------- -Scroll acceleration has been disabled in fbcon. Now it works as the old -SCROLL_REDRAW mode. A ton of code was removed in fbcon.c and the hook bmove was -removed from fbcon_ops. -Remaining tasks: +Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode = +SCROLL_REDRAW. There's a ton of code this will allow us to remove: -- a bunch of the hooks in fbcon_ops could be removed or simplified by calling +- lots of code in fbcon.c + +- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called directly instead of the function table (with a switch on p->rotate) - fb_copyarea is unused after this, and can be deleted from all drivers -- after that, fb_copyarea can be deleted from fb_ops in include/linux/fb.h as - well as cfb_copyarea - Note that not all acceleration code can be deleted, since clearing and cursor support is still accelerated, which might be good candidates for further deletion projects. 
diff --git a/drivers/video/fbdev/core/bitblit.c b/drivers/video/fbdev/core/bitblit.c index 01fae2c96965..f98e8f298bc1 100644 --- a/drivers/video/fbdev/core/bitblit.c +++ b/drivers/video/fbdev/core/bitblit.c @@ -43,6 +43,21 @@ static void update_attr(u8 *dst, u8 *src, int attribute, } } +static void bit_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fb_copyarea area; + + area.sx = sx * vc->vc_font.width; + area.sy = sy * vc->vc_font.height; + area.dx = dx * vc->vc_font.width; + area.dy = dy * vc->vc_font.height; + area.height = height * vc->vc_font.height; + area.width = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void bit_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { @@ -378,6 +393,7 @@ static int bit_update_start(struct fb_info *info) void fbcon_set_bitops(struct fbcon_ops *ops) { + ops->bmove = bit_bmove; ops->clear = bit_clear; ops->putcs = bit_putcs; ops->clear_margins = bit_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 99ecd9a6d844..fc34caddf9cf 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -173,6 +173,8 @@ static void fbcon_putcs(struct vc_data *vc, const unsigned short *s, int count, int ypos, int xpos); static void fbcon_clear_margins(struct vc_data *vc, int bottom_only); static void fbcon_cursor(struct vc_data *vc, int mode); +static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, + int height, int width); static int fbcon_switch(struct vc_data *vc); static int fbcon_blank(struct vc_data *vc, int blank, int mode_switch); static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); @@ -180,8 +182,16 @@ static void fbcon_set_palette(struct vc_data *vc, const unsigned char *table); /* * Internal routines */ +static __inline__ void ywrap_up(struct vc_data *vc, int count); +static __inline__ void ywrap_down(struct vc_data *vc, int count); +static __inline__ void ypan_up(struct vc_data *vc, int count); +static __inline__ void ypan_down(struct vc_data *vc, int count); +static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, + int dy, int dx, int height, int width, u_int y_break); static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, int unit); +static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, + int line, int count, int dy); static void fbcon_modechanged(struct fb_info *info); static void fbcon_set_all_vcs(struct fb_info *info); static void fbcon_start(void); @@ -1125,6 +1135,14 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; + /* + * No more hw acceleration for fbcon. + * + * FIXME: Garbage collect all the now dead code after sufficient time + * has passed. + */ + p->scrollmode = SCROLL_REDRAW; + /* * ++guenther: console.c:vc_allocate() relies on initializing * vc_{cols,rows}, but we must not set those if we are only @@ -1211,13 +1229,14 @@ finished: * This system is now divided into two levels because of complications * caused by hardware scrolling. Top level functions: * - * fbcon_clear(), fbcon_putc(), fbcon_clear_margins() + * fbcon_bmove(), fbcon_clear(), fbcon_putc(), fbcon_clear_margins() * * handles y values in range [0, scr_height-1] that correspond to real * screen positions. y_wrap shift means that first line of bitmap may be * anywhere on this display. 
These functions convert lineoffsets to * bitmap offsets and deal with the wrap-around case by splitting blits. * + * fbcon_bmove_physical_8() -- These functions fast implementations * fbcon_clear_physical_8() -- of original fbcon_XXX fns. * fbcon_putc_physical_8() -- (font width != 8) may be added later * @@ -1390,6 +1409,224 @@ static void fbcon_set_disp(struct fb_info *info, struct fb_var_screeninfo *var, } } +static __inline__ void ywrap_up(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll += count; + if (p->yscroll >= p->vrows) /* Deal with wrap */ + p->yscroll -= p->vrows; + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode |= FB_VMODE_YWRAP; + ops->update_start(info); + scrollback_max += count; + if (scrollback_max > scrollback_phys_max) + scrollback_max = scrollback_phys_max; + scrollback_current = 0; +} + +static __inline__ void ywrap_down(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll -= count; + if (p->yscroll < 0) /* Deal with wrap */ + p->yscroll += p->vrows; + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode |= FB_VMODE_YWRAP; + ops->update_start(info); + scrollback_max -= count; + if (scrollback_max < 0) + scrollback_max = 0; + scrollback_current = 0; +} + +static __inline__ void ypan_up(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + struct fbcon_ops *ops = info->fbcon_par; + + p->yscroll += count; + if (p->yscroll > p->vrows - vc->vc_rows) { + ops->bmove(vc, info, p->vrows - vc->vc_rows, + 0, 0, 0, vc->vc_rows, vc->vc_cols); + p->yscroll -= p->vrows - vc->vc_rows; + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max += count; + if (scrollback_max > scrollback_phys_max) + scrollback_max = scrollback_phys_max; + scrollback_current = 0; +} + +static __inline__ void ypan_up_redraw(struct vc_data *vc, int t, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll += count; + + if (p->yscroll > p->vrows - vc->vc_rows) { + p->yscroll -= p->vrows - vc->vc_rows; + fbcon_redraw_move(vc, p, t + count, vc->vc_rows - count, t); + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max += count; + if (scrollback_max > scrollback_phys_max) + scrollback_max = scrollback_phys_max; + scrollback_current = 0; +} + +static __inline__ void ypan_down(struct vc_data *vc, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + struct fbcon_ops *ops = info->fbcon_par; + + p->yscroll -= count; + if (p->yscroll < 0) { + ops->bmove(vc, info, 0, 0, p->vrows - vc->vc_rows, + 0, vc->vc_rows, vc->vc_cols); + p->yscroll += p->vrows - vc->vc_rows; + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * 
vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max -= count; + if (scrollback_max < 0) + scrollback_max = 0; + scrollback_current = 0; +} + +static __inline__ void ypan_down_redraw(struct vc_data *vc, int t, int count) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + p->yscroll -= count; + + if (p->yscroll < 0) { + p->yscroll += p->vrows - vc->vc_rows; + fbcon_redraw_move(vc, p, t, vc->vc_rows - count, t + count); + } + + ops->var.xoffset = 0; + ops->var.yoffset = p->yscroll * vc->vc_font.height; + ops->var.vmode &= ~FB_VMODE_YWRAP; + ops->update_start(info); + fbcon_clear_margins(vc, 1); + scrollback_max -= count; + if (scrollback_max < 0) + scrollback_max = 0; + scrollback_current = 0; +} + +static void fbcon_redraw_move(struct vc_data *vc, struct fbcon_display *p, + int line, int count, int dy) +{ + unsigned short *s = (unsigned short *) + (vc->vc_origin + vc->vc_size_row * line); + + while (count--) { + unsigned short *start = s; + unsigned short *le = advance_row(s, 1); + unsigned short c; + int x = 0; + unsigned short attr = 1; + + do { + c = scr_readw(s); + if (attr != (c & 0xff00)) { + attr = c & 0xff00; + if (s > start) { + fbcon_putcs(vc, start, s - start, + dy, x); + x += s - start; + start = s; + } + } + console_conditional_schedule(); + s++; + } while (s < le); + if (s > start) + fbcon_putcs(vc, start, s - start, dy, x); + console_conditional_schedule(); + dy++; + } +} + +static void fbcon_redraw_blit(struct vc_data *vc, struct fb_info *info, + struct fbcon_display *p, int line, int count, int ycount) +{ + int offset = ycount * vc->vc_cols; + unsigned short *d = (unsigned short *) + (vc->vc_origin + vc->vc_size_row * line); + unsigned short *s = d + offset; + struct fbcon_ops *ops = info->fbcon_par; + + while (count--) { + unsigned short *start = s; + unsigned short *le = advance_row(s, 1); + unsigned short c; + int x = 0; + + do { + c = scr_readw(s); + + if (c == scr_readw(d)) { + if (s > start) { + ops->bmove(vc, info, line + ycount, x, + line, x, 1, s-start); + x += s - start + 1; + start = s + 1; + } else { + x++; + start++; + } + } + + scr_writew(c, d); + console_conditional_schedule(); + s++; + d++; + } while (s < le); + if (s > start) + ops->bmove(vc, info, line + ycount, x, line, x, 1, + s-start); + console_conditional_schedule(); + if (ycount > 0) + line++; + else { + line--; + /* NOTE: We subtract two lines from these pointers */ + s -= vc->vc_size_row; + d -= vc->vc_size_row; + } + } +} + static void fbcon_redraw(struct vc_data *vc, struct fbcon_display *p, int line, int count, int offset) { @@ -1450,6 +1687,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, { struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; struct fbcon_display *p = &fb_display[vc->vc_num]; + int scroll_partial = info->flags & FBINFO_PARTIAL_PAN_OK; if (fbcon_is_inactive(vc, info)) return true; @@ -1466,32 +1704,249 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, case SM_UP: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - fbcon_redraw(vc, p, t, b - t - count, - count * vc->vc_cols); - fbcon_clear(vc, b - count, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - (b - count)), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; + if 
(logo_shown >= 0) + goto redraw_up; + switch (p->scrollmode) { + case SCROLL_MOVE: + fbcon_redraw_blit(vc, info, p, t, b - t - count, + count); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + (b - count)), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + + case SCROLL_WRAP_MOVE: + if (b - t - count > 3 * vc->vc_rows >> 2) { + if (t > 0) + fbcon_bmove(vc, 0, 0, count, 0, t, + vc->vc_cols); + ywrap_up(vc, count); + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b - count, 0, b, 0, + vc->vc_rows - b, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t + count, 0, t, 0, + b - t - count, vc->vc_cols); + else + goto redraw_up; + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_REDRAW: + if ((p->yscroll + count <= + 2 * (p->vrows - vc->vc_rows)) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (t > 0) + fbcon_redraw_move(vc, p, 0, t, count); + ypan_up_redraw(vc, t, count); + if (vc->vc_rows - b > 0) + fbcon_redraw_move(vc, p, b, + vc->vc_rows - b, b); + } else + fbcon_redraw_move(vc, p, t + count, b - t - count, t); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_MOVE: + if ((p->yscroll + count <= + 2 * (p->vrows - vc->vc_rows)) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (t > 0) + fbcon_bmove(vc, 0, 0, count, 0, t, + vc->vc_cols); + ypan_up(vc, count); + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b - count, 0, b, 0, + vc->vc_rows - b, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t + count, 0, t, 0, + b - t - count, vc->vc_cols); + else + goto redraw_up; + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + break; + + case SCROLL_REDRAW: + redraw_up: + fbcon_redraw(vc, p, t, b - t - count, + count * vc->vc_cols); + fbcon_clear(vc, b - count, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + (b - count)), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + } + break; case SM_DOWN: if (count > vc->vc_rows) /* Maximum realistic size */ count = vc->vc_rows; - fbcon_redraw(vc, p, b - 1, b - t - count, - -count * vc->vc_cols); - fbcon_clear(vc, t, 0, count, vc->vc_cols); - scr_memsetw((unsigned short *) (vc->vc_origin + - vc->vc_size_row * - t), - vc->vc_video_erase_char, - vc->vc_size_row * count); - return true; + if (logo_shown >= 0) + goto redraw_down; + switch (p->scrollmode) { + case SCROLL_MOVE: + fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, + -count); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + t), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + + case SCROLL_WRAP_MOVE: + if (b - t - count > 3 * vc->vc_rows >> 2) { + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b, 0, b - count, 0, + vc->vc_rows - b, + vc->vc_cols); + ywrap_down(vc, count); + if (t > 0) + fbcon_bmove(vc, count, 0, 0, 0, t, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t, 0, t + count, 0, + b - t - count, vc->vc_cols); + else + goto redraw_down; + fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_MOVE: + if ((count - p->yscroll <= p->vrows - vc->vc_rows) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && 
(b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (vc->vc_rows - b > 0) + fbcon_bmove(vc, b, 0, b - count, 0, + vc->vc_rows - b, + vc->vc_cols); + ypan_down(vc, count); + if (t > 0) + fbcon_bmove(vc, count, 0, 0, 0, t, + vc->vc_cols); + } else if (info->flags & FBINFO_READS_FAST) + fbcon_bmove(vc, t, 0, t + count, 0, + b - t - count, vc->vc_cols); + else + goto redraw_down; + fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_PAN_REDRAW: + if ((count - p->yscroll <= p->vrows - vc->vc_rows) + && ((!scroll_partial && (b - t == vc->vc_rows)) + || (scroll_partial + && (b - t - count > + 3 * vc->vc_rows >> 2)))) { + if (vc->vc_rows - b > 0) + fbcon_redraw_move(vc, p, b, vc->vc_rows - b, + b - count); + ypan_down_redraw(vc, t, count); + if (t > 0) + fbcon_redraw_move(vc, p, count, t, 0); + } else + fbcon_redraw_move(vc, p, t, b - t - count, t + count); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + break; + + case SCROLL_REDRAW: + redraw_down: + fbcon_redraw(vc, p, b - 1, b - t - count, + -count * vc->vc_cols); + fbcon_clear(vc, t, 0, count, vc->vc_cols); + scr_memsetw((unsigned short *) (vc->vc_origin + + vc->vc_size_row * + t), + vc->vc_video_erase_char, + vc->vc_size_row * count); + return true; + } } return false; } + +static void fbcon_bmove(struct vc_data *vc, int sy, int sx, int dy, int dx, + int height, int width) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_display *p = &fb_display[vc->vc_num]; + + if (fbcon_is_inactive(vc, info)) + return; + + if (!width || !height) + return; + + /* Split blits that cross physical y_wrap case. + * Pathological case involves 4 blits, better to use recursive + * code rather than unrolled case + * + * Recursive invocations don't need to erase the cursor over and + * over again, so we use fbcon_bmove_rec() + */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, height, width, + p->vrows - p->yscroll); +} + +static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, int sx, + int dy, int dx, int height, int width, u_int y_break) +{ + struct fb_info *info = registered_fb[con2fb_map[vc->vc_num]]; + struct fbcon_ops *ops = info->fbcon_par; + u_int b; + + if (sy < y_break && sy + height > y_break) { + b = y_break - sy; + if (dy < sy) { /* Avoid trashing self */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + } else { + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + } + return; + } + + if (dy < y_break && dy + height > y_break) { + b = y_break - dy; + if (dy < sy) { /* Avoid trashing self */ + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + } else { + fbcon_bmove_rec(vc, p, sy + b, sx, dy + b, dx, + height - b, width, y_break); + fbcon_bmove_rec(vc, p, sy, sx, dy, dx, b, width, + y_break); + } + return; + } + ops->bmove(vc, info, real_y(p, sy), sx, real_y(p, dy), dx, + height, width); +} + static void updatescrollmode(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) @@ -1664,7 +2119,21 @@ static int fbcon_switch(struct vc_data *vc) updatescrollmode(p, info, vc); - scrollback_phys_max = 0; + switch (p->scrollmode) { + case SCROLL_WRAP_MOVE: + scrollback_phys_max = p->vrows - vc->vc_rows; + break; + case SCROLL_PAN_MOVE: + case SCROLL_PAN_REDRAW: + scrollback_phys_max = p->vrows - 2 * vc->vc_rows; 
+ if (scrollback_phys_max < 0) + scrollback_phys_max = 0; + break; + default: + scrollback_phys_max = 0; + break; + } + scrollback_max = 0; scrollback_current = 0; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index a00603b4451a..5246d0f2574b 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -29,6 +29,7 @@ struct fbcon_display { /* Filled in by the low-level console driver */ const u_char *fontdata; int userfont; /* != 0 if fontdata kmalloc()ed */ + u_short scrollmode; /* Scroll Method */ u_short inverse; /* != 0 text black on white as default */ short yscroll; /* Hardware scrolling */ int vrows; /* number of virtual rows */ @@ -51,6 +52,8 @@ struct fbcon_display { }; struct fbcon_ops { + void (*bmove)(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width); void (*clear)(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width); void (*putcs)(struct vc_data *vc, struct fb_info *info, @@ -149,6 +152,62 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, #define attr_bgcol_ec(bgshift, vc, info) attr_col_ec(bgshift, vc, info, 0) #define attr_fgcol_ec(fgshift, vc, info) attr_col_ec(fgshift, vc, info, 1) + /* + * Scroll Method + */ + +/* There are several methods fbcon can use to move text around the screen: + * + * Operation Pan Wrap + *--------------------------------------------- + * SCROLL_MOVE copyarea No No + * SCROLL_PAN_MOVE copyarea Yes No + * SCROLL_WRAP_MOVE copyarea No Yes + * SCROLL_REDRAW imageblit No No + * SCROLL_PAN_REDRAW imageblit Yes No + * SCROLL_WRAP_REDRAW imageblit No Yes + * + * (SCROLL_WRAP_REDRAW is not implemented yet) + * + * In general, fbcon will choose the best scrolling + * method based on the rule below: + * + * Pan/Wrap > accel imageblit > accel copyarea > + * soft imageblit > (soft copyarea) + * + * Exception to the rule: Pan + accel copyarea is + * preferred over Pan + accel imageblit. + * + * The above is typical for PCI/AGP cards. Unless + * overridden, fbcon will never use soft copyarea. + * + * If you need to override the above rule, set the + * appropriate flags in fb_info->flags. For example, + * to prefer copyarea over imageblit, set + * FBINFO_READS_FAST. + * + * Other notes: + * + use the hardware engine to move the text + * (hw-accelerated copyarea() and fillrect()) + * + use hardware-supported panning on a large virtual screen + * + amifb can not only pan, but also wrap the display by N lines + * (i.e. visible line i = physical line (i+N) % yres). 
+ * + read what's already rendered on the screen and + * write it in a different place (this is cfb_copyarea()) + * + re-render the text to the screen + * + * Whether to use wrapping or panning can only be figured out at + * runtime (when we know whether our font height is a multiple + * of the pan/wrap step) + * + */ + +#define SCROLL_MOVE 0x001 +#define SCROLL_PAN_MOVE 0x002 +#define SCROLL_WRAP_MOVE 0x003 +#define SCROLL_REDRAW 0x004 +#define SCROLL_PAN_REDRAW 0x005 + #ifdef CONFIG_FB_TILEBLITTING extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); #endif diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index ffa78936eaab..9cd2c4b05c32 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -59,12 +59,31 @@ static void ccw_update_attr(u8 *dst, u8 *src, int attribute, } } + +static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct fb_copyarea area; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + + area.sx = sy * vc->vc_font.height; + area.sy = vyres - ((sx + width) * vc->vc_font.width); + area.dx = dy * vc->vc_font.height; + area.dy = vyres - ((dx + width) * vc->vc_font.width); + area.width = height * vc->vc_font.height; + area.height = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { + struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = sy * vc->vc_font.height; @@ -121,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -210,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -368,7 +387,7 @@ static int ccw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; u32 yoffset; - u32 vyres = info->var.yres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); int err; yoffset = (vyres - info->var.yres) - ops->var.xoffset; @@ -383,6 +402,7 @@ static int ccw_update_start(struct fb_info *info) void fbcon_rotate_ccw(struct fbcon_ops *ops) { + ops->bmove = ccw_bmove; ops->clear = ccw_clear; ops->putcs = ccw_putcs; ops->clear_margins = ccw_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 92e5b7fb51ee..88d89fad3f05 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -44,12 +44,31 @@ static void cw_update_attr(u8 *dst, u8 *src, int attribute, } } + +static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct fb_copyarea area; + u32 vxres = GETVXRES(ops->p->scrollmode, info); + + area.sx = vxres - ((sy + height) * vc->vc_font.height); + area.sy = sx * 
vc->vc_font.width; + area.dx = vxres - ((dy + height) * vc->vc_font.height); + area.dy = dx * vc->vc_font.width; + area.width = height * vc->vc_font.height; + area.height = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { + struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p->scrollmode, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = vxres - ((sy + height) * vc->vc_font.height); @@ -106,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -193,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -350,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, static int cw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; - u32 vxres = info->var.xres; + u32 vxres = GETVXRES(ops->p->scrollmode, info); u32 xoffset; int err; @@ -366,6 +385,7 @@ static int cw_update_start(struct fb_info *info) void fbcon_rotate_cw(struct fbcon_ops *ops) { + ops->bmove = cw_bmove; ops->clear = cw_clear; ops->putcs = cw_putcs; ops->clear_margins = cw_clear_margins; diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h index b528b2e54283..e233444cda66 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.h +++ b/drivers/video/fbdev/core/fbcon_rotate.h @@ -11,6 +11,15 @@ #ifndef _FBCON_ROTATE_H #define _FBCON_ROTATE_H +#define GETVYRES(s,i) ({ \ + (s == SCROLL_REDRAW || s == SCROLL_MOVE) ? \ + (i)->var.yres : (i)->var.yres_virtual; }) + +#define GETVXRES(s,i) ({ \ + (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? 
\ + (i)->var.xres : (i)->var.xres_virtual; }) + + static inline int pattern_test_bit(u32 x, u32 y, u32 pitch, const char *pat) { u32 tmp = (y * pitch) + x, index = tmp / 8, bit = tmp % 8; diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 09619bd8e021..8d5e66b1bdfb 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -44,13 +44,33 @@ static void ud_update_attr(u8 *dst, u8 *src, int attribute, } } + +static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fbcon_ops *ops = info->fbcon_par; + struct fb_copyarea area; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); + + area.sy = vyres - ((sy + height) * vc->vc_font.height); + area.sx = vxres - ((sx + width) * vc->vc_font.width); + area.dy = vyres - ((dy + height) * vc->vc_font.height); + area.dx = vxres - ((dx + width) * vc->vc_font.width); + area.height = height * vc->vc_font.height; + area.width = width * vc->vc_font.width; + + info->fbops->fb_copyarea(info, &area); +} + static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { + struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dy = vyres - ((sy + height) * vc->vc_font.height); @@ -142,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, u32 mod = vc->vc_font.width % 8, cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -239,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; @@ -390,8 +410,8 @@ static int ud_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; int xoffset, yoffset; - u32 vyres = info->var.yres; - u32 vxres = info->var.xres; + u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p->scrollmode, info); int err; xoffset = vxres - info->var.xres - ops->var.xoffset; @@ -409,6 +429,7 @@ static int ud_update_start(struct fb_info *info) void fbcon_rotate_ud(struct fbcon_ops *ops) { + ops->bmove = ud_bmove; ops->clear = ud_clear; ops->putcs = ud_putcs; ops->clear_margins = ud_clear_margins; diff --git a/drivers/video/fbdev/core/tileblit.c b/drivers/video/fbdev/core/tileblit.c index 72af95053bcb..2768eff247ba 100644 --- a/drivers/video/fbdev/core/tileblit.c +++ b/drivers/video/fbdev/core/tileblit.c @@ -16,6 +16,21 @@ #include #include "fbcon.h" +static void tile_bmove(struct vc_data *vc, struct fb_info *info, int sy, + int sx, int dy, int dx, int height, int width) +{ + struct fb_tilearea area; + + area.sx = sx; + area.sy = sy; + area.dx = dx; + area.dy = dy; + area.height = height; + area.width = width; + + info->tileops->fb_tilecopy(info, &area); +} + 
static void tile_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { @@ -118,6 +133,7 @@ void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info) struct fb_tilemap map; struct fbcon_ops *ops = info->fbcon_par; + ops->bmove = tile_bmove; ops->clear = tile_clear; ops->putcs = tile_putcs; ops->clear_margins = tile_clear_margins; diff --git a/drivers/video/fbdev/skeletonfb.c b/drivers/video/fbdev/skeletonfb.c index 0fe922f726e9..bcacfb6934fa 100644 --- a/drivers/video/fbdev/skeletonfb.c +++ b/drivers/video/fbdev/skeletonfb.c @@ -505,15 +505,15 @@ void xxxfb_fillrect(struct fb_info *p, const struct fb_fillrect *region) } /** - * xxxfb_copyarea - OBSOLETE function. + * xxxfb_copyarea - REQUIRED function. Can use generic routines if + * non acclerated hardware and packed pixel based. * Copies one area of the screen to another area. - * Will be deleted in a future version * * @info: frame buffer structure that represents a single frame buffer * @area: Structure providing the data to copy the framebuffer contents * from one region to another. * - * This drawing operation copied a rectangular area from one area of the + * This drawing operation copies a rectangular area from one area of the * screen to another area. */ void xxxfb_copyarea(struct fb_info *p, const struct fb_copyarea *area) @@ -645,9 +645,9 @@ static const struct fb_ops xxxfb_ops = { .fb_setcolreg = xxxfb_setcolreg, .fb_blank = xxxfb_blank, .fb_pan_display = xxxfb_pan_display, - .fb_fillrect = xxxfb_fillrect, /* Needed !!! */ - .fb_copyarea = xxxfb_copyarea, /* Obsolete */ - .fb_imageblit = xxxfb_imageblit, /* Needed !!! */ + .fb_fillrect = xxxfb_fillrect, /* Needed !!! */ + .fb_copyarea = xxxfb_copyarea, /* Needed !!! */ + .fb_imageblit = xxxfb_imageblit, /* Needed !!! */ .fb_cursor = xxxfb_cursor, /* Optional !!! */ .fb_sync = xxxfb_sync, .fb_ioctl = xxxfb_ioctl, diff --git a/include/linux/fb.h b/include/linux/fb.h index 3da95842b207..02f362c661c8 100644 --- a/include/linux/fb.h +++ b/include/linux/fb.h @@ -262,7 +262,7 @@ struct fb_ops { /* Draws a rectangle */ void (*fb_fillrect) (struct fb_info *info, const struct fb_fillrect *rect); - /* Copy data from area to another. Obsolete. */ + /* Copy data from area to another */ void (*fb_copyarea) (struct fb_info *info, const struct fb_copyarea *region); /* Draws a image to the display */ void (*fb_imageblit) (struct fb_info *info, const struct fb_image *image); -- cgit From 87ab9f6b7417349aa197a6c7098d4fdd4beebb74 Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 2 Feb 2022 14:55:30 +0100 Subject: Revert "fbcon: Disable accelerated scrolling" This reverts commit 39aead8373b3c20bb5965c024dfb51a94e526151. Revert the first (of 2) commits which disabled scrolling acceleration in fbcon/fbdev. It introduced a regression for fbdev-supported graphic cards because of the performance penalty by doing screen scrolling by software instead of using the existing graphic card 2D hardware acceleration. Console scrolling acceleration was disabled by dropping code which checked at runtime the driver hardware capabilities for the BINFO_HWACCEL_COPYAREA or FBINFO_HWACCEL_FILLRECT flags and if set, it enabled scrollmode SCROLL_MOVE which uses hardware acceleration to move screen contents. After dropping those checks scrollmode was hard-wired to SCROLL_REDRAW instead, which forces all graphic cards to redraw every character at the new screen position when scrolling. 
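The scroll-mode heuristic this revert brings back in updatescrollmode() can be condensed to roughly the following (a sketch of the hunk below; good_wrap, good_pan, reading_fast, fast_copyarea and fast_imageblit are the capability helpers computed there from info->flags and the pan/wrap step sizes):

	if (good_wrap || good_pan) {
		if (reading_fast || fast_copyarea)
			p->scrollmode = good_wrap ?
				SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE;
		else
			p->scrollmode = good_wrap ? SCROLL_REDRAW :
				SCROLL_PAN_REDRAW;
	} else {
		if (reading_fast || (fast_copyarea && !fast_imageblit))
			p->scrollmode = SCROLL_MOVE;
		else
			p->scrollmode = SCROLL_REDRAW;
	}
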
This change effectively disabled all hardware-based scrolling acceleration for ALL drivers, because now all kind of 2D hardware acceleration (bitblt, fillrect) in the drivers isn't used any longer. The original commit message mentions that only 3 DRM drivers (nouveau, omapdrm and gma500) used hardware acceleration in the past and thus code for checking and using scrolling acceleration is obsolete. This statement is NOT TRUE, because beside the DRM drivers there are around 35 other fbdev drivers which depend on fbdev/fbcon and still provide hardware acceleration for fbdev/fbcon. The original commit message also states that syzbot found lots of bugs in fbcon and thus it's "often the solution to just delete code and remove features". This is true, and the bugs - which actually affected all users of fbcon, including DRM - were fixed, or code was dropped like e.g. the support for software scrollback in vgacon (commit 973c096f6a85). So to further analyze which bugs were found by syzbot, I've looked through all patches in drivers/video which were tagged with syzbot or syzkaller back to year 2005. The vast majority fixed the reported issues on a higher level, e.g. when screen is to be resized, or when font size is to be changed. The few ones which touched driver code fixed a real driver bug, e.g. by adding a check. But NONE of those patches touched code of either the SCROLL_MOVE or the SCROLL_REDRAW case. That means, there was no real reason why SCROLL_MOVE had to be ripped-out and just SCROLL_REDRAW had to be used instead. The only reason I can imagine so far was that SCROLL_MOVE wasn't used by DRM and as such it was assumed that it could go away. That argument completely missed the fact that SCROLL_MOVE is still heavily used by fbdev (non-DRM) drivers. Some people mention that using memcpy() instead of the hardware acceleration is pretty much the same speed. But that's not true, at least not for older graphic cards and machines where we see speed decreases by factor 10 and more and thus this change leads to console responsiveness way worse than before. That's why the original commit is to be reverted. By reverting we reintroduce hardware-based scrolling acceleration and fix the performance regression for fbdev drivers. There isn't any impact on DRM when reverting those patches. Signed-off-by: Helge Deller Acked-by: Geert Uytterhoeven Acked-by: Sven Schnelle Cc: stable@vger.kernel.org # v5.10+ Signed-off-by: Helge Deller Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220202135531.92183-3-deller@gmx.de --- Documentation/gpu/todo.rst | 21 ------------------- drivers/video/fbdev/core/fbcon.c | 45 +++++++++++++++++++++++++++++++++------- 2 files changed, 37 insertions(+), 29 deletions(-) diff --git a/Documentation/gpu/todo.rst b/Documentation/gpu/todo.rst index 29506815d24a..a1212b5b3026 100644 --- a/Documentation/gpu/todo.rst +++ b/Documentation/gpu/todo.rst @@ -300,27 +300,6 @@ Contact: Daniel Vetter, Noralf Tronnes Level: Advanced -Garbage collect fbdev scrolling acceleration --------------------------------------------- - -Scroll acceleration is disabled in fbcon by hard-wiring p->scrollmode = -SCROLL_REDRAW. 
There's a ton of code this will allow us to remove: - -- lots of code in fbcon.c - -- a bunch of the hooks in fbcon_ops, maybe the remaining hooks could be called - directly instead of the function table (with a switch on p->rotate) - -- fb_copyarea is unused after this, and can be deleted from all drivers - -Note that not all acceleration code can be deleted, since clearing and cursor -support is still accelerated, which might be good candidates for further -deletion projects. - -Contact: Daniel Vetter - -Level: Intermediate - idr_init_base() --------------- diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index fc34caddf9cf..0cc2a36b674a 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1025,7 +1025,7 @@ static void fbcon_init(struct vc_data *vc, int init) struct vc_data *svc = *default_mode; struct fbcon_display *t, *p = &fb_display[vc->vc_num]; int logo = 1, new_rows, new_cols, rows, cols; - int ret; + int cap, ret; if (WARN_ON(info_idx == -1)) return; @@ -1034,6 +1034,7 @@ static void fbcon_init(struct vc_data *vc, int init) con2fb_map[vc->vc_num] = info_idx; info = registered_fb[con2fb_map[vc->vc_num]]; + cap = info->flags; if (logo_shown < 0 && console_loglevel <= CONSOLE_LOGLEVEL_QUIET) logo_shown = FBCON_LOGO_DONTSHOW; @@ -1135,13 +1136,11 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; - /* - * No more hw acceleration for fbcon. - * - * FIXME: Garbage collect all the now dead code after sufficient time - * has passed. - */ - p->scrollmode = SCROLL_REDRAW; + if ((cap & FBINFO_HWACCEL_COPYAREA) && + !(cap & FBINFO_HWACCEL_DISABLED)) + p->scrollmode = SCROLL_MOVE; + else /* default to something safe */ + p->scrollmode = SCROLL_REDRAW; /* * ++guenther: console.c:vc_allocate() relies on initializing @@ -1953,15 +1952,45 @@ static void updatescrollmode(struct fbcon_display *p, { struct fbcon_ops *ops = info->fbcon_par; int fh = vc->vc_font.height; + int cap = info->flags; + u16 t = 0; + int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep, + info->fix.xpanstep); + int ywrap = FBCON_SWAP(ops->rotate, info->fix.ywrapstep, t); int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, info->var.xres_virtual); + int good_pan = (cap & FBINFO_HWACCEL_YPAN) && + divides(ypan, vc->vc_font.height) && vyres > yres; + int good_wrap = (cap & FBINFO_HWACCEL_YWRAP) && + divides(ywrap, vc->vc_font.height) && + divides(vc->vc_font.height, vyres) && + divides(vc->vc_font.height, yres); + int reading_fast = cap & FBINFO_READS_FAST; + int fast_copyarea = (cap & FBINFO_HWACCEL_COPYAREA) && + !(cap & FBINFO_HWACCEL_DISABLED); + int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) && + !(cap & FBINFO_HWACCEL_DISABLED); p->vrows = vyres/fh; if (yres > (fh * (vc->vc_rows + 1))) p->vrows -= (yres - (fh * vc->vc_rows)) / fh; if ((yres % fh) && (vyres % fh < yres % fh)) p->vrows--; + + if (good_wrap || good_pan) { + if (reading_fast || fast_copyarea) + p->scrollmode = good_wrap ? + SCROLL_WRAP_MOVE : SCROLL_PAN_MOVE; + else + p->scrollmode = good_wrap ? 
SCROLL_REDRAW : + SCROLL_PAN_REDRAW; + } else { + if (reading_fast || (fast_copyarea && !fast_imageblit)) + p->scrollmode = SCROLL_MOVE; + else + p->scrollmode = SCROLL_REDRAW; + } } #define PITCH(w) (((w) + 7) >> 3) -- cgit From a3f781a9d6114c1d1e01defb7aa234dec45d2a5f Mon Sep 17 00:00:00 2001 From: Helge Deller Date: Wed, 2 Feb 2022 14:55:31 +0100 Subject: fbcon: Add option to enable legacy hardware acceleration Add a config option CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION to enable bitblt and fillrect hardware acceleration in the framebuffer console. If disabled, such acceleration will not be used, even if it is supported by the graphics hardware driver. If you plan to use DRM as your main graphics output system, you should disable this option since it will prevent compiling in code which isn't used later on when DRM takes over. For all other configurations, e.g. if none of your graphic cards support DRM (yet), DRM isn't available for your architecture, or you can't be sure that the graphic card in the target system will support DRM, you most likely want to enable this option. In the non-accelerated case (e.g. when DRM is used), the inlined fb_scrollmode() function is hardcoded to return SCROLL_REDRAW and as such the compiler is able to optimize much unneccesary code away. In this v3 patch version I additionally changed the GETVYRES() and GETVXRES() macros to take a pointer to the fbcon_display struct. This fixes the build when console rotation is enabled and helps the compiler again to optimize out code. Signed-off-by: Helge Deller Cc: stable@vger.kernel.org # v5.10+ Signed-off-by: Helge Deller Signed-off-by: Daniel Vetter Link: https://patchwork.freedesktop.org/patch/msgid/20220202135531.92183-4-deller@gmx.de --- drivers/video/console/Kconfig | 20 +++++++++++++++++ drivers/video/fbdev/core/fbcon.c | 39 +++++++++++++++++++++++---------- drivers/video/fbdev/core/fbcon.h | 15 ++++++++++++- drivers/video/fbdev/core/fbcon_ccw.c | 10 ++++----- drivers/video/fbdev/core/fbcon_cw.c | 10 ++++----- drivers/video/fbdev/core/fbcon_rotate.h | 4 ++-- drivers/video/fbdev/core/fbcon_ud.c | 20 ++++++++--------- 7 files changed, 84 insertions(+), 34 deletions(-) diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig index 840d9813b0bc..fcc46380e7c9 100644 --- a/drivers/video/console/Kconfig +++ b/drivers/video/console/Kconfig @@ -78,6 +78,26 @@ config FRAMEBUFFER_CONSOLE help Low-level framebuffer-based console driver. +config FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + bool "Enable legacy fbcon hardware acceleration code" + depends on FRAMEBUFFER_CONSOLE + default y if PARISC + default n + help + This option enables the fbcon (framebuffer text-based) hardware + acceleration for graphics drivers which were written for the fbdev + graphics interface. + + On modern machines, on mainstream machines (like x86-64) or when + using a modern Linux distribution those fbdev drivers usually aren't used. + So enabling this option wouldn't have any effect, which is why you want + to disable this option on such newer machines. + + If you compile this kernel for older machines which still require the + fbdev drivers, you may want to say Y. + + If unsure, select n. 
+ config FRAMEBUFFER_CONSOLE_DETECT_PRIMARY bool "Map the console to the primary display device" depends on FRAMEBUFFER_CONSOLE diff --git a/drivers/video/fbdev/core/fbcon.c b/drivers/video/fbdev/core/fbcon.c index 0cc2a36b674a..f36829eeb5a9 100644 --- a/drivers/video/fbdev/core/fbcon.c +++ b/drivers/video/fbdev/core/fbcon.c @@ -1136,11 +1136,13 @@ static void fbcon_init(struct vc_data *vc, int init) ops->graphics = 0; +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION if ((cap & FBINFO_HWACCEL_COPYAREA) && !(cap & FBINFO_HWACCEL_DISABLED)) p->scrollmode = SCROLL_MOVE; else /* default to something safe */ p->scrollmode = SCROLL_REDRAW; +#endif /* * ++guenther: console.c:vc_allocate() relies on initializing @@ -1705,7 +1707,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, count = vc->vc_rows; if (logo_shown >= 0) goto redraw_up; - switch (p->scrollmode) { + switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, t, b - t - count, count); @@ -1795,7 +1797,7 @@ static bool fbcon_scroll(struct vc_data *vc, unsigned int t, unsigned int b, count = vc->vc_rows; if (logo_shown >= 0) goto redraw_down; - switch (p->scrollmode) { + switch (fb_scrollmode(p)) { case SCROLL_MOVE: fbcon_redraw_blit(vc, info, p, b - 1, b - t - count, -count); @@ -1946,12 +1948,12 @@ static void fbcon_bmove_rec(struct vc_data *vc, struct fbcon_display *p, int sy, height, width); } -static void updatescrollmode(struct fbcon_display *p, +static void updatescrollmode_accel(struct fbcon_display *p, struct fb_info *info, struct vc_data *vc) { +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION struct fbcon_ops *ops = info->fbcon_par; - int fh = vc->vc_font.height; int cap = info->flags; u16 t = 0; int ypan = FBCON_SWAP(ops->rotate, info->fix.ypanstep, @@ -1972,12 +1974,6 @@ static void updatescrollmode(struct fbcon_display *p, int fast_imageblit = (cap & FBINFO_HWACCEL_IMAGEBLIT) && !(cap & FBINFO_HWACCEL_DISABLED); - p->vrows = vyres/fh; - if (yres > (fh * (vc->vc_rows + 1))) - p->vrows -= (yres - (fh * vc->vc_rows)) / fh; - if ((yres % fh) && (vyres % fh < yres % fh)) - p->vrows--; - if (good_wrap || good_pan) { if (reading_fast || fast_copyarea) p->scrollmode = good_wrap ? 
@@ -1991,6 +1987,27 @@ static void updatescrollmode(struct fbcon_display *p, else p->scrollmode = SCROLL_REDRAW; } +#endif +} + +static void updatescrollmode(struct fbcon_display *p, + struct fb_info *info, + struct vc_data *vc) +{ + struct fbcon_ops *ops = info->fbcon_par; + int fh = vc->vc_font.height; + int yres = FBCON_SWAP(ops->rotate, info->var.yres, info->var.xres); + int vyres = FBCON_SWAP(ops->rotate, info->var.yres_virtual, + info->var.xres_virtual); + + p->vrows = vyres/fh; + if (yres > (fh * (vc->vc_rows + 1))) + p->vrows -= (yres - (fh * vc->vc_rows)) / fh; + if ((yres % fh) && (vyres % fh < yres % fh)) + p->vrows--; + + /* update scrollmode in case hardware acceleration is used */ + updatescrollmode_accel(p, info, vc); } #define PITCH(w) (((w) + 7) >> 3) @@ -2148,7 +2165,7 @@ static int fbcon_switch(struct vc_data *vc) updatescrollmode(p, info, vc); - switch (p->scrollmode) { + switch (fb_scrollmode(p)) { case SCROLL_WRAP_MOVE: scrollback_phys_max = p->vrows - vc->vc_rows; break; diff --git a/drivers/video/fbdev/core/fbcon.h b/drivers/video/fbdev/core/fbcon.h index 5246d0f2574b..969d41ecede5 100644 --- a/drivers/video/fbdev/core/fbcon.h +++ b/drivers/video/fbdev/core/fbcon.h @@ -29,7 +29,9 @@ struct fbcon_display { /* Filled in by the low-level console driver */ const u_char *fontdata; int userfont; /* != 0 if fontdata kmalloc()ed */ - u_short scrollmode; /* Scroll Method */ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + u_short scrollmode; /* Scroll Method, use fb_scrollmode() */ +#endif u_short inverse; /* != 0 text black on white as default */ short yscroll; /* Hardware scrolling */ int vrows; /* number of virtual rows */ @@ -208,6 +210,17 @@ static inline int attr_col_ec(int shift, struct vc_data *vc, #define SCROLL_REDRAW 0x004 #define SCROLL_PAN_REDRAW 0x005 +static inline u_short fb_scrollmode(struct fbcon_display *fb) +{ +#ifdef CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION + return fb->scrollmode; +#else + /* hardcoded to SCROLL_REDRAW if acceleration was disabled. */ + return SCROLL_REDRAW; +#endif +} + + #ifdef CONFIG_FB_TILEBLITTING extern void fbcon_set_tileops(struct vc_data *vc, struct fb_info *info); #endif diff --git a/drivers/video/fbdev/core/fbcon_ccw.c b/drivers/video/fbdev/core/fbcon_ccw.c index 9cd2c4b05c32..2789ace79634 100644 --- a/drivers/video/fbdev/core/fbcon_ccw.c +++ b/drivers/video/fbdev/core/fbcon_ccw.c @@ -65,7 +65,7 @@ static void ccw_bmove(struct vc_data *vc, struct fb_info *info, int sy, { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); area.sx = sy * vc->vc_font.height; area.sy = vyres - ((sx + width) * vc->vc_font.width); @@ -83,7 +83,7 @@ static void ccw_clear(struct vc_data *vc, struct fb_info *info, int sy, struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = sy * vc->vc_font.height; @@ -140,7 +140,7 @@ static void ccw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); if (!ops->fontbuffer) return; @@ -229,7 +229,7 @@ static void ccw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); if (!ops->fontbuffer) return; @@ -387,7 +387,7 @@ static int ccw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; u32 yoffset; - u32 vyres = GETVYRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); int err; yoffset = (vyres - info->var.yres) - ops->var.xoffset; diff --git a/drivers/video/fbdev/core/fbcon_cw.c b/drivers/video/fbdev/core/fbcon_cw.c index 88d89fad3f05..86a254c1b2b7 100644 --- a/drivers/video/fbdev/core/fbcon_cw.c +++ b/drivers/video/fbdev/core/fbcon_cw.c @@ -50,7 +50,7 @@ static void cw_bmove(struct vc_data *vc, struct fb_info *info, int sy, { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); area.sx = vxres - ((sy + height) * vc->vc_font.height); area.sy = sx * vc->vc_font.width; @@ -68,7 +68,7 @@ static void cw_clear(struct vc_data *vc, struct fb_info *info, int sy, struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dx = vxres - ((sy + height) * vc->vc_font.height); @@ -125,7 +125,7 @@ static void cw_putcs(struct vc_data *vc, struct fb_info *info, u32 cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -212,7 +212,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -369,7 +369,7 @@ static void cw_cursor(struct vc_data *vc, struct fb_info *info, int mode, static int cw_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vxres = GETVXRES(ops->p, info); u32 xoffset; int err; diff --git a/drivers/video/fbdev/core/fbcon_rotate.h b/drivers/video/fbdev/core/fbcon_rotate.h index e233444cda66..01cbe303b8a2 100644 --- a/drivers/video/fbdev/core/fbcon_rotate.h +++ b/drivers/video/fbdev/core/fbcon_rotate.h @@ -12,11 +12,11 @@ #define _FBCON_ROTATE_H #define GETVYRES(s,i) ({ \ - (s == SCROLL_REDRAW || s == SCROLL_MOVE) ? \ + (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE) ? \ (i)->var.yres : (i)->var.yres_virtual; }) #define GETVXRES(s,i) ({ \ - (s == SCROLL_REDRAW || s == SCROLL_MOVE || !(i)->fix.xpanstep) ? \ + (fb_scrollmode(s) == SCROLL_REDRAW || fb_scrollmode(s) == SCROLL_MOVE || !(i)->fix.xpanstep) ? 
\ (i)->var.xres : (i)->var.xres_virtual; }) diff --git a/drivers/video/fbdev/core/fbcon_ud.c b/drivers/video/fbdev/core/fbcon_ud.c index 8d5e66b1bdfb..23bc045769d0 100644 --- a/drivers/video/fbdev/core/fbcon_ud.c +++ b/drivers/video/fbdev/core/fbcon_ud.c @@ -50,8 +50,8 @@ static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); area.sy = vyres - ((sy + height) * vc->vc_font.height); area.sx = vxres - ((sx + width) * vc->vc_font.width); @@ -69,8 +69,8 @@ static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dy = vyres - ((sy + height) * vc->vc_font.height); @@ -162,8 +162,8 @@ static void ud_putcs(struct vc_data *vc, struct fb_info *info, u32 mod = vc->vc_font.width % 8, cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -259,8 +259,8 @@ static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, int attribute, use_sw = vc->vc_cursor_type & CUR_SW; int err = 1, dx, dy; char *src; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); if (!ops->fontbuffer) return; @@ -410,8 +410,8 @@ static int ud_update_start(struct fb_info *info) { struct fbcon_ops *ops = info->fbcon_par; int xoffset, yoffset; - u32 vyres = GETVYRES(ops->p->scrollmode, info); - u32 vxres = GETVXRES(ops->p->scrollmode, info); + u32 vyres = GETVYRES(ops->p, info); + u32 vxres = GETVXRES(ops->p, info); int err; xoffset = vxres - info->var.xres - ops->var.xoffset; -- cgit From 3e1f941dd9f33776b3df4e30f741fe445ff773f3 Mon Sep 17 00:00:00 2001 From: Ilya Dryomov Date: Tue, 1 Feb 2022 11:04:20 +0100 Subject: block: fix DIO handling regressions in blkdev_read_iter() Commit ceaa762527f4 ("block: move direct_IO into our own read_iter handler") introduced several regressions for bdev DIO: 1. read spanning EOF always returns 0 instead of the number of bytes read. This is because "count" is assigned early and isn't updated when the iterator is truncated: $ lsblk -o name,size /dev/vdb NAME SIZE vdb 1G $ xfs_io -d -c 'pread -b 4M 1021M 4M' /dev/vdb read 0/4194304 bytes at offset 1070596096 0.000000 bytes, 0 ops; 0.0007 sec (0.000000 bytes/sec and 0.0000 ops/sec) instead of $ xfs_io -d -c 'pread -b 4M 1021M 4M' /dev/vdb read 3145728/4194304 bytes at offset 1070596096 3 MiB, 1 ops; 0.0007 sec (3.865 GiB/sec and 1319.2612 ops/sec) 2. truncated iterator isn't reexpanded 3. iterator isn't reverted on blkdev_direct_IO() error 4. 
zero size read no longer skips atime update Fixes: ceaa762527f4 ("block: move direct_IO into our own read_iter handler") Signed-off-by: Ilya Dryomov Reviewed-by: Christoph Hellwig Link: https://lore.kernel.org/r/20220201100420.25875-1-idryomov@gmail.com Signed-off-by: Jens Axboe --- block/fops.c | 33 +++++++++++++++++++-------------- 1 file changed, 19 insertions(+), 14 deletions(-) diff --git a/block/fops.c b/block/fops.c index 26bf15c770d2..4f59e0f5bf30 100644 --- a/block/fops.c +++ b/block/fops.c @@ -566,34 +566,37 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) { struct block_device *bdev = iocb->ki_filp->private_data; loff_t size = bdev_nr_bytes(bdev); - size_t count = iov_iter_count(to); loff_t pos = iocb->ki_pos; size_t shorted = 0; ssize_t ret = 0; + size_t count; - if (unlikely(pos + count > size)) { + if (unlikely(pos + iov_iter_count(to) > size)) { if (pos >= size) return 0; size -= pos; - if (count > size) { - shorted = count - size; - iov_iter_truncate(to, size); - } + shorted = iov_iter_count(to) - size; + iov_iter_truncate(to, size); } + count = iov_iter_count(to); + if (!count) + goto reexpand; /* skip atime */ + if (iocb->ki_flags & IOCB_DIRECT) { struct address_space *mapping = iocb->ki_filp->f_mapping; if (iocb->ki_flags & IOCB_NOWAIT) { - if (filemap_range_needs_writeback(mapping, iocb->ki_pos, - iocb->ki_pos + count - 1)) - return -EAGAIN; + if (filemap_range_needs_writeback(mapping, pos, + pos + count - 1)) { + ret = -EAGAIN; + goto reexpand; + } } else { - ret = filemap_write_and_wait_range(mapping, - iocb->ki_pos, - iocb->ki_pos + count - 1); + ret = filemap_write_and_wait_range(mapping, pos, + pos + count - 1); if (ret < 0) - return ret; + goto reexpand; } file_accessed(iocb->ki_filp); @@ -603,12 +606,14 @@ static ssize_t blkdev_read_iter(struct kiocb *iocb, struct iov_iter *to) iocb->ki_pos += ret; count -= ret; } + iov_iter_revert(to, count - iov_iter_count(to)); if (ret < 0 || !count) - return ret; + goto reexpand; } ret = filemap_read(iocb, to, ret); +reexpand: if (unlikely(shorted)) iov_iter_reexpand(to, iov_iter_count(to) + shorted); return ret; -- cgit From f0bb41fad02e0310fa7b222c7254a3603ecaca1b Mon Sep 17 00:00:00 2001 From: Jani Nikula Date: Wed, 2 Feb 2022 13:25:08 +0200 Subject: drm/i915/vga: switch to use VGA definitions from video/vga.h MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The video/vga.h has macros for the VGA registers. Switch to use them. v2: Use direct 0x01 instead of the confusing VGA_SEQ_CLOCK_MODE (Ville) Suggested-by: Matt Roper Cc: Ville Syrjälä Signed-off-by: Jani Nikula Reviewed-by: Ville Syrjälä Link: https://patchwork.freedesktop.org/patch/msgid/20220202112509.1886660-1-jani.nikula@intel.com --- drivers/gpu/drm/i915/display/intel_vga.c | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/drivers/gpu/drm/i915/display/intel_vga.c b/drivers/gpu/drm/i915/display/intel_vga.c index fa779f7ea415..b5d058404c14 100644 --- a/drivers/gpu/drm/i915/display/intel_vga.c +++ b/drivers/gpu/drm/i915/display/intel_vga.c @@ -7,6 +7,7 @@ #include #include +#include