Diffstat (limited to 'drivers/gpu/drm/amd/pm')
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c                                  |  14
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h                              |  11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c                            |  33
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h                        |  12
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h            |   2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h |  17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h   |  46
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h   |   4
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h   |  80
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h         |   9
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h                         |   1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h                         |   2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h                         |  31
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c                    |  10
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c              |   1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c                      |   1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c              |  18
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c                       |  11
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c                     |  39
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c                      |  14
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c                   |  98
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c                       | 115
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c                 | 289
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c                 |  41
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c                 |  17
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c                 |  92
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c                 |  32
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c                               |  83
28 files changed, 874 insertions, 249 deletions
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index d1bf073adf54..956b6ce81c84 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -107,6 +107,20 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
return ret;
}
+int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_set_gfx_power_up_by_imu(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ msleep(10);
+
+ return ret;
+}
+
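
The wrapper above serializes the SMU call under adev->pm.mutex and unconditionally sleeps 10 ms afterwards so the IMU has time to finish bringing the GFX block up. As a minimal sketch of the intended error handling, a caller might look like the following; gfx_v11_example_hw_init() is hypothetical and not part of this patch, only the exported amdgpu_dpm_set_gfx_power_up_by_imu() helper is assumed from the hunk above.

#include "amdgpu.h"
#include "amdgpu_dpm.h"

/* Hypothetical caller -- illustrates the intended error handling only. */
static int gfx_v11_example_hw_init(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_dpm_set_gfx_power_up_by_imu(adev);
	if (r == -EOPNOTSUPP)
		return 0;	/* no IMU-driven GFX power up on this ASIC */
	if (r)
		dev_err(adev->dev, "IMU GFX power up failed (%d)\n", r);

	return r;
}
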
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index 3e78b3057277..65624d091ed2 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -45,6 +45,13 @@ enum amdgpu_int_thermal_type {
THERMAL_TYPE_KV,
};
+enum amdgpu_runpm_mode {
+ AMDGPU_RUNPM_NONE,
+ AMDGPU_RUNPM_PX,
+ AMDGPU_RUNPM_BOCO,
+ AMDGPU_RUNPM_BACO,
+};
+
struct amdgpu_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
@@ -355,6 +362,8 @@ struct amdgpu_pm {
struct amdgpu_ctx *stable_pstate_ctx;
struct config_table_setting config_table;
+ /* runtime mode */
+ enum amdgpu_runpm_mode rpm_mode;
};
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
@@ -386,6 +395,8 @@ int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev);
int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
enum pp_mp1_state mp1_state);
+int amdgpu_dpm_set_gfx_power_up_by_imu(struct amdgpu_device *adev);
+
int amdgpu_dpm_baco_exit(struct amdgpu_device *adev);
int amdgpu_dpm_baco_enter(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index a601024ba4de..7510d470b864 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -66,6 +66,7 @@ static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
+static int smu_set_mp1_state(void *handle, enum pp_mp1_state mp1_state);
static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
@@ -134,6 +135,14 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
return ret;
}
+int smu_set_gfx_power_up_by_imu(struct smu_context *smu)
+{
+ if (!smu->ppt_funcs || !smu->ppt_funcs->set_gfx_power_up_by_imu)
+ return -EOPNOTSUPP;
+
+ return smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+}
+
static u32 smu_get_mclk(void *handle, bool low)
{
struct smu_context *smu = handle;
@@ -1352,6 +1361,15 @@ static int smu_hw_init(void *handle)
}
if (smu->is_apu) {
+ if ((smu->ppt_funcs->set_gfx_power_up_by_imu) &&
+ likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
+ ret = smu->ppt_funcs->set_gfx_power_up_by_imu(smu);
+ if (ret) {
+ dev_err(adev->dev, "Failed to enable GFX IMU!\n");
+ return ret;
+ }
+ }
+
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
smu_set_gfx_cgpg(smu, true);
@@ -1400,6 +1418,18 @@ static int smu_disable_dpms(struct smu_context *smu)
((adev->in_runpm || adev->in_s4) && amdgpu_asic_supports_baco(adev)));
/*
+ * For SMU 13.0.0 and 13.0.7, PMFW will handle the DPM features (disablement or others)
+ * properly on suspend/reset/unload. Driver involvement may cause some unexpected issues.
+ */
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 7):
+ return 0;
+ default:
+ break;
+ }
+
+ /*
* For custom pptable uploading, skip the DPM features
* disable process on Navi1x ASICs.
* - As the gfx related features are under control of
@@ -1436,7 +1466,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
- case IP_VERSION(13, 0, 0):
+ case IP_VERSION(13, 0, 7):
return 0;
default:
break;
@@ -2467,7 +2497,6 @@ static int smu_set_power_profile_mode(void *handle,
return smu_bump_power_profile_mode(smu, param, param_size);
}
-
static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
struct smu_context *smu = handle;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index a6a7b6c33683..b81c657c7386 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -432,6 +432,7 @@ struct smu_baco_context
{
uint32_t state;
bool platform_support;
+ bool maco_support;
};
struct smu_freq_info {
@@ -563,6 +564,10 @@ struct smu_context
struct stb_context stb_context;
struct firmware pptable_firmware;
+
+ u32 param_reg;
+ u32 msg_reg;
+ u32 resp_reg;
};
struct i2c_adapter;
@@ -698,6 +703,11 @@ struct pptable_funcs {
int (*dpm_set_jpeg_enable)(struct smu_context *smu, bool enable);
/**
+ * @set_gfx_power_up_by_imu: Enable GFX engine with IMU
+ */
+ int (*set_gfx_power_up_by_imu)(struct smu_context *smu);
+
+ /**
* @read_sensor: Read data from a sensor.
* &sensor: Sensor to read data from.
* &data: Sensor reading.
@@ -1438,6 +1448,8 @@ int smu_get_dpm_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
int smu_set_soft_freq_range(struct smu_context *smu, enum smu_clk_type clk_type,
uint32_t min, uint32_t max);
+int smu_set_gfx_power_up_by_imu(struct smu_context *smu);
+
int smu_set_ac_dc(struct smu_context *smu);
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
index 45f5d29bc705..15b313baa0ee 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
@@ -120,7 +120,7 @@
#define PPSMC_MSG_ReadSerialNumTop32 0x40
#define PPSMC_MSG_ReadSerialNumBottom32 0x41
-/* paramater for MSG_LightSBR
+/* parameter for MSG_LightSBR
* 1 -- Enable light secondary bus reset, only do nbio respond without further handling,
* leave driver to handle the real reset
* 0 -- Disable LightSBR, default behavior, SMU will pass the reset to PSP
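
The comment above only documents the message argument. As a rough sketch (not part of this patch), toggling light secondary bus reset from a ppt backend would come down to a single parameterized message; example_set_light_sbr() is hypothetical and assumes the existing smu_cmn_send_smc_msg_with_param() helper and an SMU_MSG_LightSBR mapping for this ASIC.

/* Sketch only: assumes SMU_MSG_LightSBR is mapped for this ASIC. */
static int example_set_light_sbr(struct smu_context *smu, bool enable)
{
	return smu_cmn_send_smc_msg_with_param(smu,
					       SMU_MSG_LightSBR,
					       enable ? 1 : 0,
					       NULL);
}
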
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
index 0f67c56c2863..7a6075daa7b2 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
@@ -519,7 +519,22 @@ typedef struct {
} EccInfo_t;
typedef struct {
- EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM];
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+
+ uint32_t eccPadding;
+
+ uint64_t mca_ceumc_addr;
+} EccInfo_V2_t;
+
+typedef struct {
+ union {
+ EccInfo_t EccInfo[ALDEBARAN_UMC_CHANNEL_NUM];
+ EccInfo_V2_t EccInfo_V2[ALDEBARAN_UMC_CHANNEL_NUM];
+ };
} EccInfoTable_t;
// These defines are used with the following messages:
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
index c1f76236da26..063f4a737605 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_0.h
@@ -24,12 +24,8 @@
#ifndef SMU13_DRIVER_IF_V13_0_0_H
#define SMU13_DRIVER_IF_V13_0_0_H
-// *** IMPORTANT ***
-// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x23
-
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x24
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -97,7 +93,7 @@
#define FEATURE_MEM_TEMP_READ_BIT 47
#define FEATURE_ATHUB_MMHUB_PG_BIT 48
#define FEATURE_SOC_PCC_BIT 49
-#define FEATURE_SPARE_50_BIT 50
+#define FEATURE_EDC_PWRBRK_BIT 50
#define FEATURE_SPARE_51_BIT 51
#define FEATURE_SPARE_52_BIT 52
#define FEATURE_SPARE_53_BIT 53
@@ -973,8 +969,8 @@ typedef struct {
uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot.
uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold.
uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin
- uint16_t Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot
- uint16_t Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold
+ uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot
+ uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold
//This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for.
uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT];
@@ -1041,7 +1037,8 @@ typedef struct {
uint16_t GfxclkFreqGfxUlv; // in MHz
uint8_t GfxIdlePadding2[2];
- uint32_t GfxoffSpare[16];
+ uint32_t GfxOffEntryHysteresis;
+ uint32_t GfxoffSpare[15];
// GFX GPO
uint32_t GfxGpoSpare[16];
@@ -1192,8 +1189,17 @@ typedef struct {
// SECTION: Advanced Options
uint32_t DebugOverrides;
+ // Section: Total Board Power idle vs active coefficients
+ uint8_t TotalBoardPowerSupport;
+ uint8_t TotalBoardPowerPadding[3];
+
+ int16_t TotalIdleBoardPowerM;
+ int16_t TotalIdleBoardPowerB;
+ int16_t TotalBoardPowerM;
+ int16_t TotalBoardPowerB;
+
// SECTION: Sku Reserved
- uint32_t Spare[64];
+ uint32_t Spare[61];
// Padding for MMHUB - do not modify this
uint32_t MmHubPadding[8];
@@ -1258,7 +1264,8 @@ typedef struct {
// SECTION: Clock Spread Spectrum
// UCLK Spread Spectrum
- uint16_t UclkSpreadPadding;
+ uint8_t UclkTrainingModeSpreadPercent;
+ uint8_t UclkSpreadPadding;
uint16_t UclkSpreadFreq; // kHz
// UCLK Spread Spectrum
@@ -1271,11 +1278,7 @@ typedef struct {
// Section: Memory Config
uint8_t DramWidth; // Width of interface to the channel for each DRAM module. See DRAM_BIT_WIDTH_TYPE_e
- uint8_t PaddingMem1[3];
-
- // Section: Total Board Power
- uint16_t TotalBoardPower; //Only needed for TCP Estimated case, where TCP = TGP+Total Board Power
- uint16_t BoardPowerPadding;
+ uint8_t PaddingMem1[7];
// SECTION: UMC feature flags
uint8_t HsrEnabled;
@@ -1359,8 +1362,14 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
+ uint16_t PCIeBusy;
+ uint16_t dGPU_W_MAX;
+ uint16_t padding;
+
+ uint32_t MetricsCounter;
uint16_t AvgVoltage[SVI_PLANE_COUNT];
+ uint16_t AvgCurrent[SVI_PLANE_COUNT];
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
@@ -1368,8 +1377,11 @@ typedef struct {
uint16_t Vcn1ActivityPercentage ;
uint32_t EnergyAccumulator;
- uint16_t AverageSocketPower ;
+ uint16_t AverageSocketPower;
+ uint16_t AverageTotalBoardPower;
+
uint16_t AvgTemperature[TEMP_COUNT];
+ uint16_t TempPadding;
uint8_t PcieRate ;
uint8_t PcieWidth ;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
index 76f695a1d065..ae2d337158f3 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_4.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define PMFW_DRIVER_IF_VERSION 4
+#define PMFW_DRIVER_IF_VERSION 5
typedef struct {
int32_t value;
@@ -197,6 +197,8 @@ typedef struct {
uint16_t SkinTemp;
uint16_t DeviceState;
+ uint16_t CurTemp; //[centi-Celsius]
+ uint16_t spare2;
} SmuMetrics_t;
typedef struct {
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
index d99b4b47d49d..25c08f963f49 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_7.h
@@ -25,10 +25,10 @@
// *** IMPORTANT ***
// PMFW TEAM: Always increment the interface version on any change to this file
-#define SMU13_DRIVER_IF_VERSION 0x28
+#define SMU13_DRIVER_IF_VERSION 0x2C
//Increment this version if SkuTable_t or BoardTable_t change
-#define PPTABLE_VERSION 0x1D
+#define PPTABLE_VERSION 0x20
#define NUM_GFXCLK_DPM_LEVELS 16
#define NUM_SOCCLK_DPM_LEVELS 8
@@ -112,6 +112,22 @@
#define FEATURE_SPARE_63_BIT 63
#define NUM_FEATURES 64
+#define ALLOWED_FEATURE_CTRL_DEFAULT 0xFFFFFFFFFFFFFFFFULL
+#define ALLOWED_FEATURE_CTRL_SCPM (1 << FEATURE_DPM_GFXCLK_BIT) | \
+ (1 << FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT) | \
+ (1 << FEATURE_DPM_UCLK_BIT) | \
+ (1 << FEATURE_DPM_FCLK_BIT) | \
+ (1 << FEATURE_DPM_SOCCLK_BIT) | \
+ (1 << FEATURE_DPM_MP0CLK_BIT) | \
+ (1 << FEATURE_DPM_LINK_BIT) | \
+ (1 << FEATURE_DPM_DCN_BIT) | \
+ (1 << FEATURE_DS_GFXCLK_BIT) | \
+ (1 << FEATURE_DS_SOCCLK_BIT) | \
+ (1 << FEATURE_DS_FCLK_BIT) | \
+ (1 << FEATURE_DS_LCLK_BIT) | \
+ (1 << FEATURE_DS_DCFCLK_BIT) | \
+ (1 << FEATURE_DS_UCLK_BIT)
+
//For use with feature control messages
typedef enum {
FEATURE_PWR_ALL,
@@ -136,6 +152,7 @@ typedef enum {
#define DEBUG_OVERRIDE_DISABLE_DFLL 0x00000200
#define DEBUG_OVERRIDE_ENABLE_RLC_VF_BRINGUP_MODE 0x00000400
#define DEBUG_OVERRIDE_DFLL_MASTER_MODE 0x00000800
+#define DEBUG_OVERRIDE_ENABLE_PROFILING_MODE 0x00001000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -662,7 +679,7 @@ typedef struct {
#define PP_NUM_OD_VF_CURVE_POINTS PP_NUM_RTAVFS_PWL_ZONES + 1
-#define PP_OD_FEATURE_VF_CURVE_BIT 0
+#define PP_OD_FEATURE_GFX_VF_CURVE_BIT 0
#define PP_OD_FEATURE_VMAX_BIT 1
#define PP_OD_FEATURE_PPT_BIT 2
#define PP_OD_FEATURE_FAN_CURVE_BIT 3
@@ -671,6 +688,8 @@ typedef struct {
#define PP_OD_FEATURE_TDC_BIT 6
#define PP_OD_FEATURE_GFXCLK_BIT 7
#define PP_OD_FEATURE_UCLK_BIT 8
+#define PP_OD_FEATURE_ZERO_FAN_BIT 9
+#define PP_OD_FEATURE_TEMPERATURE_BIT 10
typedef enum {
PP_OD_POWER_FEATURE_ALWAYS_ENABLED,
@@ -689,8 +708,8 @@ typedef struct {
uint8_t RuntimePwrSavingFeaturesCtrl;
//Frequency changes
- uint16_t GfxclkFmin; // MHz
- uint16_t GfxclkFmax; // MHz
+ int16_t GfxclkFmin; // MHz
+ int16_t GfxclkFmax; // MHz
uint16_t UclkFmin; // MHz
uint16_t UclkFmax; // MHz
@@ -701,17 +720,17 @@ typedef struct {
//Fan control
uint8_t FanLinearPwmPoints[NUM_OD_FAN_MAX_POINTS];
uint8_t FanLinearTempPoints[NUM_OD_FAN_MAX_POINTS];
- uint16_t FanMaximumRpm;
uint16_t FanMinimumPwm;
- uint16_t FanAcousticLimitRpm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
uint16_t FanTargetTemperature; // Degree Celcius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
- uint8_t Padding[1];
-
+ uint8_t MaxOpTemp;
+ uint8_t Padding[4];
- uint32_t Spare[13];
+ uint32_t Spare[12];
uint32_t MmHubPadding[8]; // SMU internal use. Adding here instead of external as a workaround
} OverDriveTable_t;
@@ -740,17 +759,17 @@ typedef struct {
uint8_t FanLinearPwmPoints;
uint8_t FanLinearTempPoints;
- uint16_t FanMaximumRpm;
uint16_t FanMinimumPwm;
- uint16_t FanAcousticLimitRpm;
+ uint16_t AcousticTargetRpmThreshold;
+ uint16_t AcousticLimitRpmThreshold;
uint16_t FanTargetTemperature; // Degree Celcius
uint8_t FanZeroRpmEnable;
uint8_t FanZeroRpmStopTemp;
uint8_t FanMode;
- uint8_t Padding[1];
+ uint8_t MaxOpTemp;
+ uint8_t Padding[4];
-
- uint32_t Spare[13];
+ uint32_t Spare[12];
} OverDriveLimits_t;
@@ -996,8 +1015,8 @@ typedef struct {
uint16_t Vmin_Hot_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at hot.
uint16_t Vmin_Cold_Eol[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) End-of-life Vset to be used at cold.
uint16_t Vmin_Aging_Offset[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Worst-case aging margin
- uint16_t Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot
- uint16_t Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold
+ uint16_t Spare_Vmin_Plat_Offset_Hot[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Hot
+ uint16_t Spare_Vmin_Plat_Offset_Cold[PMFW_VOLT_PLANE_COUNT]; //In mV(Q2) Platform offset apply to T0 Cold
//This is a fixed/minimum VMIN aging degradation offset which is applied at T0. This reflects the minimum amount of aging already accounted for.
uint16_t VcBtcFixedVminAgingOffset[PMFW_VOLT_PLANE_COUNT];
@@ -1018,7 +1037,8 @@ typedef struct {
uint16_t SocketPowerLimitAcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
uint16_t SocketPowerLimitDcTau[PPT_THROTTLER_COUNT]; // Time constant of LPF in ms
- uint32_t SpareVmin[12];
+ QuadraticInt_t Vmin_droop;
+ uint32_t SpareVmin[9];
//SECTION: DPM Configuration 1
@@ -1062,11 +1082,15 @@ typedef struct {
uint16_t GfxclkFreqGfxUlv; // in MHz
uint8_t GfxIdlePadding2[2];
-
- uint32_t GfxoffSpare[16];
+ uint32_t GfxOffEntryHysteresis; //For RLC to count after it enters CGCG, and before triggers GFXOFF entry
+ uint32_t GfxoffSpare[15];
// GFX GPO
- uint32_t GfxGpoSpare[16];
+ float DfllBtcMasterScalerM;
+ int32_t DfllBtcMasterScalerB;
+ float DfllBtcSlaveScalerM;
+ int32_t DfllBtcSlaveScalerB;
+ uint32_t GfxGpoSpare[12];
// GFX DCS
@@ -1307,9 +1331,11 @@ typedef struct {
uint32_t PostVoltageSetBacoDelay; // in microseconds. Amount of time FW will wait after power good is established or PSI0 command is issued
uint32_t BacoEntryDelay; // in milliseconds. Amount of time FW will wait to trigger BACO entry after receiving entry notification from OS
+ uint8_t FuseWritePowerMuxPresent;
+ uint8_t FuseWritePadding[3];
// SECTION: Board Reserved
- uint32_t BoardSpare[64];
+ uint32_t BoardSpare[63];
// SECTION: Structure Padding
@@ -1382,8 +1408,14 @@ typedef struct {
uint16_t AverageDclk0Frequency ;
uint16_t AverageVclk1Frequency ;
uint16_t AverageDclk1Frequency ;
+ uint16_t PCIeBusy ;
+ uint16_t dGPU_W_MAX ;
+ uint16_t padding ;
+
+ uint32_t MetricsCounter ;
uint16_t AvgVoltage[SVI_PLANE_COUNT];
+ uint16_t AvgCurrent[SVI_PLANE_COUNT];
uint16_t AverageGfxActivity ;
uint16_t AverageUclkActivity ;
@@ -1415,11 +1447,13 @@ typedef struct {
uint16_t AverageUclkActivity_MAX;
+ uint32_t PublicSerialNumberLower;
+ uint32_t PublicSerialNumberUpper;
} SmuMetrics_t;
typedef struct {
SmuMetrics_t SmuMetrics;
- uint32_t Spare[32];
+ uint32_t Spare[30];
// Padding - ignore
uint32_t MmHubPadding[8]; // SMU internal use
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
index 3f2d0a9e4745..6aaefca9b595 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_7_ppsmc.h
@@ -128,7 +128,12 @@
#define PPSMC_MSG_EnableAudioStutterWA 0x44
#define PPSMC_MSG_PowerUpUmsch 0x45
#define PPSMC_MSG_PowerDownUmsch 0x46
-#define PPSMC_Message_Count 0x4C
-
+#define PPSMC_MSG_SetDcsArch 0x47
+#define PPSMC_MSG_TriggerVFFLR 0x48
+#define PPSMC_MSG_SetNumBadMemoryPagesRetired 0x49
+#define PPSMC_MSG_SetBadMemoryPagesRetiredFlagsPerChannel 0x4A
+#define PPSMC_MSG_SetPriorityDeltaGain 0x4B
+#define PPSMC_MSG_AllowIHHostInterrupt 0x4C
+#define PPSMC_Message_Count 0x4D
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index a1cb8e73e171..19084a4fcb2b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -74,6 +74,7 @@
__SMU_DUMMY_MAP(OverDriveSetPercentage), \
__SMU_DUMMY_MAP(SetMinDeepSleepDcefclk), \
__SMU_DUMMY_MAP(ReenableAcDcInterrupt), \
+ __SMU_DUMMY_MAP(AllowIHHostInterrupt), \
__SMU_DUMMY_MAP(NotifyPowerSource), \
__SMU_DUMMY_MAP(SetUclkFastSwitch), \
__SMU_DUMMY_MAP(SetUclkDownHyst), \
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index acb3be292096..a9215494dcdd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
@@ -316,5 +316,7 @@ int smu_v11_0_handle_passthrough_sbr(struct smu_context *smu, bool enable);
int smu_v11_0_restore_user_od_settings(struct smu_context *smu);
+void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index afa1991e26f9..f442bf085a31 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -28,10 +28,10 @@
#define SMU13_DRIVER_IF_VERSION_INV 0xFFFFFFFF
#define SMU13_DRIVER_IF_VERSION_YELLOW_CARP 0x04
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x04
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_4 0x05
#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x28
-#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x28
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_0 0x30
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_7 0x2C
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
@@ -43,6 +43,7 @@
/* address block */
#define smnMP1_FIRMWARE_FLAGS 0x3010024
+#define smnMP1_V13_0_4_FIRMWARE_FLAGS 0x3010028
#define smnMP0_FW_INTF 0x30101c0
#define smnMP1_PUB_CTRL 0x3010b14
@@ -278,19 +279,7 @@ int smu_v13_0_run_btc(struct smu_context *smu);
int smu_v13_0_deep_sleep_control(struct smu_context *smu,
bool enablement);
-int smu_v13_0_gfx_ulv_control(struct smu_context *smu,
- bool enablement);
-
-bool smu_v13_0_baco_is_support(struct smu_context *smu);
-
-enum smu_baco_state smu_v13_0_baco_get_state(struct smu_context *smu);
-
-int smu_v13_0_baco_set_state(struct smu_context *smu,
- enum smu_baco_state state);
-
-int smu_v13_0_baco_enter(struct smu_context *smu);
-
-int smu_v13_0_baco_exit(struct smu_context *smu);
+int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu);
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
@@ -298,5 +287,15 @@ int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
uint32_t size);
int smu_v13_0_set_default_dpm_tables(struct smu_context *smu);
+
+void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu);
+
+int smu_v13_0_mode1_reset(struct smu_context *smu);
+
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id);
+
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 201563072189..445005571f76 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -573,12 +573,13 @@ static int arcturus_get_clk_table(struct smu_context *smu,
struct pp_clock_levels_with_latency *clocks,
struct smu_11_0_dpm_table *dpm_table)
{
- int i, count;
+ uint32_t i;
- count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
- clocks->num_levels = count;
+ clocks->num_levels = min_t(uint32_t,
+ dpm_table->count,
+ (uint32_t)PP_MAX_CLOCK_LEVELS);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < clocks->num_levels; i++) {
clocks->data[i].clocks_in_khz =
dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
@@ -2509,4 +2510,5 @@ void arcturus_set_ppt_funcs(struct smu_context *smu)
smu->table_map = arcturus_table_map;
smu->pwr_src_map = arcturus_pwr_src_map;
smu->workload_map = arcturus_workload_map;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index f1a4a720d426..ca4d97b7f576 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -591,4 +591,5 @@ void cyan_skillfish_set_ppt_funcs(struct smu_context *smu)
smu->message_map = cyan_skillfish_message_map;
smu->table_map = cyan_skillfish_table_map;
smu->is_apu = true;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 5f22fc3430f4..0bcd4fe0ef17 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -3580,4 +3580,5 @@ void navi10_set_ppt_funcs(struct smu_context *smu)
smu->table_map = navi10_table_map;
smu->pwr_src_map = navi10_pwr_src_map;
smu->workload_map = navi10_workload_map;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index 78f3d9e722bb..6db67f082d91 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -886,6 +886,7 @@ static void sienna_cichlid_stb_init(struct smu_context *smu);
static int sienna_cichlid_init_smc_tables(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
int ret = 0;
ret = sienna_cichlid_tables_init(smu);
@@ -896,7 +897,8 @@ static int sienna_cichlid_init_smc_tables(struct smu_context *smu)
if (ret)
return ret;
- sienna_cichlid_stb_init(smu);
+ if (!amdgpu_sriov_vf(adev))
+ sienna_cichlid_stb_init(smu);
return smu_v11_0_init_smc_tables(smu);
}
@@ -1464,19 +1466,19 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
- switch (adev->asic_type) {
- case CHIP_SIENNA_CICHLID:
- case CHIP_NAVY_FLOUNDER:
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(11, 0, 7):
+ case IP_VERSION(11, 0, 11):
pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
break;
- case CHIP_DIMGREY_CAVEFISH:
+ case IP_VERSION(11, 0, 12):
pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
break;
- case CHIP_BEIGE_GOBY:
+ case IP_VERSION(11, 0, 13):
pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
@@ -4192,7 +4194,7 @@ static int sienna_cichlid_get_default_config_table_settings(struct smu_context *
table->gfx_activity_average_tau = 10;
table->mem_activity_average_tau = 10;
table->socket_power_average_tau = 100;
- if (adev->asic_type != CHIP_SIENNA_CICHLID)
+ if (adev->ip_versions[MP1_HWIP][0] != IP_VERSION(11, 0, 7))
table->apu_socket_power_average_tau = 100;
return 0;
@@ -4281,6 +4283,7 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.dump_pptable = sienna_cichlid_dump_pptable,
.init_microcode = smu_v11_0_init_microcode,
.load_microcode = smu_v11_0_load_microcode,
+ .fini_microcode = smu_v11_0_fini_microcode,
.init_smc_tables = sienna_cichlid_init_smc_tables,
.fini_smc_tables = smu_v11_0_fini_smc_tables,
.init_power = smu_v11_0_init_power,
@@ -4357,4 +4360,5 @@ void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
smu->table_map = sienna_cichlid_table_map;
smu->pwr_src_map = sienna_cichlid_pwr_src_map;
smu->workload_map = sienna_cichlid_workload_map;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 5f8809f6990d..dccbd9f70723 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -1228,6 +1228,8 @@ int smu_v11_0_set_fan_speed_rpm(struct smu_context *smu,
uint32_t crystal_clock_freq = 2500;
uint32_t tach_period;
+ if (speed == 0)
+ return -EINVAL;
/*
* To prevent from possible overheat, some ASICs may have requirement
* for minimum fan speed:
@@ -2197,3 +2199,12 @@ int smu_v11_0_restore_user_od_settings(struct smu_context *smu)
return ret;
}
+
+void smu_v11_0_set_smu_mailbox_registers(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
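
The new per-context param_reg/msg_reg/resp_reg fields let the common message path stop hard-coding the MP1 C2PMSG offsets. Below is a simplified sketch of the send side, assuming the common code reads and writes these fields directly (the corresponding smu_cmn.c changes are not shown in this hunk); example_send_msg() is hypothetical and omits response polling and locking.

/* Simplified sketch of a message send using the cached mailbox offsets. */
static void example_send_msg(struct smu_context *smu, u16 msg_index, u32 param)
{
	struct amdgpu_device *adev = smu->adev;

	WREG32(smu->resp_reg, 0);		/* clear the previous response */
	WREG32(smu->param_reg, param);		/* message argument */
	WREG32(smu->msg_reg, msg_index);	/* kicks off the request */
}
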
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 5551e1426ef5..89504ff8e9ed 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -46,6 +46,18 @@
#undef pr_info
#undef pr_debug
+// Registers related to GFXOFF
+// addressBlock: smuio_smuio_SmuSmuioDec
+// base address: 0x5a000
+#define mmSMUIO_GFX_MISC_CNTL 0x00c5
+#define mmSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+
+//SMUIO_GFX_MISC_CNTL
+#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff__SHIFT 0x0
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1
+#define SMUIO_GFX_MISC_CNTL__SMU_GFX_cold_vs_gfxoff_MASK 0x00000001L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -2045,6 +2057,31 @@ static int vangogh_mode2_reset(struct smu_context *smu)
return vangogh_mode_reset(smu, SMU_RESET_MODE_2);
}
+/**
+ * vangogh_get_gfxoff_status - Get gfxoff status
+ *
+ * @smu: pointer to the SMU context
+ *
+ * Get current gfxoff status
+ *
+ * Return:
+ * * 0 - GFXOFF (default if enabled).
+ * * 1 - Transition out of GFX State.
+ * * 2 - Not in GFXOFF.
+ * * 3 - Transition into GFXOFF.
+ */
+static u32 vangogh_get_gfxoff_status(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ u32 reg, gfxoff_status;
+
+ reg = RREG32_SOC15(SMUIO, 0, mmSMUIO_GFX_MISC_CNTL);
+ gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
+ >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
+
+ return gfxoff_status;
+}
+
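
Given the status encoding documented above (0 = in GFXOFF, 1 = transitioning out, 2 = not in GFXOFF, 3 = transitioning in), a caller that only needs to know whether GFX is currently powered down could use the .get_gfx_off_status hook roughly as follows. This is an illustrative sketch only; example_gfx_is_off() is hypothetical, and only the existing get_gfx_off_status ppt callback (wired up for Van Gogh later in this patch) is assumed.

/* Sketch: report whether the GFX block is currently in GFXOFF. */
static bool example_gfx_is_off(struct smu_context *smu)
{
	u32 status;

	if (!smu->ppt_funcs || !smu->ppt_funcs->get_gfx_off_status)
		return false;

	status = smu->ppt_funcs->get_gfx_off_status(smu);

	return status == 0;	/* 0 == fully in GFXOFF per the table above */
}
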
static int vangogh_get_power_limit(struct smu_context *smu,
uint32_t *current_power_limit,
uint32_t *default_power_limit,
@@ -2199,6 +2236,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.post_init = vangogh_post_smu_init,
.mode2_reset = vangogh_mode2_reset,
.gfx_off_control = smu_v11_0_gfx_off_control,
+ .get_gfx_off_status = vangogh_get_gfxoff_status,
.get_ppt_limit = vangogh_get_ppt_limit,
.get_power_limit = vangogh_get_power_limit,
.set_power_limit = vangogh_set_power_limit,
@@ -2213,4 +2251,5 @@ void vangogh_set_ppt_funcs(struct smu_context *smu)
smu->table_map = vangogh_table_map;
smu->workload_map = vangogh_workload_map;
smu->is_apu = true;
+ smu_v11_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 012e3bd99cc2..85e22210963f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -41,6 +41,15 @@
#undef pr_info
#undef pr_debug
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+
static struct cmn2asic_msg_mapping renoir_message_map[SMU_MSG_MAX_COUNT] = {
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
@@ -1447,6 +1456,8 @@ static const struct pptable_funcs renoir_ppt_funcs = {
void renoir_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &renoir_ppt_funcs;
smu->message_map = renoir_message_map;
smu->clock_map = renoir_clk_map;
@@ -1454,4 +1465,7 @@ void renoir_set_ppt_funcs(struct smu_context *smu)
smu->workload_map = renoir_workload_map;
smu->smc_driver_if_version = SMU12_DRIVER_IF_VERSION;
smu->is_apu = true;
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index fb130409309c..619aee51b123 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -83,6 +83,12 @@
#define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00
/*
+ * SMU supports mca_ceumc_addr in ECCTABLE since version 68.55.0;
+ * use this to check whether mca_ceumc_addr recording is supported.
+ */
+#define SUPPORT_ECCTABLE_V2_SMU_VERSION 0x00443700
+
+/*
* SMU support BAD CHENNEL info MSG since version 68.51.00,
* use this to check ECCTALE feature whether support
*/
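
Both ECCTABLE gates compare against a packed PMFW version. The packing assumed here is major in bits 23:16, minor in bits 15:8, and patch/debug in bits 7:0, so 68.55.0 becomes (68 << 16) | (55 << 8) = 0x00443700 and 0x00442a00 decodes to 68.42.0. A small hypothetical macro makes the thresholds self-documenting (sketch only, not part of this patch; the *_ALT names are illustrative):

/* Hypothetical convenience macro for the assumed PMFW version packing. */
#define EXAMPLE_PMFW_VERSION(major, minor, patch) \
	(((major) << 16) | ((minor) << 8) | (patch))

/* 0x00442a00 == 68.42.0, 0x00443700 == 68.55.0 */
#define SUPPORT_ECCTABLE_SMU_VERSION_ALT	EXAMPLE_PMFW_VERSION(68, 42, 0)
#define SUPPORT_ECCTABLE_V2_SMU_VERSION_ALT	EXAMPLE_PMFW_VERSION(68, 55, 0)
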
@@ -549,12 +555,13 @@ static int aldebaran_get_clk_table(struct smu_context *smu,
struct pp_clock_levels_with_latency *clocks,
struct smu_13_0_dpm_table *dpm_table)
{
- int i, count;
+ uint32_t i;
- count = (dpm_table->count > MAX_NUM_CLOCKS) ? MAX_NUM_CLOCKS : dpm_table->count;
- clocks->num_levels = count;
+ clocks->num_levels = min_t(uint32_t,
+ dpm_table->count,
+ (uint32_t)PP_MAX_CLOCK_LEVELS);
- for (i = 0; i < count; i++) {
+ for (i = 0; i < clocks->num_levels; i++) {
clocks->data[i].clocks_in_khz =
dpm_table->dpm_levels[i].value * 1000;
clocks->data[i].latency_in_us = 0;
@@ -739,7 +746,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
struct smu_13_0_dpm_table *single_dpm_table;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
struct smu_13_0_dpm_context *dpm_context = NULL;
- uint32_t display_levels;
+ int display_levels;
uint32_t freq_values[3] = {0};
uint32_t min_clk, max_clk;
@@ -771,7 +778,7 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
return ret;
}
- display_levels = clocks.num_levels;
+ display_levels = (clocks.num_levels == 1) ? 1 : 2;
min_clk = pstate_table->gfxclk_pstate.curr.min;
max_clk = pstate_table->gfxclk_pstate.curr.max;
@@ -781,30 +788,20 @@ static int aldebaran_print_clk_levels(struct smu_context *smu,
/* fine-grained dpm has only 2 levels */
if (now > min_clk && now < max_clk) {
- display_levels = clocks.num_levels + 1;
+ display_levels++;
freq_values[2] = max_clk;
freq_values[1] = now;
}
- /*
- * For DPM disabled case, there will be only one clock level.
- * And it's safe to assume that is always the current clock.
- */
- if (display_levels == clocks.num_levels) {
- for (i = 0; i < clocks.num_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
- freq_values[i],
- (clocks.num_levels == 1) ?
- "*" :
- (aldebaran_freqs_in_same_level(
- freq_values[i], now) ?
- "*" :
- ""));
- } else {
- for (i = 0; i < display_levels; i++)
- size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
- freq_values[i], i == 1 ? "*" : "");
- }
+ for (i = 0; i < display_levels; i++)
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i,
+ freq_values[i],
+ (display_levels == 1) ?
+ "*" :
+ (aldebaran_freqs_in_same_level(
+ freq_values[i], now) ?
+ "*" :
+ ""));
break;
@@ -1803,7 +1800,8 @@ static ssize_t aldebaran_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
-static int aldebaran_check_ecc_table_support(struct smu_context *smu)
+static int aldebaran_check_ecc_table_support(struct smu_context *smu,
+ int *ecctable_version)
{
uint32_t if_version = 0xff, smu_version = 0xff;
int ret = 0;
@@ -1816,6 +1814,11 @@ static int aldebaran_check_ecc_table_support(struct smu_context *smu)
if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
ret = -EOPNOTSUPP;
+ else if (smu_version >= SUPPORT_ECCTABLE_SMU_VERSION &&
+ smu_version < SUPPORT_ECCTABLE_V2_SMU_VERSION)
+ *ecctable_version = 1;
+ else
+ *ecctable_version = 2;
return ret;
}
@@ -1827,9 +1830,10 @@ static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
EccInfoTable_t *ecc_table = NULL;
struct ecc_info_per_ch *ecc_info_per_channel = NULL;
int i, ret = 0;
+ int table_version = 0;
struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
- ret = aldebaran_check_ecc_table_support(smu);
+ ret = aldebaran_check_ecc_table_support(smu, &table_version);
if (ret)
return ret;
@@ -1845,16 +1849,33 @@ static ssize_t aldebaran_get_ecc_info(struct smu_context *smu,
ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
- for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
- ecc_info_per_channel = &(eccinfo->ecc[i]);
- ecc_info_per_channel->ce_count_lo_chip =
- ecc_table->EccInfo[i].ce_count_lo_chip;
- ecc_info_per_channel->ce_count_hi_chip =
- ecc_table->EccInfo[i].ce_count_hi_chip;
- ecc_info_per_channel->mca_umc_status =
- ecc_table->EccInfo[i].mca_umc_status;
- ecc_info_per_channel->mca_umc_addr =
- ecc_table->EccInfo[i].mca_umc_addr;
+ if (table_version == 1) {
+ for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo[i].mca_umc_addr;
+ }
+ } else if (table_version == 2) {
+ for (i = 0; i < ALDEBARAN_UMC_CHANNEL_NUM; i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo_V2[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo_V2[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo_V2[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo_V2[i].mca_umc_addr;
+ ecc_info_per_channel->mca_ceumc_addr =
+ ecc_table->EccInfo_V2[i].mca_ceumc_addr;
+ }
+ eccinfo->record_ce_addr_supported = 1;
}
return ret;
@@ -2117,4 +2138,5 @@ void aldebaran_set_ppt_funcs(struct smu_context *smu)
smu->clock_map = aldebaran_clk_map;
smu->feature_map = aldebaran_feature_mask_map;
smu->table_map = aldebaran_table_map;
+ smu_v13_0_set_smu_mailbox_registers(smu);
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index ef9b56de143b..24488f4cb78c 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -60,6 +60,15 @@ MODULE_FIRMWARE("amdgpu/aldebaran_smc.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_0.bin");
MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
+
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+
#define SMU13_VOLTAGE_SCALE 4
#define LINK_WIDTH_MAX 6
@@ -75,9 +84,6 @@ MODULE_FIRMWARE("amdgpu/smu_13_0_7.bin");
static const int link_width[] = {0, 1, 2, 4, 8, 12, 16};
static const int link_speed[] = {25, 50, 80, 160};
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id);
-
int smu_v13_0_init_microcode(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
@@ -203,6 +209,9 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
if (!adev->scpm_enabled)
return 0;
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7))
+ return 0;
+
/* override pptable_id from driver parameter */
if (amdgpu_smu_pptable_id >= 0) {
pptable_id = amdgpu_smu_pptable_id;
@@ -210,24 +219,17 @@ int smu_v13_0_init_pptable_microcode(struct smu_context *smu)
} else {
pptable_id = smu->smu_table.boot_values.pp_table_id;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3667)
- pptable_id = 36671;
-
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 7) &&
- pptable_id == 3688)
- pptable_id = 36881;
/*
* Temporary solution for SMU V13.0.0 with SCPM enabled:
- * - use 36831 signed pptable when pp_table_id is 3683
- * - use 36641 signed pptable when pp_table_id is 3664 or 0
- * TODO: drop these when the pptable carried in vbios is ready.
+ * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+ * - use 36831 soft pptable when pptable_id is 3683
*/
if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
switch (pptable_id) {
- case 0:
case 3664:
- pptable_id = 36641;
+ case 3715:
+ case 3795:
+ pptable_id = 0;
break;
case 3683:
pptable_id = 36831;
@@ -264,8 +266,16 @@ int smu_v13_0_check_fw_status(struct smu_context *smu)
struct amdgpu_device *adev = smu->adev;
uint32_t mp1_fw_flags;
- mp1_fw_flags = RREG32_PCIE(MP1_Public |
- (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ switch (adev->ip_versions[MP1_HWIP][0]) {
+ case IP_VERSION(13, 0, 4):
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_V13_0_4_FIRMWARE_FLAGS & 0xffffffff));
+ break;
+ default:
+ mp1_fw_flags = RREG32_PCIE(MP1_Public |
+ (smnMP1_FIRMWARE_FLAGS & 0xffffffff));
+ break;
+ }
if ((mp1_fw_flags & MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED_MASK) >>
MP1_FIRMWARE_FLAGS__INTERRUPTS_ENABLED__SHIFT)
@@ -408,8 +418,10 @@ static int smu_v13_0_get_pptable_from_vbios(struct smu_context *smu, void **tabl
return 0;
}
-static int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu, void **table, uint32_t *size,
- uint32_t pptable_id)
+int smu_v13_0_get_pptable_from_firmware(struct smu_context *smu,
+ void **table,
+ uint32_t *size,
+ uint32_t pptable_id)
{
const struct smc_firmware_header_v1_0 *hdr;
struct amdgpu_device *adev = smu->adev;
@@ -461,7 +473,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
/*
* Temporary solution for SMU V13.0.0 with SCPM disabled:
- * - use 3664 or 3683 on request
+ * - use 3664, 3683 or 3715 on request
* - use 3664 when pptable_id is 0
* TODO: drop these when the pptable carried in vbios is ready.
*/
@@ -472,6 +484,7 @@ int smu_v13_0_setup_pptable(struct smu_context *smu)
break;
case 3664:
case 3683:
+ case 3715:
break;
default:
dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
@@ -714,6 +727,8 @@ int smu_v13_0_get_vbios_bootup_values(struct smu_context *smu)
smu->smu_table.boot_values.vclk = smu_info_v3_6->bootup_vclk_10khz;
smu->smu_table.boot_values.dclk = smu_info_v3_6->bootup_dclk_10khz;
smu->smu_table.boot_values.fclk = smu_info_v3_6->bootup_fclk_10khz;
+ } else if ((frev == 3) && (crev == 1)) {
+ return 0;
} else if ((frev == 4) && (crev == 0)) {
smu_info_v4_0 = (struct atom_smu_info_v4_0 *)header;
@@ -1065,12 +1080,33 @@ int smu_v13_0_set_power_limit(struct smu_context *smu,
return 0;
}
+static int smu_v13_0_allow_ih_interrupt(struct smu_context *smu)
+{
+ return smu_cmn_send_smc_msg(smu,
+ SMU_MSG_AllowIHHostInterrupt,
+ NULL);
+}
+
+static int smu_v13_0_process_pending_interrupt(struct smu_context *smu)
+{
+ int ret = 0;
+
+ if (smu->dc_controlled_by_gpio &&
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_ACDC_BIT))
+ ret = smu_v13_0_allow_ih_interrupt(smu);
+
+ return ret;
+}
+
int smu_v13_0_enable_thermal_alert(struct smu_context *smu)
{
- if (smu->smu_table.thermal_controller_type)
- return amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
+ int ret = 0;
- return 0;
+ ret = amdgpu_irq_get(smu->adev, &smu->irq_source, 0);
+ if (ret)
+ return ret;
+
+ return smu_v13_0_process_pending_interrupt(smu);
}
int smu_v13_0_disable_thermal_alert(struct smu_context *smu)
@@ -2257,7 +2293,8 @@ int smu_v13_0_baco_set_state(struct smu_context *smu,
if (state == SMU_BACO_STATE_ENTER) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_EnterBaco,
- 0,
+ smu_baco->maco_support ?
+ BACO_SEQ_BAMACO : BACO_SEQ_BACO,
NULL);
} else {
ret = smu_cmn_send_smc_msg(smu,
@@ -2297,6 +2334,16 @@ int smu_v13_0_baco_exit(struct smu_context *smu)
SMU_BACO_STATE_EXIT);
}
+int smu_v13_0_set_gfx_power_up_by_imu(struct smu_context *smu)
+{
+ uint16_t index;
+
+ index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_EnableGfxImu);
+ /* Param 1 to tell PMFW to enable GFXOFF feature */
+ return smu_cmn_send_msg_without_waiting(smu, index, 1);
+}
+
int smu_v13_0_od_edit_dpm_table(struct smu_context *smu,
enum PP_OD_DPM_TABLE_COMMAND type,
long input[], uint32_t size)
@@ -2386,3 +2433,23 @@ int smu_v13_0_set_default_dpm_tables(struct smu_context *smu)
return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0,
smu_table->clocks_table, false);
}
+
+void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
+}
+
+int smu_v13_0_mode1_reset(struct smu_context *smu)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_Mode1Reset, NULL);
+ if (!ret)
+ msleep(SMU13_MODE1_RESET_WAIT_TIME_IN_MS);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
index 7432b3e76d3d..7db2fd9ea74a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_0_ppt.c
@@ -117,6 +117,8 @@ static struct cmn2asic_msg_mapping smu_v13_0_0_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(SetMGpuFanBoostLimitRpm, PPSMC_MSG_SetMGpuFanBoostLimitRpm, 0),
MSG_MAP(GetPptLimit, PPSMC_MSG_GetPptLimit, 0),
MSG_MAP(NotifyPowerSource, PPSMC_MSG_NotifyPowerSource, 0),
+ MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
};
static struct cmn2asic_mapping smu_v13_0_0_clk_map[SMU_CLK_COUNT] = {
@@ -194,6 +196,7 @@ static struct cmn2asic_mapping smu_v13_0_0_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(DRIVER_SMU_CONFIG),
TAB_MAP(ACTIVITY_MONITOR_COEFF),
[SMU_TABLE_COMBO_PPTABLE] = {1, TABLE_COMBO_PPTABLE},
+ TAB_MAP(I2C_COMMANDS),
};
static struct cmn2asic_mapping smu_v13_0_0_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -300,6 +303,19 @@ smu_v13_0_0_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_OUT_OF_BAND_MONITOR_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_FCLK_BIT);
+
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_FW_CTF_BIT);
+
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_UCLK_BIT);
+
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_VR0HOT_BIT);
+
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
+
+ if (adev->pm.pp_feature & PP_ULV_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+
return 0;
}
@@ -317,6 +333,9 @@ static int smu_v13_0_0_check_powerplay_table(struct smu_context *smu)
powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
+ if (powerplay_table->platform_caps & SMU_13_0_0_PP_PLATFORM_CAP_MACO)
+ smu_baco->maco_support = true;
+
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -369,11 +388,29 @@ static int smu_v13_0_0_append_powerplay_table(struct smu_context *smu)
return 0;
}
-static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+static int smu_v13_0_0_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
{
struct smu_table_context *smu_table = &smu->smu_table;
void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_0_powerplay_table);
+
+ return 0;
+}
+
+static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
struct amdgpu_device *adev = smu->adev;
+ uint32_t pptable_id;
int ret = 0;
/*
@@ -382,17 +419,51 @@ static int smu_v13_0_0_setup_pptable(struct smu_context *smu)
* rely on the combo pptable(and its revelant SMU message).
*/
if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_0_powerplay_table);
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
} else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
+ /* override pptable_id from driver parameter */
+ if (amdgpu_smu_pptable_id >= 0) {
+ pptable_id = amdgpu_smu_pptable_id;
+ dev_info(adev->dev, "override pptable id %d\n", pptable_id);
+ } else {
+ pptable_id = smu_table->boot_values.pp_table_id;
+ }
+
+ /*
+ * Temporary solution for SMU V13.0.0 with SCPM disabled:
+ * - use vbios carried pptable when pptable_id is 3664, 3715 or 3795
+ * - use soft pptable when pptable_id is 3683
+ */
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 0)) {
+ switch (pptable_id) {
+ case 3664:
+ case 3715:
+ case 3795:
+ pptable_id = 0;
+ break;
+ case 3683:
+ break;
+ default:
+ dev_err(adev->dev, "Unsupported pptable id %d\n", pptable_id);
+ return -EINVAL;
+ }
+ }
+
+ /* force using vbios pptable in sriov mode */
+ if ((amdgpu_sriov_vf(adev) || !pptable_id) && (amdgpu_emu_mode != 1))
+ ret = smu_v13_0_0_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ else
+ ret = smu_v13_0_get_pptable_from_firmware(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size,
+ pptable_id);
}
+ if (ret)
+ return ret;
ret = smu_v13_0_0_store_powerplay_table(smu);
if (ret)
@@ -840,7 +911,7 @@ static int smu_v13_0_0_read_sensor(struct smu_context *smu,
break;
case AMDGPU_PP_SENSOR_GFX_MCLK:
ret = smu_v13_0_0_get_smu_metrics_data(smu,
- METRICS_AVERAGE_UCLK,
+ METRICS_CURR_UCLK,
(uint32_t *)data);
*(uint32_t *)data *= 100;
*size = 4;
@@ -1576,14 +1647,206 @@ static int smu_v13_0_0_set_power_profile_mode(struct smu_context *smu,
NULL);
}
+static bool smu_v13_0_0_is_mode1_reset_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ u32 smu_version;
+
+ /* SRIOV does not support SMU mode1 reset */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ /* PMFW support is available since 78.41 (packed as 0x004e2900: 0x4e = 78, 0x29 = 41) */
+ smu_cmn_get_smc_version(smu, NULL, &smu_version);
+ if (smu_version < 0x004e2900)
+ return false;
+
+ return true;
+}
+
+static int smu_v13_0_0_i2c_xfer(struct i2c_adapter *i2c_adap,
+ struct i2c_msg *msg, int num_msgs)
+{
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *table = &smu_table->driver_table;
+ SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
+ int i, j, r, c;
+ u16 dir;
+
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return -ENOMEM;
+
+ req->I2CcontrollerPort = smu_i2c->port;
+ req->I2CSpeed = I2C_SPEED_FAST_400K;
+ req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
+ dir = msg[0].flags & I2C_M_RD;
+
+ for (c = i = 0; i < num_msgs; i++) {
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &req->SwI2cCmds[c];
+
+ if (!(msg[i].flags & I2C_M_RD)) {
+ /* write */
+ cmd->CmdConfig |= CMDCONFIG_READWRITE_MASK;
+ cmd->ReadWriteData = msg[i].buf[j];
+ }
+
+ if ((dir ^ msg[i].flags) & I2C_M_RD) {
+ /* The direction changes.
+ */
+ dir = msg[i].flags & I2C_M_RD;
+ cmd->CmdConfig |= CMDCONFIG_RESTART_MASK;
+ }
+
+ req->NumCmds++;
+
+ /*
+ * Insert STOP if we are at the last byte of either last
+ * message for the transaction or the client explicitly
+ * requires a STOP at this particular message.
+ */
+ if ((j == msg[i].len - 1) &&
+ ((i == num_msgs - 1) || (msg[i].flags & I2C_M_STOP))) {
+ cmd->CmdConfig &= ~CMDCONFIG_RESTART_MASK;
+ cmd->CmdConfig |= CMDCONFIG_STOP_MASK;
+ }
+ }
+ }
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
+ if (r)
+ goto fail;
+
+ for (c = i = 0; i < num_msgs; i++) {
+ if (!(msg[i].flags & I2C_M_RD)) {
+ c += msg[i].len;
+ continue;
+ }
+ for (j = 0; j < msg[i].len; j++, c++) {
+ SwI2cCmd_t *cmd = &res->SwI2cCmds[c];
+
+ msg[i].buf[j] = cmd->ReadWriteData;
+ }
+ }
+ r = num_msgs;
+fail:
+ kfree(req);
+ return r;
+}
+
+static u32 smu_v13_0_0_i2c_func(struct i2c_adapter *adap)
+{
+ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
+}
+
+static const struct i2c_algorithm smu_v13_0_0_i2c_algo = {
+ .master_xfer = smu_v13_0_0_i2c_xfer,
+ .functionality = smu_v13_0_0_i2c_func,
+};
+
+static const struct i2c_adapter_quirks smu_v13_0_0_i2c_control_quirks = {
+ .flags = I2C_AQ_COMB | I2C_AQ_COMB_SAME_ADDR | I2C_AQ_NO_ZERO_LEN,
+ .max_read_len = MAX_SW_I2C_COMMANDS,
+ .max_write_len = MAX_SW_I2C_COMMANDS,
+ .max_comb_1st_msg_len = 2,
+ .max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
+};
+
+static int smu_v13_0_0_i2c_control_init(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_SPD;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &smu_v13_0_0_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &smu_v13_0_0_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
+
+ /* assign the buses used for the FRU EEPROM and RAS EEPROM */
+ /* XXX ideally this would be something in a vbios data table */
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ return res;
+}
+
+static void smu_v13_0_0_i2c_control_fini(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
+}
+
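
Once registered, these adapters behave like any other Linux I2C master, and the RAS/FRU EEPROM code reaches them through the adev->pm.*_eeprom_i2c_bus pointers assigned in smu_v13_0_0_i2c_control_init() above. A rough usage sketch follows; example_fru_eeprom_read() is a hypothetical helper that assumes a 16-bit-addressed EEPROM behind the FRU bus and the standard i2c_transfer()/struct i2c_msg API from <linux/i2c.h>.

/* Sketch: read 'len' bytes from offset 'off' of an EEPROM on the FRU bus. */
static int example_fru_eeprom_read(struct amdgpu_device *adev, u8 dev_addr,
				   u16 off, u8 *buf, u16 len)
{
	u8 offset_buf[2] = { off >> 8, off & 0xff };
	struct i2c_msg msgs[] = {
		{ .addr = dev_addr, .flags = 0, .len = 2, .buf = offset_buf },
		{ .addr = dev_addr, .flags = I2C_M_RD, .len = len, .buf = buf },
	};
	int r;

	/*
	 * The combined transfer is packed into one SwI2cRequest_t by
	 * smu_v13_0_0_i2c_xfer(); the quirks above cap the read portion
	 * at MAX_SW_I2C_COMMANDS - 2 bytes.
	 */
	r = i2c_transfer(adev->pm.fru_eeprom_i2c_bus, msgs, ARRAY_SIZE(msgs));

	return r < 0 ? r : 0;
}
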
+static int smu_v13_0_0_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ break;
+ default:
+ /* Ignore others */
+ ret = 0;
+ }
+
+ return ret;
+}
+
static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_0_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_0_set_default_dpm_table,
+ .i2c_init = smu_v13_0_0_i2c_control_init,
+ .i2c_fini = smu_v13_0_0_i2c_control_fini,
.is_dpm_running = smu_v13_0_0_is_dpm_running,
.dump_pptable = smu_v13_0_0_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_0_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
.fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_check_fw_status,
@@ -1638,6 +1901,9 @@ static const struct pptable_funcs smu_v13_0_0_ppt_funcs = {
.baco_set_state = smu_v13_0_baco_set_state,
.baco_enter = smu_v13_0_baco_enter,
.baco_exit = smu_v13_0_baco_exit,
+ .mode1_reset_is_support = smu_v13_0_0_is_mode1_reset_supported,
+ .mode1_reset = smu_v13_0_mode1_reset,
+ .set_mp1_state = smu_v13_0_0_set_mp1_state,
};
void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
@@ -1649,4 +1915,5 @@ void smu_v13_0_0_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v13_0_0_table_map;
smu->pwr_src_map = smu_v13_0_0_pwr_src_map;
smu->workload_map = smu_v13_0_0_workload_map;
+ smu_v13_0_set_smu_mailbox_registers(smu);
}
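
The adapter registered by smu_v13_0_0_i2c_control_init() above advertises combined transfers with a 2-byte first message and at most MAX_SW_I2C_COMMANDS - 2 bytes in the second. For reference, a minimal sketch of how a kernel client could issue such a combined write-then-read through one of these buses; the helper name, the 0x50 device address and the 16-bit offset layout are assumptions for illustration, not taken from this patch.

/* Minimal usage sketch (not part of the patch): a combined write-then-read
 * within the limits set by smu_v13_0_0_i2c_control_quirks.
 * Requires <linux/i2c.h>.
 */
static int example_smu_i2c_read(struct i2c_adapter *adap, u16 offset,
				u8 *buf, u16 len)
{
	u8 addr_buf[2] = { offset >> 8, offset & 0xff };
	struct i2c_msg msgs[] = {
		/* 2-byte offset write: matches max_comb_1st_msg_len */
		{ .addr = 0x50, .flags = 0, .len = 2, .buf = addr_buf },
		/* read back; len must stay <= MAX_SW_I2C_COMMANDS - 2 */
		{ .addr = 0x50, .flags = I2C_M_RD, .len = len, .buf = buf },
	};
	int r;

	r = i2c_transfer(adap, msgs, ARRAY_SIZE(msgs));
	if (r < 0)
		return r;

	return r == ARRAY_SIZE(msgs) ? 0 : -EIO;
}
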
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
index 5a17b51aa0f9..97e1d55dcaad 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_4_ppt.c
@@ -43,6 +43,15 @@
#undef pr_info
#undef pr_debug
+#define mmMP1_SMN_C2PMSG_66 0x0282
+#define mmMP1_SMN_C2PMSG_66_BASE_IDX 1
+
+#define mmMP1_SMN_C2PMSG_82 0x0292
+#define mmMP1_SMN_C2PMSG_82_BASE_IDX 1
+
+#define mmMP1_SMN_C2PMSG_90 0x029a
+#define mmMP1_SMN_C2PMSG_90_BASE_IDX 1
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
@@ -62,7 +71,6 @@ static struct cmn2asic_msg_mapping smu_v13_0_4_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
MSG_MAP(GetSmuVersion, PPSMC_MSG_GetPmfwVersion, 1),
MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
- MSG_MAP(EnableGfxOff, PPSMC_MSG_EnableGfxOff, 1),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
@@ -190,6 +198,9 @@ static int smu_v13_0_4_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
@@ -210,27 +221,10 @@ static int smu_v13_0_4_system_features_control(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- /* SMU fw need this message to trigger IMU to complete the initialization */
- if (en)
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxImu, NULL);
- else {
- if (!adev->in_s0ix)
- ret = smu_cmn_send_smc_msg(smu,
- SMU_MSG_PrepareMp1ForUnload,
- NULL);
- }
- return ret;
-}
-static int smu_v13_0_4_post_smu_init(struct smu_context *smu)
-{
- struct amdgpu_device *adev = smu->adev;
- int ret = 0;
+ if (!en && !adev->in_s0ix)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
- /* allow message will be sent after enable message */
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
- if (ret)
- dev_err(adev->dev, "Failed to Enable GfxOff!\n");
return ret;
}
@@ -1022,7 +1016,6 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
- .post_init = smu_v13_0_4_post_smu_init,
.mode2_reset = smu_v13_0_4_mode2_reset,
.get_dpm_ultimate_freq = smu_v13_0_4_get_dpm_ultimate_freq,
.od_edit_dpm_table = smu_v13_0_od_edit_dpm_table,
@@ -1030,13 +1023,19 @@ static const struct pptable_funcs smu_v13_0_4_ppt_funcs = {
.force_clk_levels = smu_v13_0_4_force_clk_levels,
.set_performance_level = smu_v13_0_4_set_performance_level,
.set_fine_grain_gfx_freq_parameters = smu_v13_0_4_set_fine_grain_gfx_freq_parameters,
+ .set_gfx_power_up_by_imu = smu_v13_0_set_gfx_power_up_by_imu,
};
void smu_v13_0_4_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &smu_v13_0_4_ppt_funcs;
smu->message_map = smu_v13_0_4_message_map;
smu->feature_map = smu_v13_0_4_feature_mask_map;
smu->table_map = smu_v13_0_4_table_map;
smu->is_apu = true;
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
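
SMU 13.0.4 points smu->msg_reg/resp_reg/param_reg at its own C2PMSG instances (BASE_IDX 1), while the dGPU parts in this patch call smu_v13_0_set_smu_mailbox_registers() instead. A sketch of what that common helper presumably assigns, based on the default offsets dropped from smu_cmn.c later in the patch; treat the exact body as an assumption.

/* Presumed body of the common helper; not quoted from this patch. */
void smu_v13_0_set_smu_mailbox_registers(struct smu_context *smu)
{
	struct amdgpu_device *adev = smu->adev;	/* used by SOC15_REG_OFFSET() */

	smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_82);
	smu->msg_reg   = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_66);
	smu->resp_reg  = SOC15_REG_OFFSET(MP1, 0, mmMP1_SMN_C2PMSG_90);
}
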
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
index b81711c4ff33..66445964efbd 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -42,6 +42,15 @@
#undef pr_info
#undef pr_debug
+#define mmMP1_C2PMSG_2 (0xbee142 + 0xb00000 / 4)
+#define mmMP1_C2PMSG_2_BASE_IDX 0
+
+#define mmMP1_C2PMSG_34 (0xbee262 + 0xb00000 / 4)
+#define mmMP1_C2PMSG_34_BASE_IDX 0
+
+#define mmMP1_C2PMSG_33 (0xbee261 + 0xb00000 / 4)
+#define mmMP1_C2PMSG_33_BASE_IDX 0
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -167,6 +176,9 @@ static int smu_v13_0_5_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->watermarks_table);
smu_table->watermarks_table = NULL;
+ kfree(smu_table->gpu_metrics_table);
+ smu_table->gpu_metrics_table = NULL;
+
return 0;
}
@@ -1049,9 +1061,14 @@ static const struct pptable_funcs smu_v13_0_5_ppt_funcs = {
void smu_v13_0_5_set_ppt_funcs(struct smu_context *smu)
{
+ struct amdgpu_device *adev = smu->adev;
+
smu->ppt_funcs = &smu_v13_0_5_ppt_funcs;
smu->message_map = smu_v13_0_5_message_map;
smu->feature_map = smu_v13_0_5_feature_mask_map;
smu->table_map = smu_v13_0_5_table_map;
smu->is_apu = true;
+ smu->param_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_34);
+ smu->msg_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_2);
+ smu->resp_reg = SOC15_REG_OFFSET(MP1, 0, mmMP1_C2PMSG_33);
}
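
The SMU 13.0.5 mailbox offsets above are written as a base plus a 0xb00000 byte aperture scaled to DWORD units. Assuming these are DWORD-indexed SMN offsets, the arithmetic works out as follows:

/*
 * 0xb00000 / 4        = 0x2c0000   (byte aperture converted to DWORDs)
 * 0xbee142 + 0x2c0000 = 0xeae142   -> mmMP1_C2PMSG_2  (message register)
 * 0xbee262 + 0x2c0000 = 0xeae262   -> mmMP1_C2PMSG_34 (parameter register)
 * 0xbee261 + 0x2c0000 = 0xeae261   -> mmMP1_C2PMSG_33 (response register)
 */
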
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
index 4e1861fb2c6a..fcf24c540859 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_7_ppt.c
@@ -97,6 +97,7 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(UseDefaultPPTable, PPSMC_MSG_UseDefaultPPTable, 0),
MSG_MAP(RunDcBtc, PPSMC_MSG_RunDcBtc, 0),
MSG_MAP(EnterBaco, PPSMC_MSG_EnterBaco, 0),
+ MSG_MAP(ExitBaco, PPSMC_MSG_ExitBaco, 0),
MSG_MAP(SetSoftMinByFreq, PPSMC_MSG_SetSoftMinByFreq, 1),
MSG_MAP(SetSoftMaxByFreq, PPSMC_MSG_SetSoftMaxByFreq, 1),
MSG_MAP(SetHardMinByFreq, PPSMC_MSG_SetHardMinByFreq, 1),
@@ -110,11 +111,15 @@ static struct cmn2asic_msg_mapping smu_v13_0_7_message_map[SMU_MSG_MAX_COUNT] =
MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 0),
MSG_MAP(GetDcModeMaxDpmFreq, PPSMC_MSG_GetDcModeMaxDpmFreq, 1),
MSG_MAP(OverridePcieParameters, PPSMC_MSG_OverridePcieParameters, 0),
+ MSG_MAP(ReenableAcDcInterrupt, PPSMC_MSG_ReenableAcDcInterrupt, 0),
+ MSG_MAP(AllowIHHostInterrupt, PPSMC_MSG_AllowIHHostInterrupt, 0),
MSG_MAP(DramLogSetDramAddrHigh, PPSMC_MSG_DramLogSetDramAddrHigh, 0),
MSG_MAP(DramLogSetDramAddrLow, PPSMC_MSG_DramLogSetDramAddrLow, 0),
MSG_MAP(DramLogSetDramSize, PPSMC_MSG_DramLogSetDramSize, 0),
MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 0),
MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 0),
+ MSG_MAP(Mode1Reset, PPSMC_MSG_Mode1Reset, 0),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 0),
};
static struct cmn2asic_mapping smu_v13_0_7_clk_map[SMU_CLK_COUNT] = {
@@ -246,8 +251,12 @@ smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu,
if (adev->pm.pp_feature & PP_SCLK_DPM_MASK) {
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_IMU_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_GFX_POWER_OPTIMIZER_BIT);
}
+ if (adev->pm.pp_feature & PP_GFXOFF_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFXOFF_BIT);
+
if (adev->pm.pp_feature & PP_MCLK_DPM_MASK) {
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_UCLK_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_FCLK_BIT);
@@ -263,6 +272,9 @@ smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu,
if (adev->pm.pp_feature & PP_SCLK_DEEP_SLEEP_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_GFXCLK_BIT);
+ if (adev->pm.pp_feature & PP_ULV_MASK)
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_ULV_BIT);
+
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_MP0CLK_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_MM_DPM_BIT);
@@ -281,6 +293,7 @@ smu_v13_0_7_get_allowed_feature_mask(struct smu_context *smu,
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_MPCLK_DS_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_GFX_PCC_DFLL_BIT);
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_SOC_CG_BIT);
+ *(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_BACO_BIT);
if (adev->pm.pp_feature & PP_DCEFCLK_DPM_MASK)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DPM_DCN_BIT);
@@ -298,6 +311,8 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
struct smu_13_0_7_powerplay_table *powerplay_table =
table_context->power_play_table;
struct smu_baco_context *smu_baco = &smu->smu_baco;
+ PPTable_t *smc_pptable = table_context->driver_pptable;
+ BoardTable_t *BoardTable = &smc_pptable->BoardTable;
if (powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_HARDWAREDC)
smu->dc_controlled_by_gpio = true;
@@ -306,6 +321,9 @@ static int smu_v13_0_7_check_powerplay_table(struct smu_context *smu)
powerplay_table->platform_caps & SMU_13_0_7_PP_PLATFORM_CAP_MACO)
smu_baco->platform_support = true;
+ if (smu_baco->platform_support && (BoardTable->HsrEnabled || BoardTable->VddqOffEnabled))
+ smu_baco->maco_support = true;
+
table_context->thermal_controller_type =
powerplay_table->thermal_controller_type;
@@ -382,11 +400,27 @@ static int smu_v13_0_7_append_powerplay_table(struct smu_context *smu)
return 0;
}
+static int smu_v13_0_7_get_pptable_from_pmfw(struct smu_context *smu,
+ void **table,
+ uint32_t *size)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ void *combo_pptable = smu_table->combo_pptable;
+ int ret = 0;
+
+ ret = smu_cmn_get_combo_pptable(smu);
+ if (ret)
+ return ret;
+
+ *table = combo_pptable;
+ *size = sizeof(struct smu_13_0_7_powerplay_table);
+
+ return 0;
+}
static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
- void *combo_pptable = smu_table->combo_pptable;
struct amdgpu_device *adev = smu->adev;
int ret = 0;
@@ -395,18 +429,11 @@ static int smu_v13_0_7_setup_pptable(struct smu_context *smu)
* be used directly by the driver. To get the raw pptable, we need to
* rely on the combo pptable (and its relevant SMU message).
*/
- if (adev->scpm_enabled) {
- ret = smu_cmn_get_combo_pptable(smu);
- if (ret)
- return ret;
-
- smu->smu_table.power_play_table = combo_pptable;
- smu->smu_table.power_play_table_size = sizeof(struct smu_13_0_7_powerplay_table);
- } else {
- ret = smu_v13_0_setup_pptable(smu);
- if (ret)
- return ret;
- }
+ ret = smu_v13_0_7_get_pptable_from_pmfw(smu,
+ &smu_table->power_play_table,
+ &smu_table->power_play_table_size);
+ if (ret)
+ return ret;
ret = smu_v13_0_7_store_powerplay_table(smu);
if (ret)
@@ -1532,6 +1559,33 @@ static int smu_v13_0_7_set_power_profile_mode(struct smu_context *smu, long *inp
return ret;
}
+static int smu_v13_0_7_set_mp1_state(struct smu_context *smu,
+ enum pp_mp1_state mp1_state)
+{
+ int ret;
+
+ switch (mp1_state) {
+ case PP_MP1_STATE_UNLOAD:
+ ret = smu_cmn_set_mp1_state(smu, mp1_state);
+ break;
+ default:
+ /* Ignore others */
+ ret = 0;
+ }
+
+ return ret;
+}
+
+static bool smu_v13_0_7_is_mode1_reset_supported(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ /* SRIOV does not support SMU mode1 reset */
+ if (amdgpu_sriov_vf(adev))
+ return false;
+
+ return true;
+}
static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.get_allowed_feature_mask = smu_v13_0_7_get_allowed_feature_mask,
.set_default_dpm_table = smu_v13_0_7_set_default_dpm_table,
@@ -1539,8 +1593,11 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.dump_pptable = smu_v13_0_7_dump_pptable,
.init_microcode = smu_v13_0_init_microcode,
.load_microcode = smu_v13_0_load_microcode,
+ .fini_microcode = smu_v13_0_fini_microcode,
.init_smc_tables = smu_v13_0_7_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_fini_smc_tables,
.init_power = smu_v13_0_init_power,
+ .fini_power = smu_v13_0_fini_power,
.check_fw_status = smu_v13_0_7_check_fw_status,
.setup_pptable = smu_v13_0_7_setup_pptable,
.check_fw_version = smu_v13_0_check_fw_version,
@@ -1583,6 +1640,14 @@ static const struct pptable_funcs smu_v13_0_7_ppt_funcs = {
.set_tool_table_location = smu_v13_0_set_tool_table_location,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_pp_feature_mask = smu_cmn_set_pp_feature_mask,
+ .baco_is_support = smu_v13_0_baco_is_support,
+ .baco_get_state = smu_v13_0_baco_get_state,
+ .baco_set_state = smu_v13_0_baco_set_state,
+ .baco_enter = smu_v13_0_baco_enter,
+ .baco_exit = smu_v13_0_baco_exit,
+ .mode1_reset_is_support = smu_v13_0_7_is_mode1_reset_supported,
+ .mode1_reset = smu_v13_0_mode1_reset,
+ .set_mp1_state = smu_v13_0_7_set_mp1_state,
};
void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
@@ -1594,4 +1659,5 @@ void smu_v13_0_7_set_ppt_funcs(struct smu_context *smu)
smu->table_map = smu_v13_0_7_table_map;
smu->pwr_src_map = smu_v13_0_7_pwr_src_map;
smu->workload_map = smu_v13_0_7_workload_map;
+ smu_v13_0_set_smu_mailbox_registers(smu);
}
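
Both new set_mp1_state() callbacks only act on PP_MP1_STATE_UNLOAD and delegate to smu_cmn_set_mp1_state(). For context, that common helper roughly maps the pp_mp1_state value onto the matching PrepareMp1For* message and sends it; the sketch below is an approximation and is not quoted from this patch.

/* Approximate shape of the common helper; not quoted from this patch. */
int smu_cmn_set_mp1_state(struct smu_context *smu,
			  enum pp_mp1_state mp1_state)
{
	enum smu_message_type msg;
	int ret;

	switch (mp1_state) {
	case PP_MP1_STATE_UNLOAD:
		msg = SMU_MSG_PrepareMp1ForUnload;
		break;
	case PP_MP1_STATE_SHUTDOWN:
		msg = SMU_MSG_PrepareMp1ForShutdown;
		break;
	case PP_MP1_STATE_RESET:
		msg = SMU_MSG_PrepareMp1ForReset;
		break;
	default:
		return 0;
	}

	ret = smu_cmn_send_smc_msg(smu, msg, NULL);
	if (ret)
		dev_err(smu->adev->dev, "Failed to prepare MP1 for state %d\n",
			mp1_state);

	return ret;
}
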
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index feff4f8c927c..04e56b0b3033 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -42,6 +42,11 @@
#undef pr_info
#undef pr_debug
+#define regSMUIO_GFX_MISC_CNTL 0x00c5
+#define regSMUIO_GFX_MISC_CNTL_BASE_IDX 0
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK 0x00000006L
+#define SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT 0x1L
+
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
@@ -587,6 +592,31 @@ static ssize_t yellow_carp_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v2_1);
}
+/**
+ * yellow_carp_get_gfxoff_status - get gfxoff status
+ *
+ * @smu: smu_context pointer
+ *
+ * Query the current GFXOFF status reported by SMUIO.
+ *
+ * Return:
+ * 0 = in GFXOFF (default),
+ * 1 = transitioning out of GFXOFF,
+ * 2 = not in GFXOFF,
+ * 3 = transitioning into GFXOFF.
+ */
+static uint32_t yellow_carp_get_gfxoff_status(struct smu_context *smu)
+{
+ uint32_t reg;
+ uint32_t gfxoff_status = 0;
+ struct amdgpu_device *adev = smu->adev;
+
+ reg = RREG32_SOC15(SMUIO, 0, regSMUIO_GFX_MISC_CNTL);
+ gfxoff_status = (reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK)
+ >> SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT;
+
+ return gfxoff_status;
+}
+
static int yellow_carp_set_default_dpm_tables(struct smu_context *smu)
{
struct smu_table_context *smu_table = &smu->smu_table;
@@ -1186,6 +1216,7 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
+ .get_gfx_off_status = yellow_carp_get_gfxoff_status,
.post_init = yellow_carp_post_smu_init,
.mode2_reset = yellow_carp_mode2_reset,
.get_dpm_ultimate_freq = yellow_carp_get_dpm_ultimate_freq,
@@ -1203,4 +1234,5 @@ void yellow_carp_set_ppt_funcs(struct smu_context *smu)
smu->feature_map = yellow_carp_feature_mask_map;
smu->table_map = yellow_carp_table_map;
smu->is_apu = true;
+ smu_v13_0_set_smu_mailbox_registers(smu);
}
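
The two PWR_GFXOFF_STATUS bits extracted by yellow_carp_get_gfxoff_status() encode four states. A hypothetical helper, not part of the patch, that decodes a raw SMUIO_GFX_MISC_CNTL value into a printable state using the same mask and shift:

/* Hypothetical helper: decode the two status bits exactly as
 * yellow_carp_get_gfxoff_status() does.
 */
static const char *gfxoff_status_str(uint32_t reg)
{
	switch ((reg & SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS_MASK) >>
		SMUIO_GFX_MISC_CNTL__PWR_GFXOFF_STATUS__SHIFT) {
	case 0: return "in GFXOFF";
	case 1: return "transitioning out of GFXOFF";
	case 2: return "not in GFXOFF";
	case 3: return "transitioning into GFXOFF";
	default: return "unknown";
	}
}
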
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index 5de7da75d14a..15e4298c7cc8 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -37,31 +37,6 @@
#undef pr_info
#undef pr_debug
-/*
- * Although these are defined in each ASIC's specific header file.
- * They share the same definitions and values. That makes common
- * APIs for SMC messages issuing for all ASICs possible.
- */
-#define mmMP1_SMN_C2PMSG_66 0x0282
-#define mmMP1_SMN_C2PMSG_66_BASE_IDX 0
-
-#define mmMP1_SMN_C2PMSG_82 0x0292
-#define mmMP1_SMN_C2PMSG_82_BASE_IDX 0
-
-#define mmMP1_SMN_C2PMSG_90 0x029a
-#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
-
-/* SMU 13.0.5 has its specific mailbox messaging registers */
-
-#define mmMP1_C2PMSG_2 (0xbee142 + 0xb00000 / 4)
-#define mmMP1_C2PMSG_2_BASE_IDX 0
-
-#define mmMP1_C2PMSG_34 (0xbee262 + 0xb00000 / 4)
-#define mmMP1_C2PMSG_34_BASE_IDX 0
-
-#define mmMP1_C2PMSG_33 (0xbee261 + 0xb00000 / 4)
-#define mmMP1_C2PMSG_33_BASE_IDX 0
-
#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
#undef __SMU_DUMMY_MAP
@@ -90,10 +65,7 @@ static void smu_cmn_read_arg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
- *arg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
- else
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ *arg = RREG32(smu->param_reg);
}
/* Redefine the SMU error codes here.
@@ -139,10 +111,7 @@ static u32 __smu_cmn_poll_stat(struct smu_context *smu)
u32 reg;
for ( ; timeout > 0; timeout--) {
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
- reg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33);
- else
- reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ reg = RREG32(smu->resp_reg);
if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
break;
@@ -164,13 +133,8 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
switch (reg_c2pmsg_90) {
case SMU_RESP_NONE: {
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
- msg_idx = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2);
- prm = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
- } else {
- msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
- prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
- }
+ msg_idx = RREG32(smu->msg_reg);
+ prm = RREG32(smu->param_reg);
dev_err_ratelimited(adev->dev,
"SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
msg_idx, prm);
@@ -264,16 +228,9 @@ static void __smu_cmn_send_msg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
- WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33, 0);
- WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34, param);
- WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2, msg);
- } else {
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
- }
-
+ WREG32(smu->resp_reg, 0);
+ WREG32(smu->param_reg, param);
+ WREG32(smu->msg_reg, msg);
}
/**
@@ -725,16 +682,13 @@ static const char *smu_get_feature_name(struct smu_context *smu,
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
+ int8_t sort_feature[max(SMU_FEATURE_COUNT, SMU_FEATURE_MAX)];
uint64_t feature_mask;
- int feature_index = 0;
+ int i, feature_index;
uint32_t count = 0;
- int8_t sort_feature[SMU_FEATURE_COUNT];
size_t size = 0;
- int ret = 0, i;
- int feature_id;
- ret = __smu_get_enabled_features(smu, &feature_mask);
- if (ret)
+ if (__smu_get_enabled_features(smu, &feature_mask))
return 0;
size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
@@ -755,22 +709,15 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
size += sysfs_emit_at(buf, size, "%-2s. %-20s %-3s : %-s\n",
"No", "Feature", "Bit", "State");
- for (i = 0; i < SMU_FEATURE_COUNT; i++) {
- if (sort_feature[i] < 0)
- continue;
-
- /* convert to asic spcific feature ID */
- feature_id = smu_cmn_to_asic_specific_index(smu,
- CMN2ASIC_MAPPING_FEATURE,
- sort_feature[i]);
- if (feature_id < 0)
+ for (feature_index = 0; feature_index < SMU_FEATURE_MAX; feature_index++) {
+ if (sort_feature[feature_index] < 0)
continue;
size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
count++,
- smu_get_feature_name(smu, sort_feature[i]),
- i,
- !!test_bit(feature_id, (unsigned long *)&feature_mask) ?
+ smu_get_feature_name(smu, sort_feature[feature_index]),
+ feature_index,
+ !!test_bit(feature_index, (unsigned long *)&feature_mask) ?
"enabled" : "disabled");
}
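
The rewritten print loop can index both sort_feature[] and feature_mask by the ASIC-specific bit only because the array is filled as a reverse map earlier in smu_cmn_get_pp_feature_mask(); that part of the function lies outside this hunk, so the sketch below is an assumption about its shape rather than a quote from the patch.

/* Presumed population of sort_feature[] earlier in the function: map each
 * generic feature to its ASIC-specific bit, leaving unmapped slots at -1.
 */
memset(sort_feature, -1, sizeof(sort_feature));

for (i = 0; i < SMU_FEATURE_COUNT; i++) {
	feature_index = smu_cmn_to_asic_specific_index(smu,
						       CMN2ASIC_MAPPING_FEATURE,
						       i);
	if (feature_index < 0)
		continue;

	sort_feature[feature_index] = i;
}
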