Diffstat (limited to 'drivers/gpu/drm/amd/pm')
-rw-r--r--  drivers/gpu/drm/amd/pm/Makefile | 13
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm.c | 2515
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c | 95
-rw-r--r--  drivers/gpu/drm/amd/pm/amdgpu_pm.c | 619
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h | 374
-rw-r--r--  drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h | 32
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/Makefile | 32
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h (renamed from drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c (renamed from drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c) | 37
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h (renamed from drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c (renamed from drivers/gpu/drm/amd/pm/powerplay/kv_smc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c | 1081
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h | 38
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h (renamed from drivers/gpu/drm/amd/pm/powerplay/ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h (renamed from drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c (renamed from drivers/gpu/drm/amd/pm/powerplay/si_dpm.c) | 188
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h (renamed from drivers/gpu/drm/amd/pm/powerplay/si_dpm.h) | 15
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c (renamed from drivers/gpu/drm/amd/pm/powerplay/si_smc.c) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h (renamed from drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/Makefile | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c | 400
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c | 68
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h (renamed from drivers/gpu/drm/amd/pm/inc/amd_powerplay.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h (renamed from drivers/gpu/drm/amd/pm/inc/hardwaremanager.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h (renamed from drivers/gpu/drm/amd/pm/inc/hwmgr.h) | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h (renamed from drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h (renamed from drivers/gpu/drm/amd/pm/inc/power_state.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h (renamed from drivers/gpu/drm/amd/pm/inc/pp_debug.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h (renamed from drivers/gpu/drm/amd/pm/inc/pp_endian.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h (renamed from drivers/gpu/drm/amd/pm/inc/pp_thermal.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h (renamed from drivers/gpu/drm/amd/pm/inc/ppinterrupt.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h (renamed from drivers/gpu/drm/amd/pm/inc/smu10.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h (renamed from drivers/gpu/drm/amd/pm/inc/smu71.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu71_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h (renamed from drivers/gpu/drm/amd/pm/inc/smu72.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu72_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h (renamed from drivers/gpu/drm/amd/pm/inc/smu73.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu73_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h (renamed from drivers/gpu/drm/amd/pm/inc/smu74.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu74_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h (renamed from drivers/gpu/drm/amd/pm/inc/smu75.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu75_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7_common.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7_discrete.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7_fusion.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h (renamed from drivers/gpu/drm/amd/pm/inc/smu8.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h (renamed from drivers/gpu/drm/amd/pm/inc/smu8_fusion.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h (renamed from drivers/gpu/drm/amd/pm/inc/smu9.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h) | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h (renamed from drivers/gpu/drm/amd/pm/inc/smumgr.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c | 4
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c | 11
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c | 691
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h (renamed from drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h) | 66
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h) | 3
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h) | 82
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h (renamed from drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h (renamed from drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h (renamed from drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h | 140
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h (renamed from drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_5_pmfw.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_8_pmfw.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_pmfw.h | 126
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_ppsmc.h | 74
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_types.h) | 6
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v12_0.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0.h) | 1
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h (renamed from drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h) | 0
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c | 102
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c | 80
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c | 476
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c | 533
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h | 8
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c | 70
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c | 79
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c | 28
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c | 133
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c | 81
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c | 1057
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h | 29
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c | 47
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c | 264
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h | 16
-rw-r--r--  drivers/gpu/drm/amd/pm/swsmu/smu_internal.h | 10
121 files changed, 6266 insertions, 3512 deletions
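
Note: the dominant change in the amdgpu_dpm.c hunks below is a locking
refactor. Every amdgpu_dpm_* entry point now takes adev->pm.mutex around
its dispatch into the pp_funcs callback table, replacing the earlier
ad-hoc, per-case locking (see the removed deadlock TODO comment in
amdgpu_dpm_set_powergating_by_smu). A minimal sketch of the resulting
wrapper shape — the function and callback names here are illustrative
only, not taken from the patch:

    int amdgpu_dpm_example_wrapper(struct amdgpu_device *adev, bool arg)
    {
    	const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
    	int ret = 0;

    	/* capability check first, before the lock is taken */
    	if (!pp_funcs->example_cb)	/* hypothetical callback */
    		return 0;

    	/* serialize all power-management dispatches on one mutex */
    	mutex_lock(&adev->pm.mutex);
    	ret = pp_funcs->example_cb(adev->powerplay.pp_handle, arg);
    	mutex_unlock(&adev->pm.mutex);

    	return ret;
    }

Compare amdgpu_dpm_get_sclk() and amdgpu_dpm_get_mclk() in the hunks
below, which follow exactly this shape.
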
diff --git a/drivers/gpu/drm/amd/pm/Makefile b/drivers/gpu/drm/amd/pm/Makefile
index 8cf6eff1ea93..51751db436b0 100644
--- a/drivers/gpu/drm/amd/pm/Makefile
+++ b/drivers/gpu/drm/amd/pm/Makefile
@@ -21,26 +21,29 @@
#
subdir-ccflags-y += \
- -I$(FULL_AMD_PATH)/pm/inc/ \
-I$(FULL_AMD_PATH)/include/asic_reg \
-I$(FULL_AMD_PATH)/include \
+ -I$(FULL_AMD_PATH)/pm/inc/ \
-I$(FULL_AMD_PATH)/pm/swsmu \
+ -I$(FULL_AMD_PATH)/pm/swsmu/inc \
+ -I$(FULL_AMD_PATH)/pm/swsmu/inc/pmfw_if \
-I$(FULL_AMD_PATH)/pm/swsmu/smu11 \
-I$(FULL_AMD_PATH)/pm/swsmu/smu12 \
-I$(FULL_AMD_PATH)/pm/swsmu/smu13 \
- -I$(FULL_AMD_PATH)/pm/powerplay \
+ -I$(FULL_AMD_PATH)/pm/powerplay/inc \
-I$(FULL_AMD_PATH)/pm/powerplay/smumgr\
- -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr
+ -I$(FULL_AMD_PATH)/pm/powerplay/hwmgr \
+ -I$(FULL_AMD_PATH)/pm/legacy-dpm
AMD_PM_PATH = ../pm
-PM_LIBS = swsmu powerplay
+PM_LIBS = swsmu powerplay legacy-dpm
AMD_PM = $(addsuffix /Makefile,$(addprefix $(FULL_AMD_PATH)/pm/,$(PM_LIBS)))
include $(AMD_PM)
-PM_MGR = amdgpu_dpm.o amdgpu_pm.o
+PM_MGR = amdgpu_dpm.o amdgpu_pm.o amdgpu_dpm_internal.o
AMD_PM_POWER = $(addprefix $(AMD_PM_PATH)/,$(PM_MGR))
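
The Makefile hunk above wires the new legacy-dpm directory into the build:
each entry in PM_LIBS must supply its own Makefile, which is pulled in via
include $(AMD_PM). The new legacy-dpm/Makefile itself is not shown in this
excerpt (only its diffstat entry appears above); modeled on the existing
swsmu and powerplay sub-Makefiles, a plausible shape would be:

    # hypothetical sketch -- the real legacy-dpm/Makefile is not in this diff
    AMD_LEGACYDPM_PATH = ../pm/legacy-dpm

    LEGACYDPM_MGR = legacy_dpm.o kv_dpm.o kv_smc.o si_dpm.o si_smc.o

    AMD_LEGACYDPM_POWER = $(addprefix $(AMD_LEGACYDPM_PATH)/,$(LEGACYDPM_MGR))

    AMD_POWERPLAY_FILES += $(AMD_LEGACYDPM_POWER)
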
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
index 08362d506534..89fbee568be4 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm.c
@@ -31,896 +31,41 @@
#include "amdgpu_display.h"
#include "hwmgr.h"
#include <linux/power_supply.h>
+#include "amdgpu_smu.h"
-#define WIDTH_4K 3840
+#define amdgpu_dpm_enable_bapm(adev, e) \
+ ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-void amdgpu_dpm_print_class_info(u32 class, u32 class2)
-{
- const char *s;
-
- switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
- case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
- default:
- s = "none";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
- s = "battery";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
- s = "balanced";
- break;
- case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
- s = "performance";
- break;
- }
- printk("\tui class: %s\n", s);
- printk("\tinternal class:");
- if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
- (class2 == 0))
- pr_cont(" none");
- else {
- if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
- pr_cont(" boot");
- if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- pr_cont(" thermal");
- if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
- pr_cont(" limited_pwr");
- if (class & ATOM_PPLIB_CLASSIFICATION_REST)
- pr_cont(" rest");
- if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
- pr_cont(" forced");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- pr_cont(" 3d_perf");
- if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
- pr_cont(" ovrdrv");
- if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
- pr_cont(" uvd");
- if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
- pr_cont(" 3d_low");
- if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- pr_cont(" acpi");
- if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- pr_cont(" uvd_hd2");
- if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- pr_cont(" uvd_hd");
- if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- pr_cont(" uvd_sd");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
- pr_cont(" limited_pwr2");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- pr_cont(" ulv");
- if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- pr_cont(" uvd_mvc");
- }
- pr_cont("\n");
-}
-
-void amdgpu_dpm_print_cap_info(u32 caps)
-{
- printk("\tcaps:");
- if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
- pr_cont(" single_disp");
- if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
- pr_cont(" video");
- if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
- pr_cont(" no_dc");
- pr_cont("\n");
-}
-
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps)
-{
- printk("\tstatus:");
- if (rps == adev->pm.dpm.current_ps)
- pr_cont(" c");
- if (rps == adev->pm.dpm.requested_ps)
- pr_cont(" r");
- if (rps == adev->pm.dpm.boot_ps)
- pr_cont(" b");
- pr_cont("\n");
-}
-
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
-{
- struct drm_device *ddev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
-
- adev->pm.dpm.new_active_crtcs = 0;
- adev->pm.dpm.new_active_crtc_count = 0;
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc,
- &ddev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (amdgpu_crtc->enabled) {
- adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
- adev->pm.dpm.new_active_crtc_count++;
- }
- }
- }
-}
-
-
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vblank_in_pixels;
- u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vblank_in_pixels =
- amdgpu_crtc->hw_mode.crtc_htotal *
- (amdgpu_crtc->hw_mode.crtc_vblank_end -
- amdgpu_crtc->hw_mode.crtc_vdisplay +
- (amdgpu_crtc->v_border * 2));
-
- vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
- break;
- }
- }
- }
-
- return vblank_time_us;
-}
-
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
-{
- struct drm_device *dev = adev_to_drm(adev);
- struct drm_crtc *crtc;
- struct amdgpu_crtc *amdgpu_crtc;
- u32 vrefresh = 0;
-
- if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- amdgpu_crtc = to_amdgpu_crtc(crtc);
- if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
- vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
- break;
- }
- }
- }
-
- return vrefresh;
-}
-
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
-{
- switch (sensor) {
- case THERMAL_TYPE_RV6XX:
- case THERMAL_TYPE_RV770:
- case THERMAL_TYPE_EVERGREEN:
- case THERMAL_TYPE_SUMO:
- case THERMAL_TYPE_NI:
- case THERMAL_TYPE_SI:
- case THERMAL_TYPE_CI:
- case THERMAL_TYPE_KV:
- return true;
- case THERMAL_TYPE_ADT7473_WITH_INTERNAL:
- case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
- return false; /* need special handling */
- case THERMAL_TYPE_NONE:
- case THERMAL_TYPE_EXTERNAL:
- case THERMAL_TYPE_EXTERNAL_GPIO:
- default:
- return false;
- }
-}
-
-union power_info {
- struct _ATOM_POWERPLAY_INFO info;
- struct _ATOM_POWERPLAY_INFO_V2 info_2;
- struct _ATOM_POWERPLAY_INFO_V3 info_3;
- struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
- struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
- struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
- struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
- struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
-};
-
-union fan_info {
- struct _ATOM_PPLIB_FANTABLE fan;
- struct _ATOM_PPLIB_FANTABLE2 fan2;
- struct _ATOM_PPLIB_FANTABLE3 fan3;
-};
-
-static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
- ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
-{
- u32 size = atom_table->ucNumEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- int i;
- ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
-
- amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
- if (!amdgpu_table->entries)
- return -ENOMEM;
-
- entry = &atom_table->entries[0];
- for (i = 0; i < atom_table->ucNumEntries; i++) {
- amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
- (entry->ucClockHigh << 16);
- amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
- }
- amdgpu_table->count = atom_table->ucNumEntries;
-
- return 0;
-}
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- union power_info *power_info;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
- adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
- adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
-
- return 0;
-}
-
-/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
-#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- union power_info *power_info;
- union fan_info *fan_info;
- ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- u16 data_offset;
- u8 frev, crev;
- int ret, i;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return -EINVAL;
- power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
-
- /* fan table */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
- if (power_info->pplib3.usFanTableOffset) {
- fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib3.usFanTableOffset));
- adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
- adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
- adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
- adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
- adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
- adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
- adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
- if (fan_info->fan.ucFanTableFormat >= 2)
- adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
- else
- adev->pm.dpm.fan.t_max = 10900;
- adev->pm.dpm.fan.cycle_delay = 100000;
- if (fan_info->fan.ucFanTableFormat >= 3) {
- adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
- adev->pm.dpm.fan.default_max_fan_pwm =
- le16_to_cpu(fan_info->fan3.usFanPWMMax);
- adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
- adev->pm.dpm.fan.fan_output_sensitivity =
- le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
- }
- adev->pm.dpm.fan.ucode_fan_control = true;
- }
- }
-
- /* clock dependancy tables, shedding tables */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
- if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
- dep_table);
- if (ret) {
- amdgpu_free_extended_power_table(adev);
- return ret;
- }
- }
- if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
- ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
- (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
- if (clk_v->ucNumEntries) {
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
- le16_to_cpu(clk_v->entries[0].usSclkLow) |
- (clk_v->entries[0].ucSclkHigh << 16);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
- le16_to_cpu(clk_v->entries[0].usMclkLow) |
- (clk_v->entries[0].ucMclkHigh << 16);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
- le16_to_cpu(clk_v->entries[0].usVddc);
- adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
- le16_to_cpu(clk_v->entries[0].usVddci);
- }
- }
- if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
- ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
- (ATOM_PPLIB_PhaseSheddingLimits_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
- ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
-
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
- kcalloc(psl->ucNumEntries,
- sizeof(struct amdgpu_phase_shedding_limits_entry),
- GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
-
- entry = &psl->entries[0];
- for (i = 0; i < psl->ucNumEntries; i++) {
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
- le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
- le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
- }
- adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
- psl->ucNumEntries;
- }
- }
-
- /* cac data */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
- adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
- adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
- adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
- adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
- if (adev->pm.dpm.tdp_od_limit)
- adev->pm.dpm.power_control = true;
- else
- adev->pm.dpm.power_control = false;
- adev->pm.dpm.tdp_adjustment = 0;
- adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
- adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
- adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
- if (power_info->pplib5.usCACLeakageTableOffset) {
- ATOM_PPLIB_CAC_Leakage_Table *cac_table =
- (ATOM_PPLIB_CAC_Leakage_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
- ATOM_PPLIB_CAC_Leakage_Record *entry;
- u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- entry = &cac_table->entries[0];
- for (i = 0; i < cac_table->ucNumEntries; i++) {
- if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
- le16_to_cpu(entry->usVddc1);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
- le16_to_cpu(entry->usVddc2);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
- le16_to_cpu(entry->usVddc3);
- } else {
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
- le16_to_cpu(entry->usVddc);
- adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
- le32_to_cpu(entry->ulLeakageValue);
- }
- entry = (ATOM_PPLIB_CAC_Leakage_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
- }
- adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
- }
- }
-
- /* ext tables */
- if (le16_to_cpu(power_info->pplib.usTableSize) >=
- sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
- ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
- ext_hdr->usVCETableOffset) {
- VCEClockInfoArray *array = (VCEClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
- 1 + array->ucNumEntries * sizeof(VCEClockInfo));
- ATOM_PPLIB_VCE_State_Table *states =
- (ATOM_PPLIB_VCE_State_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
- 1 + (array->ucNumEntries * sizeof (VCEClockInfo)) +
- 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
- ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
- ATOM_PPLIB_VCE_State_Record *state_entry;
- VCEClockInfo *vce_clk;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- state_entry = &states->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- vce_clk = (VCEClockInfo *)
- ((u8 *)&array->entries[0] +
- (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
- le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
- le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
- adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
- }
- adev->pm.dpm.num_of_vce_states =
- states->numEntries > AMD_MAX_VCE_LEVELS ?
- AMD_MAX_VCE_LEVELS : states->numEntries;
- for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
- vce_clk = (VCEClockInfo *)
- ((u8 *)&array->entries[0] +
- (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
- adev->pm.dpm.vce_states[i].evclk =
- le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
- adev->pm.dpm.vce_states[i].ecclk =
- le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
- adev->pm.dpm.vce_states[i].clk_idx =
- state_entry->ucClockInfoIndex & 0x3f;
- adev->pm.dpm.vce_states[i].pstate =
- (state_entry->ucClockInfoIndex & 0xc0) >> 6;
- state_entry = (ATOM_PPLIB_VCE_State_Record *)
- ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
- ext_hdr->usUVDTableOffset) {
- UVDClockInfoArray *array = (UVDClockInfoArray *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
- 1 + (array->ucNumEntries * sizeof (UVDClockInfo)));
- ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- UVDClockInfo *uvd_clk = (UVDClockInfo *)
- ((u8 *)&array->entries[0] +
- (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
- le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
- le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
- adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
- ext_hdr->usSAMUTableOffset) {
- ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
- ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
- le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
- adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
- ext_hdr->usPPMTableOffset) {
- ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPPMTableOffset));
- adev->pm.dpm.dyn_state.ppm_table =
- kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.ppm_table) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
- adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
- le16_to_cpu(ppm->usCpuCoreNumber);
- adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
- le32_to_cpu(ppm->ulPlatformTDP);
- adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
- le32_to_cpu(ppm->ulSmallACPlatformTDP);
- adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
- le32_to_cpu(ppm->ulPlatformTDC);
- adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
- le32_to_cpu(ppm->ulSmallACPlatformTDC);
- adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
- le32_to_cpu(ppm->ulApuTDP);
- adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
- le32_to_cpu(ppm->ulDGpuTDP);
- adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
- le32_to_cpu(ppm->ulDGpuUlvPower);
- adev->pm.dpm.dyn_state.ppm_table->tj_max =
- le32_to_cpu(ppm->ulTjmax);
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
- ext_hdr->usACPTableOffset) {
- ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
- (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
- ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
- u32 size = limits->numEntries *
- sizeof(struct amdgpu_clock_voltage_dependency_entry);
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
- kzalloc(size, GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
- limits->numEntries;
- entry = &limits->entries[0];
- for (i = 0; i < limits->numEntries; i++) {
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
- le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
- adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
- le16_to_cpu(entry->usVoltage);
- entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
- ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
- }
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
- ext_hdr->usPowerTuneTableOffset) {
- u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- ATOM_PowerTune_Table *pt;
- adev->pm.dpm.dyn_state.cac_tdp_table =
- kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
- if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
- amdgpu_free_extended_power_table(adev);
- return -ENOMEM;
- }
- if (rev > 0) {
- ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
- ppt->usMaximumPowerDeliveryLimit;
- pt = &ppt->power_tune_table;
- } else {
- ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
- adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
- pt = &ppt->power_tune_table;
- }
- adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
- adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
- le16_to_cpu(pt->usConfigurableTDP);
- adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
- adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
- le16_to_cpu(pt->usBatteryPowerLimit);
- adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
- le16_to_cpu(pt->usSmallPowerLimit);
- adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
- le16_to_cpu(pt->usLowCACLeakage);
- adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
- le16_to_cpu(pt->usHighCACLeakage);
- }
- if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
- ext_hdr->usSclkVddgfxTableOffset) {
- dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
- (mode_info->atom_context->bios + data_offset +
- le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
- ret = amdgpu_parse_clk_voltage_dep_table(
- &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
- dep_table);
- if (ret) {
- kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
- return ret;
- }
- }
- }
-
- return 0;
-}
-
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
-{
- struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
-
- kfree(dyn_state->vddc_dependency_on_sclk.entries);
- kfree(dyn_state->vddci_dependency_on_mclk.entries);
- kfree(dyn_state->vddc_dependency_on_mclk.entries);
- kfree(dyn_state->mvdd_dependency_on_mclk.entries);
- kfree(dyn_state->cac_leakage_table.entries);
- kfree(dyn_state->phase_shedding_limits_table.entries);
- kfree(dyn_state->ppm_table);
- kfree(dyn_state->cac_tdp_table);
- kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
- kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
- kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
- kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
- kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
-}
-
-static const char *pp_lib_thermal_controller_names[] = {
- "NONE",
- "lm63",
- "adm1032",
- "adm1030",
- "max6649",
- "lm64",
- "f75375",
- "RV6xx",
- "RV770",
- "adt7473",
- "NONE",
- "External GPIO",
- "Evergreen",
- "emc2103",
- "Sumo",
- "Northern Islands",
- "Southern Islands",
- "lm96163",
- "Sea Islands",
- "Kaveri/Kabini",
-};
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
-{
- struct amdgpu_mode_info *mode_info = &adev->mode_info;
- ATOM_PPLIB_POWERPLAYTABLE *power_table;
- int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
- ATOM_PPLIB_THERMALCONTROLLER *controller;
- struct amdgpu_i2c_bus_rec i2c_bus;
- u16 data_offset;
- u8 frev, crev;
-
- if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
- &frev, &crev, &data_offset))
- return;
- power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
- (mode_info->atom_context->bios + data_offset);
- controller = &power_table->sThermalController;
-
- /* add the i2c bus for thermal/fan chip */
- if (controller->ucType > 0) {
- if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
- adev->pm.no_fan = true;
- adev->pm.fan_pulses_per_revolution =
- controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
- if (adev->pm.fan_pulses_per_revolution) {
- adev->pm.fan_min_rpm = controller->ucFanMinRPM;
- adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
- }
- if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_NI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_SI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_CI;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
- DRM_INFO("Internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_KV;
- } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
- DRM_INFO("External GPIO thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
- DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
- } else if (controller->ucType ==
- ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
- DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
- } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
- DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
- pp_lib_thermal_controller_names[controller->ucType],
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
- i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
- adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
- if (adev->pm.i2c_bus) {
- struct i2c_board_info info = { };
- const char *name = pp_lib_thermal_controller_names[controller->ucType];
- info.addr = controller->ucI2cAddress >> 1;
- strlcpy(info.type, name, sizeof(info.type));
- i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
- }
- } else {
- DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
- controller->ucType,
- controller->ucI2cAddress >> 1,
- (controller->ucFanParameters &
- ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
- }
- }
-}
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen)
+int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
{
- switch (asic_gen) {
- case AMDGPU_PCIE_GEN1:
- return AMDGPU_PCIE_GEN1;
- case AMDGPU_PCIE_GEN2:
- return AMDGPU_PCIE_GEN2;
- case AMDGPU_PCIE_GEN3:
- return AMDGPU_PCIE_GEN3;
- default:
- if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
- (default_gen == AMDGPU_PCIE_GEN3))
- return AMDGPU_PCIE_GEN3;
- else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
- (default_gen == AMDGPU_PCIE_GEN2))
- return AMDGPU_PCIE_GEN2;
- else
- return AMDGPU_PCIE_GEN1;
- }
- return AMDGPU_PCIE_GEN1;
-}
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx)
-{
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!pp_funcs->get_sclk)
+ return 0;
- if (idx < adev->pm.dpm.num_of_vce_states)
- return &adev->pm.dpm.vce_states[idx];
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_sclk((adev)->powerplay.pp_handle,
+ low);
+ mutex_unlock(&adev->pm.mutex);
- return NULL;
+ return ret;
}
-int amdgpu_dpm_get_sclk(struct amdgpu_device *adev, bool low)
+int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
- return pp_funcs->get_sclk((adev)->powerplay.pp_handle, (low));
-}
+ if (!pp_funcs->get_mclk)
+ return 0;
-int amdgpu_dpm_get_mclk(struct amdgpu_device *adev, bool low)
-{
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_mclk((adev)->powerplay.pp_handle,
+ low);
+ mutex_unlock(&adev->pm.mutex);
- return pp_funcs->get_mclk((adev)->powerplay.pp_handle, (low));
+ return ret;
}
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block_type, bool gate)
@@ -935,52 +80,20 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
return 0;
}
+ mutex_lock(&adev->pm.mutex);
+
switch (block_type) {
case AMD_IP_BLOCK_TYPE_UVD:
case AMD_IP_BLOCK_TYPE_VCE:
- if (pp_funcs && pp_funcs->set_powergating_by_smu) {
- /*
- * TODO: need a better lock mechanism
- *
- * Here adev->pm.mutex lock protection is enforced on
- * UVD and VCE cases only. Since for other cases, there
- * may be already lock protection in amdgpu_pm.c.
- * This is a quick fix for the deadlock issue below.
- * NFO: task ocltst:2028 blocked for more than 120 seconds.
- * Tainted: G OE 5.0.0-37-generic #40~18.04.1-Ubuntu
- * echo 0 > /proc/sys/kernel/hung_task_timeout_secs" disables this message.
- * cltst D 0 2028 2026 0x00000000
- * all Trace:
- * __schedule+0x2c0/0x870
- * schedule+0x2c/0x70
- * schedule_preempt_disabled+0xe/0x10
- * __mutex_lock.isra.9+0x26d/0x4e0
- * __mutex_lock_slowpath+0x13/0x20
- * ? __mutex_lock_slowpath+0x13/0x20
- * mutex_lock+0x2f/0x40
- * amdgpu_dpm_set_powergating_by_smu+0x64/0xe0 [amdgpu]
- * gfx_v8_0_enable_gfx_static_mg_power_gating+0x3c/0x70 [amdgpu]
- * gfx_v8_0_set_powergating_state+0x66/0x260 [amdgpu]
- * amdgpu_device_ip_set_powergating_state+0x62/0xb0 [amdgpu]
- * pp_dpm_force_performance_level+0xe7/0x100 [amdgpu]
- * amdgpu_set_dpm_forced_performance_level+0x129/0x330 [amdgpu]
- */
- mutex_lock(&adev->pm.mutex);
- ret = (pp_funcs->set_powergating_by_smu(
- (adev)->powerplay.pp_handle, block_type, gate));
- mutex_unlock(&adev->pm.mutex);
- }
- break;
case AMD_IP_BLOCK_TYPE_GFX:
case AMD_IP_BLOCK_TYPE_VCN:
case AMD_IP_BLOCK_TYPE_SDMA:
case AMD_IP_BLOCK_TYPE_JPEG:
case AMD_IP_BLOCK_TYPE_GMC:
case AMD_IP_BLOCK_TYPE_ACP:
- if (pp_funcs && pp_funcs->set_powergating_by_smu) {
+ if (pp_funcs && pp_funcs->set_powergating_by_smu)
ret = (pp_funcs->set_powergating_by_smu(
(adev)->powerplay.pp_handle, block_type, gate));
- }
break;
default:
break;
@@ -989,6 +102,8 @@ int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev, uint32_t block
if (!ret)
atomic_set(&adev->pm.pwr_state[block_type], pwr_state);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -1001,9 +116,13 @@ int amdgpu_dpm_baco_enter(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->set_asic_baco_state)
return -ENOENT;
+ mutex_lock(&adev->pm.mutex);
+
/* enter BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -1016,9 +135,13 @@ int amdgpu_dpm_baco_exit(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->set_asic_baco_state)
return -ENOENT;
+ mutex_lock(&adev->pm.mutex);
+
/* exit BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
+ mutex_unlock(&adev->pm.mutex);
+
return ret;
}
@@ -1029,9 +152,13 @@ int amdgpu_dpm_set_mp1_state(struct amdgpu_device *adev,
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (pp_funcs && pp_funcs->set_mp1_state) {
+ mutex_lock(&adev->pm.mutex);
+
ret = pp_funcs->set_mp1_state(
adev->powerplay.pp_handle,
mp1_state);
+
+ mutex_unlock(&adev->pm.mutex);
}
return ret;
@@ -1042,25 +169,37 @@ bool amdgpu_dpm_is_baco_supported(struct amdgpu_device *adev)
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
bool baco_cap;
+ int ret = 0;
if (!pp_funcs || !pp_funcs->get_asic_baco_capability)
return false;
- if (pp_funcs->get_asic_baco_capability(pp_handle, &baco_cap))
- return false;
+ mutex_lock(&adev->pm.mutex);
- return baco_cap;
+ ret = pp_funcs->get_asic_baco_capability(pp_handle,
+ &baco_cap);
+
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret ? false : baco_cap;
}
int amdgpu_dpm_mode2_reset(struct amdgpu_device *adev)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
+ int ret = 0;
if (!pp_funcs || !pp_funcs->asic_reset_mode_2)
return -ENOENT;
- return pp_funcs->asic_reset_mode_2(pp_handle);
+ mutex_lock(&adev->pm.mutex);
+
+ ret = pp_funcs->asic_reset_mode_2(pp_handle);
+
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
}
int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
@@ -1072,37 +211,47 @@ int amdgpu_dpm_baco_reset(struct amdgpu_device *adev)
if (!pp_funcs || !pp_funcs->set_asic_baco_state)
return -ENOENT;
+ mutex_lock(&adev->pm.mutex);
+
/* enter BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 1);
if (ret)
- return ret;
+ goto out;
/* exit BACO state */
ret = pp_funcs->set_asic_baco_state(pp_handle, 0);
- if (ret)
- return ret;
- return 0;
+out:
+ mutex_unlock(&adev->pm.mutex);
+ return ret;
}
bool amdgpu_dpm_is_mode1_reset_supported(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ bool support_mode1_reset = false;
- if (is_support_sw_smu(adev))
- return smu_mode1_reset_is_support(smu);
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ support_mode1_reset = smu_mode1_reset_is_support(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
- return false;
+ return support_mode1_reset;
}
int amdgpu_dpm_mode1_reset(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = -EOPNOTSUPP;
- if (is_support_sw_smu(adev))
- return smu_mode1_reset(smu);
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_mode1_reset(smu);
+ mutex_unlock(&adev->pm.mutex);
+ }
- return -EOPNOTSUPP;
+ return ret;
}
int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
@@ -1115,9 +264,12 @@ int amdgpu_dpm_switch_power_profile(struct amdgpu_device *adev,
if (amdgpu_sriov_vf(adev))
return 0;
- if (pp_funcs && pp_funcs->switch_power_profile)
+ if (pp_funcs && pp_funcs->switch_power_profile) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->switch_power_profile(
adev->powerplay.pp_handle, type, en);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1128,9 +280,12 @@ int amdgpu_dpm_set_xgmi_pstate(struct amdgpu_device *adev,
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (pp_funcs && pp_funcs->set_xgmi_pstate)
+ if (pp_funcs && pp_funcs->set_xgmi_pstate) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_xgmi_pstate(adev->powerplay.pp_handle,
pstate);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1142,20 +297,27 @@ int amdgpu_dpm_set_df_cstate(struct amdgpu_device *adev,
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
void *pp_handle = adev->powerplay.pp_handle;
- if (pp_funcs && pp_funcs->set_df_cstate)
+ if (pp_funcs && pp_funcs->set_df_cstate) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_df_cstate(pp_handle, cstate);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
int amdgpu_dpm_allow_xgmi_power_down(struct amdgpu_device *adev, bool en)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
- if (is_support_sw_smu(adev))
- return smu_allow_xgmi_power_down(smu, en);
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_allow_xgmi_power_down(smu, en);
+ mutex_unlock(&adev->pm.mutex);
+ }
- return 0;
+ return ret;
}
int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
@@ -1165,8 +327,11 @@ int amdgpu_dpm_enable_mgpu_fan_boost(struct amdgpu_device *adev)
adev->powerplay.pp_funcs;
int ret = 0;
- if (pp_funcs && pp_funcs->enable_mgpu_fan_boost)
+ if (pp_funcs && pp_funcs->enable_mgpu_fan_boost) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->enable_mgpu_fan_boost(pp_handle);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1179,9 +344,12 @@ int amdgpu_dpm_set_clockgating_by_smu(struct amdgpu_device *adev,
adev->powerplay.pp_funcs;
int ret = 0;
- if (pp_funcs && pp_funcs->set_clockgating_by_smu)
+ if (pp_funcs && pp_funcs->set_clockgating_by_smu) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->set_clockgating_by_smu(pp_handle,
msg_id);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1194,9 +362,12 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
adev->powerplay.pp_funcs;
int ret = -EOPNOTSUPP;
- if (pp_funcs && pp_funcs->smu_i2c_bus_access)
+ if (pp_funcs && pp_funcs->smu_i2c_bus_access) {
+ mutex_lock(&adev->pm.mutex);
ret = pp_funcs->smu_i2c_bus_access(pp_handle,
acquire);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
@@ -1209,13 +380,15 @@ void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev)
adev->pm.ac_power = true;
else
adev->pm.ac_power = false;
+
if (adev->powerplay.pp_funcs &&
adev->powerplay.pp_funcs->enable_bapm)
amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power);
- mutex_unlock(&adev->pm.mutex);
if (is_support_sw_smu(adev))
- smu_set_ac_dc(&adev->smu);
+ smu_set_ac_dc(adev->powerplay.pp_handle);
+
+ mutex_unlock(&adev->pm.mutex);
}
}
@@ -1223,394 +396,1236 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
void *data, uint32_t *size)
{
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- int ret = 0;
+ int ret = -EINVAL;
if (!data || !size)
return -EINVAL;
- if (pp_funcs && pp_funcs->read_sensor)
- ret = pp_funcs->read_sensor((adev)->powerplay.pp_handle,
- sensor, data, size);
- else
- ret = -EINVAL;
+ if (pp_funcs && pp_funcs->read_sensor) {
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->read_sensor(adev->powerplay.pp_handle,
+ sensor,
+ data,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+ }
return ret;
}
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
{
- struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device,
- pm.dpm.thermal.work);
- /* switch to the thermal state */
- enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
- int temp, size = sizeof(temp);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
if (!adev->pm.dpm_enabled)
return;
- if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP,
- (void *)&temp, &size)) {
- if (temp < adev->pm.dpm.thermal.min_temp)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- } else {
- if (adev->pm.dpm.thermal.high_to_low)
- /* switch back the user state */
- dpm_state = adev->pm.dpm.user_state;
- }
+ if (!pp_funcs->pm_compute_clocks)
+ return;
+
mutex_lock(&adev->pm.mutex);
- if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
- adev->pm.dpm.thermal_active = true;
- else
- adev->pm.dpm.thermal_active = false;
- adev->pm.dpm.state = dpm_state;
+ pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
mutex_unlock(&adev->pm.mutex);
+}
- amdgpu_pm_compute_clocks(adev);
+void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
-static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
- enum amd_pm_state_type dpm_state)
+void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
{
- int i;
- struct amdgpu_ps *ps;
- u32 ui_class;
- bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ?
- true : false;
+ int ret = 0;
- /* check if the vblank period is too short to adjust the mclk */
- if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
- if (amdgpu_dpm_vblank_too_short(adev))
- single_display = false;
- }
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
+}
- /* certain older asics have a separare 3D performance state,
- * so try that first if the user selected performance
- */
- if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
- dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
- /* balanced states don't exist at the moment */
- if (dpm_state == POWER_STATE_TYPE_BALANCED)
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
-
-restart_search:
- /* Pick the best power state based on current conditions */
- for (i = 0; i < adev->pm.dpm.num_ps; i++) {
- ps = &adev->pm.dpm.ps[i];
- ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
- switch (dpm_state) {
- /* user states */
- case POWER_STATE_TYPE_BATTERY:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_BALANCED:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- case POWER_STATE_TYPE_PERFORMANCE:
- if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
- if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
- if (single_display)
- return ps;
- } else
- return ps;
- }
- break;
- /* internal states */
- case POWER_STATE_TYPE_INTERNAL_UVD:
- if (adev->pm.dpm.uvd_ps)
- return adev->pm.dpm.uvd_ps;
- else
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_BOOT:
- return adev->pm.dpm.boot_ps;
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_ULV:
- if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
- return ps;
- break;
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
- return ps;
- break;
- default:
- break;
- }
- }
- /* use a fallback state if we didn't match */
- switch (dpm_state) {
- case POWER_STATE_TYPE_INTERNAL_UVD_SD:
- dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_UVD_HD:
- case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
- case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
- if (adev->pm.dpm.uvd_ps) {
- return adev->pm.dpm.uvd_ps;
- } else {
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- }
- case POWER_STATE_TYPE_INTERNAL_THERMAL:
- dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
- goto restart_search;
- case POWER_STATE_TYPE_INTERNAL_ACPI:
- dpm_state = POWER_STATE_TYPE_BATTERY;
- goto restart_search;
- case POWER_STATE_TYPE_BATTERY:
- case POWER_STATE_TYPE_BALANCED:
- case POWER_STATE_TYPE_INTERNAL_3DPERF:
- dpm_state = POWER_STATE_TYPE_PERFORMANCE;
- goto restart_search;
- default:
- break;
- }
+void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
- return NULL;
+ ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
+ if (ret)
+ DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
+ enable ? "enable" : "disable", ret);
}
-static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
{
- struct amdgpu_ps *ps;
- enum amd_pm_state_type dpm_state;
- int ret;
- bool equal = false;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int r = 0;
- /* if dpm init failed */
- if (!adev->pm.dpm_enabled)
- return;
+ if (!pp_funcs || !pp_funcs->load_firmware)
+ return 0;
- if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
- /* add other state override checks here */
- if ((!adev->pm.dpm.thermal_active) &&
- (!adev->pm.dpm.uvd_active))
- adev->pm.dpm.state = adev->pm.dpm.user_state;
+ mutex_lock(&adev->pm.mutex);
+ r = pp_funcs->load_firmware(adev->powerplay.pp_handle);
+ if (r) {
+ pr_err("smu firmware loading failed\n");
+ goto out;
}
- dpm_state = adev->pm.dpm.state;
- ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
- if (ps)
- adev->pm.dpm.requested_ps = ps;
- else
- return;
+ if (smu_version)
+ *smu_version = adev->pm.fw_version;
- if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) {
- printk("switching from power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
- printk("switching to power state:\n");
- amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
+out:
+ mutex_unlock(&adev->pm.mutex);
+ return r;
+}
+
+int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable)
+{
+ int ret = 0;
+
+ if (is_support_sw_smu(adev)) {
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_handle_passthrough_sbr(adev->powerplay.pp_handle,
+ enable);
+ mutex_unlock(&adev->pm.mutex);
}
- /* update whether vce is active */
- ps->vce_active = adev->pm.dpm.vce_active;
- if (adev->powerplay.pp_funcs->display_configuration_changed)
- amdgpu_dpm_display_configuration_changed(adev);
+ return ret;
+}
- ret = amdgpu_dpm_pre_set_power_state(adev);
- if (ret)
- return;
+int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
- if (adev->powerplay.pp_funcs->check_state_equal) {
- if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
- equal = false;
- }
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_send_hbm_bad_pages_num(smu, size);
+ mutex_unlock(&adev->pm.mutex);
- if (equal)
- return;
+ return ret;
+}
- amdgpu_dpm_set_power_state(adev);
- amdgpu_dpm_post_set_power_state(adev);
-
- adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
- adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
-
- if (adev->powerplay.pp_funcs->force_performance_level) {
- if (adev->pm.dpm.thermal_active) {
- enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
- /* force low perf level for thermal */
- amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
- /* save the user's level */
- adev->pm.dpm.forced_level = level;
- } else {
- /* otherwise, user selected level */
- amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level);
- }
+int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_send_hbm_bad_channel_flag(smu, size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ int ret = 0;
+
+ if (type != PP_SCLK)
+ return -EINVAL;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_dpm_freq_range(adev->powerplay.pp_handle,
+ SMU_SCLK,
+ min,
+ max);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t min,
+ uint32_t max)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (type != PP_SCLK)
+ return -EINVAL;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_set_soft_freq_range(smu,
+ SMU_SCLK,
+ min,
+ max);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
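/*
 * Editor's note: illustrative sketch, not part of this patch. A hypothetical
 * helper that pins SCLK to its full hardware range using the two wrappers
 * above; both accept only PP_SCLK and serialize on adev->pm.mutex internally.
 */
static int amdgpu_pin_sclk_to_hw_range(struct amdgpu_device *adev)
{
        uint32_t min = 0, max = 0;
        int ret;

        /* Query the supported SCLK range first, then apply it as the soft range. */
        ret = amdgpu_dpm_get_dpm_freq_range(adev, PP_SCLK, &min, &max);
        if (ret)
                return ret;

        return amdgpu_dpm_set_soft_freq_range(adev, PP_SCLK, min, max);
}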
+
+int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_write_watermarks_table(smu);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev,
+ enum smu_event_type event,
+ uint64_t event_arg)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_wait_for_event(smu, event, event_arg);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_status_gfxoff(smu, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if (!is_support_sw_smu(adev))
+ return 0;
+
+ return atomic64_read(&smu->throttle_int_counter);
+}
+
+/**
+ * amdgpu_dpm_gfx_state_change - Handle a gfx power state change request
+ * @adev: amdgpu_device pointer
+ * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
+ */
+void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
+ enum gfx_change_state state)
+{
+ mutex_lock(&adev->pm.mutex);
+ if (adev->powerplay.pp_funcs &&
+ adev->powerplay.pp_funcs->gfx_state_change_set)
+ adev->powerplay.pp_funcs->gfx_state_change_set(
+ adev->powerplay.pp_handle, state);
+ mutex_unlock(&adev->pm.mutex);
+}
+
+int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
+ void *umc_ecc)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ int ret = 0;
+
+ if (!is_support_sw_smu(adev))
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = smu_get_ecc_info(smu, umc_ecc);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+ uint32_t idx)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct amd_vce_state *vstate = NULL;
+
+ if (!pp_funcs->get_vce_clock_state)
+ return NULL;
+
+ mutex_lock(&adev->pm.mutex);
+ vstate = pp_funcs->get_vce_clock_state(adev->powerplay.pp_handle,
+ idx);
+ mutex_unlock(&adev->pm.mutex);
+
+ return vstate;
+}
+
+void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type *state)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ mutex_lock(&adev->pm.mutex);
+
+ if (!pp_funcs->get_current_power_state) {
+ *state = adev->pm.dpm.user_state;
+ goto out;
}
+
+ *state = pp_funcs->get_current_power_state(adev->powerplay.pp_handle);
+ if (*state < POWER_STATE_TYPE_DEFAULT ||
+ *state > POWER_STATE_TYPE_INTERNAL_3DPERF)
+ *state = adev->pm.dpm.user_state;
+
+out:
+ mutex_unlock(&adev->pm.mutex);
}
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
+void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type state)
{
- int i = 0;
+ mutex_lock(&adev->pm.mutex);
+ adev->pm.dpm.user_state = state;
+ mutex_unlock(&adev->pm.mutex);
- if (!adev->pm.dpm_enabled)
+ if (is_support_sw_smu(adev))
return;
- if (adev->mode_info.num_crtc)
- amdgpu_display_bandwidth_update(adev);
+ if (amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_ENABLE_USER_STATE,
+ &state) == -EOPNOTSUPP)
+ amdgpu_dpm_compute_clocks(adev);
+}
- for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
- struct amdgpu_ring *ring = adev->rings[i];
- if (ring && ring->sched.ready)
- amdgpu_fence_wait_empty(ring);
- }
+enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ enum amd_dpm_forced_level level;
+
+ mutex_lock(&adev->pm.mutex);
+ if (pp_funcs->get_performance_level)
+ level = pp_funcs->get_performance_level(adev->powerplay.pp_handle);
+ else
+ level = adev->pm.dpm.forced_level;
+ mutex_unlock(&adev->pm.mutex);
+
+ return level;
+}
+
+int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
+ enum amd_dpm_forced_level level)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ enum amd_dpm_forced_level current_level;
+ uint32_t profile_mode_mask = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
+ AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
+
+ if (!pp_funcs->force_performance_level)
+ return 0;
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- if (!amdgpu_device_has_dc_support(adev)) {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
- adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
- adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
- /* we have issues with mclk switching with
- * refresh rates over 120 hz on the non-DC code.
- */
- if (adev->pm.pm_display_cfg.vrefresh > 120)
- adev->pm.pm_display_cfg.min_vblank_time = 0;
- if (adev->powerplay.pp_funcs->display_configuration_change)
- adev->powerplay.pp_funcs->display_configuration_change(
- adev->powerplay.pp_handle,
- &adev->pm.pm_display_cfg);
- mutex_unlock(&adev->pm.mutex);
+ if (adev->pm.dpm.thermal_active)
+ return -EINVAL;
+
+ current_level = amdgpu_dpm_get_performance_level(adev);
+ if (current_level == level)
+ return 0;
+
+ if (adev->asic_type == CHIP_RAVEN) {
+ if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
+ if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL &&
+ level == AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, false);
+ else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL &&
+ level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ amdgpu_gfx_off_ctrl(adev, true);
}
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL);
- } else {
- mutex_lock(&adev->pm.mutex);
- amdgpu_dpm_get_active_displays(adev);
- amdgpu_dpm_change_power_state_locked(adev);
+ }
+
+ if (!(current_level & profile_mode_mask) &&
+ (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT))
+ return -EINVAL;
+
+ if (!(current_level & profile_mode_mask) &&
+ (level & profile_mode_mask)) {
+ /* enter UMD Pstate */
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_UNGATE);
+ amdgpu_device_ip_set_clockgating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_UNGATE);
+ } else if ((current_level & profile_mode_mask) &&
+ !(level & profile_mode_mask)) {
+ /* exit UMD Pstate */
+ amdgpu_device_ip_set_clockgating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_CG_STATE_GATE);
+ amdgpu_device_ip_set_powergating_state(adev,
+ AMD_IP_BLOCK_TYPE_GFX,
+ AMD_PG_STATE_GATE);
+ }
+
+ mutex_lock(&adev->pm.mutex);
+
+ if (pp_funcs->force_performance_level(adev->powerplay.pp_handle,
+ level)) {
mutex_unlock(&adev->pm.mutex);
+ return -EINVAL;
}
+
+ adev->pm.dpm.forced_level = level;
+
+ mutex_unlock(&adev->pm.mutex);
+
+ return 0;
}
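/*
 * Editor's note: illustrative sketch, not part of this patch. A hypothetical
 * caller entering a UMD profile level for a measurement and restoring the
 * previous level afterwards; AMD_DPM_FORCED_LEVEL_PROFILE_PEAK is one of the
 * profile levels handled above.
 */
static int amdgpu_measure_at_peak(struct amdgpu_device *adev)
{
        enum amd_dpm_forced_level saved = amdgpu_dpm_get_performance_level(adev);
        int ret;

        ret = amdgpu_dpm_force_performance_level(adev,
                                                 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK);
        if (ret)
                return ret;

        /* ... measurement work would run here ... */

        return amdgpu_dpm_force_performance_level(adev, saved);
}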
-void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
+int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
+ struct pp_states_info *states)
{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.uvd_active = true;
- adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
- } else {
- adev->pm.dpm.uvd_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
+ if (!pp_funcs->get_pp_num_states)
+ return -EOPNOTSUPP;
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
- if (ret)
- DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
-
- /* enable/disable Low Memory PState for UVD (4k videos) */
- if (adev->asic_type == CHIP_STONEY &&
- adev->uvd.decode_image_width >= WIDTH_4K) {
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
-
- if (hwmgr && hwmgr->hwmgr_func &&
- hwmgr->hwmgr_func->update_nbdpm_pstate)
- hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr,
- !enable,
- true);
- }
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_pp_num_states(adev->powerplay.pp_handle,
+ states);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
+ enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->dispatch_tasks)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->dispatch_tasks(adev->powerplay.pp_handle,
+ task_id,
+ user_state);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_pp_table)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_pp_table(adev->powerplay.pp_handle,
+ table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
+ uint32_t type,
+ long *input,
+ uint32_t size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_fine_grain_clk_vol)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_fine_grain_clk_vol(adev->powerplay.pp_handle,
+ type,
+ input,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
+ uint32_t type,
+ long *input,
+ uint32_t size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->odn_edit_dpm_table)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->odn_edit_dpm_table(adev->powerplay.pp_handle,
+ type,
+ input,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ char *buf)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->print_clock_levels)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->print_clock_levels(adev->powerplay.pp_handle,
+ type,
+ buf);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ char *buf,
+ int *offset)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->emit_clock_levels)
+ return -ENOENT;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->emit_clock_levels(adev->powerplay.pp_handle,
+ type,
+ buf,
+ offset);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
+ uint64_t ppfeature_masks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_ppfeature_status)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_ppfeature_status(adev->powerplay.pp_handle,
+ ppfeature_masks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_ppfeature_status)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle,
+ buf);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t mask)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->force_clock_level)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->force_clock_level(adev->powerplay.pp_handle,
+ type,
+ mask);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_sclk_od)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_sclk_od(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (is_support_sw_smu(adev))
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ if (pp_funcs->set_sclk_od)
+ pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ if (amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL) == -EOPNOTSUPP) {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_dpm_compute_clocks(adev);
}
+
+ return 0;
}
-void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev)
{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- if (adev->family == AMDGPU_FAMILY_SI) {
- mutex_lock(&adev->pm.mutex);
- if (enable) {
- adev->pm.dpm.vce_active = true;
- /* XXX select vce level based on ring/task */
- adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
- } else {
- adev->pm.dpm.vce_active = false;
- }
- mutex_unlock(&adev->pm.mutex);
+ if (!pp_funcs->get_mclk_od)
+ return 0;
- amdgpu_pm_compute_clocks(adev);
- } else {
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
- if (ret)
- DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_mclk_od(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (is_support_sw_smu(adev))
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ if (pp_funcs->set_mclk_od)
+ pp_funcs->set_mclk_od(adev->powerplay.pp_handle, value);
+ mutex_unlock(&adev->pm.mutex);
+
+ if (amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL) == -EOPNOTSUPP) {
+ adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
+ amdgpu_dpm_compute_clocks(adev);
}
+
+ return 0;
}
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
+ char *buf)
{
- int i;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
- if (adev->powerplay.pp_funcs->print_power_state == NULL)
- return;
+ if (!pp_funcs->get_power_profile_mode)
+ return -EOPNOTSUPP;
- for (i = 0; i < adev->pm.dpm.num_ps; i++)
- amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_power_profile_mode(adev->powerplay.pp_handle,
+ buf);
+ mutex_unlock(&adev->pm.mutex);
+ return ret;
}
-void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable)
+int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
+ long *input, uint32_t size)
{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int ret = 0;
- ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable);
- if (ret)
- DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n",
- enable ? "enable" : "disable", ret);
+ if (!pp_funcs->set_power_profile_mode)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_power_profile_mode(adev->powerplay.pp_handle,
+ input,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
}
-int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version)
+int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table)
{
- int r;
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
- if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) {
- r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle);
- if (r) {
- pr_err("smu firmware loading failed\n");
- return r;
- }
+ if (!pp_funcs->get_gpu_metrics)
+ return 0;
- if (smu_version)
- *smu_version = adev->pm.fw_version;
- }
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_gpu_metrics(adev->powerplay.pp_handle,
+ table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
+ uint32_t *fan_mode)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_fan_control_mode)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_fan_control_mode(adev->powerplay.pp_handle,
+ fan_mode);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
+ uint32_t speed)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_fan_speed_pwm)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle,
+ speed);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
+ uint32_t *speed)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_fan_speed_pwm)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_fan_speed_pwm(adev->powerplay.pp_handle,
+ speed);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
+ uint32_t *speed)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_fan_speed_rpm)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_fan_speed_rpm(adev->powerplay.pp_handle,
+ speed);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
+ uint32_t speed)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_fan_speed_rpm)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_fan_speed_rpm(adev->powerplay.pp_handle,
+ speed);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
+ uint32_t mode)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_fan_control_mode)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_fan_control_mode(adev->powerplay.pp_handle,
+ mode);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
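/*
 * Editor's note: illustrative sketch, not part of this patch. Forcing a fixed
 * fan duty cycle by switching to manual control first; AMD_FAN_CTRL_MANUAL is
 * assumed to be the manual-mode value used elsewhere in amdgpu.
 */
static int amdgpu_force_fan_pwm(struct amdgpu_device *adev, uint32_t pwm)
{
        int ret;

        /* Manual mode must be selected before a fixed PWM value takes effect. */
        ret = amdgpu_dpm_set_fan_control_mode(adev, AMD_FAN_CTRL_MANUAL);
        if (ret)
                return ret;

        return amdgpu_dpm_set_fan_speed_pwm(adev, pwm);
}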
+
+int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
+ uint32_t *limit,
+ enum pp_power_limit_level pp_limit_level,
+ enum pp_power_type power_type)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_power_limit)
+ return -ENODATA;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_power_limit(adev->powerplay.pp_handle,
+ limit,
+ pp_limit_level,
+ power_type);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+ uint32_t limit)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_power_limit)
+ return -EINVAL;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_power_limit(adev->powerplay.pp_handle,
+ limit);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev)
+{
+ bool cclk_dpm_supported = false;
+
+ if (!is_support_sw_smu(adev))
+ return false;
+
+ mutex_lock(&adev->pm.mutex);
+ cclk_dpm_supported = is_support_cclk_dpm(adev);
+ mutex_unlock(&adev->pm.mutex);
+
+ return (int)cclk_dpm_supported;
+}
+
+int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ struct seq_file *m)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs->debugfs_print_current_performance_level)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ pp_funcs->debugfs_print_current_performance_level(adev->powerplay.pp_handle,
+ m);
+ mutex_unlock(&adev->pm.mutex);
return 0;
}
+
+int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
+ void **addr,
+ size_t *size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_smu_prv_buf_details)
+ return -ENOSYS;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_smu_prv_buf_details(adev->powerplay.pp_handle,
+ addr,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev)
+{
+ struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if (is_support_sw_smu(adev))
+ return smu->od_enabled || smu->is_apu;
+
+ return hwmgr->od_enabled;
+}
+
+int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
+ const char *buf,
+ size_t size)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_pp_table)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_pp_table(adev->powerplay.pp_handle,
+ buf,
+ size);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev)
+{
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ if (!is_support_sw_smu(adev))
+ return INT_MAX;
+
+ return smu->cpu_core_num;
+}
+
+void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev)
+{
+ if (!is_support_sw_smu(adev))
+ return;
+
+ amdgpu_smu_stb_debug_fs_init(adev);
+}
+
+int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
+ const struct amd_pp_display_configuration *input)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->display_configuration_change)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->display_configuration_change(adev->powerplay.pp_handle,
+ input);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_clock_by_type)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_clock_by_type(adev->powerplay.pp_handle,
+ type,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
+ struct amd_pp_simple_clock_info *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_display_mode_validation_clocks)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_display_mode_validation_clocks(adev->powerplay.pp_handle,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_clock_by_type_with_latency)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_clock_by_type_with_latency(adev->powerplay.pp_handle,
+ type,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_clock_by_type_with_voltage)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_clock_by_type_with_voltage(adev->powerplay.pp_handle,
+ type,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
+ void *clock_ranges)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_watermarks_for_clocks_ranges)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_watermarks_for_clocks_ranges(adev->powerplay.pp_handle,
+ clock_ranges);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
+ struct pp_display_clock_request *clock)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->display_clock_voltage_request)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->display_clock_voltage_request(adev->powerplay.pp_handle,
+ clock);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
+ struct amd_pp_clock_info *clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_current_clocks)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_current_clocks(adev->powerplay.pp_handle,
+ clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs->notify_smu_enable_pwe)
+ return;
+
+ mutex_lock(&adev->pm.mutex);
+ pp_funcs->notify_smu_enable_pwe(adev->powerplay.pp_handle);
+ mutex_unlock(&adev->pm.mutex);
+}
+
+int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
+ uint32_t count)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_active_display_count)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_active_display_count(adev->powerplay.pp_handle,
+ count);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
+ uint32_t clock)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->set_min_deep_sleep_dcefclk)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->set_min_deep_sleep_dcefclk(adev->powerplay.pp_handle,
+ clock);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
+ uint32_t clock)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs->set_hard_min_dcefclk_by_freq)
+ return;
+
+ mutex_lock(&adev->pm.mutex);
+ pp_funcs->set_hard_min_dcefclk_by_freq(adev->powerplay.pp_handle,
+ clock);
+ mutex_unlock(&adev->pm.mutex);
+}
+
+void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
+ uint32_t clock)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+
+ if (!pp_funcs->set_hard_min_fclk_by_freq)
+ return;
+
+ mutex_lock(&adev->pm.mutex);
+ pp_funcs->set_hard_min_fclk_by_freq(adev->powerplay.pp_handle,
+ clock);
+ mutex_unlock(&adev->pm.mutex);
+}
+
+int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
+ bool disable_memory_clock_switch)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->display_disable_memory_clock_switch)
+ return 0;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->display_disable_memory_clock_switch(adev->powerplay.pp_handle,
+ disable_memory_clock_switch);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
+ struct pp_smu_nv_clock_table *max_clocks)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_max_sustainable_clocks_by_dc)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_max_sustainable_clocks_by_dc(adev->powerplay.pp_handle,
+ max_clocks);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_uclk_dpm_states)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_uclk_dpm_states(adev->powerplay.pp_handle,
+ clock_values_in_khz,
+ num_states);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
+
+int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
+ struct dpm_clocks *clock_table)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ int ret = 0;
+
+ if (!pp_funcs->get_dpm_clock_table)
+ return -EOPNOTSUPP;
+
+ mutex_lock(&adev->pm.mutex);
+ ret = pp_funcs->get_dpm_clock_table(adev->powerplay.pp_handle,
+ clock_table);
+ mutex_unlock(&adev->pm.mutex);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
new file mode 100644
index 000000000000..42efe838fa85
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/amdgpu_dpm_internal.c
@@ -0,0 +1,95 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_display.h"
+#include "hwmgr.h"
+#include "amdgpu_smu.h"
+#include "amdgpu_dpm_internal.h"
+
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev)
+{
+ struct drm_device *ddev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+
+ adev->pm.dpm.new_active_crtcs = 0;
+ adev->pm.dpm.new_active_crtc_count = 0;
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc,
+ &ddev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (amdgpu_crtc->enabled) {
+ adev->pm.dpm.new_active_crtcs |= (1 << amdgpu_crtc->crtc_id);
+ adev->pm.dpm.new_active_crtc_count++;
+ }
+ }
+ }
+}
+
+u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+ u32 vblank_in_pixels;
+ u32 vblank_time_us = 0xffffffff; /* if the displays are off, vblank time is max */
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
+ vblank_in_pixels =
+ amdgpu_crtc->hw_mode.crtc_htotal *
+ (amdgpu_crtc->hw_mode.crtc_vblank_end -
+ amdgpu_crtc->hw_mode.crtc_vdisplay +
+ (amdgpu_crtc->v_border * 2));
+
+ vblank_time_us = vblank_in_pixels * 1000 / amdgpu_crtc->hw_mode.clock;
+ break;
+ }
+ }
+ }
+
+ return vblank_time_us;
+}
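/*
 * Editor's note: worked example with illustrative values. For a CEA 1080p60
 * mode (crtc_htotal = 2200, crtc_vblank_end = 1125, crtc_vdisplay = 1080,
 * no borders) and a 148500 kHz pixel clock, the computation above yields:
 *   vblank_in_pixels = 2200 * (1125 - 1080) = 99000
 *   vblank_time_us   = 99000 * 1000 / 148500 = 666 us
 */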
+
+u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev)
+{
+ struct drm_device *dev = adev_to_drm(adev);
+ struct drm_crtc *crtc;
+ struct amdgpu_crtc *amdgpu_crtc;
+ u32 vrefresh = 0;
+
+ if (adev->mode_info.num_crtc && adev->mode_info.mode_config_initialized) {
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ amdgpu_crtc = to_amdgpu_crtc(crtc);
+ if (crtc->enabled && amdgpu_crtc->enabled && amdgpu_crtc->hw_mode.clock) {
+ vrefresh = drm_mode_vrefresh(&amdgpu_crtc->hw_mode);
+ break;
+ }
+ }
+ }
+
+ return vrefresh;
+}
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index 48cc009d9bdf..5cd67ddf8495 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -34,7 +34,6 @@
#include <linux/nospec.h>
#include <linux/pm_runtime.h>
#include <asm/processor.h>
-#include "hwmgr.h"
static const struct cg_flag_name clocks[] = {
{AMD_CG_SUPPORT_GFX_FGCG, "Graphics Fine Grain Clock Gating"},
@@ -132,7 +131,6 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_pm_state_type pm;
int ret;
@@ -147,11 +145,7 @@ static ssize_t amdgpu_get_power_dpm_state(struct device *dev,
return ret;
}
- if (pp_funcs->get_current_power_state) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- } else {
- pm = adev->pm.dpm.user_state;
- }
+ amdgpu_dpm_get_current_power_state(adev, &pm);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -191,19 +185,8 @@ static ssize_t amdgpu_set_power_dpm_state(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.user_state = state;
- mutex_unlock(&adev->pm.mutex);
- } else if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state);
- } else {
- mutex_lock(&adev->pm.mutex);
- adev->pm.dpm.user_state = state;
- mutex_unlock(&adev->pm.mutex);
+ amdgpu_dpm_set_power_state(adev, state);
- amdgpu_pm_compute_clocks(adev);
- }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -290,10 +273,7 @@ static ssize_t amdgpu_get_power_dpm_force_performance_level(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_performance_level)
- level = amdgpu_dpm_get_performance_level(adev);
- else
- level = adev->pm.dpm.forced_level;
+ level = amdgpu_dpm_get_performance_level(adev);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -318,9 +298,7 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum amd_dpm_forced_level level;
- enum amd_dpm_forced_level current_level;
int ret = 0;
if (amdgpu_in_reset(adev))
@@ -358,57 +336,17 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
return ret;
}
- if (pp_funcs->get_performance_level)
- current_level = amdgpu_dpm_get_performance_level(adev);
- else
- current_level = adev->pm.dpm.forced_level;
-
- if (current_level == level) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return count;
- }
-
- if (adev->asic_type == CHIP_RAVEN) {
- if (!(adev->apu_flags & AMD_APU_IS_RAVEN2)) {
- if (current_level != AMD_DPM_FORCED_LEVEL_MANUAL && level == AMD_DPM_FORCED_LEVEL_MANUAL)
- amdgpu_gfx_off_ctrl(adev, false);
- else if (current_level == AMD_DPM_FORCED_LEVEL_MANUAL && level != AMD_DPM_FORCED_LEVEL_MANUAL)
- amdgpu_gfx_off_ctrl(adev, true);
- }
- }
-
- /* profile_exit setting is valid only when current mode is in profile mode */
- if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK |
- AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) &&
- (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) {
- pr_err("Currently not in any profile mode!\n");
+ mutex_lock(&adev->pm.stable_pstate_ctx_lock);
+ if (amdgpu_dpm_force_performance_level(adev, level)) {
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
+ mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
return -EINVAL;
}
+ /* override whatever a user ctx may have set */
+ adev->pm.stable_pstate_ctx = NULL;
+ mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
- if (pp_funcs->force_performance_level) {
- mutex_lock(&adev->pm.mutex);
- if (adev->pm.dpm.thermal_active) {
- mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- ret = amdgpu_dpm_force_performance_level(adev, level);
- if (ret) {
- mutex_unlock(&adev->pm.mutex);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- } else {
- adev->pm.dpm.forced_level = level;
- }
- mutex_unlock(&adev->pm.mutex);
- }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -421,7 +359,6 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data;
uint32_t i;
int buf_len, ret;
@@ -437,11 +374,8 @@ static ssize_t amdgpu_get_pp_num_states(struct device *dev,
return ret;
}
- if (pp_funcs->get_pp_num_states) {
- amdgpu_dpm_get_pp_num_states(adev, &data);
- } else {
+ if (amdgpu_dpm_get_pp_num_states(adev, &data))
memset(&data, 0, sizeof(data));
- }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -463,7 +397,6 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
struct pp_states_info data = {0};
enum amd_pm_state_type pm = 0;
int i = 0, ret = 0;
@@ -479,15 +412,16 @@ static ssize_t amdgpu_get_pp_cur_state(struct device *dev,
return ret;
}
- if (pp_funcs->get_current_power_state
- && pp_funcs->get_pp_num_states) {
- pm = amdgpu_dpm_get_current_power_state(adev);
- amdgpu_dpm_get_pp_num_states(adev, &data);
- }
+ amdgpu_dpm_get_current_power_state(adev, &pm);
+
+ ret = amdgpu_dpm_get_pp_num_states(adev, &data);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
+ if (ret)
+ return ret;
+
for (i = 0; i < data.nums; i++) {
if (pm == data.states[i])
break;
@@ -511,7 +445,7 @@ static ssize_t amdgpu_get_pp_force_state(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- if (adev->pp_force_state_enabled)
+ if (adev->pm.pp_force_state_enabled)
return amdgpu_get_pp_cur_state(dev, attr, buf);
else
return sysfs_emit(buf, "\n");
@@ -525,6 +459,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
enum amd_pm_state_type state = 0;
+ struct pp_states_info data;
unsigned long idx;
int ret;
@@ -533,41 +468,49 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- if (strlen(buf) == 1)
- adev->pp_force_state_enabled = false;
- else if (is_support_sw_smu(adev))
- adev->pp_force_state_enabled = false;
- else if (adev->powerplay.pp_funcs->dispatch_tasks &&
- adev->powerplay.pp_funcs->get_pp_num_states) {
- struct pp_states_info data;
-
- ret = kstrtoul(buf, 0, &idx);
- if (ret || idx >= ARRAY_SIZE(data.states))
- return -EINVAL;
+ adev->pm.pp_force_state_enabled = false;
- idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
+ if (strlen(buf) == 1)
+ return count;
- amdgpu_dpm_get_pp_num_states(adev, &data);
- state = data.states[idx];
+ ret = kstrtoul(buf, 0, &idx);
+ if (ret || idx >= ARRAY_SIZE(data.states))
+ return -EINVAL;
- ret = pm_runtime_get_sync(ddev->dev);
- if (ret < 0) {
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
+ idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
- /* only set user selected power states */
- if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
- state != POWER_STATE_TYPE_DEFAULT) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_ENABLE_USER_STATE, &state);
- adev->pp_force_state_enabled = true;
- }
- pm_runtime_mark_last_busy(ddev->dev);
+ ret = pm_runtime_get_sync(ddev->dev);
+ if (ret < 0) {
pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
+ }
+
+ ret = amdgpu_dpm_get_pp_num_states(adev, &data);
+ if (ret)
+ goto err_out;
+
+ state = data.states[idx];
+
+ /* only set user selected power states */
+ if (state != POWER_STATE_TYPE_INTERNAL_BOOT &&
+ state != POWER_STATE_TYPE_DEFAULT) {
+ ret = amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_ENABLE_USER_STATE, &state);
+ if (ret)
+ goto err_out;
+
+ adev->pm.pp_force_state_enabled = true;
}
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
return count;
+
+err_out:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return ret;
}
/**
@@ -601,17 +544,13 @@ static ssize_t amdgpu_get_pp_table(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_pp_table) {
- size = amdgpu_dpm_get_pp_table(adev, &table);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- if (size < 0)
- return size;
- } else {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return 0;
- }
+ size = amdgpu_dpm_get_pp_table(adev, &table);
+
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+
+ if (size <= 0)
+ return size;
if (size >= PAGE_SIZE)
size = PAGE_SIZE - 1;
@@ -642,15 +581,13 @@ static ssize_t amdgpu_set_pp_table(struct device *dev,
}
ret = amdgpu_dpm_set_pp_table(adev, buf, count);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return ret;
- }
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
+ if (ret)
+ return ret;
+
return count;
}
@@ -866,46 +803,32 @@ static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->set_fine_grain_clk_vol) {
- ret = amdgpu_dpm_set_fine_grain_clk_vol(adev, type,
- parameter,
- parameter_size);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
+ if (amdgpu_dpm_set_fine_grain_clk_vol(adev,
+ type,
+ parameter,
+ parameter_size))
+ goto err_out;
- if (adev->powerplay.pp_funcs->odn_edit_dpm_table) {
- ret = amdgpu_dpm_odn_edit_dpm_table(adev, type,
- parameter, parameter_size);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
+ if (amdgpu_dpm_odn_edit_dpm_table(adev, type,
+ parameter, parameter_size))
+ goto err_out;
if (type == PP_OD_COMMIT_DPM_TABLE) {
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev,
- AMD_PP_TASK_READJUST_POWER_STATE,
- NULL);
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return count;
- } else {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
+ if (amdgpu_dpm_dispatch_task(adev,
+ AMD_PP_TASK_READJUST_POWER_STATE,
+ NULL))
+ goto err_out;
}
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
return count;
+
+err_out:
+ pm_runtime_mark_last_busy(ddev->dev);
+ pm_runtime_put_autosuspend(ddev->dev);
+ return -EINVAL;
}
static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
@@ -914,8 +837,17 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
+ int size = 0;
int ret;
+ enum pp_clock_type od_clocks[6] = {
+ OD_SCLK,
+ OD_MCLK,
+ OD_VDDC_CURVE,
+ OD_RANGE,
+ OD_VDDGFX_OFFSET,
+ OD_CCLK,
+ };
+ uint clk_index;
if (amdgpu_in_reset(adev))
return -EPERM;
@@ -928,16 +860,25 @@ static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->print_clock_levels) {
+ for (clk_index = 0; clk_index < ARRAY_SIZE(od_clocks); clk_index++) {
+ ret = amdgpu_dpm_emit_clock_levels(adev, od_clocks[clk_index], buf, &size);
+ if (ret)
+ break;
+ }
+ if (ret == -ENOENT) {
size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf);
- size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size);
- size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf+size);
- } else {
- size = sysfs_emit(buf, "\n");
+ if (size > 0) {
+ size += amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_VDDGFX_OFFSET, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf + size);
+ size += amdgpu_dpm_print_clock_levels(adev, OD_CCLK, buf + size);
+ }
}
+
+ if (size == 0)
+ size = sysfs_emit(buf, "\n");
+
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -985,17 +926,14 @@ static ssize_t amdgpu_set_pp_features(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->set_ppfeature_status) {
- ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
- if (ret) {
- pm_runtime_mark_last_busy(ddev->dev);
- pm_runtime_put_autosuspend(ddev->dev);
- return -EINVAL;
- }
- }
+ ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask);
+
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
+ if (ret)
+ return -EINVAL;
+
return count;
}
@@ -1019,9 +957,8 @@ static ssize_t amdgpu_get_pp_features(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_ppfeature_status)
- size = amdgpu_dpm_get_ppfeature_status(adev, buf);
- else
+ size = amdgpu_dpm_get_ppfeature_status(adev, buf);
+ if (size <= 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
@@ -1066,8 +1003,8 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = drm_to_adev(ddev);
- ssize_t size;
- int ret;
+ int size = 0;
+ int ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
@@ -1080,9 +1017,11 @@ static ssize_t amdgpu_get_pp_dpm_clock(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->print_clock_levels)
+ ret = amdgpu_dpm_emit_clock_levels(adev, type, buf, &size);
+ if (ret == -ENOENT)
size = amdgpu_dpm_print_clock_levels(adev, type, buf);
- else
+
+ if (size == 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
@@ -1151,10 +1090,7 @@ static ssize_t amdgpu_set_pp_dpm_clock(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->force_clock_level)
- ret = amdgpu_dpm_force_clock_level(adev, type, mask);
- else
- ret = 0;
+ ret = amdgpu_dpm_force_clock_level(adev, type, mask);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1305,10 +1241,7 @@ static ssize_t amdgpu_get_pp_sclk_od(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- value = 0;
- else if (adev->powerplay.pp_funcs->get_sclk_od)
- value = amdgpu_dpm_get_sclk_od(adev);
+ value = amdgpu_dpm_get_sclk_od(adev);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1342,19 +1275,7 @@ static ssize_t amdgpu_set_pp_sclk_od(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- value = 0;
- } else {
- if (adev->powerplay.pp_funcs->set_sclk_od)
- amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
-
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- } else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
- }
+ amdgpu_dpm_set_sclk_od(adev, (uint32_t)value);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
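Note that the dispatch of AMD_PP_TASK_READJUST_POWER_STATE disappears from the sysfs handler rather than from the driver: after this refactor it plausibly lives inside the wrapper itself. One possible shape, hedged since the wrapper body is not part of this hunk:

    int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value)
    {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        if (is_support_sw_smu(adev))
            return -EOPNOTSUPP;     /* knob never existed on the sw-smu path */

        if (pp_funcs->set_sclk_od)
            pp_funcs->set_sclk_od(adev->powerplay.pp_handle, value);

        /* the readjust task moves here, out of the sysfs handler;
         * legacy fallback (boot_ps + compute clocks) elided for brevity */
        if (pp_funcs->dispatch_tasks)
            amdgpu_dpm_dispatch_task(adev,
                                     AMD_PP_TASK_READJUST_POWER_STATE,
                                     NULL);

        return 0;
    }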
@@ -1382,10 +1303,7 @@ static ssize_t amdgpu_get_pp_mclk_od(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev))
- value = 0;
- else if (adev->powerplay.pp_funcs->get_mclk_od)
- value = amdgpu_dpm_get_mclk_od(adev);
+ value = amdgpu_dpm_get_mclk_od(adev);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1419,19 +1337,7 @@ static ssize_t amdgpu_set_pp_mclk_od(struct device *dev,
return ret;
}
- if (is_support_sw_smu(adev)) {
- value = 0;
- } else {
- if (adev->powerplay.pp_funcs->set_mclk_od)
- amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
-
- if (adev->powerplay.pp_funcs->dispatch_tasks) {
- amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- } else {
- adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps;
- amdgpu_pm_compute_clocks(adev);
- }
- }
+ amdgpu_dpm_set_mclk_od(adev, (uint32_t)value);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1479,9 +1385,8 @@ static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_power_profile_mode)
- size = amdgpu_dpm_get_power_profile_mode(adev, buf);
- else
+ size = amdgpu_dpm_get_power_profile_mode(adev, buf);
+ if (size <= 0)
size = sysfs_emit(buf, "\n");
pm_runtime_mark_last_busy(ddev->dev);
@@ -1545,8 +1450,7 @@ static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->set_power_profile_mode)
- ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
+ ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size);
pm_runtime_mark_last_busy(ddev->dev);
pm_runtime_put_autosuspend(ddev->dev);
@@ -1812,9 +1716,7 @@ static ssize_t amdgpu_get_gpu_metrics(struct device *dev,
return ret;
}
- if (adev->powerplay.pp_funcs->get_gpu_metrics)
- size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
-
+ size = amdgpu_dpm_get_gpu_metrics(adev, &gpu_metrics);
if (size <= 0)
goto out;
@@ -1968,7 +1870,7 @@ static ssize_t amdgpu_set_smartshift_bias(struct device *dev,
amdgpu_smartshift_bias = bias;
r = count;
- /* TODO: upadte bias level with SMU message */
+ /* TODO: update bias level with SMU message */
out:
pm_runtime_mark_last_busy(ddev->dev);
@@ -2027,8 +1929,8 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC),
- AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+ AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
AMDGPU_DEVICE_ATTR_RW(pp_sclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_mclk_od, ATTR_FLAG_BASIC),
AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode, ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2053,7 +1955,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
{
struct device_attribute *dev_attr = &attr->dev_attr;
const char *attr_name = dev_attr->attr.name;
- struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
enum amd_asic_type asic_type = adev->asic_type;
if (!(attr->flags & mask)) {
@@ -2076,9 +1977,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_od_clk_voltage)) {
*states = ATTR_STATE_UNSUPPORTED;
- if ((is_support_sw_smu(adev) && adev->smu.od_enabled) ||
- (is_support_sw_smu(adev) && adev->smu.is_apu) ||
- (!is_support_sw_smu(adev) && hwmgr->od_enabled))
+ if (amdgpu_dpm_is_overdrive_supported(adev))
*states = ATTR_STATE_SUPPORTED;
} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
if (adev->flags & AMD_IS_APU || asic_type == CHIP_VEGA10)
@@ -2106,8 +2005,7 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
if (!(asic_type == CHIP_VANGOGH || asic_type == CHIP_SIENNA_CICHLID))
*states = ATTR_STATE_UNSUPPORTED;
} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
- if (!adev->powerplay.pp_funcs->get_power_profile_mode ||
- amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
+ if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
*states = ATTR_STATE_UNSUPPORTED;
}
@@ -2134,8 +2032,8 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
}
}
- /* setting should not be allowed from VF */
- if (amdgpu_sriov_vf(adev)) {
+ /* setting should not be allowed from VF if not in one VF mode */
+ if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
dev_attr->attr.mode &= ~S_IWUGO;
dev_attr->store = NULL;
}
@@ -2396,17 +2294,14 @@ static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev,
return ret;
}
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ if (ret)
+ return -EINVAL;
+
return sysfs_emit(buf, "%u\n", pwm_mode);
}
@@ -2434,17 +2329,14 @@ static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev,
return ret;
}
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- amdgpu_dpm_set_fan_control_mode(adev, value);
+ ret = amdgpu_dpm_set_fan_control_mode(adev, value);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ if (ret)
+ return -EINVAL;
+
return count;
}
@@ -2476,32 +2368,29 @@ static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
+ if (err)
+ goto out;
+
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
pr_info("manual fan speed control should be enabled first\n");
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- err = kstrtou32(buf, 10, &value);
- if (err) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
+ err = -EINVAL;
+ goto out;
}
- if (adev->powerplay.pp_funcs->set_fan_speed_pwm)
- err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_set_fan_speed_pwm(adev, value);
+out:
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
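Two structural changes ride along in the pwm1 store path: kstrtou32() moves ahead of pm_runtime_get_sync(), so malformed input is rejected before the device is woken, and the remaining failures share one out label. The reordering is worth imitating in any store handler; roughly:

    static ssize_t example_set(struct device *dev, const char *buf, size_t count)
    {
        u32 value;
        int err;

        /* parse first: a bad string should not power up the GPU */
        err = kstrtou32(buf, 10, &value);
        if (err)
            return err;

        err = pm_runtime_get_sync(dev);
        if (err < 0) {
            pm_runtime_put_autosuspend(dev);
            return err;
        }

        err = apply_value(dev, value);  /* hypothetical helper */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);
        return err ? err : count;
    }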
@@ -2530,10 +2419,7 @@ static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev,
return err;
}
- if (adev->powerplay.pp_funcs->get_fan_speed_pwm)
- err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_get_fan_speed_pwm(adev, &speed);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2563,10 +2449,7 @@ static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev,
return err;
}
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2660,10 +2543,7 @@ static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev,
return err;
}
- if (adev->powerplay.pp_funcs->get_fan_speed_rpm)
- err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2688,32 +2568,28 @@ static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
+ err = kstrtou32(buf, 10, &value);
+ if (err)
+ return err;
+
err = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (err < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return err;
}
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ err = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
+ if (err)
+ goto out;
if (pwm_mode != AMD_FAN_CTRL_MANUAL) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -ENODATA;
- }
-
- err = kstrtou32(buf, 10, &value);
- if (err) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return err;
+ err = -ENODATA;
+ goto out;
}
- if (adev->powerplay.pp_funcs->set_fan_speed_rpm)
- err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_set_fan_speed_rpm(adev, value);
+out:
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -2742,17 +2618,14 @@ static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev,
return ret;
}
- if (!adev->powerplay.pp_funcs->get_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
-
- pwm_mode = amdgpu_dpm_get_fan_control_mode(adev);
+ ret = amdgpu_dpm_get_fan_control_mode(adev, &pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ if (ret)
+ return -EINVAL;
+
return sysfs_emit(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1);
}
@@ -2788,16 +2661,14 @@ static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev,
return err;
}
- if (!adev->powerplay.pp_funcs->set_fan_control_mode) {
- pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
- pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
- return -EINVAL;
- }
- amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
+ err = amdgpu_dpm_set_fan_control_mode(adev, pwm_mode);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
+ if (err)
+ return -EINVAL;
+
return count;
}
@@ -2933,7 +2804,6 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
enum pp_power_limit_level pp_limit_level)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
enum pp_power_type power_type = to_sensor_dev_attr(attr)->index;
uint32_t limit;
ssize_t size;
@@ -2944,16 +2814,13 @@ static ssize_t amdgpu_hwmon_show_power_cap_generic(struct device *dev,
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- if ( !(pp_funcs && pp_funcs->get_power_limit))
- return -ENODATA;
-
r = pm_runtime_get_sync(adev_to_drm(adev)->dev);
if (r < 0) {
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
return r;
}
- r = pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit,
+ r = amdgpu_dpm_get_power_limit(adev, &limit,
pp_limit_level, power_type);
if (!r)
@@ -2996,10 +2863,14 @@ static ssize_t amdgpu_hwmon_show_power_label(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- int limit_type = to_sensor_dev_attr(attr)->index;
+ struct amdgpu_device *adev = dev_get_drvdata(dev);
- return sysfs_emit(buf, "%s\n",
- limit_type == SMU_FAST_PPT_LIMIT ? "fastPPT" : "slowPPT");
+ if (adev->asic_type == CHIP_VANGOGH)
+ return sysfs_emit(buf, "%s\n",
+ to_sensor_dev_attr(attr)->index == PP_PWR_TYPE_FAST ?
+ "fastPPT" : "slowPPT");
+ else
+ return sysfs_emit(buf, "PPT\n");
}
static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
@@ -3008,7 +2879,6 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
size_t count)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
int limit_type = to_sensor_dev_attr(attr)->index;
int err;
u32 value;
@@ -3034,10 +2904,7 @@ static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev,
return err;
}
- if (pp_funcs && pp_funcs->set_power_limit)
- err = pp_funcs->set_power_limit(adev->powerplay.pp_handle, value);
- else
- err = -EINVAL;
+ err = amdgpu_dpm_set_power_limit(adev, value);
pm_runtime_mark_last_busy(adev_to_drm(adev)->dev);
pm_runtime_put_autosuspend(adev_to_drm(adev)->dev);
@@ -3315,18 +3182,9 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
- /* there is no fan under pp one vf mode */
- if (amdgpu_sriov_is_pp_one_vf(adev) &&
- (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_input.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_target.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
- return 0;
+	/* under pp one vf mode, management of hwmon attributes is not supported */
+ if (amdgpu_sriov_is_pp_one_vf(adev))
+ effective_mode &= ~S_IWUSR;
/* Skip fan attributes if fan is not present */
if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr ||
@@ -3374,20 +3232,18 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
attr == &sensor_dev_attr_fan1_enable.dev_attr.attr))
return 0;
- if (!is_support_sw_smu(adev)) {
- /* mask fan attributes if we have no bindings for this asic to expose */
- if ((!adev->powerplay.pp_funcs->get_fan_speed_pwm &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
- (!adev->powerplay.pp_funcs->get_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
- effective_mode &= ~S_IRUGO;
+ /* mask fan attributes if we have no bindings for this asic to expose */
+ if (((amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */
+ ((amdgpu_dpm_get_fan_control_mode(adev, NULL) == -EOPNOTSUPP) &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */
+ effective_mode &= ~S_IRUGO;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
- attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
- (!adev->powerplay.pp_funcs->set_fan_control_mode &&
- attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
- effective_mode &= ~S_IWUSR;
- }
+ if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
+ attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */
+ ((amdgpu_dpm_set_fan_control_mode(adev, U32_MAX) == -EOPNOTSUPP) &&
+ attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */
+ effective_mode &= ~S_IWUSR;
if (((adev->family == AMDGPU_FAMILY_SI) ||
((adev->flags & AMD_IS_APU) &&
@@ -3404,22 +3260,20 @@ static umode_t hwmon_attributes_visible(struct kobject *kobj,
(attr == &sensor_dev_attr_power1_average.dev_attr.attr))
return 0;
- if (!is_support_sw_smu(adev)) {
- /* hide max/min values if we can't both query and manage the fan */
- if ((!adev->powerplay.pp_funcs->set_fan_speed_pwm &&
- !adev->powerplay.pp_funcs->get_fan_speed_pwm) &&
- (!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
- return 0;
+ /* hide max/min values if we can't both query and manage the fan */
+ if (((amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP) &&
+ (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP) &&
+ (amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
+ (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP)) &&
+ (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_pwm1_min.dev_attr.attr))
+ return 0;
- if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm &&
- !adev->powerplay.pp_funcs->get_fan_speed_rpm) &&
- (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
- attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
- return 0;
- }
+ if ((amdgpu_dpm_set_fan_speed_rpm(adev, U32_MAX) == -EOPNOTSUPP) &&
+ (amdgpu_dpm_get_fan_speed_rpm(adev, NULL) == -EOPNOTSUPP) &&
+ (attr == &sensor_dev_attr_fan1_max.dev_attr.attr ||
+ attr == &sensor_dev_attr_fan1_min.dev_attr.attr))
+ return 0;
if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */
adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */
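Instead of peeking at pp_funcs pointers, hwmon_attributes_visible now probes each wrapper with harmless arguments (a NULL out-pointer for getters, U32_MAX for setters) and interprets -EOPNOTSUPP as "no backend", masking the read or write bit accordingly. A trimmed sketch of that probe-and-mask flow, assuming the wrappers bail out with -EOPNOTSUPP before touching hardware:

    static umode_t example_visible(struct amdgpu_device *adev,
                                   struct attribute *attr, umode_t mode)
    {
        /* cannot query fan speed: hide the read side of pwm1 */
        if (amdgpu_dpm_get_fan_speed_pwm(adev, NULL) == -EOPNOTSUPP)
            mode &= ~S_IRUGO;

        /* cannot set fan speed: hide the write side of pwm1 */
        if (amdgpu_dpm_set_fan_speed_pwm(adev, U32_MAX) == -EOPNOTSUPP)
            mode &= ~S_IWUSR;

        return mode;
    }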
@@ -3548,14 +3402,15 @@ static void amdgpu_debugfs_prints_cpu_info(struct seq_file *m,
uint16_t *p_val;
uint32_t size;
int i;
+ uint32_t num_cpu_cores = amdgpu_dpm_get_num_cpu_cores(adev);
- if (is_support_cclk_dpm(adev)) {
- p_val = kcalloc(adev->smu.cpu_core_num, sizeof(uint16_t),
+ if (amdgpu_dpm_is_cclk_dpm_supported(adev)) {
+ p_val = kcalloc(num_cpu_cores, sizeof(uint16_t),
GFP_KERNEL);
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_CPU_CLK,
(void *)p_val, &size)) {
- for (i = 0; i < adev->smu.cpu_core_num; i++)
+ for (i = 0; i < num_cpu_cores; i++)
seq_printf(m, "\t%u MHz (CPU%d)\n",
*(p_val + i), i);
}
@@ -3683,27 +3538,11 @@ static int amdgpu_debugfs_pm_info_show(struct seq_file *m, void *unused)
return r;
}
- if (!adev->pm.dpm_enabled) {
- seq_printf(m, "dpm not enabled\n");
- pm_runtime_mark_last_busy(dev->dev);
- pm_runtime_put_autosuspend(dev->dev);
- return 0;
- }
-
- if (!is_support_sw_smu(adev) &&
- adev->powerplay.pp_funcs->debugfs_print_current_performance_level) {
- mutex_lock(&adev->pm.mutex);
- if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level)
- adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m);
- else
- seq_printf(m, "Debugfs support not implemented for this asic\n");
- mutex_unlock(&adev->pm.mutex);
- r = 0;
- } else {
+ if (amdgpu_dpm_debugfs_print_current_performance_level(adev, m)) {
r = amdgpu_debugfs_pm_info_pp(m, adev);
+ if (r)
+ goto out;
}
- if (r)
- goto out;
amdgpu_device_ip_get_clockgating_state(adev, &flags);
@@ -3729,21 +3568,18 @@ static ssize_t amdgpu_pm_prv_buffer_read(struct file *f, char __user *buf,
size_t size, loff_t *pos)
{
struct amdgpu_device *adev = file_inode(f)->i_private;
- const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
- void *pp_handle = adev->powerplay.pp_handle;
size_t smu_prv_buf_size;
void *smu_prv_buf;
+ int ret = 0;
if (amdgpu_in_reset(adev))
return -EPERM;
if (adev->in_suspend && !adev->in_runpm)
return -EPERM;
- if (pp_funcs && pp_funcs->get_smu_prv_buf_details)
- pp_funcs->get_smu_prv_buf_details(pp_handle, &smu_prv_buf,
- &smu_prv_buf_size);
- else
- return -ENOSYS;
+ ret = amdgpu_dpm_get_smu_prv_buf_details(adev, &smu_prv_buf, &smu_prv_buf_size);
+ if (ret)
+ return ret;
if (!smu_prv_buf || !smu_prv_buf_size)
return -EINVAL;
@@ -3767,6 +3603,9 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
struct drm_minor *minor = adev_to_drm(adev)->primary;
struct dentry *root = minor->debugfs_root;
+ if (!adev->pm.dpm_enabled)
+ return;
+
debugfs_create_file("amdgpu_pm_info", 0444, root, adev,
&amdgpu_debugfs_pm_info_fops);
@@ -3776,6 +3615,6 @@ void amdgpu_debugfs_pm_init(struct amdgpu_device *adev)
&amdgpu_debugfs_pm_prv_buffer_fops,
adev->pm.smu_prv_buffer_size);
- amdgpu_smu_stb_debug_fs_init(adev);
+ amdgpu_dpm_stb_debug_fs_init(adev);
#endif
}
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
index c464a045000d..3e78b3057277 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm.h
@@ -23,6 +23,12 @@
#ifndef __AMDGPU_DPM_H__
#define __AMDGPU_DPM_H__
+/* Argument for PPSMC_MSG_GpuChangeState */
+enum gfx_change_state {
+ sGpuChangeState_D0Entry = 1,
+ sGpuChangeState_D3Entry,
+};
+
enum amdgpu_int_thermal_type {
THERMAL_TYPE_NONE,
THERMAL_TYPE_EXTERNAL,
@@ -39,19 +45,6 @@ enum amdgpu_int_thermal_type {
THERMAL_TYPE_KV,
};
-enum amdgpu_dpm_auto_throttle_src {
- AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL,
- AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL
-};
-
-enum amdgpu_dpm_event_src {
- AMDGPU_DPM_EVENT_SRC_ANALOG = 0,
- AMDGPU_DPM_EVENT_SRC_EXTERNAL = 1,
- AMDGPU_DPM_EVENT_SRC_DIGITAL = 2,
- AMDGPU_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
- AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL = 4
-};
-
struct amdgpu_ps {
u32 caps; /* vbios flags */
u32 class; /* vbios flags */
@@ -95,19 +88,6 @@ struct amdgpu_dpm_thermal {
struct amdgpu_irq_src irq;
};
-enum amdgpu_clk_action
-{
- AMDGPU_SCLK_UP = 1,
- AMDGPU_SCLK_DOWN
-};
-
-struct amdgpu_blacklist_clocks
-{
- u32 sclk;
- u32 mclk;
- enum amdgpu_clk_action action;
-};
-
struct amdgpu_clock_and_voltage_limits {
u32 sclk;
u32 mclk;
@@ -246,128 +226,6 @@ struct amdgpu_dpm_fan {
bool ucode_fan_control;
};
-enum amdgpu_pcie_gen {
- AMDGPU_PCIE_GEN1 = 0,
- AMDGPU_PCIE_GEN2 = 1,
- AMDGPU_PCIE_GEN3 = 2,
- AMDGPU_PCIE_GEN_INVALID = 0xffff
-};
-
-#define amdgpu_dpm_pre_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_post_set_power_state(adev) \
- ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_display_configuration_changed(adev) \
- ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_print_power_state(adev, ps) \
- ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
-
-#define amdgpu_dpm_vblank_too_short(adev) \
- ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_enable_bapm(adev, e) \
- ((adev)->powerplay.pp_funcs->enable_bapm((adev)->powerplay.pp_handle, (e)))
-
-#define amdgpu_dpm_set_fan_control_mode(adev, m) \
- ((adev)->powerplay.pp_funcs->set_fan_control_mode((adev)->powerplay.pp_handle, (m)))
-
-#define amdgpu_dpm_get_fan_control_mode(adev) \
- ((adev)->powerplay.pp_funcs->get_fan_control_mode((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_fan_speed_pwm(adev, s) \
- ((adev)->powerplay.pp_funcs->set_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_pwm(adev, s) \
- ((adev)->powerplay.pp_funcs->get_fan_speed_pwm((adev)->powerplay.pp_handle, (s)))
-
-#define amdgpu_dpm_get_fan_speed_rpm(adev, s) \
- ((adev)->powerplay.pp_funcs->get_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_set_fan_speed_rpm(adev, s) \
- ((adev)->powerplay.pp_funcs->set_fan_speed_rpm)((adev)->powerplay.pp_handle, (s))
-
-#define amdgpu_dpm_force_performance_level(adev, l) \
- ((adev)->powerplay.pp_funcs->force_performance_level((adev)->powerplay.pp_handle, (l)))
-
-#define amdgpu_dpm_get_current_power_state(adev) \
- ((adev)->powerplay.pp_funcs->get_current_power_state((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_get_pp_num_states(adev, data) \
- ((adev)->powerplay.pp_funcs->get_pp_num_states((adev)->powerplay.pp_handle, data))
-
-#define amdgpu_dpm_get_pp_table(adev, table) \
- ((adev)->powerplay.pp_funcs->get_pp_table((adev)->powerplay.pp_handle, table))
-
-#define amdgpu_dpm_set_pp_table(adev, buf, size) \
- ((adev)->powerplay.pp_funcs->set_pp_table((adev)->powerplay.pp_handle, buf, size))
-
-#define amdgpu_dpm_print_clock_levels(adev, type, buf) \
- ((adev)->powerplay.pp_funcs->print_clock_levels((adev)->powerplay.pp_handle, type, buf))
-
-#define amdgpu_dpm_force_clock_level(adev, type, level) \
- ((adev)->powerplay.pp_funcs->force_clock_level((adev)->powerplay.pp_handle, type, level))
-
-#define amdgpu_dpm_get_sclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_sclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_sclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_sclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_get_mclk_od(adev) \
- ((adev)->powerplay.pp_funcs->get_mclk_od((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_set_mclk_od(adev, value) \
- ((adev)->powerplay.pp_funcs->set_mclk_od((adev)->powerplay.pp_handle, value))
-
-#define amdgpu_dpm_dispatch_task(adev, task_id, user_state) \
- ((adev)->powerplay.pp_funcs->dispatch_tasks)((adev)->powerplay.pp_handle, (task_id), (user_state))
-
-#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
- ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
-
-#define amdgpu_dpm_get_vce_clock_state(adev, i) \
- ((adev)->powerplay.pp_funcs->get_vce_clock_state((adev)->powerplay.pp_handle, (i)))
-
-#define amdgpu_dpm_get_performance_level(adev) \
- ((adev)->powerplay.pp_funcs->get_performance_level((adev)->powerplay.pp_handle))
-
-#define amdgpu_dpm_reset_power_profile_state(adev, request) \
- ((adev)->powerplay.pp_funcs->reset_power_profile_state(\
- (adev)->powerplay.pp_handle, request))
-
-#define amdgpu_dpm_get_power_profile_mode(adev, buf) \
- ((adev)->powerplay.pp_funcs->get_power_profile_mode(\
- (adev)->powerplay.pp_handle, buf))
-
-#define amdgpu_dpm_set_power_profile_mode(adev, parameter, size) \
- ((adev)->powerplay.pp_funcs->set_power_profile_mode(\
- (adev)->powerplay.pp_handle, parameter, size))
-
-#define amdgpu_dpm_set_fine_grain_clk_vol(adev, type, parameter, size) \
- ((adev)->powerplay.pp_funcs->set_fine_grain_clk_vol(\
- (adev)->powerplay.pp_handle, type, parameter, size))
-
-#define amdgpu_dpm_odn_edit_dpm_table(adev, type, parameter, size) \
- ((adev)->powerplay.pp_funcs->odn_edit_dpm_table(\
- (adev)->powerplay.pp_handle, type, parameter, size))
-
-#define amdgpu_dpm_get_ppfeature_status(adev, buf) \
- ((adev)->powerplay.pp_funcs->get_ppfeature_status(\
- (adev)->powerplay.pp_handle, (buf)))
-
-#define amdgpu_dpm_set_ppfeature_status(adev, ppfeatures) \
- ((adev)->powerplay.pp_funcs->set_ppfeature_status(\
- (adev)->powerplay.pp_handle, (ppfeatures)))
-
-#define amdgpu_dpm_get_gpu_metrics(adev, table) \
- ((adev)->powerplay.pp_funcs->get_gpu_metrics((adev)->powerplay.pp_handle, table))
-
struct amdgpu_dpm {
struct amdgpu_ps *ps;
/* number of valid power states */
@@ -426,6 +284,27 @@ enum ip_power_state {
/* Used to mask smu debug modes */
#define SMU_DEBUG_HALT_ON_ERROR 0x1
+#define MAX_SMU_I2C_BUSES 2
+
+struct amdgpu_smu_i2c_bus {
+ struct i2c_adapter adapter;
+ struct amdgpu_device *adev;
+ int port;
+ struct mutex mutex;
+};
+
+struct config_table_setting {
+ uint16_t gfxclk_average_tau;
+ uint16_t socclk_average_tau;
+ uint16_t uclk_average_tau;
+ uint16_t gfx_activity_average_tau;
+ uint16_t mem_activity_average_tau;
+ uint16_t socket_power_average_tau;
+ uint16_t apu_socket_power_average_tau;
+ uint16_t fclk_average_tau;
+};
+
struct amdgpu_pm {
struct mutex mutex;
u32 current_sclk;
@@ -458,8 +337,9 @@ struct amdgpu_pm {
uint32_t pp_feature;
/* Used for I2C access to various EEPROMs on relevant ASICs */
- struct i2c_adapter smu_i2c;
- struct mutex smu_i2c_mutex;
+ struct amdgpu_smu_i2c_bus smu_i2c[MAX_SMU_I2C_BUSES];
+ struct i2c_adapter *ras_eeprom_i2c_bus;
+ struct i2c_adapter *fru_eeprom_i2c_bus;
struct list_head pm_attr_list;
atomic_t pwr_state[AMD_IP_BLOCK_TYPE_NUM];
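The single smu_i2c adapter grows into a small array of amdgpu_smu_i2c_bus entries, each carrying its own port number and mutex, with the RAS and FRU EEPROM consumers holding plain i2c_adapter pointers into it. A hypothetical lookup helper shows how a consumer might resolve a port to its adapter:

    static struct i2c_adapter *example_bus_for_port(struct amdgpu_device *adev,
                                                    int port)
    {
        int i;

        for (i = 0; i < MAX_SMU_I2C_BUSES; i++)
            if (adev->pm.smu_i2c[i].port == port)
                return &adev->pm.smu_i2c[i].adapter;

        return NULL;    /* no SMU bus routed to this port */
    }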
@@ -468,64 +348,18 @@ struct amdgpu_pm {
* 0 = disabled (default), otherwise enable corresponding debug mode
*/
uint32_t smu_debug_mask;
-};
-
-#define R600_SSTU_DFLT 0
-#define R600_SST_DFLT 0x00C8
-
-/* XXX are these ok? */
-#define R600_TEMP_RANGE_MIN (90 * 1000)
-#define R600_TEMP_RANGE_MAX (120 * 1000)
-
-#define FDO_PWM_MODE_STATIC 1
-#define FDO_PWM_MODE_STATIC_RPM 5
-enum amdgpu_td {
- AMDGPU_TD_AUTO,
- AMDGPU_TD_UP,
- AMDGPU_TD_DOWN,
-};
+ bool pp_force_state_enabled;
-enum amdgpu_display_watermark {
- AMDGPU_DISPLAY_WATERMARK_LOW = 0,
- AMDGPU_DISPLAY_WATERMARK_HIGH = 1,
-};
+ struct mutex stable_pstate_ctx_lock;
+ struct amdgpu_ctx *stable_pstate_ctx;
-enum amdgpu_display_gap
-{
- AMDGPU_PM_DISPLAY_GAP_VBLANK_OR_WM = 0,
- AMDGPU_PM_DISPLAY_GAP_VBLANK = 1,
- AMDGPU_PM_DISPLAY_GAP_WATERMARK = 2,
- AMDGPU_PM_DISPLAY_GAP_IGNORE = 3,
+ struct config_table_setting config_table;
};
-void amdgpu_dpm_print_class_info(u32 class, u32 class2);
-void amdgpu_dpm_print_cap_info(u32 caps);
-void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
- struct amdgpu_ps *rps);
-u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
-u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
-void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
void *data, uint32_t *size);
-bool amdgpu_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor);
-
-int amdgpu_get_platform_caps(struct amdgpu_device *adev);
-
-int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
-void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
-
-void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
-
-enum amdgpu_pcie_gen amdgpu_get_pcie_gen_support(struct amdgpu_device *adev,
- u32 sys_mask,
- enum amdgpu_pcie_gen asic_gen,
- enum amdgpu_pcie_gen default_gen);
-
-struct amd_vce_state*
-amdgpu_get_vce_clock_state(void *handle, u32 idx);
-
int amdgpu_dpm_set_powergating_by_smu(struct amdgpu_device *adev,
uint32_t block_type, bool gate);
@@ -571,16 +405,140 @@ int amdgpu_dpm_smu_i2c_bus_access(struct amdgpu_device *adev,
void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev);
-int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor,
- void *data, uint32_t *size);
-
-void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
-
-void amdgpu_pm_compute_clocks(struct amdgpu_device *adev);
+void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev);
void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable);
void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable);
-void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version);
-
+int amdgpu_dpm_handle_passthrough_sbr(struct amdgpu_device *adev, bool enable);
+int amdgpu_dpm_send_hbm_bad_pages_num(struct amdgpu_device *adev, uint32_t size);
+int amdgpu_dpm_send_hbm_bad_channel_flag(struct amdgpu_device *adev, uint32_t size);
+int amdgpu_dpm_get_dpm_freq_range(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t *min,
+ uint32_t *max);
+int amdgpu_dpm_set_soft_freq_range(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t min,
+ uint32_t max);
+int amdgpu_dpm_write_watermarks_table(struct amdgpu_device *adev);
+int amdgpu_dpm_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+ uint64_t event_arg);
+int amdgpu_dpm_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
+uint64_t amdgpu_dpm_get_thermal_throttling_counter(struct amdgpu_device *adev);
+void amdgpu_dpm_gfx_state_change(struct amdgpu_device *adev,
+ enum gfx_change_state state);
+int amdgpu_dpm_get_ecc_info(struct amdgpu_device *adev,
+ void *umc_ecc);
+struct amd_vce_state *amdgpu_dpm_get_vce_clock_state(struct amdgpu_device *adev,
+ uint32_t idx);
+void amdgpu_dpm_get_current_power_state(struct amdgpu_device *adev, enum amd_pm_state_type *state);
+void amdgpu_dpm_set_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type state);
+enum amd_dpm_forced_level amdgpu_dpm_get_performance_level(struct amdgpu_device *adev);
+int amdgpu_dpm_force_performance_level(struct amdgpu_device *adev,
+ enum amd_dpm_forced_level level);
+int amdgpu_dpm_get_pp_num_states(struct amdgpu_device *adev,
+ struct pp_states_info *states);
+int amdgpu_dpm_dispatch_task(struct amdgpu_device *adev,
+ enum amd_pp_task task_id,
+ enum amd_pm_state_type *user_state);
+int amdgpu_dpm_get_pp_table(struct amdgpu_device *adev, char **table);
+int amdgpu_dpm_set_fine_grain_clk_vol(struct amdgpu_device *adev,
+ uint32_t type,
+ long *input,
+ uint32_t size);
+int amdgpu_dpm_odn_edit_dpm_table(struct amdgpu_device *adev,
+ uint32_t type,
+ long *input,
+ uint32_t size);
+int amdgpu_dpm_print_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ char *buf);
+int amdgpu_dpm_emit_clock_levels(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ char *buf,
+ int *offset);
+int amdgpu_dpm_set_ppfeature_status(struct amdgpu_device *adev,
+ uint64_t ppfeature_masks);
+int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf);
+int amdgpu_dpm_force_clock_level(struct amdgpu_device *adev,
+ enum pp_clock_type type,
+ uint32_t mask);
+int amdgpu_dpm_get_sclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_set_sclk_od(struct amdgpu_device *adev, uint32_t value);
+int amdgpu_dpm_get_mclk_od(struct amdgpu_device *adev);
+int amdgpu_dpm_set_mclk_od(struct amdgpu_device *adev, uint32_t value);
+int amdgpu_dpm_get_power_profile_mode(struct amdgpu_device *adev,
+ char *buf);
+int amdgpu_dpm_set_power_profile_mode(struct amdgpu_device *adev,
+ long *input, uint32_t size);
+int amdgpu_dpm_get_gpu_metrics(struct amdgpu_device *adev, void **table);
+int amdgpu_dpm_get_fan_control_mode(struct amdgpu_device *adev,
+ uint32_t *fan_mode);
+int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev,
+ uint32_t speed);
+int amdgpu_dpm_get_fan_speed_pwm(struct amdgpu_device *adev,
+ uint32_t *speed);
+int amdgpu_dpm_get_fan_speed_rpm(struct amdgpu_device *adev,
+ uint32_t *speed);
+int amdgpu_dpm_set_fan_speed_rpm(struct amdgpu_device *adev,
+ uint32_t speed);
+int amdgpu_dpm_set_fan_control_mode(struct amdgpu_device *adev,
+ uint32_t mode);
+int amdgpu_dpm_get_power_limit(struct amdgpu_device *adev,
+ uint32_t *limit,
+ enum pp_power_limit_level pp_limit_level,
+ enum pp_power_type power_type);
+int amdgpu_dpm_set_power_limit(struct amdgpu_device *adev,
+ uint32_t limit);
+int amdgpu_dpm_is_cclk_dpm_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_debugfs_print_current_performance_level(struct amdgpu_device *adev,
+ struct seq_file *m);
+int amdgpu_dpm_get_smu_prv_buf_details(struct amdgpu_device *adev,
+ void **addr,
+ size_t *size);
+int amdgpu_dpm_is_overdrive_supported(struct amdgpu_device *adev);
+int amdgpu_dpm_set_pp_table(struct amdgpu_device *adev,
+ const char *buf,
+ size_t size);
+int amdgpu_dpm_get_num_cpu_cores(struct amdgpu_device *adev);
+void amdgpu_dpm_stb_debug_fs_init(struct amdgpu_device *adev);
+int amdgpu_dpm_display_configuration_change(struct amdgpu_device *adev,
+ const struct amd_pp_display_configuration *input);
+int amdgpu_dpm_get_clock_by_type(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct amd_pp_clocks *clocks);
+int amdgpu_dpm_get_display_mode_validation_clks(struct amdgpu_device *adev,
+ struct amd_pp_simple_clock_info *clocks);
+int amdgpu_dpm_get_clock_by_type_with_latency(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_latency *clocks);
+int amdgpu_dpm_get_clock_by_type_with_voltage(struct amdgpu_device *adev,
+ enum amd_pp_clock_type type,
+ struct pp_clock_levels_with_voltage *clocks);
+int amdgpu_dpm_set_watermarks_for_clocks_ranges(struct amdgpu_device *adev,
+ void *clock_ranges);
+int amdgpu_dpm_display_clock_voltage_request(struct amdgpu_device *adev,
+ struct pp_display_clock_request *clock);
+int amdgpu_dpm_get_current_clocks(struct amdgpu_device *adev,
+ struct amd_pp_clock_info *clocks);
+void amdgpu_dpm_notify_smu_enable_pwe(struct amdgpu_device *adev);
+int amdgpu_dpm_set_active_display_count(struct amdgpu_device *adev,
+ uint32_t count);
+int amdgpu_dpm_set_min_deep_sleep_dcefclk(struct amdgpu_device *adev,
+ uint32_t clock);
+void amdgpu_dpm_set_hard_min_dcefclk_by_freq(struct amdgpu_device *adev,
+ uint32_t clock);
+void amdgpu_dpm_set_hard_min_fclk_by_freq(struct amdgpu_device *adev,
+ uint32_t clock);
+int amdgpu_dpm_display_disable_memory_clock_switch(struct amdgpu_device *adev,
+ bool disable_memory_clock_switch);
+int amdgpu_dpm_get_max_sustainable_clocks_by_dc(struct amdgpu_device *adev,
+ struct pp_smu_nv_clock_table *max_clocks);
+enum pp_smu_status amdgpu_dpm_get_uclk_dpm_states(struct amdgpu_device *adev,
+ unsigned int *clock_values_in_khz,
+ unsigned int *num_states);
+int amdgpu_dpm_get_dpm_clock_table(struct amdgpu_device *adev,
+ struct dpm_clocks *clock_table);
#endif
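In sum, the header trades roughly 120 lines of pp_funcs-dereferencing macros for ordinary function declarations. Each wrapper can then centralize the capability check that callers previously open-coded; one plausible implementation shape (the actual bodies live in amdgpu_dpm.c, not in this hunk):

    int amdgpu_dpm_get_ppfeature_status(struct amdgpu_device *adev, char *buf)
    {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        /* capability check moves out of every sysfs handler, into here;
         * callers treat a result <= 0 as "emit a bare newline" */
        if (!pp_funcs->get_ppfeature_status)
            return -EOPNOTSUPP;

        return pp_funcs->get_ppfeature_status(adev->powerplay.pp_handle, buf);
    }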
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
new file mode 100644
index 000000000000..5c2a89f0d5d5
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/inc/amdgpu_dpm_internal.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_DPM_INTERNAL_H__
+#define __AMDGPU_DPM_INTERNAL_H__
+
+void amdgpu_dpm_get_active_displays(struct amdgpu_device *adev);
+
+u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
+
+u32 amdgpu_dpm_get_vrefresh(struct amdgpu_device *adev);
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile b/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile
new file mode 100644
index 000000000000..baa4265d1daa
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/Makefile
@@ -0,0 +1,32 @@
+#
+# Copyright 2021 Advanced Micro Devices, Inc.
+#
+# Permission is hereby granted, free of charge, to any person obtaining a
+# copy of this software and associated documentation files (the "Software"),
+# to deal in the Software without restriction, including without limitation
+# the rights to use, copy, modify, merge, publish, distribute, sublicense,
+# and/or sell copies of the Software, and to permit persons to whom the
+# Software is furnished to do so, subject to the following conditions:
+#
+# The above copyright notice and this permission notice shall be included in
+# all copies or substantial portions of the Software.
+#
+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+# THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+# OTHER DEALINGS IN THE SOFTWARE.
+#
+
+AMD_LEGACYDPM_PATH = ../pm/legacy-dpm
+
+LEGACYDPM_MGR-y = legacy_dpm.o
+
+LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_CIK) += kv_dpm.o kv_smc.o
+LEGACYDPM_MGR-$(CONFIG_DRM_AMDGPU_SI) += si_dpm.o si_smc.o
+
+AMD_LEGACYDPM_POWER = $(addprefix $(AMD_LEGACYDPM_PATH)/,$(LEGACYDPM_MGR-y))
+
+AMD_POWERPLAY_FILES += $(AMD_LEGACYDPM_POWER)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h
index 2fcc4b60153c..2fcc4b60153c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/cik_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/cik_dpm.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
index bcae42cef374..8b23cc9f098a 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.c
@@ -36,6 +36,7 @@
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_sh_mask.h"
+#include "legacy_dpm.h"
#define KV_MAX_DEEPSLEEP_DIVIDER_ID 5
#define KV_MINIMUM_ENGINE_CLOCK 800
@@ -1256,6 +1257,19 @@ static void kv_dpm_enable_bapm(void *handle, bool enable)
}
}
+static bool kv_is_internal_thermal_sensor(enum amdgpu_int_thermal_type sensor)
+{
+ switch (sensor) {
+ case THERMAL_TYPE_KV:
+ return true;
+ case THERMAL_TYPE_NONE:
+ case THERMAL_TYPE_EXTERNAL:
+ case THERMAL_TYPE_EXTERNAL_GPIO:
+ default:
+ return false;
+ }
+}
+
static int kv_dpm_enable(struct amdgpu_device *adev)
{
struct kv_power_info *pi = kv_get_pi(adev);
@@ -1352,7 +1366,7 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
}
if (adev->irq.installed &&
- amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
+ kv_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
if (ret) {
DRM_ERROR("kv_set_thermal_temperature_range failed\n");
@@ -3016,21 +3030,18 @@ static int kv_dpm_sw_init(void *handle)
return 0;
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
ret = kv_dpm_init(adev);
if (ret)
goto dpm_failed;
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
DRM_INFO("amdgpu: dpm initialized\n");
return 0;
dpm_failed:
kv_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
DRM_ERROR("amdgpu: dpm initialization failed\n");
return ret;
}
@@ -3041,9 +3052,7 @@ static int kv_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
- mutex_lock(&adev->pm.mutex);
kv_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
return 0;
}
@@ -3056,15 +3065,13 @@ static int kv_dpm_hw_init(void *handle)
if (!amdgpu_dpm)
return 0;
- mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_legacy_dpm_compute_clocks(adev);
return ret;
}
@@ -3072,11 +3079,8 @@ static int kv_dpm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
+ if (adev->pm.dpm_enabled)
kv_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- }
return 0;
}
@@ -3086,12 +3090,10 @@ static int kv_dpm_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
/* disable dpm */
kv_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- mutex_unlock(&adev->pm.mutex);
}
return 0;
}
@@ -3103,16 +3105,14 @@ static int kv_dpm_resume(void *handle)
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
kv_dpm_setup_asic(adev);
ret = kv_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_legacy_dpm_compute_clocks(adev);
}
return 0;
}
@@ -3366,6 +3366,7 @@ static const struct amd_pm_funcs kv_dpm_funcs = {
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.check_state_equal = kv_check_state_equal,
.read_sensor = &kv_dpm_read_sensor,
+ .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
};
static const struct amdgpu_irq_src_funcs kv_dpm_irq_funcs = {
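The new .pm_compute_clocks member lets the legacy kv/si code keep its clock recomputation while the common layer stops calling amdgpu_pm_compute_clocks() directly. Presumably the generic entry point declared above as amdgpu_dpm_compute_clocks() reduces to a thin dispatch, along these lines (a sketch, not the committed body):

    void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
    {
        const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;

        /* legacy dpm, powerplay and sw-smu each plug in their own handler */
        if (pp_funcs && pp_funcs->pm_compute_clocks)
            pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
    }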
diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h
index 6df0ed41317c..6df0ed41317c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/kv_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_dpm.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c
index 2d9ab6b8be66..2d9ab6b8be66 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/kv_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/kv_smc.c
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
new file mode 100644
index 000000000000..9613c6181c17
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.c
@@ -0,0 +1,1081 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include "amdgpu.h"
+#include "amdgpu_i2c.h"
+#include "amdgpu_atombios.h"
+#include "atom.h"
+#include "amd_pcie.h"
+#include "legacy_dpm.h"
+#include "amdgpu_dpm_internal.h"
+#include "amdgpu_display.h"
+
+#define amdgpu_dpm_pre_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->pre_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_post_set_power_state(adev) \
+ ((adev)->powerplay.pp_funcs->post_set_power_state((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_display_configuration_changed(adev) \
+ ((adev)->powerplay.pp_funcs->display_configuration_changed((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_print_power_state(adev, ps) \
+ ((adev)->powerplay.pp_funcs->print_power_state((adev)->powerplay.pp_handle, (ps)))
+
+#define amdgpu_dpm_vblank_too_short(adev) \
+ ((adev)->powerplay.pp_funcs->vblank_too_short((adev)->powerplay.pp_handle))
+
+#define amdgpu_dpm_check_state_equal(adev, cps, rps, equal) \
+ ((adev)->powerplay.pp_funcs->check_state_equal((adev)->powerplay.pp_handle, (cps), (rps), (equal)))
+
+void amdgpu_dpm_print_class_info(u32 class, u32 class2)
+{
+ const char *s;
+
+ switch (class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) {
+ case ATOM_PPLIB_CLASSIFICATION_UI_NONE:
+ default:
+ s = "none";
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BATTERY:
+ s = "battery";
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_BALANCED:
+ s = "balanced";
+ break;
+ case ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE:
+ s = "performance";
+ break;
+ }
+ printk("\tui class: %s\n", s);
+ printk("\tinternal class:");
+ if (((class & ~ATOM_PPLIB_CLASSIFICATION_UI_MASK) == 0) &&
+ (class2 == 0))
+ pr_cont(" none");
+ else {
+ if (class & ATOM_PPLIB_CLASSIFICATION_BOOT)
+ pr_cont(" boot");
+ if (class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+ pr_cont(" thermal");
+ if (class & ATOM_PPLIB_CLASSIFICATION_LIMITEDPOWERSOURCE)
+ pr_cont(" limited_pwr");
+ if (class & ATOM_PPLIB_CLASSIFICATION_REST)
+ pr_cont(" rest");
+ if (class & ATOM_PPLIB_CLASSIFICATION_FORCED)
+ pr_cont(" forced");
+ if (class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ pr_cont(" 3d_perf");
+ if (class & ATOM_PPLIB_CLASSIFICATION_OVERDRIVETEMPLATE)
+ pr_cont(" ovrdrv");
+ if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
+ pr_cont(" uvd");
+ if (class & ATOM_PPLIB_CLASSIFICATION_3DLOW)
+ pr_cont(" 3d_low");
+ if (class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+ pr_cont(" acpi");
+ if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+ pr_cont(" uvd_hd2");
+ if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+ pr_cont(" uvd_hd");
+ if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+ pr_cont(" uvd_sd");
+ if (class2 & ATOM_PPLIB_CLASSIFICATION2_LIMITEDPOWERSOURCE_2)
+ pr_cont(" limited_pwr2");
+ if (class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+ pr_cont(" ulv");
+ if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+ pr_cont(" uvd_mvc");
+ }
+ pr_cont("\n");
+}
+
+void amdgpu_dpm_print_cap_info(u32 caps)
+{
+ printk("\tcaps:");
+ if (caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY)
+ pr_cont(" single_disp");
+ if (caps & ATOM_PPLIB_SUPPORTS_VIDEO_PLAYBACK)
+ pr_cont(" video");
+ if (caps & ATOM_PPLIB_DISALLOW_ON_DC)
+ pr_cont(" no_dc");
+ pr_cont("\n");
+}
+
+void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps)
+{
+ printk("\tstatus:");
+ if (rps == adev->pm.dpm.current_ps)
+ pr_cont(" c");
+ if (rps == adev->pm.dpm.requested_ps)
+ pr_cont(" r");
+ if (rps == adev->pm.dpm.boot_ps)
+ pr_cont(" b");
+ pr_cont("\n");
+}
+
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev)
+{
+ int i;
+
+ if (adev->powerplay.pp_funcs->print_power_state == NULL)
+ return;
+
+ for (i = 0; i < adev->pm.dpm.num_ps; i++)
+ amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]);
+
+}
+
+union power_info {
+ struct _ATOM_POWERPLAY_INFO info;
+ struct _ATOM_POWERPLAY_INFO_V2 info_2;
+ struct _ATOM_POWERPLAY_INFO_V3 info_3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
+ struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
+ struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
+ struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
+ struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
+};
+
+int amdgpu_get_platform_caps(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ union power_info *power_info;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ adev->pm.dpm.platform_caps = le32_to_cpu(power_info->pplib.ulPlatformCaps);
+ adev->pm.dpm.backbias_response_time = le16_to_cpu(power_info->pplib.usBackbiasTime);
+ adev->pm.dpm.voltage_response_time = le16_to_cpu(power_info->pplib.usVoltageTime);
+
+ return 0;
+}
+
+union fan_info {
+ struct _ATOM_PPLIB_FANTABLE fan;
+ struct _ATOM_PPLIB_FANTABLE2 fan2;
+ struct _ATOM_PPLIB_FANTABLE3 fan3;
+};
+
+static int amdgpu_parse_clk_voltage_dep_table(struct amdgpu_clock_voltage_dependency_table *amdgpu_table,
+ ATOM_PPLIB_Clock_Voltage_Dependency_Table *atom_table)
+{
+ u32 size = atom_table->ucNumEntries *
+ sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ int i;
+ ATOM_PPLIB_Clock_Voltage_Dependency_Record *entry;
+
+ amdgpu_table->entries = kzalloc(size, GFP_KERNEL);
+ if (!amdgpu_table->entries)
+ return -ENOMEM;
+
+ entry = &atom_table->entries[0];
+ for (i = 0; i < atom_table->ucNumEntries; i++) {
+ amdgpu_table->entries[i].clk = le16_to_cpu(entry->usClockLow) |
+ (entry->ucClockHigh << 16);
+ amdgpu_table->entries[i].v = le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_Clock_Voltage_Dependency_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_Clock_Voltage_Dependency_Record));
+ }
+ amdgpu_table->count = atom_table->ucNumEntries;
+
+ return 0;
+}
+
+/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8 24
+#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V9 26
+
+int amdgpu_parse_extended_power_table(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ union power_info *power_info;
+ union fan_info *fan_info;
+ ATOM_PPLIB_Clock_Voltage_Dependency_Table *dep_table;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ u16 data_offset;
+ u8 frev, crev;
+ int ret, i;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return -EINVAL;
+ power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
+
+ /* fan table */
+ if (le16_to_cpu(power_info->pplib.usTableSize) >=
+ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+ if (power_info->pplib3.usFanTableOffset) {
+ fan_info = (union fan_info *)(mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib3.usFanTableOffset));
+ adev->pm.dpm.fan.t_hyst = fan_info->fan.ucTHyst;
+ adev->pm.dpm.fan.t_min = le16_to_cpu(fan_info->fan.usTMin);
+ adev->pm.dpm.fan.t_med = le16_to_cpu(fan_info->fan.usTMed);
+ adev->pm.dpm.fan.t_high = le16_to_cpu(fan_info->fan.usTHigh);
+ adev->pm.dpm.fan.pwm_min = le16_to_cpu(fan_info->fan.usPWMMin);
+ adev->pm.dpm.fan.pwm_med = le16_to_cpu(fan_info->fan.usPWMMed);
+ adev->pm.dpm.fan.pwm_high = le16_to_cpu(fan_info->fan.usPWMHigh);
+ if (fan_info->fan.ucFanTableFormat >= 2)
+ adev->pm.dpm.fan.t_max = le16_to_cpu(fan_info->fan2.usTMax);
+ else
+ adev->pm.dpm.fan.t_max = 10900;
+ adev->pm.dpm.fan.cycle_delay = 100000;
+ if (fan_info->fan.ucFanTableFormat >= 3) {
+ adev->pm.dpm.fan.control_mode = fan_info->fan3.ucFanControlMode;
+ adev->pm.dpm.fan.default_max_fan_pwm =
+ le16_to_cpu(fan_info->fan3.usFanPWMMax);
+ adev->pm.dpm.fan.default_fan_output_sensitivity = 4836;
+ adev->pm.dpm.fan.fan_output_sensitivity =
+ le16_to_cpu(fan_info->fan3.usFanOutputSensitivity);
+ }
+ adev->pm.dpm.fan.ucode_fan_control = true;
+ }
+ }
+
+	/* clock dependency tables, shedding tables */
+ if (le16_to_cpu(power_info->pplib.usTableSize) >=
+ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE4)) {
+ if (power_info->pplib4.usVddcDependencyOnSCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usVddcDependencyOnSCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
+ dep_table);
+ if (ret) {
+ amdgpu_free_extended_power_table(adev);
+ return ret;
+ }
+ }
+ if (power_info->pplib4.usVddciDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usVddciDependencyOnMCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
+ dep_table);
+ if (ret) {
+ amdgpu_free_extended_power_table(adev);
+ return ret;
+ }
+ }
+ if (power_info->pplib4.usVddcDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usVddcDependencyOnMCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
+ dep_table);
+ if (ret) {
+ amdgpu_free_extended_power_table(adev);
+ return ret;
+ }
+ }
+ if (power_info->pplib4.usMvddDependencyOnMCLKOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usMvddDependencyOnMCLKOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(&adev->pm.dpm.dyn_state.mvdd_dependency_on_mclk,
+ dep_table);
+ if (ret) {
+ amdgpu_free_extended_power_table(adev);
+ return ret;
+ }
+ }
+ if (power_info->pplib4.usMaxClockVoltageOnDCOffset) {
+ ATOM_PPLIB_Clock_Voltage_Limit_Table *clk_v =
+ (ATOM_PPLIB_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usMaxClockVoltageOnDCOffset));
+ if (clk_v->ucNumEntries) {
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk =
+ le16_to_cpu(clk_v->entries[0].usSclkLow) |
+ (clk_v->entries[0].ucSclkHigh << 16);
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk =
+ le16_to_cpu(clk_v->entries[0].usMclkLow) |
+ (clk_v->entries[0].ucMclkHigh << 16);
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc =
+ le16_to_cpu(clk_v->entries[0].usVddc);
+ adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddci =
+ le16_to_cpu(clk_v->entries[0].usVddci);
+ }
+ }
+ if (power_info->pplib4.usVddcPhaseShedLimitsTableOffset) {
+ ATOM_PPLIB_PhaseSheddingLimits_Table *psl =
+ (ATOM_PPLIB_PhaseSheddingLimits_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib4.usVddcPhaseShedLimitsTableOffset));
+ ATOM_PPLIB_PhaseSheddingLimits_Record *entry;
+
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries =
+ kcalloc(psl->ucNumEntries,
+ sizeof(struct amdgpu_phase_shedding_limits_entry),
+ GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+
+ entry = &psl->entries[0];
+ for (i = 0; i < psl->ucNumEntries; i++) {
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].sclk =
+ le16_to_cpu(entry->usSclkLow) | (entry->ucSclkHigh << 16);
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].mclk =
+ le16_to_cpu(entry->usMclkLow) | (entry->ucMclkHigh << 16);
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.entries[i].voltage =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_PhaseSheddingLimits_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_PhaseSheddingLimits_Record));
+ }
+ adev->pm.dpm.dyn_state.phase_shedding_limits_table.count =
+ psl->ucNumEntries;
+ }
+ }
+
+ /* cac data */
+ if (le16_to_cpu(power_info->pplib.usTableSize) >=
+ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE5)) {
+ adev->pm.dpm.tdp_limit = le32_to_cpu(power_info->pplib5.ulTDPLimit);
+ adev->pm.dpm.near_tdp_limit = le32_to_cpu(power_info->pplib5.ulNearTDPLimit);
+ adev->pm.dpm.near_tdp_limit_adjusted = adev->pm.dpm.near_tdp_limit;
+ adev->pm.dpm.tdp_od_limit = le16_to_cpu(power_info->pplib5.usTDPODLimit);
+ if (adev->pm.dpm.tdp_od_limit)
+ adev->pm.dpm.power_control = true;
+ else
+ adev->pm.dpm.power_control = false;
+ adev->pm.dpm.tdp_adjustment = 0;
+ adev->pm.dpm.sq_ramping_threshold = le32_to_cpu(power_info->pplib5.ulSQRampingThreshold);
+ adev->pm.dpm.cac_leakage = le32_to_cpu(power_info->pplib5.ulCACLeakage);
+ adev->pm.dpm.load_line_slope = le16_to_cpu(power_info->pplib5.usLoadLineSlope);
+ if (power_info->pplib5.usCACLeakageTableOffset) {
+ ATOM_PPLIB_CAC_Leakage_Table *cac_table =
+ (ATOM_PPLIB_CAC_Leakage_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib5.usCACLeakageTableOffset));
+ ATOM_PPLIB_CAC_Leakage_Record *entry;
+ u32 size = cac_table->ucNumEntries * sizeof(struct amdgpu_cac_leakage_table);
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries = kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ entry = &cac_table->entries[0];
+ for (i = 0; i < cac_table->ucNumEntries; i++) {
+ if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_EVV) {
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc1 =
+ le16_to_cpu(entry->usVddc1);
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc2 =
+ le16_to_cpu(entry->usVddc2);
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc3 =
+ le16_to_cpu(entry->usVddc3);
+ } else {
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].vddc =
+ le16_to_cpu(entry->usVddc);
+ adev->pm.dpm.dyn_state.cac_leakage_table.entries[i].leakage =
+ le32_to_cpu(entry->ulLeakageValue);
+ }
+ entry = (ATOM_PPLIB_CAC_Leakage_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_CAC_Leakage_Record));
+ }
+ adev->pm.dpm.dyn_state.cac_leakage_table.count = cac_table->ucNumEntries;
+ }
+ }
+
+ /* ext tables */
+ if (le16_to_cpu(power_info->pplib.usTableSize) >=
+ sizeof(struct _ATOM_PPLIB_POWERPLAYTABLE3)) {
+ ATOM_PPLIB_EXTENDEDHEADER *ext_hdr = (ATOM_PPLIB_EXTENDEDHEADER *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(power_info->pplib3.usExtendendedHeaderOffset));
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2) &&
+ ext_hdr->usVCETableOffset) {
+ VCEClockInfoArray *array = (VCEClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1);
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+ 1 + array->ucNumEntries * sizeof(VCEClockInfo));
+ ATOM_PPLIB_VCE_State_Table *states =
+ (ATOM_PPLIB_VCE_State_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usVCETableOffset) + 1 +
+ 1 + (array->ucNumEntries * sizeof(VCEClockInfo)) +
+ 1 + (limits->numEntries * sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record)));
+ ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *entry;
+ ATOM_PPLIB_VCE_State_Record *state_entry;
+ VCEClockInfo *vce_clk;
+ u32 size = limits->numEntries *
+ sizeof(struct amdgpu_vce_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ state_entry = &states->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ vce_clk = (VCEClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].evclk =
+ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].ecclk =
+ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+ adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_VCE_Clock_Voltage_Limit_Record));
+ }
+ adev->pm.dpm.num_of_vce_states =
+ states->numEntries > AMD_MAX_VCE_LEVELS ?
+ AMD_MAX_VCE_LEVELS : states->numEntries;
+ for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
+ vce_clk = (VCEClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (state_entry->ucVCEClockInfoIndex * sizeof(VCEClockInfo)));
+ adev->pm.dpm.vce_states[i].evclk =
+ le16_to_cpu(vce_clk->usEVClkLow) | (vce_clk->ucEVClkHigh << 16);
+ adev->pm.dpm.vce_states[i].ecclk =
+ le16_to_cpu(vce_clk->usECClkLow) | (vce_clk->ucECClkHigh << 16);
+ adev->pm.dpm.vce_states[i].clk_idx =
+ state_entry->ucClockInfoIndex & 0x3f;
+ adev->pm.dpm.vce_states[i].pstate =
+ (state_entry->ucClockInfoIndex & 0xc0) >> 6;
+ state_entry = (ATOM_PPLIB_VCE_State_Record *)
+ ((u8 *)state_entry + sizeof(ATOM_PPLIB_VCE_State_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3) &&
+ ext_hdr->usUVDTableOffset) {
+ UVDClockInfoArray *array = (UVDClockInfoArray *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1);
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usUVDTableOffset) + 1 +
+ 1 + (array->ucNumEntries * sizeof(UVDClockInfo)));
+ ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct amdgpu_uvd_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ UVDClockInfo *uvd_clk = (UVDClockInfo *)
+ ((u8 *)&array->entries[0] +
+ (entry->ucUVDClockInfoIndex * sizeof(UVDClockInfo)));
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].vclk =
+ le16_to_cpu(uvd_clk->usVClkLow) | (uvd_clk->ucVClkHigh << 16);
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].dclk =
+ le16_to_cpu(uvd_clk->usDClkLow) | (uvd_clk->ucDClkHigh << 16);
+ adev->pm.dpm.dyn_state.uvd_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_UVD_Clock_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4) &&
+ ext_hdr->usSAMUTableOffset) {
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_SAMClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usSAMUTableOffset) + 1);
+ ATOM_PPLIB_SAMClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usSAMClockLow) | (entry->ucSAMClockHigh << 16);
+ adev->pm.dpm.dyn_state.samu_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_SAMClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_SAMClk_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5) &&
+ ext_hdr->usPPMTableOffset) {
+ ATOM_PPLIB_PPM_Table *ppm = (ATOM_PPLIB_PPM_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPPMTableOffset));
+ adev->pm.dpm.dyn_state.ppm_table =
+ kzalloc(sizeof(struct amdgpu_ppm_table), GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.ppm_table) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.ppm_table->ppm_design = ppm->ucPpmDesign;
+ adev->pm.dpm.dyn_state.ppm_table->cpu_core_number =
+ le16_to_cpu(ppm->usCpuCoreNumber);
+ adev->pm.dpm.dyn_state.ppm_table->platform_tdp =
+ le32_to_cpu(ppm->ulPlatformTDP);
+ adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdp =
+ le32_to_cpu(ppm->ulSmallACPlatformTDP);
+ adev->pm.dpm.dyn_state.ppm_table->platform_tdc =
+ le32_to_cpu(ppm->ulPlatformTDC);
+ adev->pm.dpm.dyn_state.ppm_table->small_ac_platform_tdc =
+ le32_to_cpu(ppm->ulSmallACPlatformTDC);
+ adev->pm.dpm.dyn_state.ppm_table->apu_tdp =
+ le32_to_cpu(ppm->ulApuTDP);
+ adev->pm.dpm.dyn_state.ppm_table->dgpu_tdp =
+ le32_to_cpu(ppm->ulDGpuTDP);
+ adev->pm.dpm.dyn_state.ppm_table->dgpu_ulv_power =
+ le32_to_cpu(ppm->ulDGpuUlvPower);
+ adev->pm.dpm.dyn_state.ppm_table->tj_max =
+ le32_to_cpu(ppm->ulTjmax);
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6) &&
+ ext_hdr->usACPTableOffset) {
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Table *limits =
+ (ATOM_PPLIB_ACPClk_Voltage_Limit_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usACPTableOffset) + 1);
+ ATOM_PPLIB_ACPClk_Voltage_Limit_Record *entry;
+ u32 size = limits->numEntries *
+ sizeof(struct amdgpu_clock_voltage_dependency_entry);
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries =
+ kzalloc(size, GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.count =
+ limits->numEntries;
+ entry = &limits->entries[0];
+ for (i = 0; i < limits->numEntries; i++) {
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].clk =
+ le16_to_cpu(entry->usACPClockLow) | (entry->ucACPClockHigh << 16);
+ adev->pm.dpm.dyn_state.acp_clock_voltage_dependency_table.entries[i].v =
+ le16_to_cpu(entry->usVoltage);
+ entry = (ATOM_PPLIB_ACPClk_Voltage_Limit_Record *)
+ ((u8 *)entry + sizeof(ATOM_PPLIB_ACPClk_Voltage_Limit_Record));
+ }
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7) &&
+ ext_hdr->usPowerTuneTableOffset) {
+ u8 rev = *(u8 *)(mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ ATOM_PowerTune_Table *pt;
+ adev->pm.dpm.dyn_state.cac_tdp_table =
+ kzalloc(sizeof(struct amdgpu_cac_tdp_table), GFP_KERNEL);
+ if (!adev->pm.dpm.dyn_state.cac_tdp_table) {
+ amdgpu_free_extended_power_table(adev);
+ return -ENOMEM;
+ }
+ if (rev > 0) {
+ ATOM_PPLIB_POWERTUNE_Table_V1 *ppt = (ATOM_PPLIB_POWERTUNE_Table_V1 *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit =
+ ppt->usMaximumPowerDeliveryLimit;
+ pt = &ppt->power_tune_table;
+ } else {
+ ATOM_PPLIB_POWERTUNE_Table *ppt = (ATOM_PPLIB_POWERTUNE_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usPowerTuneTableOffset));
+ adev->pm.dpm.dyn_state.cac_tdp_table->maximum_power_delivery_limit = 255;
+ pt = &ppt->power_tune_table;
+ }
+ adev->pm.dpm.dyn_state.cac_tdp_table->tdp = le16_to_cpu(pt->usTDP);
+ adev->pm.dpm.dyn_state.cac_tdp_table->configurable_tdp =
+ le16_to_cpu(pt->usConfigurableTDP);
+ adev->pm.dpm.dyn_state.cac_tdp_table->tdc = le16_to_cpu(pt->usTDC);
+ adev->pm.dpm.dyn_state.cac_tdp_table->battery_power_limit =
+ le16_to_cpu(pt->usBatteryPowerLimit);
+ adev->pm.dpm.dyn_state.cac_tdp_table->small_power_limit =
+ le16_to_cpu(pt->usSmallPowerLimit);
+ adev->pm.dpm.dyn_state.cac_tdp_table->low_cac_leakage =
+ le16_to_cpu(pt->usLowCACLeakage);
+ adev->pm.dpm.dyn_state.cac_tdp_table->high_cac_leakage =
+ le16_to_cpu(pt->usHighCACLeakage);
+ }
+ if ((le16_to_cpu(ext_hdr->usSize) >= SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V8) &&
+ ext_hdr->usSclkVddgfxTableOffset) {
+ dep_table = (ATOM_PPLIB_Clock_Voltage_Dependency_Table *)
+ (mode_info->atom_context->bios + data_offset +
+ le16_to_cpu(ext_hdr->usSclkVddgfxTableOffset));
+ ret = amdgpu_parse_clk_voltage_dep_table(
+ &adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk,
+ dep_table);
+ if (ret) {
+ kfree(adev->pm.dpm.dyn_state.vddgfx_dependency_on_sclk.entries);
+ return ret;
+ }
+ }
+ }
+
+ return 0;
+}
+
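+/* Free everything allocated by amdgpu_parse_extended_power_table().
+ * kfree() ignores NULL pointers, so this is safe to call on a
+ * partially parsed table.
+ */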
+void amdgpu_free_extended_power_table(struct amdgpu_device *adev)
+{
+ struct amdgpu_dpm_dynamic_state *dyn_state = &adev->pm.dpm.dyn_state;
+
+ kfree(dyn_state->vddc_dependency_on_sclk.entries);
+ kfree(dyn_state->vddci_dependency_on_mclk.entries);
+ kfree(dyn_state->vddc_dependency_on_mclk.entries);
+ kfree(dyn_state->mvdd_dependency_on_mclk.entries);
+ kfree(dyn_state->cac_leakage_table.entries);
+ kfree(dyn_state->phase_shedding_limits_table.entries);
+ kfree(dyn_state->ppm_table);
+ kfree(dyn_state->cac_tdp_table);
+ kfree(dyn_state->vce_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->uvd_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->samu_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->acp_clock_voltage_dependency_table.entries);
+ kfree(dyn_state->vddgfx_dependency_on_sclk.entries);
+}
+
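+/* Human readable names, indexed by ATOM_PP_THERMALCONTROLLER_* type */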
+static const char *pp_lib_thermal_controller_names[] = {
+ "NONE",
+ "lm63",
+ "adm1032",
+ "adm1030",
+ "max6649",
+ "lm64",
+ "f75375",
+ "RV6xx",
+ "RV770",
+ "adt7473",
+ "NONE",
+ "External GPIO",
+ "Evergreen",
+ "emc2103",
+ "Sumo",
+ "Northern Islands",
+ "Southern Islands",
+ "lm96163",
+ "Sea Islands",
+ "Kaveri/Kabini",
+};
+
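+/* Look up the thermal controller described in the vbios and, for
+ * external I2C chips, register an i2c device on the matching bus so
+ * that a hwmon driver can bind to it.
+ */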
+void amdgpu_add_thermal_controller(struct amdgpu_device *adev)
+{
+ struct amdgpu_mode_info *mode_info = &adev->mode_info;
+ ATOM_PPLIB_POWERPLAYTABLE *power_table;
+ int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
+ ATOM_PPLIB_THERMALCONTROLLER *controller;
+ struct amdgpu_i2c_bus_rec i2c_bus;
+ u16 data_offset;
+ u8 frev, crev;
+
+ if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
+ &frev, &crev, &data_offset))
+ return;
+ power_table = (ATOM_PPLIB_POWERPLAYTABLE *)
+ (mode_info->atom_context->bios + data_offset);
+ controller = &power_table->sThermalController;
+
+ /* add the i2c bus for thermal/fan chip */
+ if (controller->ucType > 0) {
+ if (controller->ucFanParameters & ATOM_PP_FANPARAMETERS_NOFAN)
+ adev->pm.no_fan = true;
+ adev->pm.fan_pulses_per_revolution =
+ controller->ucFanParameters & ATOM_PP_FANPARAMETERS_TACHOMETER_PULSES_PER_REVOLUTION_MASK;
+ if (adev->pm.fan_pulses_per_revolution) {
+ adev->pm.fan_min_rpm = controller->ucFanMinRPM;
+ adev->pm.fan_max_rpm = controller->ucFanMaxRPM;
+ }
+ if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV6xx) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_RV6XX;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_RV770) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_RV770;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EVERGREEN) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_EVERGREEN;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SUMO) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_SUMO;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_NISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_NI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_SISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_SI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_CISLANDS) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_CI;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_KAVERI) {
+ DRM_INFO("Internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_KV;
+ } else if (controller->ucType == ATOM_PP_THERMALCONTROLLER_EXTERNAL_GPIO) {
+ DRM_INFO("External GPIO thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL_GPIO;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_ADT7473_WITH_INTERNAL) {
+ DRM_INFO("ADT7473 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_ADT7473_WITH_INTERNAL;
+ } else if (controller->ucType ==
+ ATOM_PP_THERMALCONTROLLER_EMC2103_WITH_INTERNAL) {
+ DRM_INFO("EMC2103 with internal thermal controller %s fan control\n",
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_EMC2103_WITH_INTERNAL;
+ } else if (controller->ucType < ARRAY_SIZE(pp_lib_thermal_controller_names)) {
+ DRM_INFO("Possible %s thermal controller at 0x%02x %s fan control\n",
+ pp_lib_thermal_controller_names[controller->ucType],
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ adev->pm.int_thermal_type = THERMAL_TYPE_EXTERNAL;
+ i2c_bus = amdgpu_atombios_lookup_i2c_gpio(adev, controller->ucI2cLine);
+ adev->pm.i2c_bus = amdgpu_i2c_lookup(adev, &i2c_bus);
+ if (adev->pm.i2c_bus) {
+ struct i2c_board_info info = { };
+ const char *name = pp_lib_thermal_controller_names[controller->ucType];
+ info.addr = controller->ucI2cAddress >> 1;
+ strlcpy(info.type, name, sizeof(info.type));
+ i2c_new_client_device(&adev->pm.i2c_bus->adapter, &info);
+ }
+ } else {
+ DRM_INFO("Unknown thermal controller type %d at 0x%02x %s fan control\n",
+ controller->ucType,
+ controller->ucI2cAddress >> 1,
+ (controller->ucFanParameters &
+ ATOM_PP_FANPARAMETERS_NOFAN) ? "without" : "with");
+ }
+ }
+}
+
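+/* Return the VCE clock state for the given index, or NULL if the
+ * index is out of range.
+ */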
+struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (idx < adev->pm.dpm.num_of_vce_states)
+ return &adev->pm.dpm.vce_states[idx];
+
+ return NULL;
+}
+
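+/* Select the best matching power state for the requested state type,
+ * falling back to progressively more generic types when nothing
+ * matches the current conditions.
+ */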
+static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev,
+ enum amd_pm_state_type dpm_state)
+{
+ int i;
+ struct amdgpu_ps *ps;
+ u32 ui_class;
+ bool single_display = (adev->pm.dpm.new_active_crtc_count < 2);
+
+ /* check if the vblank period is too short to adjust the mclk */
+ if (single_display && adev->powerplay.pp_funcs->vblank_too_short) {
+ if (amdgpu_dpm_vblank_too_short(adev))
+ single_display = false;
+ }
+
+ /* certain older asics have a separate 3D performance state,
+ * so try that first if the user selected performance
+ */
+ if (dpm_state == POWER_STATE_TYPE_PERFORMANCE)
+ dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF;
+ /* balanced states don't exist at the moment */
+ if (dpm_state == POWER_STATE_TYPE_BALANCED)
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+
+restart_search:
+ /* Pick the best power state based on current conditions */
+ for (i = 0; i < adev->pm.dpm.num_ps; i++) {
+ ps = &adev->pm.dpm.ps[i];
+ ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK;
+ switch (dpm_state) {
+ /* user states */
+ case POWER_STATE_TYPE_BATTERY:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ case POWER_STATE_TYPE_BALANCED:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ case POWER_STATE_TYPE_PERFORMANCE:
+ if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
+ if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) {
+ if (single_display)
+ return ps;
+ } else
+ return ps;
+ }
+ break;
+ /* internal states */
+ case POWER_STATE_TYPE_INTERNAL_UVD:
+ if (adev->pm.dpm.uvd_ps)
+ return adev->pm.dpm.uvd_ps;
+ else
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_BOOT:
+ return adev->pm.dpm.boot_ps;
+ case POWER_STATE_TYPE_INTERNAL_THERMAL:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_THERMAL)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_ACPI:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_ULV:
+ if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV)
+ return ps;
+ break;
+ case POWER_STATE_TYPE_INTERNAL_3DPERF:
+ if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE)
+ return ps;
+ break;
+ default:
+ break;
+ }
+ }
+ /* use a fallback state if we didn't match */
+ switch (dpm_state) {
+ case POWER_STATE_TYPE_INTERNAL_UVD_SD:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD;
+ goto restart_search;
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD:
+ case POWER_STATE_TYPE_INTERNAL_UVD_HD2:
+ case POWER_STATE_TYPE_INTERNAL_UVD_MVC:
+ if (adev->pm.dpm.uvd_ps) {
+ return adev->pm.dpm.uvd_ps;
+ } else {
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ }
+ case POWER_STATE_TYPE_INTERNAL_THERMAL:
+ dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI;
+ goto restart_search;
+ case POWER_STATE_TYPE_INTERNAL_ACPI:
+ dpm_state = POWER_STATE_TYPE_BATTERY;
+ goto restart_search;
+ case POWER_STATE_TYPE_BATTERY:
+ case POWER_STATE_TYPE_BALANCED:
+ case POWER_STATE_TYPE_INTERNAL_3DPERF:
+ dpm_state = POWER_STATE_TYPE_PERFORMANCE;
+ goto restart_search;
+ default:
+ break;
+ }
+
+ return NULL;
+}
+
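+/* Switch to the power state picked by amdgpu_dpm_pick_power_state(),
+ * skipping the transition when the requested state equals the
+ * current one.
+ */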
+static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
+{
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct amdgpu_ps *ps;
+ enum amd_pm_state_type dpm_state;
+ int ret;
+ bool equal = false;
+
+ /* if dpm init failed */
+ if (!adev->pm.dpm_enabled)
+ return 0;
+
+ if (adev->pm.dpm.user_state != adev->pm.dpm.state) {
+ /* add other state override checks here */
+ if ((!adev->pm.dpm.thermal_active) &&
+ (!adev->pm.dpm.uvd_active))
+ adev->pm.dpm.state = adev->pm.dpm.user_state;
+ }
+ dpm_state = adev->pm.dpm.state;
+
+ ps = amdgpu_dpm_pick_power_state(adev, dpm_state);
+ if (ps)
+ adev->pm.dpm.requested_ps = ps;
+ else
+ return -EINVAL;
+
+ if (amdgpu_dpm == 1 && pp_funcs->print_power_state) {
+ printk("switching from power state:\n");
+ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps);
+ printk("switching to power state:\n");
+ amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps);
+ }
+
+ /* update whether vce is active */
+ ps->vce_active = adev->pm.dpm.vce_active;
+ if (pp_funcs->display_configuration_changed)
+ amdgpu_dpm_display_configuration_changed(adev);
+
+ ret = amdgpu_dpm_pre_set_power_state(adev);
+ if (ret)
+ return ret;
+
+ if (pp_funcs->check_state_equal) {
+ if (amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal))
+ equal = false;
+ }
+
+ if (equal)
+ return 0;
+
+ if (pp_funcs->set_power_state)
+ pp_funcs->set_power_state(adev->powerplay.pp_handle);
+
+ amdgpu_dpm_post_set_power_state(adev);
+
+ adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
+ adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
+
+ if (pp_funcs->force_performance_level) {
+ if (adev->pm.dpm.thermal_active) {
+ enum amd_dpm_forced_level level = adev->pm.dpm.forced_level;
+ /* force low perf level for thermal */
+ pp_funcs->force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW);
+ /* save the user's level */
+ adev->pm.dpm.forced_level = level;
+ } else {
+ /* otherwise, user selected level */
+ pp_funcs->force_performance_level(adev, adev->pm.dpm.forced_level);
+ }
+ }
+
+ return 0;
+}
+
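+/* Re-evaluate the power state after a display or engine activity
+ * change: update the display bandwidth, wait for the rings to drain
+ * and then switch power states if necessary.
+ */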
+void amdgpu_legacy_dpm_compute_clocks(void *handle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ int i = 0;
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_bandwidth_update(adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+
+ amdgpu_dpm_get_active_displays(adev);
+
+ amdgpu_dpm_change_power_state_locked(adev);
+}
+
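+/* Worker to switch to the internal thermal state when the thermal
+ * interrupt fires and back to the user state once the temperature
+ * has dropped below the configured minimum again.
+ */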
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work)
+{
+ struct amdgpu_device *adev =
+ container_of(work, struct amdgpu_device,
+ pm.dpm.thermal.work);
+ const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ /* switch to the thermal state */
+ enum amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL;
+ int temp, size = sizeof(temp);
+
+ if (!adev->pm.dpm_enabled)
+ return;
+
+ if (!pp_funcs->read_sensor(adev->powerplay.pp_handle,
+ AMDGPU_PP_SENSOR_GPU_TEMP,
+ (void *)&temp,
+ &size)) {
+ if (temp < adev->pm.dpm.thermal.min_temp)
+ /* switch back to the user state */
+ dpm_state = adev->pm.dpm.user_state;
+ } else {
+ if (adev->pm.dpm.thermal.high_to_low)
+ /* switch back to the user state */
+ dpm_state = adev->pm.dpm.user_state;
+ }
+
+ if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL)
+ adev->pm.dpm.thermal_active = true;
+ else
+ adev->pm.dpm.thermal_active = false;
+
+ adev->pm.dpm.state = dpm_state;
+
+ amdgpu_legacy_dpm_compute_clocks(adev->powerplay.pp_handle);
+}
diff --git a/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
new file mode 100644
index 000000000000..93bd3973330c
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/legacy_dpm.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright 2021 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __LEGACY_DPM_H__
+#define __LEGACY_DPM_H__
+
+void amdgpu_dpm_print_class_info(u32 class, u32 class2);
+void amdgpu_dpm_print_cap_info(u32 caps);
+void amdgpu_dpm_print_ps_status(struct amdgpu_device *adev,
+ struct amdgpu_ps *rps);
+int amdgpu_get_platform_caps(struct amdgpu_device *adev);
+int amdgpu_parse_extended_power_table(struct amdgpu_device *adev);
+void amdgpu_free_extended_power_table(struct amdgpu_device *adev);
+void amdgpu_add_thermal_controller(struct amdgpu_device *adev);
+struct amd_vce_state *amdgpu_get_vce_clock_state(void *handle, u32 idx);
+void amdgpu_pm_print_power_states(struct amdgpu_device *adev);
+void amdgpu_legacy_dpm_compute_clocks(void *handle);
+void amdgpu_dpm_thermal_work_handler(struct work_struct *work);
+#endif
diff --git a/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h b/drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h
index 8463245f424f..8463245f424f 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h
index 055321f61ca7..055321f61ca7 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/r600_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/r600_dpm.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
index 81f82aa05ec2..caae54487f9c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.c
@@ -28,6 +28,7 @@
#include "amdgpu_pm.h"
#include "amdgpu_dpm.h"
#include "amdgpu_atombios.h"
+#include "amdgpu_dpm_internal.h"
#include "amd_pcie.h"
#include "sid.h"
#include "r600_dpm.h"
@@ -37,6 +38,7 @@
#include <linux/math64.h>
#include <linux/seq_file.h>
#include <linux/firmware.h>
+#include "legacy_dpm.h"
#define MC_CG_ARB_FREQ_F0 0x0a
#define MC_CG_ARB_FREQ_F1 0x0b
@@ -96,6 +98,19 @@ union pplib_clock_info {
struct _ATOM_PPLIB_SI_CLOCK_INFO si;
};
+enum si_dpm_auto_throttle_src {
+ SI_DPM_AUTO_THROTTLE_SRC_THERMAL,
+ SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL
+};
+
+enum si_dpm_event_src {
+ SI_DPM_EVENT_SRC_ANALOG = 0,
+ SI_DPM_EVENT_SRC_EXTERNAL = 1,
+ SI_DPM_EVENT_SRC_DIGITAL = 2,
+ SI_DPM_EVENT_SRC_ANALOG_OR_EXTERNAL = 3,
+ SI_DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL = 4
+};
+
static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
{
R600_UTC_DFLT_00,
@@ -3718,25 +3733,25 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
bool want_thermal_protection;
- enum amdgpu_dpm_event_src dpm_event_src;
+ enum si_dpm_event_src dpm_event_src;
switch (sources) {
case 0:
default:
want_thermal_protection = false;
break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
+ case (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL):
want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
+ dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL;
break;
- case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
+ case (1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
+ dpm_event_src = SI_DPM_EVENT_SRC_EXTERNAL;
break;
- case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
- (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
+ case ((1 << SI_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
+ (1 << SI_DPM_AUTO_THROTTLE_SRC_THERMAL)):
want_thermal_protection = true;
- dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
+ dpm_event_src = SI_DPM_EVENT_SRC_DIGITAL_OR_EXTERNAL;
break;
}
@@ -3750,7 +3765,7 @@ static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
}
static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
- enum amdgpu_dpm_auto_throttle_src source,
+ enum si_dpm_auto_throttle_src source,
bool enable)
{
struct rv7xx_power_info *pi = rv770_get_pi(adev);
@@ -3877,6 +3892,40 @@ static int si_set_boot_state(struct amdgpu_device *adev)
}
#endif
+static int si_set_powergating_by_smu(void *handle,
+ uint32_t block_type,
+ bool gate)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ switch (block_type) {
+ case AMD_IP_BLOCK_TYPE_UVD:
+ if (!gate) {
+ adev->pm.dpm.uvd_active = true;
+ adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+ } else {
+ adev->pm.dpm.uvd_active = false;
+ }
+
+ amdgpu_legacy_dpm_compute_clocks(handle);
+ break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ if (!gate) {
+ adev->pm.dpm.vce_active = true;
+ /* XXX select vce level based on ring/task */
+ adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+ } else {
+ adev->pm.dpm.vce_active = false;
+ }
+
+ amdgpu_legacy_dpm_compute_clocks(handle);
+ break;
+ default:
+ break;
+ }
+ return 0;
+}
+
static int si_set_sw_state(struct amdgpu_device *adev)
{
return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
@@ -4927,6 +4976,31 @@ static int si_populate_smc_initial_state(struct amdgpu_device *adev,
return 0;
}
+static enum si_pcie_gen si_gen_pcie_gen_support(struct amdgpu_device *adev,
+ u32 sys_mask,
+ enum si_pcie_gen asic_gen,
+ enum si_pcie_gen default_gen)
+{
+ switch (asic_gen) {
+ case SI_PCIE_GEN1:
+ return SI_PCIE_GEN1;
+ case SI_PCIE_GEN2:
+ return SI_PCIE_GEN2;
+ case SI_PCIE_GEN3:
+ return SI_PCIE_GEN3;
+ default:
+ if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3) &&
+ (default_gen == SI_PCIE_GEN3))
+ return SI_PCIE_GEN3;
+ else if ((sys_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2) &&
+ (default_gen == SI_PCIE_GEN2))
+ return SI_PCIE_GEN2;
+ else
+ return SI_PCIE_GEN1;
+ }
+ return SI_PCIE_GEN1;
+}
+
static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
SISLANDS_SMC_STATETABLE *table)
{
@@ -4989,10 +5063,10 @@ static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
&table->ACPIState.level.std_vddc);
}
table->ACPIState.level.gen2PCIE =
- (u8)amdgpu_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- AMDGPU_PCIE_GEN1);
+ (u8)si_gen_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ SI_PCIE_GEN1);
if (si_pi->vddc_phase_shed_control)
si_populate_phase_shedding_value(adev,
@@ -5430,7 +5504,7 @@ static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
bool gmc_pg = false;
if (eg_pi->pcie_performance_request &&
- (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
+ (si_pi->force_pcie_gen != SI_PCIE_GEN_INVALID))
level->gen2PCIE = (u8)si_pi->force_pcie_gen;
else
level->gen2PCIE = (u8)pl->pcie_gen;
@@ -6147,8 +6221,8 @@ static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
}
-static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
- struct amdgpu_ps *amdgpu_state)
+static enum si_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
+ struct amdgpu_ps *amdgpu_state)
{
struct si_ps *state = si_get_ps(amdgpu_state);
int i;
@@ -6177,27 +6251,27 @@ static void si_request_link_speed_change_before_state_change(struct amdgpu_devic
struct amdgpu_ps *amdgpu_current_state)
{
struct si_power_info *si_pi = si_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
- enum amdgpu_pcie_gen current_link_speed;
+ enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+ enum si_pcie_gen current_link_speed;
- if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
+ if (si_pi->force_pcie_gen == SI_PCIE_GEN_INVALID)
current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
else
current_link_speed = si_pi->force_pcie_gen;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
si_pi->pspp_notify_required = false;
if (target_link_speed > current_link_speed) {
switch (target_link_speed) {
#if defined(CONFIG_ACPI)
- case AMDGPU_PCIE_GEN3:
+ case SI_PCIE_GEN3:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
break;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
- if (current_link_speed == AMDGPU_PCIE_GEN2)
+ si_pi->force_pcie_gen = SI_PCIE_GEN2;
+ if (current_link_speed == SI_PCIE_GEN2)
break;
fallthrough;
- case AMDGPU_PCIE_GEN2:
+ case SI_PCIE_GEN2:
if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
break;
fallthrough;
@@ -6217,13 +6291,13 @@ static void si_notify_link_speed_change_after_state_change(struct amdgpu_device
struct amdgpu_ps *amdgpu_current_state)
{
struct si_power_info *si_pi = si_get_pi(adev);
- enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
+ enum si_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
u8 request;
if (si_pi->pspp_notify_required) {
- if (target_link_speed == AMDGPU_PCIE_GEN3)
+ if (target_link_speed == SI_PCIE_GEN3)
request = PCIE_PERF_REQ_PECI_GEN3;
- else if (target_link_speed == AMDGPU_PCIE_GEN2)
+ else if (target_link_speed == SI_PCIE_GEN2)
request = PCIE_PERF_REQ_PECI_GEN2;
else
request = PCIE_PERF_REQ_PECI_GEN1;
@@ -6546,6 +6620,9 @@ static int si_dpm_get_fan_speed_pwm(void *handle,
u64 tmp64;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!speed)
+ return -EINVAL;
+
if (adev->pm.no_fan)
return -ENOENT;
@@ -6596,10 +6673,13 @@ static int si_dpm_set_fan_speed_pwm(void *handle,
return 0;
}
-static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
+static int si_dpm_set_fan_control_mode(void *handle, u32 mode)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (mode == U32_MAX)
+ return -EINVAL;
+
if (mode) {
/* stop auto-manage */
if (adev->pm.dpm.fan.ucode_fan_control)
@@ -6612,19 +6692,26 @@ static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
else
si_fan_ctrl_set_default_mode(adev);
}
+
+ return 0;
}
-static u32 si_dpm_get_fan_control_mode(void *handle)
+static int si_dpm_get_fan_control_mode(void *handle, u32 *fan_mode)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct si_power_info *si_pi = si_get_pi(adev);
u32 tmp;
+ if (!fan_mode)
+ return -EINVAL;
+
if (si_pi->fan_is_controlled_by_smc)
return 0;
tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
- return (tmp >> FDO_PWM_MODE_SHIFT);
+ *fan_mode = (tmp >> FDO_PWM_MODE_SHIFT);
+
+ return 0;
}
#if 0
@@ -6864,7 +6951,7 @@ static int si_dpm_enable(struct amdgpu_device *adev)
si_enable_sclk_control(adev, true);
si_start_dpm(adev);
- si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
+ si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
ni_update_current_ps(adev, boot_ps);
@@ -6904,7 +6991,7 @@ static void si_dpm_disable(struct amdgpu_device *adev)
si_enable_power_containment(adev, boot_ps, false);
si_enable_smc_cac(adev, boot_ps, false);
si_enable_spread_spectrum(adev, false);
- si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
+ si_enable_auto_throttle_source(adev, SI_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
si_stop_dpm(adev);
si_reset_to_default(adev);
si_dpm_stop_smc(adev);
@@ -6946,10 +7033,7 @@ static int si_power_control_set_level(struct amdgpu_device *adev)
ret = si_resume_smc(adev);
if (ret)
return ret;
- ret = si_set_sw_state(adev);
- if (ret)
- return ret;
- return 0;
+ return si_set_sw_state(adev);
}
static void si_set_vce_clock(struct amdgpu_device *adev,
@@ -7148,10 +7232,10 @@ static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
pl->flags = le32_to_cpu(clock_info->si.ulFlags);
- pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
- si_pi->sys_pcie_mask,
- si_pi->boot_pcie_gen,
- clock_info->si.ucPCIEGen);
+ pl->pcie_gen = si_gen_pcie_gen_support(adev,
+ si_pi->sys_pcie_mask,
+ si_pi->boot_pcie_gen,
+ clock_info->si.ucPCIEGen);
/* patch up vddc if necessary */
ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
@@ -7318,7 +7402,7 @@ static int si_dpm_init(struct amdgpu_device *adev)
si_pi->sys_pcie_mask =
adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
- si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
+ si_pi->force_pcie_gen = SI_PCIE_GEN_INVALID;
si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
si_set_max_cu_value(adev);
@@ -7713,21 +7797,18 @@ static int si_dpm_sw_init(void *handle)
return ret;
INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
- mutex_lock(&adev->pm.mutex);
ret = si_dpm_init(adev);
if (ret)
goto dpm_failed;
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
if (amdgpu_dpm == 1)
amdgpu_pm_print_power_states(adev);
- mutex_unlock(&adev->pm.mutex);
DRM_INFO("amdgpu: dpm initialized\n");
return 0;
dpm_failed:
si_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
DRM_ERROR("amdgpu: dpm initialization failed\n");
return ret;
}
@@ -7738,9 +7819,7 @@ static int si_dpm_sw_fini(void *handle)
flush_work(&adev->pm.dpm.thermal.work);
- mutex_lock(&adev->pm.mutex);
si_dpm_fini(adev);
- mutex_unlock(&adev->pm.mutex);
return 0;
}
@@ -7754,15 +7833,13 @@ static int si_dpm_hw_init(void *handle)
if (!amdgpu_dpm)
return 0;
- mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_legacy_dpm_compute_clocks(adev);
return ret;
}
@@ -7770,11 +7847,8 @@ static int si_dpm_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
+ if (adev->pm.dpm_enabled)
si_dpm_disable(adev);
- mutex_unlock(&adev->pm.mutex);
- }
return 0;
}
@@ -7784,12 +7858,10 @@ static int si_dpm_suspend(void *handle)
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
if (adev->pm.dpm_enabled) {
- mutex_lock(&adev->pm.mutex);
/* disable dpm */
si_dpm_disable(adev);
/* reset the power state */
adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
- mutex_unlock(&adev->pm.mutex);
}
return 0;
}
@@ -7801,16 +7873,14 @@ static int si_dpm_resume(void *handle)
if (adev->pm.dpm_enabled) {
/* asic init will reset to the boot state */
- mutex_lock(&adev->pm.mutex);
si_dpm_setup_asic(adev);
ret = si_dpm_enable(adev);
if (ret)
adev->pm.dpm_enabled = false;
else
adev->pm.dpm_enabled = true;
- mutex_unlock(&adev->pm.mutex);
if (adev->pm.dpm_enabled)
- amdgpu_pm_compute_clocks(adev);
+ amdgpu_legacy_dpm_compute_clocks(adev);
}
return 0;
}
@@ -8055,6 +8125,7 @@ static const struct amd_pm_funcs si_dpm_funcs = {
.print_power_state = &si_dpm_print_power_state,
.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
.force_performance_level = &si_dpm_force_performance_level,
+ .set_powergating_by_smu = &si_set_powergating_by_smu,
.vblank_too_short = &si_dpm_vblank_too_short,
.set_fan_control_mode = &si_dpm_set_fan_control_mode,
.get_fan_control_mode = &si_dpm_get_fan_control_mode,
@@ -8063,6 +8134,7 @@ static const struct amd_pm_funcs si_dpm_funcs = {
.check_state_equal = &si_check_state_equal,
.get_vce_clock_state = amdgpu_get_vce_clock_state,
.read_sensor = &si_dpm_read_sensor,
+ .pm_compute_clocks = amdgpu_legacy_dpm_compute_clocks,
};
static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
index bc0be6818e21..11cb7874a6bb 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_dpm.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_dpm.h
@@ -595,13 +595,20 @@ struct rv7xx_power_info {
RV770_SMC_STATETABLE smc_statetable;
};
+enum si_pcie_gen {
+ SI_PCIE_GEN1 = 0,
+ SI_PCIE_GEN2 = 1,
+ SI_PCIE_GEN3 = 2,
+ SI_PCIE_GEN_INVALID = 0xffff
+};
+
struct rv7xx_pl {
u32 sclk;
u32 mclk;
u16 vddc;
u16 vddci; /* eg+ only */
u32 flags;
- enum amdgpu_pcie_gen pcie_gen; /* si+ only */
+ enum si_pcie_gen pcie_gen; /* si+ only */
};
struct rv7xx_ps {
@@ -967,9 +974,9 @@ struct si_power_info {
struct si_ulv_param ulv;
u32 max_cu;
/* pcie gen */
- enum amdgpu_pcie_gen force_pcie_gen;
- enum amdgpu_pcie_gen boot_pcie_gen;
- enum amdgpu_pcie_gen acpi_pcie_gen;
+ enum si_pcie_gen force_pcie_gen;
+ enum si_pcie_gen boot_pcie_gen;
+ enum si_pcie_gen acpi_pcie_gen;
u32 sys_pcie_mask;
/* flags */
bool enable_dte;
diff --git a/drivers/gpu/drm/amd/pm/powerplay/si_smc.c b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
index 8f994ffa9cd1..8f994ffa9cd1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/si_smc.c
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/si_smc.c
diff --git a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h
index c7dc117a688c..c7dc117a688c 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/sislands_smc.h
+++ b/drivers/gpu/drm/amd/pm/legacy-dpm/sislands_smc.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/Makefile b/drivers/gpu/drm/amd/pm/powerplay/Makefile
index 0fb114adc79f..795a3624cbbf 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/Makefile
+++ b/drivers/gpu/drm/amd/pm/powerplay/Makefile
@@ -30,10 +30,6 @@ include $(AMD_POWERPLAY)
POWER_MGR-y = amd_powerplay.o
-POWER_MGR-$(CONFIG_DRM_AMDGPU_CIK)+= kv_dpm.o kv_smc.o
-
-POWER_MGR-$(CONFIG_DRM_AMDGPU_SI)+= si_dpm.o si_smc.o
-
AMD_PP_POWER = $(addprefix $(AMD_PP_PATH)/,$(POWER_MGR-y))
AMD_POWERPLAY_FILES += $(AMD_PP_POWER)
diff --git a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
index 3ab67b232cd4..a2da46bf3985 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/amd_powerplay.c
@@ -31,7 +31,8 @@
#include "power_state.h"
#include "amdgpu.h"
#include "hwmgr.h"
-
+#include "amdgpu_dpm_internal.h"
+#include "amdgpu_display.h"
static const struct amd_pm_funcs pp_dpm_funcs;
@@ -49,7 +50,6 @@ static int amd_powerplay_create(struct amdgpu_device *adev)
hwmgr->adev = adev;
hwmgr->not_vf = !amdgpu_sriov_vf(adev);
hwmgr->device = amdgpu_cgs_create_device(adev);
- mutex_init(&hwmgr->smu_lock);
mutex_init(&hwmgr->msg_lock);
hwmgr->chip_family = adev->family;
hwmgr->chip_id = adev->asic_type;
@@ -177,12 +177,9 @@ static int pp_late_init(void *handle)
struct amdgpu_device *adev = handle;
struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle;
- if (hwmgr && hwmgr->pm_en) {
- mutex_lock(&hwmgr->smu_lock);
+ if (hwmgr && hwmgr->pm_en)
hwmgr_handle_task(hwmgr,
AMD_PP_TASK_COMPLETE_INIT, NULL);
- mutex_unlock(&hwmgr->smu_lock);
- }
if (adev->pm.smu_prv_buffer_size != 0)
pp_reserve_vram_for_smu(adev);
@@ -322,12 +319,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level & profile_mode_mask) {
hwmgr->saved_dpm_level = hwmgr->dpm_level;
hwmgr->en_umd_pstate = true;
- amdgpu_device_ip_set_powergating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_UNGATE);
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
}
} else {
/* exit umd pstate, restore level, enable gfx cg*/
@@ -335,12 +326,6 @@ static void pp_dpm_en_umd_pstate(struct pp_hwmgr *hwmgr,
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = hwmgr->saved_dpm_level;
hwmgr->en_umd_pstate = false;
- amdgpu_device_ip_set_clockgating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_powergating_state(hwmgr->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
}
}
}
@@ -356,11 +341,9 @@ static int pp_dpm_force_performance_level(void *handle,
if (level == hwmgr->dpm_level)
return 0;
- mutex_lock(&hwmgr->smu_lock);
pp_dpm_en_umd_pstate(hwmgr, &level);
hwmgr->request_dpm_level = level;
hwmgr_handle_task(hwmgr, AMD_PP_TASK_READJUST_POWER_STATE, NULL);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -369,21 +352,16 @@ static enum amd_dpm_forced_level pp_dpm_get_performance_level(
void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- enum amd_dpm_forced_level level;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- level = hwmgr->dpm_level;
- mutex_unlock(&hwmgr->smu_lock);
- return level;
+ return hwmgr->dpm_level;
}
static uint32_t pp_dpm_get_sclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
- uint32_t clk = 0;
if (!hwmgr || !hwmgr->pm_en)
return 0;
@@ -392,16 +370,12 @@ static uint32_t pp_dpm_get_sclk(void *handle, bool low)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- clk = hwmgr->hwmgr_func->get_sclk(hwmgr, low);
- mutex_unlock(&hwmgr->smu_lock);
- return clk;
+ return hwmgr->hwmgr_func->get_sclk(hwmgr, low);
}
static uint32_t pp_dpm_get_mclk(void *handle, bool low)
{
struct pp_hwmgr *hwmgr = handle;
- uint32_t clk = 0;
if (!hwmgr || !hwmgr->pm_en)
return 0;
@@ -410,10 +384,7 @@ static uint32_t pp_dpm_get_mclk(void *handle, bool low)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- clk = hwmgr->hwmgr_func->get_mclk(hwmgr, low);
- mutex_unlock(&hwmgr->smu_lock);
- return clk;
+ return hwmgr->hwmgr_func->get_mclk(hwmgr, low);
}
static void pp_dpm_powergate_vce(void *handle, bool gate)
@@ -427,9 +398,7 @@ static void pp_dpm_powergate_vce(void *handle, bool gate)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_vce(hwmgr, gate);
- mutex_unlock(&hwmgr->smu_lock);
}
static void pp_dpm_powergate_uvd(void *handle, bool gate)
@@ -443,25 +412,18 @@ static void pp_dpm_powergate_uvd(void *handle, bool gate)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->powergate_uvd(hwmgr, gate);
- mutex_unlock(&hwmgr->smu_lock);
}
static int pp_dpm_dispatch_tasks(void *handle, enum amd_pp_task task_id,
enum amd_pm_state_type *user_state)
{
- int ret = 0;
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr_handle_task(hwmgr, task_id, user_state);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr_handle_task(hwmgr, task_id, user_state);
}
static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
@@ -473,8 +435,6 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
if (!hwmgr || !hwmgr->pm_en || !hwmgr->current_ps)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
state = hwmgr->current_ps;
switch (state->classification.ui_label) {
@@ -494,115 +454,107 @@ static enum amd_pm_state_type pp_dpm_get_current_power_state(void *handle)
pm_type = POWER_STATE_TYPE_DEFAULT;
break;
}
- mutex_unlock(&hwmgr->smu_lock);
return pm_type;
}
-static void pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
+static int pp_dpm_set_fan_control_mode(void *handle, uint32_t mode)
{
struct pp_hwmgr *hwmgr = handle;
if (!hwmgr || !hwmgr->pm_en)
- return;
+ return -EOPNOTSUPP;
+
+ if (hwmgr->hwmgr_func->set_fan_control_mode == NULL)
+ return -EOPNOTSUPP;
+
+ if (mode == U32_MAX)
+ return -EINVAL;
- if (hwmgr->hwmgr_func->set_fan_control_mode == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return;
- }
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_fan_control_mode(hwmgr, mode);
- mutex_unlock(&hwmgr->smu_lock);
+
+ return 0;
}
-static uint32_t pp_dpm_get_fan_control_mode(void *handle)
+static int pp_dpm_get_fan_control_mode(void *handle, uint32_t *fan_mode)
{
struct pp_hwmgr *hwmgr = handle;
- uint32_t mode = 0;
if (!hwmgr || !hwmgr->pm_en)
- return 0;
+ return -EOPNOTSUPP;
- if (hwmgr->hwmgr_func->get_fan_control_mode == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return 0;
- }
- mutex_lock(&hwmgr->smu_lock);
- mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
- return mode;
+ if (hwmgr->hwmgr_func->get_fan_control_mode == NULL)
+ return -EOPNOTSUPP;
+
+ if (!fan_mode)
+ return -EINVAL;
+
+ *fan_mode = hwmgr->hwmgr_func->get_fan_control_mode(hwmgr);
+ return 0;
}
static int pp_dpm_set_fan_speed_pwm(void *handle, uint32_t speed)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
+ return -EOPNOTSUPP;
+
+ if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL)
+ return -EOPNOTSUPP;
+
+ if (speed == U32_MAX)
return -EINVAL;
- if (hwmgr->hwmgr_func->set_fan_speed_pwm == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return 0;
- }
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_fan_speed_pwm(hwmgr, speed);
}
static int pp_dpm_get_fan_speed_pwm(void *handle, uint32_t *speed)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
- return -EINVAL;
+ return -EOPNOTSUPP;
- if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return 0;
- }
+ if (hwmgr->hwmgr_func->get_fan_speed_pwm == NULL)
+ return -EOPNOTSUPP;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ if (!speed)
+ return -EINVAL;
+
+ return hwmgr->hwmgr_func->get_fan_speed_pwm(hwmgr, speed);
}
static int pp_dpm_get_fan_speed_rpm(void *handle, uint32_t *rpm)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
- return -EINVAL;
+ return -EOPNOTSUPP;
if (hwmgr->hwmgr_func->get_fan_speed_rpm == NULL)
+ return -EOPNOTSUPP;
+
+ if (!rpm)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_fan_speed_rpm(hwmgr, rpm);
}
static int pp_dpm_set_fan_speed_rpm(void *handle, uint32_t rpm)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
+ return -EOPNOTSUPP;
+
+ if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL)
+ return -EOPNOTSUPP;
+
+ if (rpm == U32_MAX)
return -EINVAL;
- if (hwmgr->hwmgr_func->set_fan_speed_rpm == NULL) {
- pr_info_ratelimited("%s was not implemented.\n", __func__);
- return 0;
- }
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_fan_speed_rpm(hwmgr, rpm);
}
static int pp_dpm_get_pp_num_states(void *handle,
@@ -616,8 +568,6 @@ static int pp_dpm_get_pp_num_states(void *handle,
if (!hwmgr || !hwmgr->pm_en ||!hwmgr->ps)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
data->nums = hwmgr->num_ps;
for (i = 0; i < hwmgr->num_ps; i++) {
@@ -640,23 +590,18 @@ static int pp_dpm_get_pp_num_states(void *handle,
data->states[i] = POWER_STATE_TYPE_DEFAULT;
}
}
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_dpm_get_pp_table(void *handle, char **table)
{
struct pp_hwmgr *hwmgr = handle;
- int size = 0;
if (!hwmgr || !hwmgr->pm_en ||!hwmgr->soft_pp_table)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
*table = (char *)hwmgr->soft_pp_table;
- size = hwmgr->soft_pp_table_size;
- mutex_unlock(&hwmgr->smu_lock);
- return size;
+ return hwmgr->soft_pp_table_size;
}
static int amd_powerplay_reset(void *handle)
@@ -683,13 +628,12 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
if (!hwmgr->hardcode_pp_table) {
hwmgr->hardcode_pp_table = kmemdup(hwmgr->soft_pp_table,
hwmgr->soft_pp_table_size,
GFP_KERNEL);
if (!hwmgr->hardcode_pp_table)
- goto err;
+ return ret;
}
memcpy(hwmgr->hardcode_pp_table, buf, size);
@@ -698,17 +642,11 @@ static int pp_dpm_set_pp_table(void *handle, const char *buf, size_t size)
ret = amd_powerplay_reset(handle);
if (ret)
- goto err;
+ return ret;
- if (hwmgr->hwmgr_func->avfs_control) {
+ if (hwmgr->hwmgr_func->avfs_control)
ret = hwmgr->hwmgr_func->avfs_control(hwmgr, false);
- if (ret)
- goto err;
- }
- mutex_unlock(&hwmgr->smu_lock);
- return 0;
-err:
- mutex_unlock(&hwmgr->smu_lock);
+
return ret;
}
@@ -716,7 +654,6 @@ static int pp_dpm_force_clock_level(void *handle,
enum pp_clock_type type, uint32_t mask)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -731,17 +668,13 @@ static int pp_dpm_force_clock_level(void *handle,
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->force_clock_level(hwmgr, type, mask);
}
static int pp_dpm_print_clock_levels(void *handle,
enum pp_clock_type type, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -750,16 +683,12 @@ static int pp_dpm_print_clock_levels(void *handle,
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->print_clock_levels(hwmgr, type, buf);
}
static int pp_dpm_get_sclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -768,16 +697,12 @@ static int pp_dpm_get_sclk_od(void *handle)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_sclk_od(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_sclk_od(hwmgr);
}
static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -787,16 +712,12 @@ static int pp_dpm_set_sclk_od(void *handle, uint32_t value)
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_sclk_od(hwmgr, value);
}
static int pp_dpm_get_mclk_od(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -805,16 +726,12 @@ static int pp_dpm_get_mclk_od(void *handle)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_mclk_od(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_mclk_od(hwmgr);
}
static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -823,17 +740,13 @@ static int pp_dpm_set_mclk_od(void *handle, uint32_t value)
pr_info_ratelimited("%s was not implemented.\n", __func__);
return 0;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_mclk_od(hwmgr, value);
}
static int pp_dpm_read_sensor(void *handle, int idx,
void *value, int *size)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !value)
return -EINVAL;
@@ -852,10 +765,7 @@ static int pp_dpm_read_sensor(void *handle, int idx,
*((uint32_t *)value) = hwmgr->thermal_controller.fanInfo.ulMaxRPM;
return 0;
default:
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->read_sensor(hwmgr, idx, value, size);
}
}
@@ -875,36 +785,28 @@ pp_dpm_get_vce_clock_state(void *handle, unsigned idx)
static int pp_get_power_profile_mode(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- int ret;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->get_power_profile_mode)
return -EOPNOTSUPP;
if (!buf)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->get_power_profile_mode(hwmgr, buf);
}
static int pp_set_power_profile_mode(void *handle, long *input, uint32_t size)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = -EOPNOTSUPP;
if (!hwmgr || !hwmgr->pm_en || !hwmgr->hwmgr_func->set_power_profile_mode)
- return ret;
+ return -EOPNOTSUPP;
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
pr_debug("power profile setting is for manual dpm mode only.\n");
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, input, size);
}
static int pp_set_fine_grain_clk_vol(void *handle, uint32_t type, long *input, uint32_t size)
@@ -969,8 +871,6 @@ static int pp_dpm_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
if (!en) {
hwmgr->workload_mask &= ~(1 << hwmgr->workload_prority[type]);
index = fls(hwmgr->workload_mask);
@@ -985,15 +885,12 @@ static int pp_dpm_switch_power_profile(void *handle,
if (type == PP_SMC_POWER_PROFILE_COMPUTE &&
hwmgr->hwmgr_func->disable_power_features_for_compute_performance) {
- if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en)) {
- mutex_unlock(&hwmgr->smu_lock);
+ if (hwmgr->hwmgr_func->disable_power_features_for_compute_performance(hwmgr, en))
return -EINVAL;
- }
}
if (hwmgr->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
hwmgr->hwmgr_func->set_power_profile_mode(hwmgr, &workload, 0);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1023,10 +920,8 @@ static int pp_set_power_limit(void *handle, uint32_t limit)
if (limit > max_power_limit)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_power_limit(hwmgr, limit);
hwmgr->power_limit = limit;
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1043,8 +938,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
if (power_type != PP_PWR_TYPE_SUSTAINED)
return -EOPNOTSUPP;
- mutex_lock(&hwmgr->smu_lock);
-
switch (pp_limit_level) {
case PP_PWR_LIMIT_CURRENT:
*limit = hwmgr->power_limit;
@@ -1064,8 +957,6 @@ static int pp_get_power_limit(void *handle, uint32_t *limit,
break;
}
- mutex_unlock(&hwmgr->smu_lock);
-
return ret;
}
@@ -1077,9 +968,7 @@ static int pp_display_configuration_change(void *handle,
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
phm_store_dal_configuration_data(hwmgr, display_config);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1087,15 +976,11 @@ static int pp_get_display_power_level(void *handle,
struct amd_pp_simple_clock_info *output)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!output)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_get_dal_power_level(hwmgr, output);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_dal_power_level(hwmgr, output);
}
static int pp_get_current_clocks(void *handle,
@@ -1109,8 +994,6 @@ static int pp_get_current_clocks(void *handle,
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
phm_get_dal_power_level(hwmgr, &simple_clocks);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps,
@@ -1123,7 +1006,6 @@ static int pp_get_current_clocks(void *handle,
if (ret) {
pr_debug("Error in phm_get_clock_info \n");
- mutex_unlock(&hwmgr->smu_lock);
return -EINVAL;
}
@@ -1146,14 +1028,12 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
}
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struct amd_pp_clocks *clocks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1161,10 +1041,7 @@ static int pp_get_clock_by_type(void *handle, enum amd_pp_clock_type type, struc
if (clocks == NULL)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_get_clock_by_type(hwmgr, type, clocks);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_clock_by_type(hwmgr, type, clocks);
}
static int pp_get_clock_by_type_with_latency(void *handle,
@@ -1172,15 +1049,11 @@ static int pp_get_clock_by_type_with_latency(void *handle,
struct pp_clock_levels_with_latency *clocks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_clock_by_type_with_latency(hwmgr, type, clocks);
}
static int pp_get_clock_by_type_with_voltage(void *handle,
@@ -1188,50 +1061,34 @@ static int pp_get_clock_by_type_with_voltage(void *handle,
struct pp_clock_levels_with_voltage *clocks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
-
- ret = phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
-
- mutex_unlock(&hwmgr->smu_lock);
- return ret;
+ return phm_get_clock_by_type_with_voltage(hwmgr, type, clocks);
}
static int pp_set_watermarks_for_clocks_ranges(void *handle,
void *clock_ranges)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !clock_ranges)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_set_watermarks_for_clocks_ranges(hwmgr,
- clock_ranges);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return phm_set_watermarks_for_clocks_ranges(hwmgr,
+ clock_ranges);
}
static int pp_display_clock_voltage_request(void *handle,
struct pp_display_clock_request *clock)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en ||!clock)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_display_clock_voltage_request(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return phm_display_clock_voltage_request(hwmgr, clock);
}
static int pp_get_display_mode_validation_clocks(void *handle,
@@ -1245,12 +1102,9 @@ static int pp_get_display_mode_validation_clocks(void *handle,
clocks->level = PP_DAL_POWERLEVEL_7;
- mutex_lock(&hwmgr->smu_lock);
-
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
ret = phm_get_max_high_clocks(hwmgr, clocks);
- mutex_unlock(&hwmgr->smu_lock);
return ret;
}
@@ -1362,9 +1216,7 @@ static int pp_notify_smu_enable_pwe(void *handle)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->smus_notify_pwe(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1380,9 +1232,7 @@ static int pp_enable_mgpu_fan_boost(void *handle)
hwmgr->hwmgr_func->enable_mgpu_fan_boost == NULL)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->enable_mgpu_fan_boost(hwmgr);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1399,9 +1249,7 @@ static int pp_set_min_deep_sleep_dcefclk(void *handle, uint32_t clock)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_min_deep_sleep_dcefclk(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1418,9 +1266,7 @@ static int pp_set_hard_min_dcefclk_by_freq(void *handle, uint32_t clock)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_hard_min_dcefclk_by_freq(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1437,9 +1283,7 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_hard_min_fclk_by_freq(hwmgr, clock);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1447,16 +1291,11 @@ static int pp_set_hard_min_fclk_by_freq(void *handle, uint32_t clock)
static int pp_set_active_display_count(void *handle, uint32_t count)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
- mutex_lock(&hwmgr->smu_lock);
- ret = phm_set_active_display_count(hwmgr, count);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return phm_set_active_display_count(hwmgr, count);
}
static int pp_get_asic_baco_capability(void *handle, bool *cap)
@@ -1471,9 +1310,7 @@ static int pp_get_asic_baco_capability(void *handle, bool *cap)
!hwmgr->hwmgr_func->get_asic_baco_capability)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->get_asic_baco_capability(hwmgr, cap);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1488,9 +1325,7 @@ static int pp_get_asic_baco_state(void *handle, int *state)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_asic_baco_state)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->get_asic_baco_state(hwmgr, (enum BACO_STATE *)state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1506,9 +1341,7 @@ static int pp_set_asic_baco_state(void *handle, int state)
!hwmgr->hwmgr_func->set_asic_baco_state)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_asic_baco_state(hwmgr, (enum BACO_STATE)state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1516,7 +1349,6 @@ static int pp_set_asic_baco_state(void *handle, int state)
static int pp_get_ppfeature_status(void *handle, char *buf)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en || !buf)
return -EINVAL;
@@ -1526,17 +1358,12 @@ static int pp_get_ppfeature_status(void *handle, char *buf)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->get_ppfeature_status(hwmgr, buf);
}
static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1546,17 +1373,12 @@ static int pp_set_ppfeature_status(void *handle, uint64_t ppfeature_masks)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->set_ppfeature_status(hwmgr, ppfeature_masks);
}
static int pp_asic_reset_mode_2(void *handle)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1566,17 +1388,12 @@ static int pp_asic_reset_mode_2(void *handle)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->asic_reset(hwmgr, SMU_ASIC_RESET_MODE_2);
}
static int pp_smu_i2c_bus_access(void *handle, bool acquire)
{
struct pp_hwmgr *hwmgr = handle;
- int ret = 0;
if (!hwmgr || !hwmgr->pm_en)
return -EINVAL;
@@ -1586,11 +1403,7 @@ static int pp_smu_i2c_bus_access(void *handle, bool acquire)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
- ret = hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
- mutex_unlock(&hwmgr->smu_lock);
-
- return ret;
+ return hwmgr->hwmgr_func->smu_i2c_bus_access(hwmgr, acquire);
}
static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
@@ -1603,9 +1416,7 @@ static int pp_set_df_cstate(void *handle, enum pp_df_cstate state)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_df_cstate)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_df_cstate(hwmgr, state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1620,9 +1431,7 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->set_xgmi_pstate)
return 0;
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->set_xgmi_pstate(hwmgr, pstate);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1630,7 +1439,6 @@ static int pp_set_xgmi_pstate(void *handle, uint32_t pstate)
static ssize_t pp_get_gpu_metrics(void *handle, void **table)
{
struct pp_hwmgr *hwmgr = handle;
- ssize_t size;
if (!hwmgr)
return -EINVAL;
@@ -1638,11 +1446,7 @@ static ssize_t pp_get_gpu_metrics(void *handle, void **table)
if (!hwmgr->pm_en || !hwmgr->hwmgr_func->get_gpu_metrics)
return -EOPNOTSUPP;
- mutex_lock(&hwmgr->smu_lock);
- size = hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
- mutex_unlock(&hwmgr->smu_lock);
-
- return size;
+ return hwmgr->hwmgr_func->get_gpu_metrics(hwmgr, table);
}
static int pp_gfx_state_change_set(void *handle, uint32_t state)
@@ -1657,9 +1461,7 @@ static int pp_gfx_state_change_set(void *handle, uint32_t state)
return -EINVAL;
}
- mutex_lock(&hwmgr->smu_lock);
hwmgr->hwmgr_func->gfx_state_change(hwmgr, state);
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
@@ -1673,16 +1475,49 @@ static int pp_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
- mutex_lock(&hwmgr->smu_lock);
if (adev->pm.smu_prv_buffer) {
amdgpu_bo_kmap(adev->pm.smu_prv_buffer, addr);
*size = adev->pm.smu_prv_buffer_size;
}
- mutex_unlock(&hwmgr->smu_lock);
return 0;
}
+static void pp_pm_compute_clocks(void *handle)
+{
+ struct pp_hwmgr *hwmgr = handle;
+ struct amdgpu_device *adev = hwmgr->adev;
+ int i = 0;
+
+ if (adev->mode_info.num_crtc)
+ amdgpu_display_bandwidth_update(adev);
+
+ for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+ struct amdgpu_ring *ring = adev->rings[i];
+ if (ring && ring->sched.ready)
+ amdgpu_fence_wait_empty(ring);
+ }
+
+ if (!amdgpu_device_has_dc_support(adev)) {
+ amdgpu_dpm_get_active_displays(adev);
+ adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count;
+ adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev);
+ adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev);
+ /* we have issues with mclk switching with
+	 * refresh rates over 120 Hz on the non-DC code.
+ */
+ if (adev->pm.pm_display_cfg.vrefresh > 120)
+ adev->pm.pm_display_cfg.min_vblank_time = 0;
+
+ pp_display_configuration_change(handle,
+ &adev->pm.pm_display_cfg);
+ }
+
+ pp_dpm_dispatch_tasks(handle,
+ AMD_PP_TASK_DISPLAY_CONFIG_CHANGE,
+ NULL);
+}
+
static const struct amd_pm_funcs pp_dpm_funcs = {
.load_firmware = pp_dpm_load_fw,
.wait_for_fw_loading_complete = pp_dpm_fw_loading_complete,
@@ -1747,4 +1582,5 @@ static const struct amd_pm_funcs pp_dpm_funcs = {
.get_gpu_metrics = pp_get_gpu_metrics,
.gfx_state_change_set = pp_gfx_state_change_set,
.get_smu_prv_buf_details = pp_get_prv_buffer_details,
+ .pm_compute_clocks = pp_pm_compute_clocks,
};
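The smu_lock removals throughout amd_powerplay.c above are only safe because serialization moves up one layer: every entry into the backend is expected to funnel through the amdgpu_dpm wrappers, which take a single device-wide lock around the pp_funcs call. A minimal sketch of the assumed wrapper shape (the wrapper name and the adev->pm.mutex usage belong to the amdgpu_dpm layer, which is outside this diff):

	int amdgpu_dpm_set_fan_speed_pwm(struct amdgpu_device *adev, uint32_t speed)
	{
		const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
		int ret;

		if (!pp_funcs->set_fan_speed_pwm)
			return -EOPNOTSUPP;

		/* One coarse lock replaces the per-backend smu_lock deleted above. */
		mutex_lock(&adev->pm.mutex);
		ret = pp_funcs->set_fan_speed_pwm(adev->powerplay.pp_handle, speed);
		mutex_unlock(&adev->pm.mutex);

		return ret;
	}

Note also that the fan interfaces above now return -EOPNOTSUPP when no handler exists and reserve -EINVAL for bad arguments (U32_MAX, NULL output pointers), so callers such as the hwmon code can tell "unsupported" apart from "invalid".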
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
index cd99db0dc2be..e4fcbf8a7eb5 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu7_hwmgr.c
@@ -2109,7 +2109,7 @@ static void smu7_patch_ppt_v1_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
+ pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
}
/**
@@ -2592,7 +2592,7 @@ static void smu7_patch_ppt_v0_with_vdd_leakage(struct pp_hwmgr *hwmgr,
}
if (*voltage > ATOM_VIRTUAL_VOLTAGE_ID0)
- pr_err("Voltage value looks like a Leakage ID but it's not patched \n");
+ pr_info("Voltage value looks like a Leakage ID but it's not patched\n");
}
@@ -3295,10 +3295,6 @@ static int smu7_apply_state_adjust_rules(struct pp_hwmgr *hwmgr,
request_ps->classification.ui_label);
data->mclk_ignore_signal = false;
- PP_ASSERT_WITH_CODE(smu7_ps->performance_level_count == 2,
- "VI should always have 2 performance levels",
- );
-
max_limits = adev->pm.ac_power ?
&(hwmgr->dyn_state.max_clock_voltage_on_ac) :
&(hwmgr->dyn_state.max_clock_voltage_on_dc);
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
index 03bf8f069222..b50fd4a4a3d1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/smu8_hwmgr.c
@@ -1950,9 +1950,12 @@ static void smu8_dpm_powergate_acp(struct pp_hwmgr *hwmgr, bool bgate)
smum_send_msg_to_smc(hwmgr, PPSMC_MSG_ACPPowerON, NULL);
}
+#define WIDTH_4K 3840
+
static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
{
struct smu8_hwmgr *data = hwmgr->backend;
+ struct amdgpu_device *adev = hwmgr->adev;
data->uvd_power_gated = bgate;
@@ -1976,6 +1979,12 @@ static void smu8_dpm_powergate_uvd(struct pp_hwmgr *hwmgr, bool bgate)
smu8_dpm_update_uvd_dpm(hwmgr, false);
}
+	/* enable/disable Low Memory PState for UVD (4K videos) */
+ if (adev->asic_type == CHIP_STONEY &&
+ adev->uvd.decode_image_width >= WIDTH_4K)
+ smu8_nbdpm_pstate_enable_disable(hwmgr,
+ bgate,
+ true);
}
static void smu8_dpm_powergate_vce(struct pp_hwmgr *hwmgr, bool bgate)
@@ -2037,7 +2046,6 @@ static const struct pp_hwmgr_func smu8_hwmgr_funcs = {
.power_state_set = smu8_set_power_state_tasks,
.dynamic_state_management_disable = smu8_disable_dpm_tasks,
.notify_cac_buffer_info = smu8_notify_cac_buffer_info,
- .update_nbdpm_pstate = smu8_nbdpm_pstate_enable_disable,
.get_thermal_temperature_range = smu8_get_thermal_temperature_range,
};
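With the .update_nbdpm_pstate hook dropped from pp_hwmgr_func (see the hwmgr.h hunk further down), the low-memory-PState decision for 4K decode is made entirely inside smu8's UVD powergate path, keyed off adev->uvd.decode_image_width. The predicate the new code implements, restated as a self-contained helper for clarity (illustrative only; the diff open-codes it):

	static inline bool smu8_uvd_needs_low_mem_pstate(struct amdgpu_device *adev)
	{
		/* Only Stoney trips NB DPM changes, and only at 4K UHD width and up. */
		return adev->asic_type == CHIP_STONEY &&
		       adev->uvd.decode_image_width >= WIDTH_4K;
	}

Folding the check into smu8_dpm_powergate_uvd() keeps the toggle in lockstep with UVD gating, instead of relying on a separate caller remembering to invoke the callback.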
diff --git a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
index 3f040be0d158..37324f2009ca 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/hwmgr/vega10_hwmgr.c
@@ -3095,7 +3095,7 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
void *pp_table, uint32_t classification_flag)
{
ATOM_Vega10_GFXCLK_Dependency_Record_V2 *patom_record_V2;
- struct vega10_power_state *vega10_power_state =
+ struct vega10_power_state *vega10_ps =
cast_phw_vega10_power_state(&(power_state->hardware));
struct vega10_performance_level *performance_level;
ATOM_Vega10_State *state_entry = (ATOM_Vega10_State *)state;
@@ -3145,17 +3145,17 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
power_state->temperatures.min = 0;
power_state->temperatures.max = 0;
- performance_level = &(vega10_power_state->performance_levels
- [vega10_power_state->performance_level_count++]);
+ performance_level = &(vega10_ps->performance_levels
+ [vega10_ps->performance_level_count++]);
PP_ASSERT_WITH_CODE(
- (vega10_power_state->performance_level_count <
+ (vega10_ps->performance_level_count <
NUM_GFXCLK_DPM_LEVELS),
"Performance levels exceeds SMC limit!",
return -1);
PP_ASSERT_WITH_CODE(
- (vega10_power_state->performance_level_count <=
+ (vega10_ps->performance_level_count <=
hwmgr->platform_descriptor.
hardwareActivityPerformanceLevels),
"Performance levels exceeds Driver limit!",
@@ -3169,8 +3169,8 @@ static int vega10_get_pp_table_entry_callback_func(struct pp_hwmgr *hwmgr,
performance_level->mem_clock = mclk_dep_table->entries
[state_entry->ucMemClockIndexLow].ulMemClk;
- performance_level = &(vega10_power_state->performance_levels
- [vega10_power_state->performance_level_count++]);
+ performance_level = &(vega10_ps->performance_levels
+ [vega10_ps->performance_level_count++]);
performance_level->soc_clock = socclk_dep_table->entries
[state_entry->ucSocClockIndexHigh].ulClk;
if (gfxclk_dep_table->ucRevId == 0) {
@@ -3201,11 +3201,11 @@ static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
unsigned long entry_index, struct pp_power_state *state)
{
int result;
- struct vega10_power_state *ps;
+ struct vega10_power_state *vega10_ps;
state->hardware.magic = PhwVega10_Magic;
- ps = cast_phw_vega10_power_state(&state->hardware);
+ vega10_ps = cast_phw_vega10_power_state(&state->hardware);
result = vega10_get_powerplay_table_entry(hwmgr, entry_index, state,
vega10_get_pp_table_entry_callback_func);
@@ -3218,10 +3218,10 @@ static int vega10_get_pp_table_entry(struct pp_hwmgr *hwmgr,
*/
/* set DC compatible flag if this state supports DC */
if (!state->validation.disallowOnDC)
- ps->dc_compatible = true;
+ vega10_ps->dc_compatible = true;
- ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
- ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
+ vega10_ps->uvd_clks.vclk = state->uvd_clocks.VCLK;
+ vega10_ps->uvd_clks.dclk = state->uvd_clocks.DCLK;
return 0;
}
@@ -4823,33 +4823,41 @@ static int vega10_check_states_equal(struct pp_hwmgr *hwmgr,
const struct pp_hw_power_state *pstate1,
const struct pp_hw_power_state *pstate2, bool *equal)
{
- const struct vega10_power_state *psa;
- const struct vega10_power_state *psb;
+ const struct vega10_power_state *vega10_psa;
+ const struct vega10_power_state *vega10_psb;
int i;
if (pstate1 == NULL || pstate2 == NULL || equal == NULL)
return -EINVAL;
- psa = cast_const_phw_vega10_power_state(pstate1);
- psb = cast_const_phw_vega10_power_state(pstate2);
- /* If the two states don't even have the same number of performance levels they cannot be the same state. */
- if (psa->performance_level_count != psb->performance_level_count) {
+ vega10_psa = cast_const_phw_vega10_power_state(pstate1);
+ vega10_psb = cast_const_phw_vega10_power_state(pstate2);
+
+	/* If the two states don't even have the same number of performance levels,
+	 * they cannot be the same state.
+ */
+ if (vega10_psa->performance_level_count != vega10_psb->performance_level_count) {
*equal = false;
return 0;
}
- for (i = 0; i < psa->performance_level_count; i++) {
- if (!vega10_are_power_levels_equal(&(psa->performance_levels[i]), &(psb->performance_levels[i]))) {
- /* If we have found even one performance level pair that is different the states are different. */
+ for (i = 0; i < vega10_psa->performance_level_count; i++) {
+ if (!vega10_are_power_levels_equal(&(vega10_psa->performance_levels[i]),
+ &(vega10_psb->performance_levels[i]))) {
+ /* If we have found even one performance level pair
+			 * that is different, the states are different.
+ */
*equal = false;
return 0;
}
}
/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
- *equal = ((psa->uvd_clks.vclk == psb->uvd_clks.vclk) && (psa->uvd_clks.dclk == psb->uvd_clks.dclk));
- *equal &= ((psa->vce_clks.evclk == psb->vce_clks.evclk) && (psa->vce_clks.ecclk == psb->vce_clks.ecclk));
- *equal &= (psa->sclk_threshold == psb->sclk_threshold);
+ *equal = ((vega10_psa->uvd_clks.vclk == vega10_psb->uvd_clks.vclk) &&
+ (vega10_psa->uvd_clks.dclk == vega10_psb->uvd_clks.dclk));
+ *equal &= ((vega10_psa->vce_clks.evclk == vega10_psb->vce_clks.evclk) &&
+ (vega10_psa->vce_clks.ecclk == vega10_psb->vce_clks.ecclk));
+ *equal &= (vega10_psa->sclk_threshold == vega10_psb->sclk_threshold);
return 0;
}
@@ -5444,19 +5452,19 @@ static int vega10_get_performance_level(struct pp_hwmgr *hwmgr, const struct pp_
PHM_PerformanceLevelDesignation designation, uint32_t index,
PHM_PerformanceLevel *level)
{
- const struct vega10_power_state *ps;
+ const struct vega10_power_state *vega10_ps;
uint32_t i;
if (level == NULL || hwmgr == NULL || state == NULL)
return -EINVAL;
- ps = cast_const_phw_vega10_power_state(state);
+ vega10_ps = cast_const_phw_vega10_power_state(state);
- i = index > ps->performance_level_count - 1 ?
- ps->performance_level_count - 1 : index;
+ i = index > vega10_ps->performance_level_count - 1 ?
+ vega10_ps->performance_level_count - 1 : index;
- level->coreClock = ps->performance_levels[i].gfx_clock;
- level->memory_clock = ps->performance_levels[i].mem_clock;
+ level->coreClock = vega10_ps->performance_levels[i].gfx_clock;
+ level->memory_clock = vega10_ps->performance_levels[i].mem_clock;
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h b/drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h
index fe3665965416..fe3665965416 100644
--- a/drivers/gpu/drm/amd/pm/inc/amd_powerplay.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/amd_powerplay.h
diff --git a/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h
index 9b698780aed8..9b698780aed8 100644
--- a/drivers/gpu/drm/amd/pm/inc/cz_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/cz_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h
index 7ae494569a60..7ae494569a60 100644
--- a/drivers/gpu/drm/amd/pm/inc/fiji_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/fiji_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
index 6e0be6027705..6e0be6027705 100644
--- a/drivers/gpu/drm/amd/pm/inc/hardwaremanager.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hardwaremanager.h
diff --git a/drivers/gpu/drm/amd/pm/inc/hwmgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
index 8ed01071fe5a..4f7f2f455301 100644
--- a/drivers/gpu/drm/amd/pm/inc/hwmgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/hwmgr.h
@@ -331,9 +331,6 @@ struct pp_hwmgr_func {
uint32_t mc_addr_low,
uint32_t mc_addr_hi,
uint32_t size);
- int (*update_nbdpm_pstate)(struct pp_hwmgr *hwmgr,
- bool enable,
- bool lock);
int (*get_thermal_temperature_range)(struct pp_hwmgr *hwmgr,
struct PP_TemperatureRange *range);
int (*get_power_profile_mode)(struct pp_hwmgr *hwmgr, char *buf);
@@ -751,7 +748,6 @@ struct pp_hwmgr {
bool not_vf;
bool pm_en;
bool pp_one_vf;
- struct mutex smu_lock;
struct mutex msg_lock;
uint32_t pp_table_version;
diff --git a/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h b/drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h
index 6a53b7e74ccd..6a53b7e74ccd 100644
--- a/drivers/gpu/drm/amd/pm/inc/polaris10_pwrvirus.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/polaris10_pwrvirus.h
diff --git a/drivers/gpu/drm/amd/pm/inc/power_state.h b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
index a5f2227a3971..a5f2227a3971 100644
--- a/drivers/gpu/drm/amd/pm/inc/power_state.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/power_state.h
diff --git a/drivers/gpu/drm/amd/pm/inc/pp_debug.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h
index cea65093b6ad..cea65093b6ad 100644
--- a/drivers/gpu/drm/amd/pm/inc/pp_debug.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_debug.h
diff --git a/drivers/gpu/drm/amd/pm/inc/pp_endian.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h
index f49d1963fe85..f49d1963fe85 100644
--- a/drivers/gpu/drm/amd/pm/inc/pp_endian.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_endian.h
diff --git a/drivers/gpu/drm/amd/pm/inc/pp_thermal.h b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h
index f7c41185097e..f7c41185097e 100644
--- a/drivers/gpu/drm/amd/pm/inc/pp_thermal.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/pp_thermal.h
diff --git a/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h b/drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h
index c067e0925b6b..c067e0925b6b 100644
--- a/drivers/gpu/drm/amd/pm/inc/ppinterrupt.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/ppinterrupt.h
diff --git a/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h
index 171f12b82716..171f12b82716 100644
--- a/drivers/gpu/drm/amd/pm/inc/rv_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/rv_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu10.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h
index 9e837a5014c5..9e837a5014c5 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu10.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h
index c498158771cc..c498158771cc 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu10_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu10_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
index fdc6b7a57bc9..fdc6b7a57bc9 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu11_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h
index e14072d45918..e14072d45918 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu71.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h
index 71c9b2d28640..71c9b2d28640 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu71.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu71_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h
index c0e3936d5c2e..c0e3936d5c2e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu71_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu71_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu72.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h
index 9ad1cefff79f..9ad1cefff79f 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu72.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu72.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu72_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h
index 2aefbb85f620..2aefbb85f620 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu72_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu72_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu73.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h
index c6b12a4c00db..c6b12a4c00db 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu73.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu73_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h
index 5916be08a7fe..5916be08a7fe 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu73_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu73_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu74.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h
index fd10a9fa843d..fd10a9fa843d 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu74.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu74.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu74_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h
index 350889e408d2..350889e408d2 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu74_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu74_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu75.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h
index 771523001533..771523001533 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu75.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu75_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h
index b64e58a22ddf..b64e58a22ddf 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu75_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu75_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7_common.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h
index 94bf7b649c20..94bf7b649c20 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7_common.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_common.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7_discrete.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h
index ee876745dd12..ee876745dd12 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7_discrete.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_discrete.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7_fusion.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h
index 78ada9ffd508..78ada9ffd508 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7_fusion.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_fusion.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h
index a0a38b8a4b1b..a0a38b8a4b1b 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu7_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu8.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h
index d758d07b6a31..d758d07b6a31 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu8.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu8.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu8_fusion.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h
index 0c37c94e9414..0c37c94e9414 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu8_fusion.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu8_fusion.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu9.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h
index 70ac4d477be2..70ac4d477be2 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu9.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
index 2818c98ff5ca..2818c98ff5ca 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu9_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h
index eb0f79f9c876..701aae598b58 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_cz.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_cz.h
@@ -121,7 +121,7 @@ typedef struct SMU_Task SMU_Task;
struct TOC {
uint8_t JobList[NUM_JOBLIST_ENTRIES];
- SMU_Task tasks[1];
+ SMU_Task tasks[];
};
// META DATA COMMAND Definitions
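The tasks[1] to tasks[] change converts a pre-C99 one-element-array trailer into a proper C99 flexible array member, so the compiler and fortified memcpy helpers can see the real bounds. One practical consequence: sizeof(struct TOC) no longer includes any task entry, so an allocation must add the trailer explicitly. A hedged usage sketch with struct_size() from <linux/overflow.h> (hypothetical: the actual TOC in this driver lives in an SMU-owned buffer rather than being allocated like this):

	/* Overflow-checked size for a TOC carrying num_tasks entries. */
	struct TOC *toc = kzalloc(struct_size(toc, tasks, num_tasks), GFP_KERNEL);
	if (!toc)
		return -ENOMEM;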
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h
index 880152c0f775..880152c0f775 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_ucode_xfer_vi.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smu_ucode_xfer_vi.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smumgr.h b/drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h
index 5f46f1a4f38e..5f46f1a4f38e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smumgr.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/smumgr.h
diff --git a/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h
index 63631296d751..63631296d751 100644
--- a/drivers/gpu/drm/amd/pm/inc/tonga_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/tonga_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h
index 715b5a168831..715b5a168831 100644
--- a/drivers/gpu/drm/amd/pm/inc/vega10_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega10_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
index b6ffd08784e7..b6ffd08784e7 100644
--- a/drivers/gpu/drm/amd/pm/inc/vega12/smu9_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12/smu9_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h
index f985c78d746a..f985c78d746a 100644
--- a/drivers/gpu/drm/amd/pm/inc/vega12_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega12_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h b/drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h
index 0c66f0fe1aaf..0c66f0fe1aaf 100644
--- a/drivers/gpu/drm/amd/pm/inc/vega20_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/powerplay/inc/vega20_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
index 93a1c7248e26..5ca3c422f7d4 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/ci_smumgr.c
@@ -208,6 +208,7 @@ static int ci_read_smc_sram_dword(struct pp_hwmgr *hwmgr, uint32_t smc_addr,
static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int ret;
cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
@@ -218,7 +219,8 @@ static int ci_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret != 1)
- pr_info("\n failed to send message %x ret is %d\n", msg, ret);
+		dev_info(adev->dev,
+			"failed to send message %x ret is %d\n", msg, ret);
return 0;
}
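The pr_* to dev_* conversions here and in the smumgr files below are not cosmetic: dev_* prefixes each message with the driver name and bus address of the failing GPU, which is the only way to attribute SMU errors on multi-GPU systems. Minimal contrast (output prefix shown per the usual dev_printk format; message text illustrative):

	pr_info("failed to send message %x\n", msg);             /* bare message, no device context */
	dev_info(adev->dev, "failed to send message %x\n", msg); /* "amdgpu 0000:0b:00.0: failed to send message ..." */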
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
index 47b34c6ca924..88a5641465dc 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu10_smumgr.c
@@ -87,7 +87,7 @@ static int smu10_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
smu10_send_msg_to_smc_without_waiting(hwmgr, msg);
if (smu10_wait_for_response(hwmgr) == 0)
- printk("Failed to send Message %x.\n", msg);
+ dev_err(adev->dev, "Failed to send Message %x.\n", msg);
return 0;
}
@@ -108,7 +108,7 @@ static int smu10_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
if (smu10_wait_for_response(hwmgr) == 0)
- printk("Failed to send Message %x.\n", msg);
+ dev_err(adev->dev, "Failed to send Message %x.\n", msg);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
index aae25243eb10..5a010cd38303 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu7_smumgr.c
@@ -165,6 +165,7 @@ bool smu7_is_smc_ram_running(struct pp_hwmgr *hwmgr)
int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
{
+ struct amdgpu_device *adev = hwmgr->adev;
int ret;
PHM_WAIT_FIELD_UNEQUAL(hwmgr, SMC_RESP_0, SMC_RESP, 0);
@@ -172,9 +173,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret == 0xFE)
- pr_debug("last message was not supported\n");
+ dev_dbg(adev->dev, "last message was not supported\n");
else if (ret != 1)
- pr_info("\n last message was failed ret is %d\n", ret);
+		dev_info(adev->dev,
+			"last message failed, ret is %d\n", ret);
cgs_write_register(hwmgr->device, mmSMC_RESP_0, 0);
cgs_write_register(hwmgr->device, mmSMC_MESSAGE_0, msg);
@@ -184,9 +186,10 @@ int smu7_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = PHM_READ_FIELD(hwmgr->device, SMC_RESP_0, SMC_RESP);
if (ret == 0xFE)
- pr_debug("message %x was not supported\n", msg);
+ dev_dbg(adev->dev, "message %x was not supported\n", msg);
else if (ret != 1)
- pr_info("\n failed to send message %x ret is %d \n", msg, ret);
+		dev_dbg(adev->dev,
+			"failed to send message %x ret is %d\n", msg, ret);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
index 23e5de3c4ec1..8c9bf4940dc1 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/smu9_smumgr.c
@@ -126,7 +126,7 @@ int smu9_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = smu9_wait_for_response(hwmgr);
if (ret != 1)
- pr_err("Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
+ dev_err(adev->dev, "Failed to send message: 0x%x, ret value: 0x%x\n", msg, ret);
return 0;
}
diff --git a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
index 741fbc87467f..a5c95b180672 100644
--- a/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
+++ b/drivers/gpu/drm/amd/pm/powerplay/smumgr/vega20_smumgr.c
@@ -115,7 +115,7 @@ static int vega20_send_msg_to_smc(struct pp_hwmgr *hwmgr, uint16_t msg)
ret = vega20_wait_for_response(hwmgr);
if (ret != PPSMC_Result_OK)
- pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
+ dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret);
return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}
@@ -143,7 +143,7 @@ static int vega20_send_msg_to_smc_with_parameter(struct pp_hwmgr *hwmgr,
ret = vega20_wait_for_response(hwmgr);
if (ret != PPSMC_Result_OK)
- pr_err("Failed to send message 0x%x, response 0x%x\n", msg, ret);
+ dev_err(adev->dev, "Failed to send message 0x%x, response 0x%x\n", msg, ret);
return (ret == PPSMC_Result_OK) ? 0 : -EIO;
}
@@ -520,7 +520,7 @@ static int vega20_smu_init(struct pp_hwmgr *hwmgr)
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].version = 0x01;
priv->smu_tables.entry[TABLE_ACTIVITY_MONITOR_COEFF].size = sizeof(DpmActivityMonitorCoeffInt_t);
- ret = smu_v11_0_i2c_control_init(&adev->pm.smu_i2c);
+ ret = smu_v11_0_i2c_control_init(adev);
if (ret)
goto err4;
@@ -558,7 +558,7 @@ static int vega20_smu_fini(struct pp_hwmgr *hwmgr)
(struct vega20_smumgr *)(hwmgr->smu_backend);
struct amdgpu_device *adev = hwmgr->adev;
- smu_v11_0_i2c_control_fini(&adev->pm.smu_i2c);
+ smu_v11_0_i2c_control_fini(adev);
if (priv) {
amdgpu_bo_free_kernel(&priv->smu_tables.entry[TABLE_PPTABLE].handle,
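smu_v11_0_i2c_control_init()/fini() now take the amdgpu device instead of a single i2c_adapter pointer; the assumed rationale is that the adapter state already lives inside adev, so the helper can locate and register whatever buses the ASIC exposes without the caller naming one. Prototypes only, matching the changed call sites above (the bodies are outside this diff; the void return on fini is an assumption from the call site, which ignores any result):

	int smu_v11_0_i2c_control_init(struct amdgpu_device *adev);
	void smu_v11_0_i2c_control_fini(struct amdgpu_device *adev);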
diff --git a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
index d93d28c1af95..f1544755d8b4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/amdgpu_smu.c
@@ -37,6 +37,7 @@
#include "aldebaran_ppt.h"
#include "yellow_carp_ppt.h"
#include "cyan_skillfish_ppt.h"
+#include "smu_v13_0_5_ppt.h"
#include "amd_pcie.h"
/*
@@ -55,11 +56,10 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
uint32_t mask);
static int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed);
+ enum amd_pp_task task_id);
static int smu_reset(struct smu_context *smu);
static int smu_set_fan_speed_pwm(void *handle, u32 speed);
-static int smu_set_fan_control_mode(struct smu_context *smu, int value);
+static int smu_set_fan_control_mode(void *handle, u32 value);
static int smu_set_power_limit(void *handle, uint32_t limit);
static int smu_set_fan_speed_rpm(void *handle, uint32_t speed);
static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled);
@@ -68,49 +68,32 @@ static int smu_sys_get_pp_feature_mask(void *handle,
char *buf)
{
struct smu_context *smu = handle;
- int size = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- size = smu_get_pp_feature_mask(smu, buf);
-
- mutex_unlock(&smu->mutex);
-
- return size;
+ return smu_get_pp_feature_mask(smu, buf);
}
static int smu_sys_set_pp_feature_mask(void *handle,
uint64_t new_mask)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_pp_feature_mask(smu, new_mask);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_pp_feature_mask(smu, new_mask);
}
-int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value)
+int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value)
{
- int ret = 0;
- struct smu_context *smu = &adev->smu;
+ if (!smu->ppt_funcs->get_gfx_off_status)
+ return -EINVAL;
- if (is_support_sw_smu(adev) && smu->ppt_funcs->get_gfx_off_status)
- *value = smu_get_gfx_off_status(smu);
- else
- ret = -EINVAL;
+ *value = smu_get_gfx_off_status(smu);
- return ret;
+ return 0;
}
int smu_set_soft_freq_range(struct smu_context *smu,
@@ -120,16 +103,12 @@ int smu_set_soft_freq_range(struct smu_context *smu,
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_soft_freq_limited_range)
ret = smu->ppt_funcs->set_soft_freq_limited_range(smu,
clk_type,
min,
max);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -138,21 +117,17 @@ int smu_get_dpm_freq_range(struct smu_context *smu,
uint32_t *min,
uint32_t *max)
{
- int ret = 0;
+ int ret = -ENOTSUPP;
if (!min && !max)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_dpm_ultimate_freq)
ret = smu->ppt_funcs->get_dpm_ultimate_freq(smu,
clk_type,
min,
max);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -184,8 +159,8 @@ static u32 smu_get_sclk(void *handle, bool low)
return clk_freq * 100;
}
-static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
- bool enable)
+static int smu_dpm_set_vcn_enable(struct smu_context *smu,
+ bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -204,24 +179,8 @@ static int smu_dpm_set_vcn_enable_locked(struct smu_context *smu,
return ret;
}
-static int smu_dpm_set_vcn_enable(struct smu_context *smu,
- bool enable)
-{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
- int ret = 0;
-
- mutex_lock(&power_gate->vcn_gate_lock);
-
- ret = smu_dpm_set_vcn_enable_locked(smu, enable);
-
- mutex_unlock(&power_gate->vcn_gate_lock);
-
- return ret;
-}
-
-static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
- bool enable)
+static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
+ bool enable)
{
struct smu_power_context *smu_power = &smu->smu_power;
struct smu_power_gate *power_gate = &smu_power->power_gate;
@@ -240,22 +199,6 @@ static int smu_dpm_set_jpeg_enable_locked(struct smu_context *smu,
return ret;
}
-static int smu_dpm_set_jpeg_enable(struct smu_context *smu,
- bool enable)
-{
- struct smu_power_context *smu_power = &smu->smu_power;
- struct smu_power_gate *power_gate = &smu_power->power_gate;
- int ret = 0;
-
- mutex_lock(&power_gate->jpeg_gate_lock);
-
- ret = smu_dpm_set_jpeg_enable_locked(smu, enable);
-
- mutex_unlock(&power_gate->jpeg_gate_lock);
-
- return ret;
-}
-
/**
* smu_dpm_set_power_gate - power gate/ungate the specific IP block
*
@@ -410,7 +353,7 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
if (smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_MANUAL ||
smu->user_dpm_profile.fan_mode == AMD_FAN_CTRL_NONE) {
ret = smu_set_fan_control_mode(smu, smu->user_dpm_profile.fan_mode);
- if (ret) {
+ if (ret != -EOPNOTSUPP) {
smu->user_dpm_profile.fan_speed_pwm = 0;
smu->user_dpm_profile.fan_speed_rpm = 0;
smu->user_dpm_profile.fan_mode = AMD_FAN_CTRL_AUTO;
@@ -419,13 +362,13 @@ static void smu_restore_dpm_user_profile(struct smu_context *smu)
if (smu->user_dpm_profile.fan_speed_pwm) {
ret = smu_set_fan_speed_pwm(smu, smu->user_dpm_profile.fan_speed_pwm);
- if (ret)
+ if (ret != -EOPNOTSUPP)
dev_err(smu->adev->dev, "Failed to set manual fan speed in pwm\n");
}
if (smu->user_dpm_profile.fan_speed_rpm) {
ret = smu_set_fan_speed_rpm(smu, smu->user_dpm_profile.fan_speed_rpm);
- if (ret)
+ if (ret != -EOPNOTSUPP)
dev_err(smu->adev->dev, "Failed to set manual fan speed in rpm\n");
}
}
@@ -471,10 +414,7 @@ bool is_support_sw_smu(struct amdgpu_device *adev)
bool is_support_cclk_dpm(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
-
- if (!is_support_sw_smu(adev))
- return false;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (!smu_feature_is_enabled(smu, SMU_FEATURE_CCLK_DPM_BIT))
return false;
@@ -488,7 +428,6 @@ static int smu_sys_get_pp_table(void *handle,
{
struct smu_context *smu = handle;
struct smu_table_context *smu_table = &smu->smu_table;
- uint32_t powerplay_table_size;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -496,18 +435,12 @@ static int smu_sys_get_pp_table(void *handle,
if (!smu_table->power_play_table && !smu_table->hardcode_pptable)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (smu_table->hardcode_pptable)
*table = smu_table->hardcode_pptable;
else
*table = smu_table->power_play_table;
- powerplay_table_size = smu_table->power_play_table_size;
-
- mutex_unlock(&smu->mutex);
-
- return powerplay_table_size;
+ return smu_table->power_play_table_size;
}
static int smu_sys_set_pp_table(void *handle,
@@ -527,12 +460,10 @@ static int smu_sys_set_pp_table(void *handle,
return -EIO;
}
- mutex_lock(&smu->mutex);
- if (!smu_table->hardcode_pptable)
- smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
if (!smu_table->hardcode_pptable) {
- ret = -ENOMEM;
- goto failed;
+ smu_table->hardcode_pptable = kzalloc(size, GFP_KERNEL);
+ if (!smu_table->hardcode_pptable)
+ return -ENOMEM;
}
memcpy(smu_table->hardcode_pptable, buf, size);
@@ -551,8 +482,6 @@ static int smu_sys_set_pp_table(void *handle,
smu->uploading_custom_pp_table = false;
-failed:
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -578,7 +507,7 @@ static int smu_get_driver_allowed_feature_mask(struct smu_context *smu)
static int smu_set_funcs(struct amdgpu_device *adev)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (adev->pm.pp_feature & PP_OVERDRIVE_MASK)
smu->od_enabled = true;
@@ -604,8 +533,12 @@ static int smu_set_funcs(struct amdgpu_device *adev)
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 8):
yellow_carp_set_ppt_funcs(smu);
break;
+ case IP_VERSION(13, 0, 5):
+ smu_v13_0_5_set_ppt_funcs(smu);
+ break;
case IP_VERSION(11, 0, 8):
cyan_skillfish_set_ppt_funcs(smu);
break;
@@ -630,13 +563,15 @@ static int smu_set_funcs(struct amdgpu_device *adev)
static int smu_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu;
+
+ smu = kzalloc(sizeof(struct smu_context), GFP_KERNEL);
+ if (!smu)
+ return -ENOMEM;
smu->adev = adev;
smu->pm_enabled = !!amdgpu_dpm;
smu->is_apu = false;
- mutex_init(&smu->mutex);
- mutex_init(&smu->smu_baco.mutex);
smu->smu_baco.state = SMU_BACO_STATE_EXIT;
smu->smu_baco.platform_support = false;
smu->user_dpm_profile.fan_mode = -1;
@@ -657,40 +592,45 @@ static int smu_set_default_dpm_table(struct smu_context *smu)
if (!smu->ppt_funcs->set_default_dpm_table)
return 0;
- mutex_lock(&power_gate->vcn_gate_lock);
- mutex_lock(&power_gate->jpeg_gate_lock);
-
vcn_gate = atomic_read(&power_gate->vcn_gated);
jpeg_gate = atomic_read(&power_gate->jpeg_gated);
- ret = smu_dpm_set_vcn_enable_locked(smu, true);
+ ret = smu_dpm_set_vcn_enable(smu, true);
if (ret)
- goto err0_out;
+ return ret;
- ret = smu_dpm_set_jpeg_enable_locked(smu, true);
+ ret = smu_dpm_set_jpeg_enable(smu, true);
if (ret)
- goto err1_out;
+ goto err_out;
ret = smu->ppt_funcs->set_default_dpm_table(smu);
if (ret)
dev_err(smu->adev->dev,
"Failed to setup default dpm clock tables!\n");
- smu_dpm_set_jpeg_enable_locked(smu, !jpeg_gate);
-err1_out:
- smu_dpm_set_vcn_enable_locked(smu, !vcn_gate);
-err0_out:
- mutex_unlock(&power_gate->jpeg_gate_lock);
- mutex_unlock(&power_gate->vcn_gate_lock);
-
+ smu_dpm_set_jpeg_enable(smu, !jpeg_gate);
+err_out:
+ smu_dpm_set_vcn_enable(smu, !vcn_gate);
return ret;
}
+static int smu_apply_default_config_table_settings(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
+ ret = smu_get_default_config_table_settings(smu,
+ &adev->pm.config_table);
+ if (ret)
+ return ret;
+
+ return smu_set_config_table(smu, &adev->pm.config_table);
+}
static int smu_late_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret = 0;
smu_set_fine_grain_gfx_freq_parameters(smu);
@@ -736,10 +676,15 @@ static int smu_late_init(void *handle)
smu_get_fan_parameters(smu);
- smu_handle_task(&adev->smu,
+ smu_handle_task(smu,
smu->smu_dpm.dpm_level,
- AMD_PP_TASK_COMPLETE_INIT,
- false);
+ AMD_PP_TASK_COMPLETE_INIT);
+
+ ret = smu_apply_default_config_table_settings(smu);
+ if (ret && (ret != -EOPNOTSUPP)) {
+ dev_err(adev->dev, "Failed to apply default DriverSmuConfig settings!\n");
+ return ret;
+ }
smu_restore_dpm_user_profile(smu);
@@ -964,7 +909,7 @@ static int smu_smc_table_sw_init(struct smu_context *smu)
if (ret)
return ret;
- ret = smu_i2c_init(smu, &smu->adev->pm.smu_i2c);
+ ret = smu_i2c_init(smu);
if (ret)
return ret;
@@ -975,7 +920,7 @@ static int smu_smc_table_sw_fini(struct smu_context *smu)
{
int ret;
- smu_i2c_fini(smu, &smu->adev->pm.smu_i2c);
+ smu_i2c_fini(smu);
smu_free_dummy_read_table(smu);
@@ -1015,29 +960,21 @@ static void smu_interrupt_work_fn(struct work_struct *work)
struct smu_context *smu = container_of(work, struct smu_context,
interrupt_work);
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->interrupt_work)
smu->ppt_funcs->interrupt_work(smu);
-
- mutex_unlock(&smu->mutex);
}
static int smu_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
smu->pool_size = adev->pm.smu_prv_buffer_size;
smu->smu_feature.feature_num = SMU_FEATURE_MAX;
- mutex_init(&smu->smu_feature.mutex);
bitmap_zero(smu->smu_feature.supported, SMU_FEATURE_MAX);
- bitmap_zero(smu->smu_feature.enabled, SMU_FEATURE_MAX);
bitmap_zero(smu->smu_feature.allowed, SMU_FEATURE_MAX);
- mutex_init(&smu->sensor_lock);
- mutex_init(&smu->metrics_lock);
mutex_init(&smu->message_lock);
INIT_WORK(&smu->throttling_logging_work, smu_throttling_logging_work_fn);
@@ -1049,8 +986,6 @@ static int smu_sw_init(void *handle)
atomic_set(&smu->smu_power.power_gate.vcn_gated, 1);
atomic_set(&smu->smu_power.power_gate.jpeg_gated, 1);
- mutex_init(&smu->smu_power.power_gate.vcn_gate_lock);
- mutex_init(&smu->smu_power.power_gate.jpeg_gate_lock);
smu->workload_mask = 1 << smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT];
smu->workload_prority[PP_SMC_POWER_PROFILE_BOOTUP_DEFAULT] = 0;
@@ -1101,7 +1036,7 @@ static int smu_sw_init(void *handle)
static int smu_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
ret = smu_smc_table_sw_fini(smu);
@@ -1144,8 +1079,10 @@ static int smu_get_thermal_temperature_range(struct smu_context *smu)
static int smu_smc_hw_setup(struct smu_context *smu)
{
+ struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
uint32_t pcie_gen = 0, pcie_width = 0;
+ uint64_t features_supported;
int ret = 0;
if (adev->in_suspend && smu_is_dpm_running(smu)) {
@@ -1225,6 +1162,15 @@ static int smu_smc_hw_setup(struct smu_context *smu)
return ret;
}
+ ret = smu_feature_get_enabled_mask(smu, &features_supported);
+ if (ret) {
+ dev_err(adev->dev, "Failed to retrieve supported dpm features!\n");
+ return ret;
+ }
+ bitmap_copy(feature->supported,
+ (unsigned long *)&features_supported,
+ feature->feature_num);
+
if (!smu_is_dpm_running(smu))
dev_info(adev->dev, "dpm has been disabled\n");
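bitmap_copy() above widens the 64-bit mask reported by firmware into the driver's supported-features bitmap. Roughly equivalent open-coded logic, as a userspace sketch on a little-endian LP64 system (not the kernel helper itself):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t fw_mask = 0x800000000000000fULL;   /* as reported by SMU */
            unsigned long supported[2] = { 0, 0 };      /* driver-side bitmap */

            /* open-coded equivalent of bitmap_copy(supported, &fw_mask, 64) */
            for (int bit = 0; bit < 64; bit++)
                    if (fw_mask & (1ULL << bit))
                            supported[bit / (8 * sizeof(long))] |=
                                    1UL << (bit % (8 * sizeof(long)));

            printf("%lx %lx\n", supported[0], supported[1]);
            return 0;
    }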
@@ -1336,7 +1282,7 @@ static int smu_hw_init(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
smu->pm_enabled = false;
@@ -1352,7 +1298,7 @@ static int smu_hw_init(void *handle)
if (smu->is_apu) {
smu_dpm_set_vcn_enable(smu, true);
smu_dpm_set_jpeg_enable(smu, true);
- smu_set_gfx_cgpg(&adev->smu, true);
+ smu_set_gfx_cgpg(smu, true);
}
if (!smu->pm_enabled)
@@ -1437,9 +1383,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 5, 0):
case IP_VERSION(11, 0, 12):
case IP_VERSION(11, 0, 13):
- return smu_disable_all_features_with_exception(smu,
- true,
- SMU_FEATURE_COUNT);
+ return 0;
default:
break;
}
@@ -1455,9 +1399,7 @@ static int smu_disable_dpms(struct smu_context *smu)
case IP_VERSION(11, 0, 0):
case IP_VERSION(11, 0, 5):
case IP_VERSION(11, 0, 9):
- return smu_disable_all_features_with_exception(smu,
- true,
- SMU_FEATURE_BACO_BIT);
+ return 0;
default:
break;
}
@@ -1469,7 +1411,6 @@ static int smu_disable_dpms(struct smu_context *smu)
*/
if (use_baco && smu_feature_is_enabled(smu, SMU_FEATURE_BACO_BIT)) {
ret = smu_disable_all_features_with_exception(smu,
- false,
SMU_FEATURE_BACO_BIT);
if (ret)
dev_err(adev->dev, "Failed to disable smu features except BACO.\n");
@@ -1512,7 +1453,7 @@ static int smu_smc_hw_cleanup(struct smu_context *smu)
static int smu_hw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1531,13 +1472,19 @@ static int smu_hw_fini(void *handle)
return smu_smc_hw_cleanup(smu);
}
+static void smu_late_fini(void *handle)
+{
+ struct amdgpu_device *adev = handle;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+
+ kfree(smu);
+}
+
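With the context now heap-allocated in smu_early_init() and released in the new late_fini hook, the handle lives in adev->powerplay.pp_handle for the whole device lifetime. A toy version of that paired lifecycle, with hypothetical types standing in for amdgpu_device and smu_context:

    #include <stdlib.h>

    struct ctx { int pm_enabled; };
    struct dev { void *pp_handle; };

    static int early_init(struct dev *d)
    {
            struct ctx *c = calloc(1, sizeof(*c));  /* kzalloc() equivalent */
            if (!c)
                    return -1;                      /* -ENOMEM in the kernel */
            d->pp_handle = c;
            return 0;
    }

    static void late_fini(struct dev *d)
    {
            free(d->pp_handle);                     /* kfree() equivalent */
            d->pp_handle = NULL;
    }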
static int smu_reset(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret;
- amdgpu_gfx_off_ctrl(smu->adev, false);
-
ret = smu_hw_fini(adev);
if (ret)
return ret;
@@ -1550,15 +1497,13 @@ static int smu_reset(struct smu_context *smu)
if (ret)
return ret;
- amdgpu_gfx_off_ctrl(smu->adev, true);
-
return 0;
}
static int smu_suspend(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
int ret;
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
@@ -1575,7 +1520,7 @@ static int smu_suspend(void *handle)
smu->watermarks_bitmap &= ~(WATERMARKS_LOADED);
- smu_set_gfx_cgpg(&adev->smu, false);
+ smu_set_gfx_cgpg(smu, false);
return 0;
}
@@ -1584,7 +1529,7 @@ static int smu_resume(void *handle)
{
int ret;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev))
return 0;
@@ -1606,7 +1551,7 @@ static int smu_resume(void *handle)
return ret;
}
- smu_set_gfx_cgpg(&adev->smu, true);
+ smu_set_gfx_cgpg(smu, true);
smu->disable_uclk_switch = 0;
@@ -1630,8 +1575,6 @@ static int smu_display_configuration_change(void *handle,
if (!display_config)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
smu_set_min_dcef_deep_sleep(smu,
display_config->min_dcef_deep_sleep_set_clk / 100);
@@ -1640,8 +1583,6 @@ static int smu_display_configuration_change(void *handle,
num_of_active_display++;
}
- mutex_unlock(&smu->mutex);
-
return 0;
}
@@ -1675,14 +1616,7 @@ static int smu_enable_umd_pstate(void *handle,
/* enter umd pstate, save current level, disable gfx cg*/
if (*level & profile_mode_mask) {
smu_dpm_ctx->saved_dpm_level = smu_dpm_ctx->dpm_level;
- smu_dpm_ctx->enable_umd_pstate = true;
smu_gpo_control(smu, false);
- amdgpu_device_ip_set_powergating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_UNGATE);
- amdgpu_device_ip_set_clockgating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_UNGATE);
smu_gfx_ulv_control(smu, false);
smu_deep_sleep_control(smu, false);
amdgpu_asic_update_umd_stable_pstate(smu->adev, true);
@@ -1692,16 +1626,9 @@ static int smu_enable_umd_pstate(void *handle,
if (!(*level & profile_mode_mask)) {
if (*level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)
*level = smu_dpm_ctx->saved_dpm_level;
- smu_dpm_ctx->enable_umd_pstate = false;
amdgpu_asic_update_umd_stable_pstate(smu->adev, false);
smu_deep_sleep_control(smu, true);
smu_gfx_ulv_control(smu, true);
- amdgpu_device_ip_set_clockgating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_CG_STATE_GATE);
- amdgpu_device_ip_set_powergating_state(smu->adev,
- AMD_IP_BLOCK_TYPE_GFX,
- AMD_PG_STATE_GATE);
smu_gpo_control(smu, true);
}
}
@@ -1778,22 +1705,18 @@ static int smu_adjust_power_state_dynamic(struct smu_context *smu,
static int smu_handle_task(struct smu_context *smu,
enum amd_dpm_forced_level level,
- enum amd_pp_task task_id,
- bool lock_needed)
+ enum amd_pp_task task_id)
{
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- if (lock_needed)
- mutex_lock(&smu->mutex);
-
switch (task_id) {
case AMD_PP_TASK_DISPLAY_CONFIG_CHANGE:
ret = smu_pre_display_config_changed(smu);
if (ret)
- goto out;
+ return ret;
ret = smu_adjust_power_state_dynamic(smu, level, false);
break;
case AMD_PP_TASK_COMPLETE_INIT:
@@ -1804,10 +1727,6 @@ static int smu_handle_task(struct smu_context *smu,
break;
}
-out:
- if (lock_needed)
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -1818,7 +1737,7 @@ static int smu_handle_dpm_task(void *handle,
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
- return smu_handle_task(smu, smu_dpm->dpm_level, task_id, true);
+ return smu_handle_task(smu, smu_dpm->dpm_level, task_id);
}
@@ -1837,8 +1756,6 @@ static int smu_switch_power_profile(void *handle,
if (!(type < PP_SMC_POWER_PROFILE_CUSTOM))
return -EINVAL;
- mutex_lock(&smu->mutex);
-
if (!en) {
smu->workload_mask &= ~(1 << smu->workload_prority[type]);
index = fls(smu->workload_mask);
@@ -1855,8 +1772,6 @@ static int smu_switch_power_profile(void *handle,
smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_PERF_DETERMINISM)
smu_bump_power_profile_mode(smu, &workload, 0);
- mutex_unlock(&smu->mutex);
-
return 0;
}
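smu_switch_power_profile() keeps one bit per workload type and resolves the active profile as the highest-priority set bit via fls(). A self-contained sketch of that bookkeeping (priorities made up for illustration):

    #include <stdio.h>

    static int fls32(unsigned int x)        /* like the kernel's fls() */
    {
            return x ? 32 - __builtin_clz(x) : 0;
    }

    int main(void)
    {
            unsigned int mask = 0;

            mask |= 1u << 3;            /* enable a high-priority profile */
            mask |= 1u << 1;            /* enable a lower-priority one    */
            printf("active slot: %d\n", fls32(mask) - 1);   /* -> 3 */

            mask &= ~(1u << 3);         /* that profile disabled again */
            printf("active slot: %d\n", fls32(mask) - 1);   /* -> 1 */
            return 0;
    }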
@@ -1864,7 +1779,6 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
{
struct smu_context *smu = handle;
struct smu_dpm_context *smu_dpm_ctx = &(smu->smu_dpm);
- enum amd_dpm_forced_level level;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -1872,11 +1786,7 @@ static enum amd_dpm_forced_level smu_get_performance_level(void *handle)
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
- mutex_lock(&(smu->mutex));
- level = smu_dpm_ctx->dpm_level;
- mutex_unlock(&(smu->mutex));
-
- return level;
+ return smu_dpm_ctx->dpm_level;
}
static int smu_force_performance_level(void *handle,
@@ -1892,19 +1802,12 @@ static int smu_force_performance_level(void *handle,
if (!smu->is_apu && !smu_dpm_ctx->dpm_context)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
ret = smu_enable_umd_pstate(smu, &level);
- if (ret) {
- mutex_unlock(&smu->mutex);
+ if (ret)
return ret;
- }
ret = smu_handle_task(smu, level,
- AMD_PP_TASK_READJUST_POWER_STATE,
- false);
-
- mutex_unlock(&smu->mutex);
+ AMD_PP_TASK_READJUST_POWER_STATE);
/* reset user dpm clock state */
if (!ret && smu_dpm_ctx->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL) {
@@ -1918,16 +1821,11 @@ static int smu_force_performance_level(void *handle,
static int smu_set_display_count(void *handle, uint32_t count)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
- ret = smu_init_display_count(smu, count);
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_init_display_count(smu, count);
}
static int smu_force_smuclk_levels(struct smu_context *smu,
@@ -1945,8 +1843,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
return -EINVAL;
}
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->force_clk_levels) {
ret = smu->ppt_funcs->force_clk_levels(smu, clk_type, mask);
if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
@@ -1955,8 +1851,6 @@ static int smu_force_smuclk_levels(struct smu_context *smu,
}
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2015,14 +1909,10 @@ static int smu_set_mp1_state(void *handle,
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs &&
smu->ppt_funcs->set_mp1_state)
ret = smu->ppt_funcs->set_mp1_state(smu, mp1_state);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2038,14 +1928,10 @@ static int smu_set_df_cstate(void *handle,
if (!smu->ppt_funcs || !smu->ppt_funcs->set_df_cstate)
return 0;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->set_df_cstate(smu, state);
if (ret)
dev_err(smu->adev->dev, "[SetDfCstate] failed!\n");
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2059,38 +1945,25 @@ int smu_allow_xgmi_power_down(struct smu_context *smu, bool en)
if (!smu->ppt_funcs || !smu->ppt_funcs->allow_xgmi_power_down)
return 0;
- mutex_lock(&smu->mutex);
-
ret = smu->ppt_funcs->allow_xgmi_power_down(smu, en);
if (ret)
dev_err(smu->adev->dev, "[AllowXgmiPowerDown] failed!\n");
- mutex_unlock(&smu->mutex);
-
return ret;
}
int smu_write_watermarks_table(struct smu_context *smu)
{
- int ret = 0;
-
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_watermarks_table(smu, NULL);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_watermarks_table(smu, NULL);
}
static int smu_set_watermarks_for_clock_ranges(void *handle,
struct pp_smu_wm_range_sets *clock_ranges)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -2098,13 +1971,7 @@ static int smu_set_watermarks_for_clock_ranges(void *handle,
if (smu->disable_watermark)
return 0;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_watermarks_table(smu, clock_ranges);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_watermarks_table(smu, clock_ranges);
}
int smu_set_ac_dc(struct smu_context *smu)
@@ -2118,14 +1985,12 @@ int smu_set_ac_dc(struct smu_context *smu)
if (smu->dc_controlled_by_gpio)
return 0;
- mutex_lock(&smu->mutex);
ret = smu_set_power_source(smu,
smu->adev->pm.ac_power ? SMU_POWER_SOURCE_AC :
SMU_POWER_SOURCE_DC);
if (ret)
dev_err(smu->adev->dev, "Failed to switch to %s mode!\n",
smu->adev->pm.ac_power ? "AC" : "DC");
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -2138,6 +2003,7 @@ const struct amd_ip_funcs smu_ip_funcs = {
.sw_fini = smu_sw_fini,
.hw_init = smu_hw_init,
.hw_fini = smu_hw_fini,
+ .late_fini = smu_late_fini,
.suspend = smu_suspend,
.resume = smu_resume,
.is_idle = NULL,
@@ -2146,7 +2012,6 @@ const struct amd_ip_funcs smu_ip_funcs = {
.soft_reset = NULL,
.set_clockgating_state = smu_set_clockgating_state,
.set_powergating_state = smu_set_powergating_state,
- .enable_umd_pstate = smu_enable_umd_pstate,
};
const struct amdgpu_ip_block_version smu_v11_0_ip_block =
@@ -2212,13 +2077,9 @@ static int smu_set_gfx_cgpg(struct smu_context *smu, bool enabled)
{
int ret = 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_gfx_cgpg)
ret = smu->ppt_funcs->set_gfx_cgpg(smu, enabled);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2230,21 +2091,21 @@ static int smu_set_fan_speed_rpm(void *handle, uint32_t speed)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->set_fan_speed_rpm)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->set_fan_speed_rpm) {
- ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
- smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
- smu->user_dpm_profile.fan_speed_rpm = speed;
+ if (speed == U32_MAX)
+ return -EINVAL;
- /* Override custom PWM setting as they cannot co-exist */
- smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
- smu->user_dpm_profile.fan_speed_pwm = 0;
- }
- }
+ ret = smu->ppt_funcs->set_fan_speed_rpm(smu, speed);
+ if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_RPM;
+ smu->user_dpm_profile.fan_speed_rpm = speed;
- mutex_unlock(&smu->mutex);
+		/* Override any custom PWM setting, as the two cannot co-exist */
+ smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_PWM;
+ smu->user_dpm_profile.fan_speed_pwm = 0;
+ }
return ret;
}
@@ -2301,8 +2162,6 @@ int smu_get_power_limit(void *handle,
break;
}
- mutex_lock(&smu->mutex);
-
if (limit_type != SMU_DEFAULT_PPT_LIMIT) {
if (smu->ppt_funcs->get_ppt_limit)
ret = smu->ppt_funcs->get_ppt_limit(smu, limit, limit_type, limit_level);
@@ -2336,8 +2195,6 @@ int smu_get_power_limit(void *handle,
}
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2350,21 +2207,16 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
limit &= (1<<24)-1;
if (limit_type != SMU_DEFAULT_PPT_LIMIT)
- if (smu->ppt_funcs->set_power_limit) {
- ret = smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
- goto out;
- }
+ if (smu->ppt_funcs->set_power_limit)
+ return smu->ppt_funcs->set_power_limit(smu, limit_type, limit);
if (limit > smu->max_power_limit) {
dev_err(smu->adev->dev,
"New power limit (%d) is over the max allowed %d\n",
limit, smu->max_power_limit);
- ret = -EINVAL;
- goto out;
+ return -EINVAL;
}
if (!limit)
@@ -2376,9 +2228,6 @@ static int smu_set_power_limit(void *handle, uint32_t limit)
smu->user_dpm_profile.power_limit = limit;
}
-out:
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2389,21 +2238,14 @@ static int smu_print_smuclk_levels(struct smu_context *smu, enum smu_clk_type cl
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->print_clk_levels)
ret = smu->ppt_funcs->print_clk_levels(smu, clk_type, buf);
- mutex_unlock(&smu->mutex);
-
return ret;
}
-static int smu_print_ppclk_levels(void *handle,
- enum pp_clock_type type,
- char *buf)
+static enum smu_clk_type smu_convert_to_smuclk(enum pp_clock_type type)
{
- struct smu_context *smu = handle;
enum smu_clk_type clk_type;
switch (type) {
@@ -2436,12 +2278,45 @@ static int smu_print_ppclk_levels(void *handle,
case OD_CCLK:
clk_type = SMU_OD_CCLK; break;
default:
- return -EINVAL;
+ clk_type = SMU_CLK_COUNT; break;
}
+ return clk_type;
+}
+
+static int smu_print_ppclk_levels(void *handle,
+ enum pp_clock_type type,
+ char *buf)
+{
+ struct smu_context *smu = handle;
+ enum smu_clk_type clk_type;
+
+ clk_type = smu_convert_to_smuclk(type);
+ if (clk_type == SMU_CLK_COUNT)
+ return -EINVAL;
+
return smu_print_smuclk_levels(smu, clk_type, buf);
}
+static int smu_emit_ppclk_levels(void *handle, enum pp_clock_type type, char *buf, int *offset)
+{
+ struct smu_context *smu = handle;
+ enum smu_clk_type clk_type;
+
+ clk_type = smu_convert_to_smuclk(type);
+ if (clk_type == SMU_CLK_COUNT)
+ return -EINVAL;
+
+ if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
+ return -EOPNOTSUPP;
+
+ if (!smu->ppt_funcs->emit_clk_levels)
+ return -ENOENT;
+
+ return smu->ppt_funcs->emit_clk_levels(smu, clk_type, buf, offset);
+}
+
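The new emit_clk_levels() callback appends into a caller-supplied buffer at *offset and advances the offset, which is what lets several clock domains share one sysfs page. A hedged userspace approximation of that calling convention, with sprintf standing in for sysfs_emit_at and all names invented:

    #include <stdio.h>

    static int emit_levels(char *buf, int *offset,
                           const int *mhz, int n, int cur)
    {
            for (int i = 0; i < n; i++)
                    *offset += sprintf(buf + *offset, "%d: %dMhz %s\n",
                                       i, mhz[i], i == cur ? "*" : "");
            return 0;                   /* negative would signal an error */
    }

    int main(void)
    {
            char page[256];
            int off = 0;
            int sclk[] = { 300, 800, 1850 };

            emit_levels(page, &off, sclk, 3, 2);    /* star level 2 */
            fputs(page, stdout);
            return 0;
    }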
static int smu_od_edit_dpm_table(void *handle,
enum PP_OD_DPM_TABLE_COMMAND type,
long *input, uint32_t size)
@@ -2452,14 +2327,10 @@ static int smu_od_edit_dpm_table(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->od_edit_dpm_table) {
ret = smu->ppt_funcs->od_edit_dpm_table(smu, type, input, size);
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2483,8 +2354,6 @@ static int smu_read_sensor(void *handle,
size_val = *size_arg;
size = &size_val;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->read_sensor)
if (!smu->ppt_funcs->read_sensor(smu, sensor, data, size))
goto unlock;
@@ -2499,7 +2368,7 @@ static int smu_read_sensor(void *handle,
*size = 4;
break;
case AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK:
- ret = smu_feature_get_enabled_mask(smu, (uint32_t *)data, 2);
+ ret = smu_feature_get_enabled_mask(smu, (uint64_t *)data);
*size = 8;
break;
case AMDGPU_PP_SENSOR_UVD_POWER:
@@ -2525,8 +2394,6 @@ static int smu_read_sensor(void *handle,
}
unlock:
- mutex_unlock(&smu->mutex);
-
// assign uint32_t to int
*size_arg = size_val;
@@ -2536,7 +2403,6 @@ unlock:
static int smu_get_power_profile_mode(void *handle, char *buf)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->get_power_profile_mode)
@@ -2544,13 +2410,7 @@ static int smu_get_power_profile_mode(void *handle, char *buf)
if (!buf)
return -EINVAL;
- mutex_lock(&smu->mutex);
-
- ret = smu->ppt_funcs->get_power_profile_mode(smu, buf);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu->ppt_funcs->get_power_profile_mode(smu, buf);
}
static int smu_set_power_profile_mode(void *handle,
@@ -2558,76 +2418,66 @@ static int smu_set_power_profile_mode(void *handle,
uint32_t param_size)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled ||
!smu->ppt_funcs->set_power_profile_mode)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- smu_bump_power_profile_mode(smu, param, param_size);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_bump_power_profile_mode(smu, param, param_size);
}
-static u32 smu_get_fan_control_mode(void *handle)
+static int smu_get_fan_control_mode(void *handle, u32 *fan_mode)
{
struct smu_context *smu = handle;
- u32 ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return AMD_FAN_CTRL_NONE;
+ return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->get_fan_control_mode)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->get_fan_control_mode)
- ret = smu->ppt_funcs->get_fan_control_mode(smu);
+ if (!fan_mode)
+ return -EINVAL;
- mutex_unlock(&smu->mutex);
+ *fan_mode = smu->ppt_funcs->get_fan_control_mode(smu);
- return ret;
+ return 0;
}
-static int smu_set_fan_control_mode(struct smu_context *smu, int value)
+static int smu_set_fan_control_mode(void *handle, u32 value)
{
+ struct smu_context *smu = handle;
int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
- return -EOPNOTSUPP;
+ return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->set_fan_control_mode)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->set_fan_control_mode) {
- ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE))
- smu->user_dpm_profile.fan_mode = value;
- }
+ if (value == U32_MAX)
+ return -EINVAL;
- mutex_unlock(&smu->mutex);
+ ret = smu->ppt_funcs->set_fan_control_mode(smu, value);
+ if (ret)
+ goto out;
- /* reset user dpm fan speed */
- if (!ret && value != AMD_FAN_CTRL_MANUAL &&
- !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
- smu->user_dpm_profile.fan_speed_pwm = 0;
- smu->user_dpm_profile.fan_speed_rpm = 0;
- smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
+ if (!(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ smu->user_dpm_profile.fan_mode = value;
+
+ /* reset user dpm fan speed */
+ if (value != AMD_FAN_CTRL_MANUAL) {
+ smu->user_dpm_profile.fan_speed_pwm = 0;
+ smu->user_dpm_profile.fan_speed_rpm = 0;
+ smu->user_dpm_profile.flags &= ~(SMU_CUSTOM_FAN_SPEED_RPM | SMU_CUSTOM_FAN_SPEED_PWM);
+ }
}
+out:
return ret;
}
-static void smu_pp_set_fan_control_mode(void *handle, u32 value)
-{
- struct smu_context *smu = handle;
-
- smu_set_fan_control_mode(smu, value);
-}
-
-
static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
{
struct smu_context *smu = handle;
@@ -2636,12 +2486,13 @@ static int smu_get_fan_speed_pwm(void *handle, u32 *speed)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->get_fan_speed_pwm)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->get_fan_speed_pwm)
- ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
+ if (!speed)
+ return -EINVAL;
- mutex_unlock(&smu->mutex);
+ ret = smu->ppt_funcs->get_fan_speed_pwm(smu, speed);
return ret;
}
@@ -2654,21 +2505,21 @@ static int smu_set_fan_speed_pwm(void *handle, u32 speed)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->set_fan_speed_pwm)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->set_fan_speed_pwm) {
- ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
- if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
- smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
- smu->user_dpm_profile.fan_speed_pwm = speed;
+ if (speed == U32_MAX)
+ return -EINVAL;
- /* Override custom RPM setting as they cannot co-exist */
- smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
- smu->user_dpm_profile.fan_speed_rpm = 0;
- }
- }
+ ret = smu->ppt_funcs->set_fan_speed_pwm(smu, speed);
+ if (!ret && !(smu->user_dpm_profile.flags & SMU_DPM_USER_PROFILE_RESTORE)) {
+ smu->user_dpm_profile.flags |= SMU_CUSTOM_FAN_SPEED_PWM;
+ smu->user_dpm_profile.fan_speed_pwm = speed;
- mutex_unlock(&smu->mutex);
+		/* Override any custom RPM setting, as the two cannot co-exist */
+ smu->user_dpm_profile.flags &= ~SMU_CUSTOM_FAN_SPEED_RPM;
+ smu->user_dpm_profile.fan_speed_rpm = 0;
+ }
return ret;
}
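The fan paths above keep the custom RPM and PWM flags mutually exclusive: storing one clears the other. The same logic in miniature, under hypothetical flag names (compiles standalone; call store_custom_rpm() with the desired speed):

    #define CUSTOM_FAN_RPM 0x1u
    #define CUSTOM_FAN_PWM 0x2u

    struct fan_profile {
            unsigned int flags;
            unsigned int rpm, pwm;
    };

    static void store_custom_rpm(struct fan_profile *p, unsigned int rpm)
    {
            p->flags |= CUSTOM_FAN_RPM;
            p->rpm = rpm;
            p->flags &= ~CUSTOM_FAN_PWM;    /* RPM and PWM cannot co-exist */
            p->pwm = 0;
    }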
@@ -2681,12 +2532,13 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
+ if (!smu->ppt_funcs->get_fan_speed_rpm)
+ return -EOPNOTSUPP;
- if (smu->ppt_funcs->get_fan_speed_rpm)
- ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
+ if (!speed)
+ return -EINVAL;
- mutex_unlock(&smu->mutex);
+ ret = smu->ppt_funcs->get_fan_speed_rpm(smu, speed);
return ret;
}
@@ -2694,18 +2546,11 @@ static int smu_get_fan_speed_rpm(void *handle, uint32_t *speed)
static int smu_set_deep_sleep_dcefclk(void *handle, uint32_t clk)
{
struct smu_context *smu = handle;
- int ret = 0;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- ret = smu_set_min_dcef_deep_sleep(smu, clk);
-
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return smu_set_min_dcef_deep_sleep(smu, clk);
}
static int smu_get_clock_by_type_with_latency(void *handle,
@@ -2719,8 +2564,6 @@ static int smu_get_clock_by_type_with_latency(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_clock_by_type_with_latency) {
switch (type) {
case amd_pp_sys_clock:
@@ -2737,15 +2580,12 @@ static int smu_get_clock_by_type_with_latency(void *handle,
break;
default:
dev_err(smu->adev->dev, "Invalid clock type!\n");
- mutex_unlock(&smu->mutex);
return -EINVAL;
}
ret = smu->ppt_funcs->get_clock_by_type_with_latency(smu, clk_type, clocks);
}
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2758,13 +2598,9 @@ static int smu_display_clock_voltage_request(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->display_clock_voltage_request)
ret = smu->ppt_funcs->display_clock_voltage_request(smu, clock_req);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2778,13 +2614,9 @@ static int smu_display_disable_memory_clock_switch(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->display_disable_memory_clock_switch)
ret = smu->ppt_funcs->display_disable_memory_clock_switch(smu, disable_memory_clock_switch);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2797,13 +2629,9 @@ static int smu_set_xgmi_pstate(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->set_xgmi_pstate)
ret = smu->ppt_funcs->set_xgmi_pstate(smu, pstate);
- mutex_unlock(&smu->mutex);
-
	if (ret)
dev_err(smu->adev->dev, "Failed to set XGMI pstate!\n");
@@ -2813,21 +2641,16 @@ static int smu_set_xgmi_pstate(void *handle,
static int smu_get_baco_capability(void *handle, bool *cap)
{
struct smu_context *smu = handle;
- int ret = 0;
*cap = false;
if (!smu->pm_enabled)
return 0;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->baco_is_support)
*cap = smu->ppt_funcs->baco_is_support(smu);
- mutex_unlock(&smu->mutex);
-
- return ret;
+ return 0;
}
static int smu_baco_set_state(void *handle, int state)
@@ -2839,20 +2662,11 @@ static int smu_baco_set_state(void *handle, int state)
return -EOPNOTSUPP;
if (state == 0) {
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->baco_exit)
ret = smu->ppt_funcs->baco_exit(smu);
-
- mutex_unlock(&smu->mutex);
} else if (state == 1) {
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->baco_enter)
ret = smu->ppt_funcs->baco_enter(smu);
-
- mutex_unlock(&smu->mutex);
-
} else {
return -EINVAL;
}
@@ -2871,13 +2685,9 @@ bool smu_mode1_reset_is_support(struct smu_context *smu)
if (!smu->pm_enabled)
return false;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->mode1_reset_is_support)
ret = smu->ppt_funcs->mode1_reset_is_support(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2888,13 +2698,9 @@ bool smu_mode2_reset_is_support(struct smu_context *smu)
if (!smu->pm_enabled)
return false;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs && smu->ppt_funcs->mode2_reset_is_support)
ret = smu->ppt_funcs->mode2_reset_is_support(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2905,13 +2711,9 @@ int smu_mode1_reset(struct smu_context *smu)
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->mode1_reset)
ret = smu->ppt_funcs->mode1_reset(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2923,13 +2725,9 @@ static int smu_mode2_reset(void *handle)
if (!smu->pm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->mode2_reset)
ret = smu->ppt_funcs->mode2_reset(smu);
- mutex_unlock(&smu->mutex);
-
if (ret)
dev_err(smu->adev->dev, "Mode2 reset failed!\n");
@@ -2945,13 +2743,9 @@ static int smu_get_max_sustainable_clocks_by_dc(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_max_sustainable_clocks_by_dc)
ret = smu->ppt_funcs->get_max_sustainable_clocks_by_dc(smu, max_clocks);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2965,13 +2759,9 @@ static int smu_get_uclk_dpm_states(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_uclk_dpm_states)
ret = smu->ppt_funcs->get_uclk_dpm_states(smu, clock_values_in_khz, num_states);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -2983,13 +2773,9 @@ static enum amd_pm_state_type smu_get_current_power_state(void *handle)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_current_power_state)
pm_state = smu->ppt_funcs->get_current_power_state(smu);
- mutex_unlock(&smu->mutex);
-
return pm_state;
}
@@ -3002,20 +2788,15 @@ static int smu_get_dpm_clock_table(void *handle,
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->get_dpm_clock_table)
ret = smu->ppt_funcs->get_dpm_clock_table(smu, clock_table);
- mutex_unlock(&smu->mutex);
-
return ret;
}
static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
{
struct smu_context *smu = handle;
- ssize_t size;
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
@@ -3023,13 +2804,7 @@ static ssize_t smu_sys_get_gpu_metrics(void *handle, void **table)
if (!smu->ppt_funcs->get_gpu_metrics)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
- size = smu->ppt_funcs->get_gpu_metrics(smu, table);
-
- mutex_unlock(&smu->mutex);
-
- return size;
+ return smu->ppt_funcs->get_gpu_metrics(smu, table);
}
static int smu_enable_mgpu_fan_boost(void *handle)
@@ -3040,13 +2815,9 @@ static int smu_enable_mgpu_fan_boost(void *handle)
if (!smu->pm_enabled || !smu->adev->pm.dpm_enabled)
return -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
-
if (smu->ppt_funcs->enable_mgpu_fan_boost)
ret = smu->ppt_funcs->enable_mgpu_fan_boost(smu);
- mutex_unlock(&smu->mutex);
-
return ret;
}
@@ -3056,10 +2827,8 @@ static int smu_gfx_state_change_set(void *handle,
struct smu_context *smu = handle;
int ret = 0;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs->gfx_state_change_set)
ret = smu->ppt_funcs->gfx_state_change_set(smu, state);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -3068,10 +2837,8 @@ int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable)
{
int ret = 0;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs->smu_handle_passthrough_sbr)
ret = smu->ppt_funcs->smu_handle_passthrough_sbr(smu, enable);
- mutex_unlock(&smu->mutex);
return ret;
}
@@ -3080,11 +2847,9 @@ int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc)
{
int ret = -EOPNOTSUPP;
- mutex_lock(&smu->mutex);
if (smu->ppt_funcs &&
smu->ppt_funcs->get_ecc_info)
ret = smu->ppt_funcs->get_ecc_info(smu, umc_ecc);
- mutex_unlock(&smu->mutex);
return ret;
@@ -3101,24 +2866,23 @@ static int smu_get_prv_buffer_details(void *handle, void **addr, size_t *size)
*addr = NULL;
*size = 0;
- mutex_lock(&smu->mutex);
if (memory_pool->bo) {
*addr = memory_pool->cpu_addr;
*size = memory_pool->size;
}
- mutex_unlock(&smu->mutex);
return 0;
}
static const struct amd_pm_funcs swsmu_pm_funcs = {
/* export for sysfs */
- .set_fan_control_mode = smu_pp_set_fan_control_mode,
+ .set_fan_control_mode = smu_set_fan_control_mode,
.get_fan_control_mode = smu_get_fan_control_mode,
.set_fan_speed_pwm = smu_set_fan_speed_pwm,
.get_fan_speed_pwm = smu_get_fan_speed_pwm,
.force_clock_level = smu_force_ppclk_levels,
.print_clock_levels = smu_print_ppclk_levels,
+ .emit_clock_levels = smu_emit_ppclk_levels,
.force_performance_level = smu_force_performance_level,
.read_sensor = smu_read_sensor,
.get_performance_level = smu_get_performance_level,
@@ -3165,17 +2929,13 @@ static const struct amd_pm_funcs swsmu_pm_funcs = {
.get_smu_prv_buf_details = smu_get_prv_buffer_details,
};
-int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
uint64_t event_arg)
{
int ret = -EINVAL;
- struct smu_context *smu = &adev->smu;
- if (smu->ppt_funcs->wait_for_event) {
- mutex_lock(&smu->mutex);
+ if (smu->ppt_funcs->wait_for_event)
ret = smu->ppt_funcs->wait_for_event(smu, event, event_arg);
- mutex_unlock(&smu->mutex);
- }
return ret;
}
@@ -3203,7 +2963,7 @@ int smu_stb_collect_info(struct smu_context *smu, void *buf, uint32_t size)
static int smu_stb_debugfs_open(struct inode *inode, struct file *filp)
{
struct amdgpu_device *adev = filp->f_inode->i_private;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
unsigned char *buf;
int r;
@@ -3228,7 +2988,7 @@ static ssize_t smu_stb_debugfs_read(struct file *filp, char __user *buf, size_t
loff_t *pos)
{
struct amdgpu_device *adev = filp->f_inode->i_private;
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (!filp->private_data)
@@ -3269,7 +3029,7 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
if (!smu->stb_context.stb_buf_size)
return;
@@ -3281,5 +3041,24 @@ void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev)
&smu_stb_debugfs_fops,
smu->stb_context.stb_buf_size);
#endif
+}
+
+int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size)
+{
+ int ret = 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_pages_num)
+ ret = smu->ppt_funcs->send_hbm_bad_pages_num(smu, size);
+ return ret;
+}
+
+int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size)
+{
+ int ret = 0;
+
+ if (smu->ppt_funcs && smu->ppt_funcs->send_hbm_bad_channel_flag)
+ ret = smu->ppt_funcs->send_hbm_bad_channel_flag(smu, size);
+
+ return ret;
}
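Both HBM helpers above use the same optional-callback guard: if the ASIC's ppt_funcs table does not implement the hook, the call degrades to a no-op success. Distilled into a standalone sketch (hypothetical types):

    struct ppt_ops {
            int (*send_bad_pages)(void *ctx, unsigned int size);
    };

    static int send_bad_pages(const struct ppt_ops *ops, void *ctx,
                              unsigned int size)
    {
            if (ops && ops->send_bad_pages)
                    return ops->send_bad_pages(ctx, size);
            return 0;   /* unsupported on this ASIC: treated as success */
    }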
diff --git a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
index ba7565bc8104..ef57b6089c69 100644
--- a/drivers/gpu/drm/amd/pm/inc/amdgpu_smu.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/amdgpu_smu.h
@@ -241,11 +241,6 @@ struct smu_user_dpm_profile {
uint32_t clk_dependency;
};
-enum smu_event_type {
-
- SMU_EVENT_RESET_COMPLETE = 0,
-};
-
#define SMU_TABLE_INIT(tables, table_id, s, a, d) \
do { \
tables[table_id].size = s; \
@@ -342,6 +337,7 @@ struct smu_table_context
struct smu_bios_boot_up_values boot_values;
void *driver_pptable;
void *ecc_table;
+ void *driver_smu_config_table;
struct smu_table tables[SMU_TABLE_COUNT];
/*
* The driver table is just a staging buffer for
@@ -368,7 +364,6 @@ struct smu_dpm_context {
uint32_t dpm_context_size;
void *dpm_context;
void *golden_dpm_context;
- bool enable_umd_pstate;
enum amd_dpm_forced_level dpm_level;
enum amd_dpm_forced_level saved_dpm_level;
enum amd_dpm_forced_level requested_dpm_level;
@@ -382,8 +377,6 @@ struct smu_power_gate {
bool vce_gated;
atomic_t vcn_gated;
atomic_t jpeg_gated;
- struct mutex vcn_gate_lock;
- struct mutex jpeg_gate_lock;
};
struct smu_power_context {
@@ -398,8 +391,6 @@ struct smu_feature
uint32_t feature_num;
DECLARE_BITMAP(supported, SMU_FEATURE_MAX);
DECLARE_BITMAP(allowed, SMU_FEATURE_MAX);
- DECLARE_BITMAP(enabled, SMU_FEATURE_MAX);
- struct mutex mutex;
};
struct smu_clocks {
@@ -436,7 +427,6 @@ enum smu_baco_state
struct smu_baco_context
{
- struct mutex mutex;
uint32_t state;
bool platform_support;
};
@@ -494,9 +484,6 @@ struct smu_context
const struct cmn2asic_mapping *table_map;
const struct cmn2asic_mapping *pwr_src_map;
const struct cmn2asic_mapping *workload_map;
- struct mutex mutex;
- struct mutex sensor_lock;
- struct mutex metrics_lock;
struct mutex message_lock;
uint64_t pool_size;
@@ -618,10 +605,24 @@ struct pptable_funcs {
* to buffer. Star current level.
*
* Used for sysfs interfaces.
+ * Return: Number of characters written to the buffer
*/
int (*print_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf);
/**
+ * @emit_clk_levels: Print DPM clock levels for a clock domain
+ * to buffer using sysfs_emit_at. Star current level.
+ *
+ * Used for sysfs interfaces.
+ * &buf: sysfs buffer
+ * &offset: offset within buffer to start printing, which is updated by the
+ * function.
+ *
+	 * Return: 0 on success or a negative error code on failure.
+ */
+ int (*emit_clk_levels)(struct smu_context *smu, enum smu_clk_type clk_type, char *buf, int *offset);
+
+ /**
* @force_clk_levels: Set a range of allowed DPM levels for a clock
* domain.
* &clk_type: Clock domain.
@@ -829,12 +830,12 @@ struct pptable_funcs {
* other devices. The i2c's EEPROM also stores bad page tables on boards
* with ECC.
*/
- int (*i2c_init)(struct smu_context *smu, struct i2c_adapter *control);
+ int (*i2c_init)(struct smu_context *smu);
/**
* @i2c_fini: Tear down i2c.
*/
- void (*i2c_fini)(struct smu_context *smu, struct i2c_adapter *control);
+ void (*i2c_fini)(struct smu_context *smu);
/**
* @get_unique_id: Get the GPU's unique id. Used for asset tracking.
@@ -988,10 +989,9 @@ struct pptable_funcs {
/**
* @get_enabled_mask: Get a mask of features that are currently enabled
* on the SMU.
- * &feature_mask: Array representing enabled feature mask.
- * &num: Elements in &feature_mask.
+ * &feature_mask: Enabled feature mask.
*/
- int (*get_enabled_mask)(struct smu_context *smu, uint32_t *feature_mask, uint32_t num);
+ int (*get_enabled_mask)(struct smu_context *smu, uint64_t *feature_mask);
/**
* @feature_is_enabled: Test if a feature is enabled.
@@ -1005,7 +1005,6 @@ struct pptable_funcs {
* exception to those in &mask.
*/
int (*disable_all_features_with_exception)(struct smu_context *smu,
- bool no_hw_disablement,
enum smu_feature_mask mask);
/**
@@ -1283,6 +1282,22 @@ struct pptable_funcs {
* @stb_collect_info: Collects Smart Trace Buffers data.
*/
int (*stb_collect_info)(struct smu_context *smu, void *buf, uint32_t size);
+
+ /**
+ * @get_default_config_table_settings: Get the ASIC default DriverSmuConfig table settings.
+ */
+ int (*get_default_config_table_settings)(struct smu_context *smu, struct config_table_setting *table);
+
+ /**
+ * @set_config_table: Apply the input DriverSmuConfig table settings.
+ */
+ int (*set_config_table)(struct smu_context *smu, struct config_table_setting *table);
+
+ /**
+	 * @send_hbm_bad_channel_flag: message the SMU to update bad channel info
+	 * in the SMUBUS table.
+ */
+ int (*send_hbm_bad_channel_flag)(struct smu_context *smu, uint32_t size);
};
typedef enum {
@@ -1395,10 +1410,6 @@ int smu_mode1_reset(struct smu_context *smu);
extern const struct amd_ip_funcs smu_ip_funcs;
-extern const struct amdgpu_ip_block_version smu_v11_0_ip_block;
-extern const struct amdgpu_ip_block_version smu_v12_0_ip_block;
-extern const struct amdgpu_ip_block_version smu_v13_0_ip_block;
-
bool is_support_sw_smu(struct amdgpu_device *adev);
bool is_support_cclk_dpm(struct amdgpu_device *adev);
int smu_write_watermarks_table(struct smu_context *smu);
@@ -1413,15 +1424,16 @@ int smu_set_ac_dc(struct smu_context *smu);
int smu_allow_xgmi_power_down(struct smu_context *smu, bool en);
-int smu_get_status_gfxoff(struct amdgpu_device *adev, uint32_t *value);
+int smu_get_status_gfxoff(struct smu_context *smu, uint32_t *value);
int smu_handle_passthrough_sbr(struct smu_context *smu, bool enable);
-int smu_wait_for_event(struct amdgpu_device *adev, enum smu_event_type event,
+int smu_wait_for_event(struct smu_context *smu, enum smu_event_type event,
uint64_t event_arg);
int smu_get_ecc_info(struct smu_context *smu, void *umc_ecc);
int smu_stb_collect_info(struct smu_context *smu, void *buff, uint32_t size);
void amdgpu_smu_stb_debug_fs_init(struct amdgpu_device *adev);
-
+int smu_send_hbm_bad_pages_num(struct smu_context *smu, uint32_t size);
+int smu_send_hbm_bad_channel_flag(struct smu_context *smu, uint32_t size);
#endif
#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h
index ab66a4b9e438..0f498baf6838 100644
--- a/drivers/gpu/drm/amd/pm/inc/aldebaran_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/aldebaran_ppsmc.h
@@ -103,7 +103,8 @@
#define PPSMC_MSG_GfxDriverResetRecovery 0x42
#define PPSMC_MSG_BoardPowerCalibration 0x43
#define PPSMC_MSG_HeavySBR 0x45
-#define PPSMC_Message_Count 0x46
+#define PPSMC_MSG_SetBadHBMPagesRetiredFlagsPerChannel 0x46
+#define PPSMC_Message_Count 0x47
//PPSMC Reset Types
diff --git a/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
index 45f5d29bc705..45f5d29bc705 100644
--- a/drivers/gpu/drm/amd/pm/inc/arcturus_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/arcturus_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
index 43d43d6addc0..43d43d6addc0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_arcturus.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_arcturus.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h
index 4884a4e1f261..4884a4e1f261 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_cyan_skillfish.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_cyan_skillfish.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
index 04752ade1016..04752ade1016 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_navi10.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_navi10.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
index 63b8701fd466..3e4a314ef925 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_sienna_cichlid.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_sienna_cichlid.h
@@ -27,7 +27,7 @@
// *** IMPORTANT ***
// SMU TEAM: Always increment the interface version if
// any structure is changed in this file
-#define SMU11_DRIVER_IF_VERSION 0x3B
+#define SMU11_DRIVER_IF_VERSION 0x40
#define PPTABLE_Sienna_Cichlid_SMU_VERSION 7
@@ -172,6 +172,7 @@ typedef enum {
#define DPM_OVERRIDE_DISABLE_FAST_FCLK_TIMER 0x00001000
#define DPM_OVERRIDE_DISABLE_VCN_PG 0x00002000
#define DPM_OVERRIDE_DISABLE_FMAX_VMAX 0x00004000
+#define DPM_OVERRIDE_ENABLE_eGPU_USB_WA 0x00008000
// VR Mapping Bit Defines
#define VR_MAPPING_VR_SELECT_MASK 0x01
@@ -263,7 +264,22 @@ typedef enum {
#define LED_DISPLAY_ERROR_BIT 2
//RLC Pace Table total number of levels
-#define RLC_PACE_TABLE_NUM_LEVELS 16
+#define RLC_PACE_TABLE_NUM_LEVELS 16
+#define SIENNA_CICHLID_UMC_CHANNEL_NUM 16
+
+typedef struct {
+ uint64_t mca_umc_status;
+ uint64_t mca_umc_addr;
+
+ uint16_t ce_count_lo_chip;
+ uint16_t ce_count_hi_chip;
+
+ uint32_t eccPadding;
+} EccInfo_t;
+
+typedef struct {
+ EccInfo_t EccInfo[SIENNA_CICHLID_UMC_CHANNEL_NUM];
+} EccInfoTable_t;
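A consumer of the new ECC table would iterate the per-channel entries; here is a hedged sketch that only prints the raw fields, assuming the header above is in scope and making no assumptions about the MCA status encoding:

    #include <inttypes.h>
    #include <stdio.h>
    #include "smu11_driver_if_sienna_cichlid.h"   /* EccInfoTable_t above */

    static void dump_ecc(const EccInfoTable_t *t)
    {
            for (int i = 0; i < SIENNA_CICHLID_UMC_CHANNEL_NUM; i++)
                    printf("chan %2d: status=%" PRIx64 " addr=%" PRIx64
                           " ce_lo=%u ce_hi=%u\n",
                           i,
                           t->EccInfo[i].mca_umc_status,
                           t->EccInfo[i].mca_umc_addr,
                           t->EccInfo[i].ce_count_lo_chip,
                           t->EccInfo[i].ce_count_hi_chip);
    }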
typedef enum {
DRAM_BIT_WIDTH_DISABLED = 0,
@@ -283,6 +299,7 @@ typedef enum {
#define MAX_SW_I2C_COMMANDS 24
+
typedef enum {
I2C_CONTROLLER_PORT_0 = 0, //CKSVII2C0
I2C_CONTROLLER_PORT_1 = 1, //CKSVII2C1
@@ -1464,9 +1481,67 @@ typedef struct {
} SmuMetrics_V2_t;
typedef struct {
+ uint32_t CurrClock[PPCLK_COUNT];
+
+ uint16_t AverageGfxclkFrequencyPreDs;
+ uint16_t AverageGfxclkFrequencyPostDs;
+ uint16_t AverageFclkFrequencyPreDs;
+ uint16_t AverageFclkFrequencyPostDs;
+ uint16_t AverageUclkFrequencyPreDs;
+ uint16_t AverageUclkFrequencyPostDs;
+
+
+ uint16_t AverageGfxActivity;
+ uint16_t AverageUclkActivity;
+ uint8_t CurrSocVoltageOffset;
+ uint8_t CurrGfxVoltageOffset;
+ uint8_t CurrMemVidOffset;
+ uint8_t Padding8;
+ uint16_t AverageSocketPower;
+ uint16_t TemperatureEdge;
+ uint16_t TemperatureHotspot;
+ uint16_t TemperatureMem;
+ uint16_t TemperatureVrGfx;
+ uint16_t TemperatureVrMem0;
+ uint16_t TemperatureVrMem1;
+ uint16_t TemperatureVrSoc;
+ uint16_t TemperatureLiquid0;
+ uint16_t TemperatureLiquid1;
+ uint16_t TemperaturePlx;
+ uint16_t Padding16;
+ uint32_t AccCnt;
+ uint8_t ThrottlingPercentage[THROTTLER_COUNT];
+
+
+ uint8_t LinkDpmLevel;
+ uint8_t CurrFanPwm;
+ uint16_t CurrFanSpeed;
+
+ //BACO metrics, PMFW-1721
+ //metrics for D3hot entry/exit and driver ARM msgs
+ uint8_t D3HotEntryCountPerMode[D3HOT_SEQUENCE_COUNT];
+ uint8_t D3HotExitCountPerMode[D3HOT_SEQUENCE_COUNT];
+ uint8_t ArmMsgReceivedCountPerMode[D3HOT_SEQUENCE_COUNT];
+
+ //PMFW-4362
+ uint32_t EnergyAccumulator;
+ uint16_t AverageVclk0Frequency;
+ uint16_t AverageDclk0Frequency;
+ uint16_t AverageVclk1Frequency;
+ uint16_t AverageDclk1Frequency;
+ uint16_t VcnUsagePercentage0;
+ uint16_t VcnUsagePercentage1;
+ uint8_t PcieRate;
+ uint8_t PcieWidth;
+ uint16_t AverageGfxclkFrequencyTarget;
+
+} SmuMetrics_V3_t;
+
+typedef struct {
union {
SmuMetrics_t SmuMetrics;
SmuMetrics_V2_t SmuMetrics_V2;
+ SmuMetrics_V3_t SmuMetrics_V3;
};
uint32_t Spare[1];
@@ -1672,7 +1747,8 @@ typedef struct {
#define TABLE_OVERDRIVE 8
#define TABLE_I2C_COMMANDS 9
#define TABLE_PACE 10
-#define TABLE_COUNT 11
+#define TABLE_ECCINFO 11
+#define TABLE_COUNT 12
typedef struct {
float FlopsPerByteTable[RLC_PACE_TABLE_NUM_LEVELS];
diff --git a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
index 8361ebd8d876..8361ebd8d876 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu11_driver_if_vangogh.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu11_driver_if_vangogh.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h
index e9315eb5b48e..e9315eb5b48e 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu12_driver_if.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu12_driver_if.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
index 0f67c56c2863..0f67c56c2863 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_aldebaran.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_aldebaran.h
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h
new file mode 100644
index 000000000000..aa971412b434
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_v13_0_5.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU13_DRIVER_IF_V13_0_5_H__
+#define __SMU13_DRIVER_IF_V13_0_5_H__
+
+#define PMFW_DRIVER_IF_VERSION 4
+
+// Throttler Status Bitmask
+#define THROTTLER_STATUS_BIT_SPL 0
+#define THROTTLER_STATUS_BIT_FPPT 1
+#define THROTTLER_STATUS_BIT_SPPT 2
+#define THROTTLER_STATUS_BIT_SPPT_APU 3
+#define THROTTLER_STATUS_BIT_THM_CORE 4
+#define THROTTLER_STATUS_BIT_THM_GFX 5
+#define THROTTLER_STATUS_BIT_THM_SOC 6
+#define THROTTLER_STATUS_BIT_TDC_VDD 7
+#define THROTTLER_STATUS_BIT_TDC_SOC 8
+#define THROTTLER_STATUS_BIT_PROCHOT_CPU 9
+#define THROTTLER_STATUS_BIT_PROCHOT_GFX 10
+#define THROTTLER_STATUS_BIT_EDC_CPU 11
+#define THROTTLER_STATUS_BIT_EDC_GFX 12
+
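Each THROTTLER_STATUS_BIT_* define above is a bit position, not a mask; a reader would test the ThrottlerStatus word from SmuMetrics_t like this (illustrative helper):

    #include <stdint.h>

    static inline int throttler_active(uint16_t status, int bit)
    {
            return (status >> bit) & 1;
    }

    /* e.g. throttler_active(m->ThrottlerStatus, THROTTLER_STATUS_BIT_THM_CORE) */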
+#define NUM_DCFCLK_DPM_LEVELS 4
+#define NUM_DISPCLK_DPM_LEVELS 4
+#define NUM_DPPCLK_DPM_LEVELS 4
+#define NUM_SOCCLK_DPM_LEVELS 4
+#define NUM_VCN_DPM_LEVELS 4
+#define NUM_SOC_VOLTAGE_LEVELS 4
+#define NUM_DF_PSTATE_LEVELS 4
+
+typedef struct {
+ uint16_t MinClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MaxClock; // This is either DCFCLK or SOCCLK (in MHz)
+ uint16_t MinMclk;
+ uint16_t MaxMclk;
+ uint8_t WmSetting;
+ uint8_t WmType; // Used for normal pstate change or memory retraining
+ uint8_t Padding[2];
+} WatermarkRowGeneric_t;
+
+#define NUM_WM_RANGES 4
+#define WM_PSTATE_CHG 0
+#define WM_RETRAINING 1
+
+typedef enum {
+ WM_SOCCLK = 0,
+ WM_DCFCLK,
+ WM_COUNT,
+} WM_CLOCK_e;
+
+typedef struct {
+ // Watermarks
+ WatermarkRowGeneric_t WatermarkRow[WM_COUNT][NUM_WM_RANGES];
+ uint32_t MmHubPadding[7]; // SMU internal use
+} Watermarks_t;
+
+typedef struct {
+ uint32_t FClk;
+ uint32_t MemClk;
+ uint32_t Voltage;
+} DfPstateTable_t;
+
+typedef struct {
+ uint16_t GfxclkFrequency; //[MHz]
+ uint16_t SocclkFrequency; //[MHz]
+
+ uint16_t VclkFrequency; //[MHz]
+ uint16_t DclkFrequency; //[MHz]
+
+ uint16_t MemclkFrequency; //[MHz]
+ uint16_t spare;
+
+ uint16_t GfxActivity; //[centi]
+ uint16_t UvdActivity; //[centi]
+
+ uint16_t Voltage[2]; //[mV] indices: VDDCR_VDD, VDDCR_SOC
+ uint16_t Current[2]; //[mA] indices: VDDCR_VDD, VDDCR_SOC
+
+ uint16_t Power[2]; //[mW] indices: VDDCR_VDD, VDDCR_SOC
+ uint16_t GfxTemperature; //[centi-Celsius]
+
+ uint16_t SocTemperature; //[centi-Celsius]
+ uint16_t ThrottlerStatus;
+
+ uint16_t CurrentSocketPower; //[mW]
+ uint16_t spare1;
+} SmuMetrics_t;
+
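Per the field comments above, the temperature fields are centi-Celsius, so converting for display is a simple scale (illustrative helper, not driver code):

    #include <stdint.h>

    static inline float centi_to_celsius(uint16_t centi)
    {
            return centi / 100.0f;      /* e.g. 4532 -> 45.32 C */
    }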
+//Freq in MHz
+//Voltage in milli volts with 2 fractional bits
+typedef struct {
+ uint32_t DcfClocks[NUM_DCFCLK_DPM_LEVELS];
+ uint32_t DispClocks[NUM_DISPCLK_DPM_LEVELS];
+ uint32_t DppClocks[NUM_DPPCLK_DPM_LEVELS];
+ uint32_t SocClocks[NUM_SOCCLK_DPM_LEVELS];
+ uint32_t VClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t DClocks[NUM_VCN_DPM_LEVELS];
+ uint32_t SocVoltage[NUM_SOC_VOLTAGE_LEVELS];
+ DfPstateTable_t DfPstateTable[NUM_DF_PSTATE_LEVELS];
+ uint8_t NumDcfClkLevelsEnabled;
+ uint8_t NumDispClkLevelsEnabled; //Applies to both Dispclk and Dppclk
+ uint8_t NumSocClkLevelsEnabled;
+ uint8_t VcnClkLevelsEnabled; //Applies to both Vclk and Dclk
+ uint8_t NumDfPstatesEnabled;
+ uint8_t spare[3];
+ uint32_t MinGfxClk;
+ uint32_t MaxGfxClk;
+} DpmClocks_t;
+
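The Num*LevelsEnabled counts bound how many array slots are valid; for example, a hedged helper that picks the highest enabled SOCCLK, assuming the header above is in scope (illustrative, not driver code):

    #include <stdint.h>
    #include "smu13_driver_if_v13_0_5.h"    /* DpmClocks_t above */

    static uint32_t max_enabled_socclk(const DpmClocks_t *c)
    {
            uint32_t max = 0;
            uint8_t n = c->NumSocClkLevelsEnabled;

            if (n > NUM_SOCCLK_DPM_LEVELS)
                    n = NUM_SOCCLK_DPM_LEVELS;  /* clamp untrusted count */
            for (uint8_t i = 0; i < n; i++)
                    if (c->SocClocks[i] > max)
                            max = c->SocClocks[i];
            return max;
    }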
+#define TABLE_BIOS_IF 0 // Called by BIOS
+#define TABLE_WATERMARKS 1 // Called by DAL through VBIOS
+#define TABLE_CUSTOM_DPM 2 // Called by Driver
+#define TABLE_SPARE1 3
+#define TABLE_DPMCLOCKS 4 // Called by Driver and VBIOS
+#define TABLE_MOMENTARY_PM 5 // Called by Tools
+#define TABLE_MODERN_STDBY 6 // Called by Tools for Modern Standby Log
+#define TABLE_SMU_METRICS 7 // Called by Driver
+#define TABLE_COUNT 8
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h
index 25540cb28208..25540cb28208 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu13_driver_if_yellow_carp.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu13_driver_if_yellow_carp.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
index d2e10a724560..d2e10a724560 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_7_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h
index 26181b679098..26181b679098 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_0_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h
index 22edd88b8117..22edd88b8117 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_pmfw.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
index fe130a497d6c..fe130a497d6c 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_5_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_5_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h
index bd4fcb6b9610..bd4fcb6b9610 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_pmfw.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h
index 909a86aa60f3..909a86aa60f3 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_8_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v11_8_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h
index 9ac9f3bd3664..9ac9f3bd3664 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v12_0_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h
index c5e26d619bf0..c5e26d619bf0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_pmfw.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_pmfw.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h
index fc9198846e70..fc9198846e70 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_1_ppsmc.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_1_ppsmc.h
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_pmfw.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_pmfw.h
new file mode 100644
index 000000000000..1e4999d22a7c
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_pmfw.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SMU_V13_0_5_PMFW_H__
+#define __SMU_V13_0_5_PMFW_H__
+
+#include "smu13_driver_if_v13_0_5.h"
+
+#pragma pack(push, 1)
+
+#define ENABLE_DEBUG_FEATURES
+
+// Firmware features
+// Feature Control Defines
+#define FEATURE_DATA_CALCULATION_BIT 0
+#define FEATURE_PPT_BIT 1
+#define FEATURE_TDC_BIT 2
+#define FEATURE_THERMAL_BIT 3
+#define FEATURE_FIT_BIT 4
+#define FEATURE_EDC_BIT 5
+#define FEATURE_CSTATE_BOOST_BIT 6
+#define FEATURE_PROCHOT_BIT 7
+#define FEATURE_CCLK_DPM_BIT 8
+#define FEATURE_FCLK_DPM_BIT 9
+#define FEATURE_LCLK_DPM_BIT 10
+#define FEATURE_PSI7_BIT 11
+#define FEATURE_DLDO_BIT 12
+#define FEATURE_SOCCLK_DEEP_SLEEP_BIT 13
+#define FEATURE_LCLK_DEEP_SLEEP_BIT 14
+#define FEATURE_SHUBCLK_DEEP_SLEEP_BIT 15
+#define FEATURE_DVO_BIT 16
+#define FEATURE_CC6_BIT 17
+#define FEATURE_PC6_BIT 18
+#define FEATURE_DF_CSTATES_BIT 19
+#define FEATURE_CLOCK_GATING_BIT 20
+#define FEATURE_FAN_CONTROLLER_BIT 21
+#define FEATURE_CPPC_BIT 22
+#define FEATURE_DLDO_DROPOUT_LIMITER_BIT 23
+#define FEATURE_CPPC_PREFERRED_CORES_BIT 24
+#define FEATURE_GMI_FOLDING_BIT 25
+#define FEATURE_GMI_DLWM_BIT 26
+#define FEATURE_XGMI_DLWM_BIT 27
+#define FEATURE_DF_LIGHT_CSTATE_BIT 28
+#define FEATURE_SMNCLK_DEEP_SLEEP_BIT 29
+#define FEATURE_PCIE_SPEED_CONTROLLER_BIT 30
+#define FEATURE_GFX_DPM_BIT 31
+#define FEATURE_DS_GFXCLK_BIT 32
+#define FEATURE_PCC_BIT 33
+#define FEATURE_spare0_BIT 34
+#define FEATURE_S0I3_BIT 35
+#define FEATURE_VCN_DPM_BIT 36
+#define FEATURE_DS_VCN_BIT 37
+#define FEATURE_MPDMA_TF_CLK_DEEP_SLEEP_BIT 38
+#define FEATURE_MPDMA_PM_CLK_DEEP_SLEEP_BIT 39
+#define FEATURE_VDDOFF_BIT 40
+#define FEATURE_DCFCLK_DPM_BIT 41
+#define FEATURE_DCFCLK_DEEP_SLEEP_BIT 42
+#define FEATURE_ATHUB_PG_BIT 43
+#define FEATURE_SOCCLK_DPM_BIT 44
+#define FEATURE_SHUBCLK_DPM_BIT 45
+#define FEATURE_MP0CLK_DPM_BIT 46
+#define FEATURE_MP0CLK_DEEP_SLEEP_BIT 47
+#define FEATURE_PERCCXPC6_BIT 48
+#define FEATURE_GFXOFF_BIT 49
+#define NUM_FEATURES 50
+
+typedef struct {
+ // MP1_EXT_SCRATCH0
+ uint32_t CurrLevel_ACP : 4;
+ uint32_t CurrLevel_ISP : 4;
+ uint32_t CurrLevel_VCN : 4;
+ uint32_t CurrLevel_LCLK : 4;
+ uint32_t CurrLevel_MP0CLK : 4;
+ uint32_t CurrLevel_FCLK : 4;
+ uint32_t CurrLevel_SOCCLK : 4;
+ uint32_t CurrLevel_DCFCLK : 4;
+ // MP1_EXT_SCRATCH1
+ uint32_t TargLevel_ACP : 4;
+ uint32_t TargLevel_ISP : 4;
+ uint32_t TargLevel_VCN : 4;
+ uint32_t TargLevel_LCLK : 4;
+ uint32_t TargLevel_MP0CLK : 4;
+ uint32_t TargLevel_FCLK : 4;
+ uint32_t TargLevel_SOCCLK : 4;
+ uint32_t TargLevel_DCFCLK : 4;
+ // MP1_EXT_SCRATCH2
+ uint32_t CurrLevel_SHUBCLK : 4;
+ uint32_t TargLevel_SHUBCLK : 4;
+ uint32_t InUlv : 1;
+ uint32_t InS0i2 : 1;
+ uint32_t InWhisperMode : 1;
+ uint32_t GfxOn : 1;
+ uint32_t RsmuCalBusyDpmIndex: 8;
+ uint32_t DpmHandlerId : 8;
+ uint32_t DpmTimerId : 4;
+ // MP1_EXT_SCRATCH3
+ uint32_t ReadWriteSmnRegAddr: 32;
+ // MP1_EXT_SCRATCH4
+ uint32_t Reserved1;
+ // MP1_EXT_SCRATCH5
+ uint32_t FeatureStatus[NUM_FEATURES / 32];
+} FwStatus_t;
+
+#pragma pack(pop)
+
+#endif
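
FwStatus_t above mirrors the MP1 scratch registers, with per-feature enable
bits packed into 32-bit words. Note that NUM_FEATURES / 32 truncates to 1
(50 / 32), so FeatureStatus covers only the first 32 feature bits, matching
the single MP1_EXT_SCRATCH5 register. A hedged, standalone sketch of how a
reader would test one FEATURE_*_BIT:

    #include <stdbool.h>
    #include <stdint.h>

    /* Illustrative helper, not from the patch: test bit `bit` in a packed
     * status-word array, bounds-checked against the array length. */
    static bool feature_bit_set(const uint32_t *words, unsigned int nwords,
                                unsigned int bit)
    {
            if (bit / 32 >= nwords)
                    return false;
            return words[bit / 32] & (1u << (bit % 32));
    }
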
diff --git a/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_ppsmc.h b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_ppsmc.h
new file mode 100644
index 000000000000..c6238c74923a
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/pmfw_if/smu_v13_0_5_ppsmc.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright 2022 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#ifndef __SMU_V13_0_5_PPSMC_H__
+#define __SMU_V13_0_5_PPSMC_H__
+
+// SMU Response Codes:
+#define PPSMC_Result_OK 0x1
+#define PPSMC_Result_Failed 0xFF
+#define PPSMC_Result_UnknownCmd 0xFE
+#define PPSMC_Result_CmdRejectedPrereq 0xFD
+#define PPSMC_Result_CmdRejectedBusy 0xFC
+
+
+// Message Definitions:
+#define PPSMC_MSG_TestMessage 1
+#define PPSMC_MSG_GetSmuVersion 2
+#define PPSMC_MSG_EnableGfxOff 3 ///< Enable GFXOFF
+#define PPSMC_MSG_DisableGfxOff 4 ///< Disable GFXOFF
+#define PPSMC_MSG_PowerDownVcn 5 ///< Power down VCN
+#define PPSMC_MSG_PowerUpVcn 6 ///< Power up VCN; VCN is power gated by default
+#define PPSMC_MSG_SetHardMinVcn 7 ///< For wireless display
+#define PPSMC_MSG_SetSoftMinGfxclk 8 ///< Set SoftMin for GFXCLK, argument is frequency in MHz
+#define PPSMC_MSG_Spare0 9 ///< Spare
+#define PPSMC_MSG_GfxDeviceDriverReset 10 ///< Request GFX mode 2 reset
+#define PPSMC_MSG_SetDriverDramAddrHigh 11 ///< Set high 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_SetDriverDramAddrLow 12 ///< Set low 32 bits of DRAM address for Driver table transfer
+#define PPSMC_MSG_TransferTableSmu2Dram 13 ///< Transfer driver interface table from PMFW SRAM to DRAM
+#define PPSMC_MSG_TransferTableDram2Smu 14 ///< Transfer driver interface table from DRAM to PMFW SRAM
+#define PPSMC_MSG_GetGfxclkFrequency 15 ///< Get GFX clock frequency
+#define PPSMC_MSG_GetEnabledSmuFeatures 16 ///< Get enabled features in PMFW
+#define PPSMC_MSG_SetSoftMaxVcn 17 ///< Set soft max for VCN clocks (VCLK and DCLK)
+#define PPSMC_MSG_PowerDownJpeg 18 ///< Power down Jpeg
+#define PPSMC_MSG_PowerUpJpeg 19 ///< Power up Jpeg; JPEG is power gated by default
+#define PPSMC_MSG_SetSoftMaxGfxClk 20 ///< Set soft max for GFX CLK
+#define PPSMC_MSG_SetHardMinGfxClk 21 ///< Set hard min for GFX CLK
+#define PPSMC_MSG_AllowGfxOff 22 ///< Inform PMFW that GFXOFF entry is allowed
+#define PPSMC_MSG_DisallowGfxOff 23 ///< Inform PMFW that GFXOFF entry is disallowed
+#define PPSMC_MSG_SetSoftMinVcn 24 ///< Set soft min for VCN clocks (VCLK and DCLK)
+#define PPSMC_MSG_GetDriverIfVersion 25 ///< Get PMFW_DRIVER_IF version
+#define PPSMC_MSG_PrepareMp1ForUnload 26 ///< Prepare PMFW for GFX driver unload
+#define PPSMC_Message_Count 27
+
+/** @enum Mode_Reset_e
+* Mode reset type, argument for PPSMC_MSG_GfxDeviceDriverReset
+*/
+typedef enum {
+ MODE1_RESET = 1, ///< Mode reset type 1
+ MODE2_RESET = 2 ///< Mode reset type 2
+} Mode_Reset_e;
+/** @}*/
+
+#endif
+
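The response codes and message IDs above describe a simple mailbox protocol:
the driver writes an argument, writes the message ID, then polls a response
register until the firmware answers. A minimal sketch under assumed register
names (reg_read/reg_write and the REG_* indices are placeholders, not the
driver's API):

    #include <stdint.h>

    uint32_t reg_read(uint32_t reg);            /* assumed accessors */
    void reg_write(uint32_t reg, uint32_t val);

    enum { REG_MSG_ARG, REG_MSG_ID, REG_MSG_RESP };  /* illustrative */

    static int send_msg(uint32_t msg, uint32_t arg)
    {
            reg_write(REG_MSG_RESP, 0);     /* clear the old response */
            reg_write(REG_MSG_ARG, arg);
            reg_write(REG_MSG_ID, msg);     /* e.g. PPSMC_MSG_GetSmuVersion */
            while (reg_read(REG_MSG_RESP) == 0)
                    ;                       /* real code bounds this wait */
            return reg_read(REG_MSG_RESP) == 0x1 ? 0 : -1;  /* PPSMC_Result_OK */
    }
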
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
index beab6d7b28b7..beab6d7b28b7 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_11_0_cdr_table.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_11_0_cdr_table.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_types.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
index ff8a0bcbd290..9f6f306eeca0 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_types.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_types.h
@@ -190,6 +190,7 @@
__SMU_DUMMY_MAP(PowerUpCvip), \
__SMU_DUMMY_MAP(PowerDownCvip), \
__SMU_DUMMY_MAP(EnableGfxOff), \
+ __SMU_DUMMY_MAP(DisableGfxOff), \
__SMU_DUMMY_MAP(SetSoftMinGfxclk), \
__SMU_DUMMY_MAP(SetSoftMinFclk), \
__SMU_DUMMY_MAP(GetThermalLimit), \
@@ -229,8 +230,10 @@
__SMU_DUMMY_MAP(BoardPowerCalibration), \
__SMU_DUMMY_MAP(RequestGfxclk), \
__SMU_DUMMY_MAP(ForceGfxVid), \
+ __SMU_DUMMY_MAP(Spare0), \
__SMU_DUMMY_MAP(UnforceGfxVid), \
- __SMU_DUMMY_MAP(HeavySBR),
+ __SMU_DUMMY_MAP(HeavySBR), \
+ __SMU_DUMMY_MAP(SetBadHBMPagesRetiredFlagsPerChannel),
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(type) SMU_MSG_##type
@@ -362,6 +365,7 @@ enum smu_clk_type {
__SMU_DUMMY_MAP(SMUIO_CG), \
__SMU_DUMMY_MAP(THM_CG), \
__SMU_DUMMY_MAP(CLK_CG), \
+ __SMU_DUMMY_MAP(DATA_CALCULATION), \
#undef __SMU_DUMMY_MAP
#define __SMU_DUMMY_MAP(feature) SMU_FEATURE_##feature##_BIT
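
The hunks above extend an X-macro list: smu_types.h defines the message and
feature sets once, then redefines __SMU_DUMMY_MAP before each expansion to
stamp out enum entries (and, elsewhere, name strings). A standalone analogue
of the pattern:

    /* Illustrative, not the kernel header itself. */
    #define MSG_TYPES \
            __MAP(TestMessage) \
            __MAP(EnableGfxOff) \
            __MAP(DisableGfxOff)

    #undef __MAP
    #define __MAP(m) MSG_##m,
    enum msg_type { MSG_TYPES MSG_COUNT };

    #undef __MAP
    #define __MAP(m) #m,
    static const char * const msg_names[] = { MSG_TYPES };
    #undef __MAP
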
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
index acb3be292096..acb3be292096 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h
index 247c6e9632ba..247c6e9632ba 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_7_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_7_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
index 7a63cf8e85ed..7a63cf8e85ed 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v11_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v11_0_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
index 1ad2dff71090..1ad2dff71090 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v12_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v12_0.h
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
index 44af23ae059e..d0d5b9b2c65b 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0.h
@@ -30,6 +30,7 @@
#define SMU13_DRIVER_IF_VERSION_ALDE 0x08
#define SMU13_MODE1_RESET_WAIT_TIME_IN_MS 500 //500ms
+#define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04
/* MP Apertures */
#define MP0_Public 0x03800000
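
SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 pins the driver-interface version this
code was built against; at init the driver can compare it with what the
firmware reports (via PPSMC_MSG_GetDriverIfVersion) and warn on a mismatch.
A hedged, standalone sketch of that check:

    #include <stdint.h>
    #include <stdio.h>

    #define SMU13_DRIVER_IF_VERSION_SMU_V13_0_5 0x04

    /* fw_if_version would come from PPSMC_MSG_GetDriverIfVersion;
     * mismatches are typically logged rather than treated as fatal. */
    static void check_driver_if_version(uint32_t fw_if_version)
    {
            if (fw_if_version != SMU13_DRIVER_IF_VERSION_SMU_V13_0_5)
                    fprintf(stderr, "driver-if mismatch: fw 0x%x, driver 0x%x\n",
                            fw_if_version, SMU13_DRIVER_IF_VERSION_SMU_V13_0_5);
    }
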
diff --git a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h
index 1f311396b706..1f311396b706 100644
--- a/drivers/gpu/drm/amd/pm/inc/smu_v13_0_pptable.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/inc/smu_v13_0_pptable.h
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
index 505d2fb94fd9..201563072189 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/arcturus_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
@@ -33,7 +34,6 @@
#include "smu11_driver_if_arcturus.h"
#include "soc15_common.h"
#include "atom.h"
-#include "power_state.h"
#include "arcturus_ppt.h"
#include "smu_v11_0_pptable.h"
#include "arcturus_ppsmc.h"
@@ -57,8 +57,6 @@
#undef pr_info
#undef pr_debug
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
#define ARCTURUS_FEA_MAP(smu_feature, arcturus_feature) \
[smu_feature] = {1, (arcturus_feature)}
@@ -603,15 +601,11 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -694,8 +688,6 @@ static int arcturus_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1120,7 +1112,6 @@ static int arcturus_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -1181,7 +1172,6 @@ static int arcturus_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -2031,15 +2021,12 @@ static void arcturus_dump_pptable(struct smu_context *smu)
static bool arcturus_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
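
This hunk, like the matching ones in the other ppt files below, drops the
two-u32 intermediate: smu_cmn_get_enabled_mask() now fills a single u64, so
every caller's manual combine disappears. The before/after shape as a
standalone sketch:

    #include <stdbool.h>
    #include <stdint.h>

    #define SMC_DPM_FEATURE 0x3ull   /* placeholder bits, not the real mask */

    /* old: firmware mask read as two 32-bit words, combined per caller */
    static bool dpm_running_old(uint32_t lo, uint32_t hi)
    {
            uint64_t enabled = (uint64_t)hi << 32 | lo;
            return enabled & SMC_DPM_FEATURE;
    }

    /* new: the common helper hands back the 64-bit mask directly */
    static bool dpm_running_new(uint64_t enabled)
    {
            return enabled & SMC_DPM_FEATURE;
    }
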
@@ -2071,18 +2058,23 @@ static int arcturus_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
u16 dir;
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = smu_i2c->port;
req->I2CSpeed = I2C_SPEED_FAST_400K;
req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
dir = msg[0].flags & I2C_M_RD;
@@ -2118,9 +2110,9 @@ static int arcturus_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -2161,28 +2153,60 @@ static const struct i2c_adapter_quirks arcturus_i2c_control_quirks = {
.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
-static int arcturus_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int arcturus_i2c_control_init(struct smu_context *smu)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_HWMON;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &arcturus_i2c_algo;
+ control->quirks = &arcturus_i2c_control_quirks;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
- control->owner = THIS_MODULE;
- control->class = I2C_CLASS_HWMON;
- control->dev.parent = &adev->pdev->dev;
- control->algo = &arcturus_i2c_algo;
- control->quirks = &arcturus_i2c_control_quirks;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
- res = i2c_add_adapter(control);
- if (res)
- DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+ i2c_del_adapter(control);
+ }
return res;
}
-static void arcturus_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void arcturus_i2c_control_fini(struct smu_context *smu)
{
- i2c_del_adapter(control);
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
}
static void arcturus_get_unique_id(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
index 2238ee19c222..f1a4a720d426 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/cyan_skillfish_ppt.c
@@ -125,22 +125,6 @@ static int cyan_skillfish_init_smc_tables(struct smu_context *smu)
return smu_v11_0_init_smc_tables(smu);
}
-static int cyan_skillfish_finit_smc_tables(struct smu_context *smu)
-{
- struct smu_table_context *smu_table = &smu->smu_table;
-
- kfree(smu_table->metrics_table);
- smu_table->metrics_table = NULL;
-
- kfree(smu_table->gpu_metrics_table);
- smu_table->gpu_metrics_table = NULL;
- smu_table->gpu_metrics_table_size = 0;
-
- smu_table->metrics_time = 0;
-
- return 0;
-}
-
static int
cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
MetricsMember_t member,
@@ -150,13 +134,9 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -200,8 +180,6 @@ cyan_skillfish_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -215,8 +193,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
-
switch (sensor) {
case AMDGPU_PP_SENSOR_GFX_SCLK:
ret = cyan_skillfish_get_smu_metrics_data(smu,
@@ -267,8 +243,6 @@ static int cyan_skillfish_read_sensor(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->sensor_lock);
-
return ret;
}
@@ -376,20 +350,16 @@ static bool cyan_skillfish_is_dpm_running(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
/* we need to re-init after suspend so return false */
if (adev->in_suspend)
return false;
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32);
-
/*
* cyan_skillfish specific: query the default sclk instead of hardcoding it.
*/
@@ -552,6 +522,46 @@ static int cyan_skillfish_od_edit_dpm_table(struct smu_context *smu,
return ret;
}
+static int cyan_skillfish_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ int ret = 0;
+ uint32_t low, high;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ low = CYAN_SKILLFISH_SCLK_MIN;
+ high = CYAN_SKILLFISH_SCLK_MAX;
+ break;
+ default:
+ ret = cyan_skillfish_get_current_clk_freq(smu, clk_type, &low);
+ if (ret)
+ return ret;
+ high = low;
+ break;
+ }
+
+ if (min)
+ *min = low;
+ if (max)
+ *max = high;
+
+ return 0;
+}
+
+static int cyan_skillfish_get_enabled_mask(struct smu_context *smu,
+ uint64_t *feature_mask)
+{
+ if (!feature_mask)
+ return -EINVAL;
+ memset(feature_mask, 0xff, sizeof(*feature_mask));
+
+ return 0;
+}
+
static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.check_fw_status = smu_v11_0_check_fw_status,
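
cyan_skillfish_get_enabled_mask() above is a stub: it reports every feature
bit as set, presumably because this part's firmware interface offers no
feature query, so generic callers keep working. In miniature (standalone
sketch, not from the patch):

    #include <stdint.h>
    #include <string.h>

    /* memset(p, 0xff, sizeof(uint64_t)) yields ~0ull: all features "on". */
    static uint64_t all_features_on(void)
    {
            uint64_t mask;

            memset(&mask, 0xff, sizeof(mask));
            return mask;    /* == UINT64_MAX */
    }
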
@@ -559,12 +569,14 @@ static const struct pptable_funcs cyan_skillfish_ppt_funcs = {
.init_power = smu_v11_0_init_power,
.fini_power = smu_v11_0_fini_power,
.init_smc_tables = cyan_skillfish_init_smc_tables,
- .fini_smc_tables = cyan_skillfish_finit_smc_tables,
+ .fini_smc_tables = smu_v11_0_fini_smc_tables,
.read_sensor = cyan_skillfish_read_sensor,
.print_clk_levels = cyan_skillfish_print_clk_levels,
+ .get_enabled_mask = cyan_skillfish_get_enabled_mask,
.is_dpm_running = cyan_skillfish_is_dpm_running,
.get_gpu_metrics = cyan_skillfish_get_gpu_metrics,
.od_edit_dpm_table = cyan_skillfish_od_edit_dpm_table,
+ .get_dpm_ultimate_freq = cyan_skillfish_get_dpm_ultimate_freq,
.register_irq_handler = smu_v11_0_register_irq_handler,
.notify_memory_pool_location = smu_v11_0_notify_memory_pool_location,
.send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
index 2bb7816b245a..5f22fc3430f4 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/navi10_ppt.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/i2c.h>
#include "amdgpu.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
@@ -57,8 +58,6 @@
#undef pr_info
#undef pr_debug
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
@@ -511,6 +510,8 @@ static int navi10_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
sizeof(DpmActivityMonitorCoeffInt_t), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfig_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_NV1X_t),
GFP_KERNEL);
@@ -527,8 +528,15 @@ static int navi10_tables_init(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
+ smu_table->driver_smu_config_table =
+ kzalloc(tables[SMU_TABLE_DRIVER_SMU_CONFIG].size, GFP_KERNEL);
+ if (!smu_table->driver_smu_config_table)
+ goto err3_out;
+
return 0;
+err3_out:
+ kfree(smu_table->watermarks_table);
err2_out:
kfree(smu_table->gpu_metrics_table);
err1_out:
@@ -546,15 +554,11 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -624,8 +628,6 @@ static int navi10_get_legacy_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -638,15 +640,11 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -719,8 +717,6 @@ static int navi10_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -733,15 +729,11 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_NV12_legacy_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -811,8 +803,6 @@ static int navi12_get_legacy_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -825,15 +815,11 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
(SmuMetrics_NV12_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -906,8 +892,6 @@ static int navi12_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1261,6 +1245,215 @@ static void navi10_od_setting_get_range(struct smu_11_0_overdrive_table *od_tabl
*max = od_table->max[setting];
}
+static int navi10_emit_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ char *buf,
+ int *offset)
+{
+ uint16_t *curve_settings;
+ int ret = 0;
+ uint32_t cur_value = 0, value = 0;
+ uint32_t freq_values[3] = {0};
+ uint32_t i, levels, mark_index = 0, count = 0;
+ struct smu_table_context *table_context = &smu->smu_table;
+ uint32_t gen_speed, lane_width;
+ struct smu_dpm_context *smu_dpm = &smu->smu_dpm;
+ struct smu_11_0_dpm_context *dpm_context = smu_dpm->dpm_context;
+ PPTable_t *pptable = (PPTable_t *)table_context->driver_pptable;
+ OverDriveTable_t *od_table =
+ (OverDriveTable_t *)table_context->overdrive_table;
+ struct smu_11_0_overdrive_table *od_settings = smu->od_settings;
+ uint32_t min_value, max_value;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ case SMU_SOCCLK:
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_DCEFCLK:
+ ret = navi10_get_current_clk_freq_by_table(smu, clk_type, &cur_value);
+ if (ret)
+ return ret;
+
+ ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
+ if (ret)
+ return ret;
+
+ if (!navi10_is_support_fine_grained_dpm(smu, clk_type)) {
+ for (i = 0; i < count; i++) {
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type, i, &value);
+ if (ret)
+ return ret;
+
+ *offset += sysfs_emit_at(buf, *offset,
+ "%d: %uMhz %s\n",
+ i, value,
+ cur_value == value ? "*" : "");
+ }
+ } else {
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type, 0, &freq_values[0]);
+ if (ret)
+ return ret;
+ ret = smu_v11_0_get_dpm_freq_by_index(smu,
+ clk_type,
+ count - 1,
+ &freq_values[2]);
+ if (ret)
+ return ret;
+
+ freq_values[1] = cur_value;
+ mark_index = cur_value == freq_values[0] ? 0 :
+ cur_value == freq_values[2] ? 2 : 1;
+
+ levels = 3;
+ if (mark_index != 1) {
+ levels = 2;
+ freq_values[1] = freq_values[2];
+ }
+
+ for (i = 0; i < levels; i++) {
+ *offset += sysfs_emit_at(buf, *offset,
+ "%d: %uMhz %s\n",
+ i, freq_values[i],
+ i == mark_index ? "*" : "");
+ }
+ }
+ break;
+ case SMU_PCIE:
+ gen_speed = smu_v11_0_get_current_pcie_link_speed_level(smu);
+ lane_width = smu_v11_0_get_current_pcie_link_width_level(smu);
+ for (i = 0; i < NUM_LINK_LEVELS; i++) {
+ *offset += sysfs_emit_at(buf, *offset, "%d: %s %s %dMhz %s\n", i,
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 0) ? "2.5GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 1) ? "5.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 2) ? "8.0GT/s," :
+ (dpm_context->dpm_tables.pcie_table.pcie_gen[i] == 3) ? "16.0GT/s," : "",
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 1) ? "x1" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 2) ? "x2" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 3) ? "x4" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 4) ? "x8" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 5) ? "x12" :
+ (dpm_context->dpm_tables.pcie_table.pcie_lane[i] == 6) ? "x16" : "",
+ pptable->LclkFreq[i],
+ (gen_speed == dpm_context->dpm_tables.pcie_table.pcie_gen[i]) &&
+ (lane_width == dpm_context->dpm_tables.pcie_table.pcie_lane[i]) ?
+ "*" : "");
+ }
+ break;
+ case SMU_OD_SCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ return -EOPNOTSUPP;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS))
+ break;
+ *offset += sysfs_emit_at(buf, *offset, "OD_SCLK:\n0: %uMhz\n1: %uMhz\n",
+ od_table->GfxclkFmin, od_table->GfxclkFmax);
+ break;
+ case SMU_OD_MCLK:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ return -EOPNOTSUPP;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX))
+ break;
+ *offset += sysfs_emit_at(buf, *offset, "OD_MCLK:\n1: %uMHz\n", od_table->UclkFmax);
+ break;
+ case SMU_OD_VDDC_CURVE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ return -EOPNOTSUPP;
+ if (!navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE))
+ break;
+ *offset += sysfs_emit_at(buf, *offset, "OD_VDDC_CURVE:\n");
+ for (i = 0; i < 3; i++) {
+ switch (i) {
+ case 0:
+ curve_settings = &od_table->GfxclkFreq1;
+ break;
+ case 1:
+ curve_settings = &od_table->GfxclkFreq2;
+ break;
+ case 2:
+ curve_settings = &od_table->GfxclkFreq3;
+ break;
+ default:
+ break;
+ }
+ *offset += sysfs_emit_at(buf, *offset, "%d: %uMHz %umV\n",
+ i, curve_settings[0],
+ curve_settings[1] / NAVI10_VOLTAGE_SCALE);
+ }
+ break;
+ case SMU_OD_RANGE:
+ if (!smu->od_enabled || !od_table || !od_settings)
+ return -EOPNOTSUPP;
+ *offset += sysfs_emit_at(buf, *offset, "%s:\n", "OD_RANGE");
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_LIMITS)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMIN,
+ &min_value, NULL);
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_GFXCLKFMAX,
+ NULL, &max_value);
+ *offset += sysfs_emit_at(buf, *offset, "SCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_UCLK_MAX)) {
+ navi10_od_setting_get_range(od_settings, SMU_11_0_ODSETTING_UCLKFMAX,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset, "MCLK: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ }
+
+ if (navi10_od_feature_is_supported(od_settings, SMU_11_0_ODCAP_GFXCLK_CURVE)) {
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P1,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_SCLK[0]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P1,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_VOLT[0]: %7dmV %11dmV\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P2,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_SCLK[1]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P2,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_VOLT[1]: %7dmV %11dmV\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEFREQ_P3,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_SCLK[2]: %7uMhz %10uMhz\n",
+ min_value, max_value);
+ navi10_od_setting_get_range(od_settings,
+ SMU_11_0_ODSETTING_VDDGFXCURVEVOLTAGE_P3,
+ &min_value, &max_value);
+ *offset += sysfs_emit_at(buf, *offset,
+ "VDDC_CURVE_VOLT[2]: %7dmV %11dmV\n",
+ min_value, max_value);
+ }
+
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
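navi10_emit_clk_levels() above is the emit-style counterpart of
print_clk_levels below: instead of returning a size, it accumulates into
*offset via sysfs_emit_at(), which appends at a given offset inside the
one-page sysfs buffer and returns the number of characters written. A
standalone analogue of that accumulation (sysfs_emit_at itself additionally
warns on overflow):

    #include <stdarg.h>
    #include <stdio.h>

    #define PAGE_SIZE 4096

    static int emit_at(char *buf, int at, const char *fmt, ...)
    {
            va_list ap;
            int n;

            if (at < 0 || at >= PAGE_SIZE)
                    return 0;
            va_start(ap, fmt);
            n = vsnprintf(buf + at, PAGE_SIZE - at, fmt, ap);
            va_end(ap);
            return n < 0 ? 0 : n;
    }

    /* usage mirrors the hunk: offset += emit_at(buf, offset, "%d: %uMhz\n", i, v); */
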
static int navi10_print_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, char *buf)
{
@@ -1649,8 +1842,8 @@ static int navi10_display_config_changed(struct smu_context *smu)
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
NULL);
@@ -1664,15 +1857,12 @@ static int navi10_display_config_changed(struct smu_context *smu)
static bool navi10_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
@@ -1888,13 +2078,13 @@ static int navi10_notify_smc_display_config(struct smu_context *smu)
min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
@@ -1988,7 +2178,6 @@ static int navi10_read_sensor(struct smu_context *smu,
if(!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
*(uint32_t *)data = pptable->FanMaximumRpm;
@@ -2048,7 +2237,6 @@ static int navi10_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -2708,20 +2896,14 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
SmuMetrics_legacy_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_legacy_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -2778,18 +2960,23 @@ static ssize_t navi10_get_legacy_gpu_metrics(struct smu_context *smu,
static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
u16 dir;
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = smu_i2c->port;
req->I2CSpeed = I2C_SPEED_FAST_400K;
req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
dir = msg[0].flags & I2C_M_RD;
@@ -2825,9 +3012,9 @@ static int navi10_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -2867,28 +3054,60 @@ static const struct i2c_adapter_quirks navi10_i2c_control_quirks = {
.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
-static int navi10_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int navi10_i2c_control_init(struct smu_context *smu)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_HWMON;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &navi10_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &navi10_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
- control->owner = THIS_MODULE;
- control->class = I2C_CLASS_HWMON;
- control->dev.parent = &adev->pdev->dev;
- control->algo = &navi10_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
- control->quirks = &navi10_i2c_control_quirks;
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
- res = i2c_add_adapter(control);
- if (res)
- DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+ i2c_del_adapter(control);
+ }
return res;
}
-static void navi10_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void navi10_i2c_control_fini(struct smu_context *smu)
{
- i2c_del_adapter(control);
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
}
static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
@@ -2900,20 +3119,14 @@ static ssize_t navi10_get_gpu_metrics(struct smu_context *smu,
SmuMetrics_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -2978,20 +3191,14 @@ static ssize_t navi12_get_legacy_gpu_metrics(struct smu_context *smu,
SmuMetrics_NV12_legacy_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_legacy_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -3059,20 +3266,14 @@ static ssize_t navi12_get_gpu_metrics(struct smu_context *smu,
SmuMetrics_NV12_t metrics;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ true);
+ if (ret)
return ret;
- }
memcpy(&metrics, smu_table->metrics_table, sizeof(SmuMetrics_NV12_t));
- mutex_unlock(&smu->metrics_lock);
-
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
gpu_metrics->temperature_edge = metrics.TemperatureEdge;
@@ -3229,6 +3430,54 @@ static int navi10_post_smu_init(struct smu_context *smu)
return ret;
}
+static int navi10_get_default_config_table_settings(struct smu_context *smu,
+ struct config_table_setting *table)
+{
+ if (!table)
+ return -EINVAL;
+
+ table->gfxclk_average_tau = 10;
+ table->socclk_average_tau = 10;
+ table->uclk_average_tau = 10;
+ table->gfx_activity_average_tau = 10;
+ table->mem_activity_average_tau = 10;
+ table->socket_power_average_tau = 10;
+
+ return 0;
+}
+
+static int navi10_set_config_table(struct smu_context *smu,
+ struct config_table_setting *table)
+{
+ DriverSmuConfig_t driver_smu_config_table;
+
+ if (!table)
+ return -EINVAL;
+
+ memset(&driver_smu_config_table,
+ 0,
+ sizeof(driver_smu_config_table));
+
+ driver_smu_config_table.GfxclkAverageLpfTau =
+ table->gfxclk_average_tau;
+ driver_smu_config_table.SocclkAverageLpfTau =
+ table->socclk_average_tau;
+ driver_smu_config_table.UclkAverageLpfTau =
+ table->uclk_average_tau;
+ driver_smu_config_table.GfxActivityLpfTau =
+ table->gfx_activity_average_tau;
+ driver_smu_config_table.UclkActivityLpfTau =
+ table->mem_activity_average_tau;
+ driver_smu_config_table.SocketPowerLpfTau =
+ table->socket_power_average_tau;
+
+ return smu_cmn_update_table(smu,
+ SMU_TABLE_DRIVER_SMU_CONFIG,
+ 0,
+ (void *)&driver_smu_config_table,
+ true);
+}
+
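The two new callbacks pair up: get_default_config_table_settings() fills the
per-metric averaging time constants (taus) and set_config_table() pushes them
to the PMFW through SMU_TABLE_DRIVER_SMU_CONFIG. A hypothetical caller (the
helper itself is a sketch, not part of the patch):

    /* Sketch: fetch the defaults, optionally tweak, then commit. */
    static int apply_default_config(struct smu_context *smu)
    {
            struct config_table_setting tbl;
            int ret;

            ret = smu->ppt_funcs->get_default_config_table_settings(smu, &tbl);
            if (ret)
                    return ret;
            return smu->ppt_funcs->set_config_table(smu, &tbl);
    }
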
static const struct pptable_funcs navi10_ppt_funcs = {
.get_allowed_feature_mask = navi10_get_allowed_feature_mask,
.set_default_dpm_table = navi10_set_default_dpm_table,
@@ -3237,6 +3486,7 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.i2c_init = navi10_i2c_control_init,
.i2c_fini = navi10_i2c_control_fini,
.print_clk_levels = navi10_print_clk_levels,
+ .emit_clk_levels = navi10_emit_clk_levels,
.force_clk_levels = navi10_force_clk_levels,
.populate_umd_state_clk = navi10_populate_umd_state_clk,
.get_clock_by_type_with_latency = navi10_get_clock_by_type_with_latency,
@@ -3317,6 +3567,8 @@ static const struct pptable_funcs navi10_ppt_funcs = {
.post_init = navi10_post_smu_init,
.interrupt_work = smu_v11_0_interrupt_work,
.set_mp1_state = smu_cmn_set_mp1_state,
+ .get_default_config_table_settings = navi10_get_default_config_table_settings,
+ .set_config_table = navi10_set_config_table,
};
void navi10_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
index a4207293158c..38f04836c82f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.c
@@ -27,6 +27,7 @@
#include <linux/pci.h>
#include <linux/i2c.h>
#include "amdgpu.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
@@ -46,6 +47,7 @@
#include "mp/mp_11_0_sh_mask.h"
#include "asic_reg/mp/mp_11_0_sh_mask.h"
+#include "amdgpu_ras.h"
#include "smu_cmn.h"
/*
@@ -58,8 +60,6 @@
#undef pr_info
#undef pr_debug
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
#define FEATURE_MASK(feature) (1ULL << feature)
#define SMC_DPM_FEATURE ( \
FEATURE_MASK(FEATURE_DPM_PREFETCHER_BIT) | \
@@ -83,6 +83,12 @@
/* STB FIFO depth is in 64bit units */
#define SIENNA_CICHLID_STB_DEPTH_UNIT_BYTES 8
+/*
+ * The SMU supports ECCTABLE since version 58.70.0;
+ * use this to check whether the ECCTABLE feature is supported.
+ */
+#define SUPPORT_ECCTABLE_SMU_VERSION 0x003a4600
+
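0x003a4600 encodes 58.70.0 one byte per component (0x3a == 58, 0x46 == 70,
0x00 == 0). A hypothetical helper for the gate:

    /* Sketch, not from the patch: gate ECC-table use on PMFW version. */
    static bool ecc_table_supported(uint32_t smc_fw_version)
    {
            return smc_fw_version >= SUPPORT_ECCTABLE_SMU_VERSION;
    }
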
static int get_table_size(struct smu_context *smu)
{
if (smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 13))
@@ -225,6 +231,7 @@ static struct cmn2asic_mapping sienna_cichlid_table_map[SMU_TABLE_COUNT] = {
TAB_MAP(OVERDRIVE),
TAB_MAP(I2C_COMMANDS),
TAB_MAP(PACE),
+ TAB_MAP(ECCINFO),
};
static struct cmn2asic_mapping sienna_cichlid_pwr_src_map[SMU_POWER_SOURCE_COUNT] = {
@@ -341,7 +348,7 @@ sienna_cichlid_get_allowed_feature_mask(struct smu_context *smu,
if (smu->dc_controlled_by_gpio)
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_ACDC_BIT);
- if (amdgpu_aspm)
+ if (amdgpu_device_should_use_aspm(adev))
*(uint64_t *)feature_mask |= FEATURE_MASK(FEATURE_DS_LCLK_BIT);
return 0;
@@ -421,6 +428,36 @@ static int sienna_cichlid_store_powerplay_table(struct smu_context *smu)
return 0;
}
+static int sienna_cichlid_patch_pptable_quirk(struct smu_context *smu)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t *board_reserved;
+ uint16_t *freq_table_gfx;
+ uint32_t i;
+
+ /* Fix some OEM SKU-specific stability issues */
+ GET_PPTABLE_MEMBER(BoardReserved, &board_reserved);
+ if ((adev->pdev->device == 0x73DF) &&
+ (adev->pdev->revision == 0xC3) &&
+ (adev->pdev->subsystem_device == 0x16C2) &&
+ (adev->pdev->subsystem_vendor == 0x1043))
+ board_reserved[0] = 1387;
+
+ GET_PPTABLE_MEMBER(FreqTableGfx, &freq_table_gfx);
+ if ((adev->pdev->device == 0x73DF) &&
+ (adev->pdev->revision == 0xC3) &&
+ ((adev->pdev->subsystem_device == 0x16C2) ||
+ (adev->pdev->subsystem_device == 0x133C)) &&
+ (adev->pdev->subsystem_vendor == 0x1043)) {
+ for (i = 0; i < NUM_GFXCLK_DPM_LEVELS; i++) {
+ if (freq_table_gfx[i] > 2500)
+ freq_table_gfx[i] = 2500;
+ }
+ }
+
+ return 0;
+}
+
static int sienna_cichlid_setup_pptable(struct smu_context *smu)
{
int ret = 0;
@@ -441,7 +478,7 @@ static int sienna_cichlid_setup_pptable(struct smu_context *smu)
if (ret)
return ret;
- return ret;
+ return sienna_cichlid_patch_pptable_quirk(smu);
}
static int sienna_cichlid_tables_init(struct smu_context *smu)
@@ -466,6 +503,10 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
SMU_TABLE_INIT(tables, SMU_TABLE_ACTIVITY_MONITOR_COEFF,
sizeof(DpmActivityMonitorCoeffIntExternal_t), PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_ECCINFO, sizeof(EccInfoTable_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_DRIVER_SMU_CONFIG, sizeof(DriverSmuConfigExternal_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
smu_table->metrics_table = kzalloc(sizeof(SmuMetricsExternal_t), GFP_KERNEL);
if (!smu_table->metrics_table)
@@ -481,8 +522,21 @@ static int sienna_cichlid_tables_init(struct smu_context *smu)
if (!smu_table->watermarks_table)
goto err2_out;
+ smu_table->ecc_table = kzalloc(tables[SMU_TABLE_ECCINFO].size, GFP_KERNEL);
+ if (!smu_table->ecc_table)
+ goto err3_out;
+
+ smu_table->driver_smu_config_table =
+ kzalloc(tables[SMU_TABLE_DRIVER_SMU_CONFIG].size, GFP_KERNEL);
+ if (!smu_table->driver_smu_config_table)
+ goto err4_out;
+
return 0;
+err4_out:
+ kfree(smu_table->ecc_table);
+err3_out:
+ kfree(smu_table->watermarks_table);
err2_out:
kfree(smu_table->gpu_metrics_table);
err1_out:
@@ -500,6 +554,11 @@ static uint32_t sienna_cichlid_get_throttler_status_locked(struct smu_context *s
int i;
if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+ (smu->smc_fw_version >= 0x3A4900)) {
+ for (i = 0; i < THROTTLER_COUNT; i++)
+ throttler_status |=
+ (metrics_ext->SmuMetrics_V3.ThrottlingPercentage[i] ? 1U << i : 0);
+ } else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
(smu->smc_fw_version >= 0x3A4300)) {
for (i = 0; i < THROTTLER_COUNT; i++)
throttler_status |=
@@ -520,121 +579,147 @@ static int sienna_cichlid_get_smu_metrics_data(struct smu_context *smu,
&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics);
SmuMetrics_V2_t *metrics_v2 =
&(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics_V2);
- bool use_metrics_v2 = ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4300)) ? true : false;
+ SmuMetrics_V3_t *metrics_v3 =
+ &(((SmuMetricsExternal_t *)(smu_table->metrics_table))->SmuMetrics_V3);
+ bool use_metrics_v2 = false;
+ bool use_metrics_v3 = false;
uint16_t average_gfx_activity;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+ (smu->smc_fw_version >= 0x3A4900))
+ use_metrics_v3 = true;
+ else if ((smu->adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+ (smu->smc_fw_version >= 0x3A4300))
+ use_metrics_v2 = true;
+
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
- *value = use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_GFXCLK] :
+ *value = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_GFXCLK] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_GFXCLK] :
metrics->CurrClock[PPCLK_GFXCLK];
break;
case METRICS_CURR_SOCCLK:
- *value = use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_SOCCLK] :
+ *value = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_SOCCLK] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_SOCCLK] :
metrics->CurrClock[PPCLK_SOCCLK];
break;
case METRICS_CURR_UCLK:
- *value = use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_UCLK] :
+ *value = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_UCLK] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_UCLK] :
metrics->CurrClock[PPCLK_UCLK];
break;
case METRICS_CURR_VCLK:
- *value = use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_VCLK_0] :
+ *value = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_VCLK_0] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_VCLK_0] :
metrics->CurrClock[PPCLK_VCLK_0];
break;
case METRICS_CURR_VCLK1:
- *value = use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_VCLK_1] :
+ *value = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_VCLK_1] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_VCLK_1] :
metrics->CurrClock[PPCLK_VCLK_1];
break;
case METRICS_CURR_DCLK:
- *value = use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_0] :
+ *value = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_0] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_0] :
metrics->CurrClock[PPCLK_DCLK_0];
break;
case METRICS_CURR_DCLK1:
- *value = use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] :
+ *value = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] :
metrics->CurrClock[PPCLK_DCLK_1];
break;
case METRICS_CURR_DCEFCLK:
- *value = use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCEFCLK] :
+ *value = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCEFCLK] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCEFCLK] :
metrics->CurrClock[PPCLK_DCEFCLK];
break;
case METRICS_CURR_FCLK:
- *value = use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_FCLK] :
+ *value = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_FCLK] :
+ use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_FCLK] :
metrics->CurrClock[PPCLK_FCLK];
break;
case METRICS_AVERAGE_GFXCLK:
- average_gfx_activity = use_metrics_v2 ? metrics_v2->AverageGfxActivity :
+ average_gfx_activity = use_metrics_v3 ? metrics_v3->AverageGfxActivity :
+ use_metrics_v2 ? metrics_v2->AverageGfxActivity :
metrics->AverageGfxActivity;
if (average_gfx_activity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
- *value = use_metrics_v2 ? metrics_v2->AverageGfxclkFrequencyPostDs :
+ *value = use_metrics_v3 ? metrics_v3->AverageGfxclkFrequencyPostDs :
+ use_metrics_v2 ? metrics_v2->AverageGfxclkFrequencyPostDs :
metrics->AverageGfxclkFrequencyPostDs;
else
- *value = use_metrics_v2 ? metrics_v2->AverageGfxclkFrequencyPreDs :
+ *value = use_metrics_v3 ? metrics_v3->AverageGfxclkFrequencyPreDs :
+ use_metrics_v2 ? metrics_v2->AverageGfxclkFrequencyPreDs :
metrics->AverageGfxclkFrequencyPreDs;
break;
case METRICS_AVERAGE_FCLK:
- *value = use_metrics_v2 ? metrics_v2->AverageFclkFrequencyPostDs :
+ *value = use_metrics_v3 ? metrics_v3->AverageFclkFrequencyPostDs :
+ use_metrics_v2 ? metrics_v2->AverageFclkFrequencyPostDs :
metrics->AverageFclkFrequencyPostDs;
break;
case METRICS_AVERAGE_UCLK:
- *value = use_metrics_v2 ? metrics_v2->AverageUclkFrequencyPostDs :
+ *value = use_metrics_v3 ? metrics_v3->AverageUclkFrequencyPostDs :
+ use_metrics_v2 ? metrics_v2->AverageUclkFrequencyPostDs :
metrics->AverageUclkFrequencyPostDs;
break;
case METRICS_AVERAGE_GFXACTIVITY:
- *value = use_metrics_v2 ? metrics_v2->AverageGfxActivity :
+ *value = use_metrics_v3 ? metrics_v3->AverageGfxActivity :
+ use_metrics_v2 ? metrics_v2->AverageGfxActivity :
metrics->AverageGfxActivity;
break;
case METRICS_AVERAGE_MEMACTIVITY:
- *value = use_metrics_v2 ? metrics_v2->AverageUclkActivity :
+ *value = use_metrics_v3 ? metrics_v3->AverageUclkActivity :
+ use_metrics_v2 ? metrics_v2->AverageUclkActivity :
metrics->AverageUclkActivity;
break;
case METRICS_AVERAGE_SOCKETPOWER:
- *value = use_metrics_v2 ? metrics_v2->AverageSocketPower << 8 :
+ *value = use_metrics_v3 ? metrics_v3->AverageSocketPower << 8 :
+ use_metrics_v2 ? metrics_v2->AverageSocketPower << 8 :
metrics->AverageSocketPower << 8;
break;
case METRICS_TEMPERATURE_EDGE:
- *value = (use_metrics_v2 ? metrics_v2->TemperatureEdge : metrics->TemperatureEdge) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *value = (use_metrics_v3 ? metrics_v3->TemperatureEdge :
+ use_metrics_v2 ? metrics_v2->TemperatureEdge :
+ metrics->TemperatureEdge) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_HOTSPOT:
- *value = (use_metrics_v2 ? metrics_v2->TemperatureHotspot : metrics->TemperatureHotspot) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *value = (use_metrics_v3 ? metrics_v3->TemperatureHotspot :
+ use_metrics_v2 ? metrics_v2->TemperatureHotspot :
+ metrics->TemperatureHotspot) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_MEM:
- *value = (use_metrics_v2 ? metrics_v2->TemperatureMem : metrics->TemperatureMem) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *value = (use_metrics_v3 ? metrics_v3->TemperatureMem :
+ use_metrics_v2 ? metrics_v2->TemperatureMem :
+ metrics->TemperatureMem) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_VRGFX:
- *value = (use_metrics_v2 ? metrics_v2->TemperatureVrGfx : metrics->TemperatureVrGfx) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *value = (use_metrics_v3 ? metrics_v3->TemperatureVrGfx :
+ use_metrics_v2 ? metrics_v2->TemperatureVrGfx :
+ metrics->TemperatureVrGfx) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_TEMPERATURE_VRSOC:
- *value = (use_metrics_v2 ? metrics_v2->TemperatureVrSoc : metrics->TemperatureVrSoc) *
- SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ *value = (use_metrics_v3 ? metrics_v3->TemperatureVrSoc :
+ use_metrics_v2 ? metrics_v2->TemperatureVrSoc :
+ metrics->TemperatureVrSoc) * SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
break;
case METRICS_THROTTLER_STATUS:
*value = sienna_cichlid_get_throttler_status_locked(smu);
break;
case METRICS_CURR_FANSPEED:
- *value = use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed;
+ *value = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
+ use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed;
break;
default:
*value = UINT_MAX;
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
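
Every case in the switch above now walks the same three-way fallback (v3
metrics, then v2, then legacy). The patch spells the ternary chain out per
field; an equivalent accessor macro (not in the patch) shows the shape once:

    /* Illustrative only: one macro expressing the per-field fallback. */
    #define GET_METRIC(field)                             \
            (use_metrics_v3 ? metrics_v3->field :         \
             use_metrics_v2 ? metrics_v2->field :         \
                              metrics->field)

    /* e.g.: *value = GET_METRIC(CurrClock[PPCLK_GFXCLK]); */
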
@@ -1036,10 +1121,6 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
if (ret)
goto print_clk_out;
- /* no need to disable gfxoff when retrieving the current gfxclk */
- if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
- amdgpu_gfx_off_ctrl(adev, false);
-
ret = smu_v11_0_get_dpm_level_count(smu, clk_type, &count);
if (ret)
goto print_clk_out;
@@ -1168,25 +1249,18 @@ static int sienna_cichlid_print_clk_levels(struct smu_context *smu,
}
print_clk_out:
- if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
- amdgpu_gfx_off_ctrl(adev, true);
-
return size;
}
static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
enum smu_clk_type clk_type, uint32_t mask)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0;
uint32_t soft_min_level = 0, soft_max_level = 0, min_freq = 0, max_freq = 0;
soft_min_level = mask ? (ffs(mask) - 1) : 0;
soft_max_level = mask ? (fls(mask) - 1) : 0;
- if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
- amdgpu_gfx_off_ctrl(adev, false);
-
switch (clk_type) {
case SMU_GFXCLK:
case SMU_SCLK:
@@ -1220,9 +1294,6 @@ static int sienna_cichlid_force_clk_levels(struct smu_context *smu,
}
forec_level_out:
- if ((clk_type == SMU_GFXCLK) || (clk_type == SMU_SCLK))
- amdgpu_gfx_off_ctrl(adev, true);
-
return 0;
}
@@ -1238,21 +1309,37 @@ static int sienna_cichlid_populate_umd_state_clk(struct smu_context *smu)
&dpm_context->dpm_tables.soc_table;
struct smu_umd_pstate_table *pstate_table =
&smu->pstate_table;
+ struct amdgpu_device *adev = smu->adev;
pstate_table->gfxclk_pstate.min = gfx_table->min;
pstate_table->gfxclk_pstate.peak = gfx_table->max;
- if (gfx_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK)
- pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
pstate_table->uclk_pstate.min = mem_table->min;
pstate_table->uclk_pstate.peak = mem_table->max;
- if (mem_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK)
- pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.min = soc_table->min;
pstate_table->socclk_pstate.peak = soc_table->max;
- if (soc_table->max >= SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK)
+
+ switch (adev->asic_type) {
+ case CHIP_SIENNA_CICHLID:
+ case CHIP_NAVY_FLOUNDER:
+ pstate_table->gfxclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK;
pstate_table->socclk_pstate.standard = SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_DIMGREY_CAVEFISH:
+ pstate_table->gfxclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ case CHIP_BEIGE_GOBY:
+ pstate_table->gfxclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK;
+ pstate_table->uclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK;
+ pstate_table->socclk_pstate.standard = BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK;
+ break;
+ default:
+ break;
+ }
return 0;
}
@@ -1287,8 +1374,8 @@ static int sienna_cichlid_display_config_changed(struct smu_context *smu)
int ret = 0;
if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
- smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
+ smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
#if 0
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_NumOfDisplays,
smu->display_config->num_display,
@@ -1304,15 +1391,12 @@ static int sienna_cichlid_display_config_changed(struct smu_context *smu)
static bool sienna_cichlid_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
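smu_cmn_get_enabled_mask() now hands back the full 64-bit feature mask in a single call, so callers no longer stitch two 32-bit words together. A minimal caller sketch, with FEATURE_DPM_GFXCLK_BIT used purely as an illustrative bit name:

    uint64_t mask;
    bool gfx_dpm_on = false;

    /* single call replaces the old feature_mask[2] + shift sequence */
    if (!smu_cmn_get_enabled_mask(smu, &mask))
            gfx_dpm_on = !!(mask & FEATURE_MASK(FEATURE_DPM_GFXCLK_BIT));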
@@ -1527,13 +1611,13 @@ static int sienna_cichlid_notify_smc_display_config(struct smu_context *smu)
min_clocks.dcef_clock_in_sr = smu->display_config->min_dcef_deep_sleep_set_clk;
min_clocks.memory_clock = smu->display_config->min_mem_set_clock;
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT)) {
clock_req.clock_type = amd_pp_dcef_clock;
clock_req.clock_freq_in_khz = min_clocks.dcef_clock * 10;
ret = smu_v11_0_display_clock_voltage_request(smu, &clock_req);
if (!ret) {
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DS_DCEFCLK_BIT)) {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_SetMinDeepSleepDcefclk,
min_clocks.dcef_clock_in_sr/100,
@@ -1625,7 +1709,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MAX_FAN_RPM:
GET_PPTABLE_MEMBER(FanMaximumRpm, &temp);
@@ -1686,7 +1769,6 @@ static int sienna_cichlid_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1865,16 +1947,7 @@ static int sienna_cichlid_get_dpm_ultimate_freq(struct smu_context *smu,
enum smu_clk_type clk_type,
uint32_t *min, uint32_t *max)
{
- struct amdgpu_device *adev = smu->adev;
- int ret;
-
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, false);
- ret = smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, true);
-
- return ret;
+ return smu_v11_0_get_dpm_ultimate_freq(smu, clk_type, min, max);
}
static void sienna_cichlid_dump_od_table(struct smu_context *smu,
@@ -3458,18 +3531,23 @@ static void sienna_cichlid_dump_pptable(struct smu_context *smu)
static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
u16 dir;
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->I2CcontrollerPort = 1;
+ req->I2CcontrollerPort = smu_i2c->port;
req->I2CSpeed = I2C_SPEED_FAST_400K;
req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
dir = msg[0].flags & I2C_M_RD;
@@ -3505,9 +3583,9 @@ static int sienna_cichlid_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -3547,28 +3625,61 @@ static const struct i2c_adapter_quirks sienna_cichlid_i2c_control_quirks = {
.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
-static int sienna_cichlid_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int sienna_cichlid_i2c_control_init(struct smu_context *smu)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
- int res;
-
- control->owner = THIS_MODULE;
- control->class = I2C_CLASS_HWMON;
- control->dev.parent = &adev->pdev->dev;
- control->algo = &sienna_cichlid_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
- control->quirks = &sienna_cichlid_i2c_control_quirks;
+ struct amdgpu_device *adev = smu->adev;
+ int res, i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ smu_i2c->adev = adev;
+ smu_i2c->port = i;
+ mutex_init(&smu_i2c->mutex);
+ control->owner = THIS_MODULE;
+ control->class = I2C_CLASS_HWMON;
+ control->dev.parent = &adev->pdev->dev;
+ control->algo = &sienna_cichlid_i2c_algo;
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU %d", i);
+ control->quirks = &sienna_cichlid_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
+
+ res = i2c_add_adapter(control);
+ if (res) {
+ DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+ }
+ /* assign the buses used for the FRU EEPROM and RAS EEPROM */
+ /* XXX ideally this would be something in a vbios data table */
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[1].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
- res = i2c_add_adapter(control);
- if (res)
- DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ return 0;
+Out_err:
+ for ( ; i >= 0; i--) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+ i2c_del_adapter(control);
+ }
return res;
}
-static void sienna_cichlid_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void sienna_cichlid_i2c_control_fini(struct smu_context *smu)
{
- i2c_del_adapter(control);
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
}
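With one adapter registered per SMU port and published through adev->pm, EEPROM users look the bus up instead of relying on a single hardcoded adapter. A hedged consumer sketch; the slave address and buffer are illustrative, not from this commit:

    u8 buf[8];
    struct i2c_adapter *bus = adev->pm.ras_eeprom_i2c_bus;
    struct i2c_msg msg = {
            .addr  = 0x54,          /* hypothetical EEPROM address */
            .flags = I2C_M_RD,
            .len   = sizeof(buf),
            .buf   = buf,
    };

    /* i2c_transfer() returns the number of messages transferred */
    if (bus && i2c_transfer(bus, &msg, 1) == 1)
            dev_dbg(adev->dev, "read %zu bytes over SMU i2c\n", sizeof(buf));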
static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
@@ -3582,46 +3693,54 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
&(metrics_external.SmuMetrics);
SmuMetrics_V2_t *metrics_v2 =
&(metrics_external.SmuMetrics_V2);
+ SmuMetrics_V3_t *metrics_v3 =
+ &(metrics_external.SmuMetrics_V3);
struct amdgpu_device *adev = smu->adev;
- bool use_metrics_v2 = ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
- (smu->smc_fw_version >= 0x3A4300)) ? true : false;
+ bool use_metrics_v2 = false;
+ bool use_metrics_v3 = false;
uint16_t average_gfx_activity;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu,
- &metrics_external,
- true);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+ (smu->smc_fw_version >= 0x3A4900))
+ use_metrics_v3 = true;
+ else if ((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) &&
+ (smu->smc_fw_version >= 0x3A4300))
+ use_metrics_v2 = true;
+
+ ret = smu_cmn_get_metrics_table(smu,
+ &metrics_external,
+ true);
+ if (ret)
return ret;
- }
smu_cmn_init_soft_gpu_metrics(gpu_metrics, 1, 3);
- gpu_metrics->temperature_edge =
+ gpu_metrics->temperature_edge = use_metrics_v3 ? metrics_v3->TemperatureEdge :
use_metrics_v2 ? metrics_v2->TemperatureEdge : metrics->TemperatureEdge;
- gpu_metrics->temperature_hotspot =
+ gpu_metrics->temperature_hotspot = use_metrics_v3 ? metrics_v3->TemperatureHotspot :
use_metrics_v2 ? metrics_v2->TemperatureHotspot : metrics->TemperatureHotspot;
- gpu_metrics->temperature_mem =
+ gpu_metrics->temperature_mem = use_metrics_v3 ? metrics_v3->TemperatureMem :
use_metrics_v2 ? metrics_v2->TemperatureMem : metrics->TemperatureMem;
- gpu_metrics->temperature_vrgfx =
+ gpu_metrics->temperature_vrgfx = use_metrics_v3 ? metrics_v3->TemperatureVrGfx :
use_metrics_v2 ? metrics_v2->TemperatureVrGfx : metrics->TemperatureVrGfx;
- gpu_metrics->temperature_vrsoc =
+ gpu_metrics->temperature_vrsoc = use_metrics_v3 ? metrics_v3->TemperatureVrSoc :
use_metrics_v2 ? metrics_v2->TemperatureVrSoc : metrics->TemperatureVrSoc;
- gpu_metrics->temperature_vrmem =
+ gpu_metrics->temperature_vrmem = use_metrics_v3 ? metrics_v3->TemperatureVrMem0 :
use_metrics_v2 ? metrics_v2->TemperatureVrMem0 : metrics->TemperatureVrMem0;
- gpu_metrics->average_gfx_activity =
+ gpu_metrics->average_gfx_activity = use_metrics_v3 ? metrics_v3->AverageGfxActivity :
use_metrics_v2 ? metrics_v2->AverageGfxActivity : metrics->AverageGfxActivity;
- gpu_metrics->average_umc_activity =
+ gpu_metrics->average_umc_activity = use_metrics_v3 ? metrics_v3->AverageUclkActivity :
use_metrics_v2 ? metrics_v2->AverageUclkActivity : metrics->AverageUclkActivity;
- gpu_metrics->average_mm_activity =
+ gpu_metrics->average_mm_activity = use_metrics_v3 ?
+ (metrics_v3->VcnUsagePercentage0 + metrics_v3->VcnUsagePercentage1) / 2 :
use_metrics_v2 ? metrics_v2->VcnActivityPercentage : metrics->VcnActivityPercentage;
- gpu_metrics->average_socket_power =
+ gpu_metrics->average_socket_power = use_metrics_v3 ? metrics_v3->AverageSocketPower :
use_metrics_v2 ? metrics_v2->AverageSocketPower : metrics->AverageSocketPower;
- gpu_metrics->energy_accumulator =
+ gpu_metrics->energy_accumulator = use_metrics_v3 ? metrics_v3->EnergyAccumulator :
use_metrics_v2 ? metrics_v2->EnergyAccumulator : metrics->EnergyAccumulator;
if (metrics->CurrGfxVoltageOffset)
@@ -3634,37 +3753,45 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
gpu_metrics->voltage_soc =
(155000 - 625 * metrics->CurrSocVoltageOffset) / 100;
- average_gfx_activity = use_metrics_v2 ? metrics_v2->AverageGfxActivity : metrics->AverageGfxActivity;
+ average_gfx_activity = use_metrics_v3 ? metrics_v3->AverageGfxActivity :
+ use_metrics_v2 ? metrics_v2->AverageGfxActivity : metrics->AverageGfxActivity;
if (average_gfx_activity <= SMU_11_0_7_GFX_BUSY_THRESHOLD)
gpu_metrics->average_gfxclk_frequency =
- use_metrics_v2 ? metrics_v2->AverageGfxclkFrequencyPostDs : metrics->AverageGfxclkFrequencyPostDs;
+ use_metrics_v3 ? metrics_v3->AverageGfxclkFrequencyPostDs :
+ use_metrics_v2 ? metrics_v2->AverageGfxclkFrequencyPostDs :
+ metrics->AverageGfxclkFrequencyPostDs;
else
gpu_metrics->average_gfxclk_frequency =
- use_metrics_v2 ? metrics_v2->AverageGfxclkFrequencyPreDs : metrics->AverageGfxclkFrequencyPreDs;
+ use_metrics_v3 ? metrics_v3->AverageGfxclkFrequencyPreDs :
+ use_metrics_v2 ? metrics_v2->AverageGfxclkFrequencyPreDs :
+ metrics->AverageGfxclkFrequencyPreDs;
+
gpu_metrics->average_uclk_frequency =
- use_metrics_v2 ? metrics_v2->AverageUclkFrequencyPostDs : metrics->AverageUclkFrequencyPostDs;
- gpu_metrics->average_vclk0_frequency =
+ use_metrics_v3 ? metrics_v3->AverageUclkFrequencyPostDs :
+ use_metrics_v2 ? metrics_v2->AverageUclkFrequencyPostDs :
+ metrics->AverageUclkFrequencyPostDs;
+ gpu_metrics->average_vclk0_frequency = use_metrics_v3 ? metrics_v3->AverageVclk0Frequency :
use_metrics_v2 ? metrics_v2->AverageVclk0Frequency : metrics->AverageVclk0Frequency;
- gpu_metrics->average_dclk0_frequency =
+ gpu_metrics->average_dclk0_frequency = use_metrics_v3 ? metrics_v3->AverageDclk0Frequency :
use_metrics_v2 ? metrics_v2->AverageDclk0Frequency : metrics->AverageDclk0Frequency;
- gpu_metrics->average_vclk1_frequency =
+ gpu_metrics->average_vclk1_frequency = use_metrics_v3 ? metrics_v3->AverageVclk1Frequency :
use_metrics_v2 ? metrics_v2->AverageVclk1Frequency : metrics->AverageVclk1Frequency;
- gpu_metrics->average_dclk1_frequency =
+ gpu_metrics->average_dclk1_frequency = use_metrics_v3 ? metrics_v3->AverageDclk1Frequency :
use_metrics_v2 ? metrics_v2->AverageDclk1Frequency : metrics->AverageDclk1Frequency;
- gpu_metrics->current_gfxclk =
+ gpu_metrics->current_gfxclk = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_GFXCLK] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_GFXCLK] : metrics->CurrClock[PPCLK_GFXCLK];
- gpu_metrics->current_socclk =
+ gpu_metrics->current_socclk = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_SOCCLK] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_SOCCLK] : metrics->CurrClock[PPCLK_SOCCLK];
- gpu_metrics->current_uclk =
+ gpu_metrics->current_uclk = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_UCLK] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_UCLK] : metrics->CurrClock[PPCLK_UCLK];
- gpu_metrics->current_vclk0 =
+ gpu_metrics->current_vclk0 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_VCLK_0] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_VCLK_0] : metrics->CurrClock[PPCLK_VCLK_0];
- gpu_metrics->current_dclk0 =
+ gpu_metrics->current_dclk0 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_0] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_0] : metrics->CurrClock[PPCLK_DCLK_0];
- gpu_metrics->current_vclk1 =
+ gpu_metrics->current_vclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_VCLK_1] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_VCLK_1] : metrics->CurrClock[PPCLK_VCLK_1];
- gpu_metrics->current_dclk1 =
+ gpu_metrics->current_dclk1 = use_metrics_v3 ? metrics_v3->CurrClock[PPCLK_DCLK_1] :
use_metrics_v2 ? metrics_v2->CurrClock[PPCLK_DCLK_1] : metrics->CurrClock[PPCLK_DCLK_1];
gpu_metrics->throttle_status = sienna_cichlid_get_throttler_status_locked(smu);
@@ -3672,12 +3799,15 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
smu_cmn_get_indep_throttler_status(gpu_metrics->throttle_status,
sienna_cichlid_throttler_map);
- gpu_metrics->current_fan_speed = use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed;
+ gpu_metrics->current_fan_speed = use_metrics_v3 ? metrics_v3->CurrFanSpeed :
+ use_metrics_v2 ? metrics_v2->CurrFanSpeed : metrics->CurrFanSpeed;
if (((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 7)) && smu->smc_fw_version > 0x003A1E00) ||
((adev->ip_versions[MP1_HWIP][0] == IP_VERSION(11, 0, 11)) && smu->smc_fw_version > 0x00410400)) {
- gpu_metrics->pcie_link_width = use_metrics_v2 ? metrics_v2->PcieWidth : metrics->PcieWidth;
- gpu_metrics->pcie_link_speed = link_speed[use_metrics_v2 ? metrics_v2->PcieRate : metrics->PcieRate];
+ gpu_metrics->pcie_link_width = use_metrics_v3 ? metrics_v3->PcieWidth :
+ use_metrics_v2 ? metrics_v2->PcieWidth : metrics->PcieWidth;
+ gpu_metrics->pcie_link_speed = link_speed[use_metrics_v3 ? metrics_v3->PcieRate :
+ use_metrics_v2 ? metrics_v2->PcieRate : metrics->PcieRate];
} else {
gpu_metrics->pcie_link_width =
smu_v11_0_get_current_pcie_link_width(smu);
@@ -3685,8 +3815,6 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
smu_v11_0_get_current_pcie_link_speed(smu);
}
- mutex_unlock(&smu->metrics_lock);
-
gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
*table = (void *)gpu_metrics;
@@ -3694,6 +3822,60 @@ static ssize_t sienna_cichlid_get_gpu_metrics(struct smu_context *smu,
return sizeof(struct gpu_metrics_v1_3);
}
+static int sienna_cichlid_check_ecc_table_support(struct smu_context *smu)
+{
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret)
+ return -EOPNOTSUPP;
+
+ if (smu_version < SUPPORT_ECCTABLE_SMU_VERSION)
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
+static ssize_t sienna_cichlid_get_ecc_info(struct smu_context *smu,
+ void *table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ EccInfoTable_t *ecc_table = NULL;
+ struct ecc_info_per_ch *ecc_info_per_channel = NULL;
+ int i, ret = 0;
+ struct umc_ecc_info *eccinfo = (struct umc_ecc_info *)table;
+
+ ret = sienna_cichlid_check_ecc_table_support(smu);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_update_table(smu,
+ SMU_TABLE_ECCINFO,
+ 0,
+ smu_table->ecc_table,
+ false);
+ if (ret) {
+ dev_info(smu->adev->dev, "Failed to export SMU ecc table!\n");
+ return ret;
+ }
+
+ ecc_table = (EccInfoTable_t *)smu_table->ecc_table;
+
+ for (i = 0; i < SIENNA_CICHLID_UMC_CHANNEL_NUM; i++) {
+ ecc_info_per_channel = &(eccinfo->ecc[i]);
+ ecc_info_per_channel->ce_count_lo_chip =
+ ecc_table->EccInfo[i].ce_count_lo_chip;
+ ecc_info_per_channel->ce_count_hi_chip =
+ ecc_table->EccInfo[i].ce_count_hi_chip;
+ ecc_info_per_channel->mca_umc_status =
+ ecc_table->EccInfo[i].mca_umc_status;
+ ecc_info_per_channel->mca_umc_addr =
+ ecc_table->EccInfo[i].mca_umc_addr;
+ }
+
+ return ret;
+}
static int sienna_cichlid_enable_mgpu_fan_boost(struct smu_context *smu)
{
uint16_t *mgpu_fan_boost_limit_rpm;
@@ -3719,7 +3901,7 @@ static int sienna_cichlid_gpo_control(struct smu_context *smu,
int ret = 0;
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFX_GPO_BIT)) {
ret = smu_cmn_get_smc_version(smu, NULL, &smu_version);
if (ret)
return ret;
@@ -3832,9 +4014,61 @@ static void sienna_cichlid_stb_init(struct smu_context *smu)
}
-int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
- void *buf,
- uint32_t size)
+static int sienna_cichlid_get_default_config_table_settings(struct smu_context *smu,
+ struct config_table_setting *table)
+{
+ struct amdgpu_device *adev = smu->adev;
+
+ if (!table)
+ return -EINVAL;
+
+ table->gfxclk_average_tau = 10;
+ table->socclk_average_tau = 10;
+ table->fclk_average_tau = 10;
+ table->uclk_average_tau = 10;
+ table->gfx_activity_average_tau = 10;
+ table->mem_activity_average_tau = 10;
+ table->socket_power_average_tau = 100;
+ if (adev->asic_type != CHIP_SIENNA_CICHLID)
+ table->apu_socket_power_average_tau = 100;
+
+ return 0;
+}
+
+static int sienna_cichlid_set_config_table(struct smu_context *smu,
+ struct config_table_setting *table)
+{
+ DriverSmuConfigExternal_t driver_smu_config_table;
+
+ if (!table)
+ return -EINVAL;
+
+ memset(&driver_smu_config_table,
+ 0,
+ sizeof(driver_smu_config_table));
+ driver_smu_config_table.DriverSmuConfig.GfxclkAverageLpfTau =
+ table->gfxclk_average_tau;
+ driver_smu_config_table.DriverSmuConfig.FclkAverageLpfTau =
+ table->fclk_average_tau;
+ driver_smu_config_table.DriverSmuConfig.UclkAverageLpfTau =
+ table->uclk_average_tau;
+ driver_smu_config_table.DriverSmuConfig.GfxActivityLpfTau =
+ table->gfx_activity_average_tau;
+ driver_smu_config_table.DriverSmuConfig.UclkActivityLpfTau =
+ table->mem_activity_average_tau;
+ driver_smu_config_table.DriverSmuConfig.SocketPowerLpfTau =
+ table->socket_power_average_tau;
+
+ return smu_cmn_update_table(smu,
+ SMU_TABLE_DRIVER_SMU_CONFIG,
+ 0,
+ (void *)&driver_smu_config_table,
+ true);
+}
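These two hooks give a get-defaults, adjust, commit flow for the metrics averaging windows. A sketch of a caller, assuming the hooks are reached directly (in practice they are wired up through the pptable_funcs table below; the tau override is an illustrative value):

    struct config_table_setting table;
    int ret;

    ret = sienna_cichlid_get_default_config_table_settings(smu, &table);
    if (ret)
            return ret;

    table.gfx_activity_average_tau = 50;    /* hypothetical override */

    return sienna_cichlid_set_config_table(smu, &table);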
+
+static int sienna_cichlid_stb_get_data_direct(struct smu_context *smu,
+ void *buf,
+ uint32_t size)
{
uint32_t *p = buf;
struct amdgpu_device *adev = smu->adev;
@@ -3945,6 +4179,9 @@ static const struct pptable_funcs sienna_cichlid_ppt_funcs = {
.gpo_control = sienna_cichlid_gpo_control,
.set_mp1_state = sienna_cichlid_set_mp1_state,
.stb_collect_info = sienna_cichlid_stb_get_data_direct,
+ .get_ecc_info = sienna_cichlid_get_ecc_info,
+ .get_default_config_table_settings = sienna_cichlid_get_default_config_table_settings,
+ .set_config_table = sienna_cichlid_set_config_table,
};
void sienna_cichlid_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
index 38cd0ece24f6..42f705c7a36f 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/sienna_cichlid_ppt.h
@@ -33,6 +33,14 @@ typedef enum {
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_SOCCLK 960
#define SIENNA_CICHLID_UMD_PSTATE_PROFILING_MEMCLK 1000
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_GFXCLK 1950
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_SOCCLK 960
+#define DIMGREY_CAVEFISH_UMD_PSTATE_PROFILING_MEMCLK 676
+
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_GFXCLK 2200
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_SOCCLK 960
+#define BEIGE_GOBY_UMD_PSTATE_PROFILING_MEMCLK 1000
+
extern void sienna_cichlid_set_ppt_funcs(struct smu_context *smu);
#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
index 4e9e2cf39859..b87f550af26b 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/smu_v11_0.c
@@ -225,15 +225,15 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
- uint16_t smu_major;
- uint8_t smu_minor, smu_debug;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret = 0;
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
- smu_major = (smu_version >> 16) & 0xffff;
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu)
@@ -287,9 +287,9 @@ int smu_v11_0_check_fw_version(struct smu_context *smu)
*/
if (if_version != smu->smc_driver_if_version) {
dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw version = 0x%08x (%d.%d.%d)\n",
+ "smu fw program = %d, version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
- smu_version, smu_major, smu_minor, smu_debug);
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
}
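The firmware version dword is now decoded as four byte-wide fields, with the new top byte carrying the program id. A standalone sketch of the decode using a made-up version value:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t smu_version = 0x043a4900;      /* hypothetical value */
            uint8_t smu_program = (smu_version >> 24) & 0xff;
            uint8_t smu_major   = (smu_version >> 16) & 0xff;
            uint8_t smu_minor   = (smu_version >>  8) & 0xff;
            uint8_t smu_debug   =  smu_version        & 0xff;

            printf("program %u, version %u.%u.%u\n",
                   smu_program, smu_major, smu_minor, smu_debug);
            return 0;
    }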
@@ -473,8 +473,12 @@ int smu_v11_0_fini_smc_tables(struct smu_context *smu)
kfree(smu_table->hardcode_pptable);
smu_table->hardcode_pptable = NULL;
+ kfree(smu_table->driver_smu_config_table);
+ kfree(smu_table->ecc_table);
kfree(smu_table->metrics_table);
kfree(smu_table->watermarks_table);
+ smu_table->driver_smu_config_table = NULL;
+ smu_table->ecc_table = NULL;
smu_table->metrics_table = NULL;
smu_table->watermarks_table = NULL;
smu_table->metrics_time = 0;
@@ -796,30 +800,8 @@ failed:
int smu_v11_0_system_features_control(struct smu_context *smu,
bool en)
{
- struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_mask[2];
- int ret = 0;
-
- ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures), NULL);
- if (ret)
- return ret;
-
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
-
- if (en) {
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
- }
-
- return ret;
+ return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures), NULL);
}
int smu_v11_0_notify_display_change(struct smu_context *smu)
@@ -1372,7 +1354,7 @@ static int smu_v11_0_set_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t low, high;
uint32_t val = 0;
@@ -1441,7 +1423,7 @@ static int smu_v11_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
/*
@@ -1615,13 +1597,8 @@ bool smu_v11_0_baco_is_support(struct smu_context *smu)
enum smu_baco_state smu_v11_0_baco_get_state(struct smu_context *smu)
{
struct smu_baco_context *smu_baco = &smu->smu_baco;
- enum smu_baco_state baco_state;
-
- mutex_lock(&smu_baco->mutex);
- baco_state = smu_baco->state;
- mutex_unlock(&smu_baco->mutex);
- return baco_state;
+ return smu_baco->state;
}
#define D3HOT_BACO_SEQUENCE 0
@@ -1638,8 +1615,6 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
if (smu_v11_0_baco_get_state(smu) == state)
return 0;
- mutex_lock(&smu_baco->mutex);
-
if (state == SMU_BACO_STATE_ENTER) {
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(11, 0, 7):
@@ -1680,18 +1655,16 @@ int smu_v11_0_baco_set_state(struct smu_context *smu, enum smu_baco_state state)
} else {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_ExitBaco, NULL);
if (ret)
- goto out;
+ return ret;
/* clear vbios scratch 6 and 7 for the upcoming asic reinit */
WREG32(adev->bios_scratch_reg_offset + 6, 0);
WREG32(adev->bios_scratch_reg_offset + 7, 0);
}
- if (ret)
- goto out;
- smu_baco->state = state;
-out:
- mutex_unlock(&smu_baco->mutex);
+ if (!ret)
+ smu_baco->state = state;
+
return ret;
}
@@ -1798,7 +1771,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
uint32_t min,
uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1811,9 +1783,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
if (clk_id < 0)
return clk_id;
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, false);
-
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
@@ -1831,9 +1800,6 @@ int smu_v11_0_set_soft_freq_limited_range(struct smu_context *smu,
}
out:
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, true);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
index 5cb07ed227fb..5551e1426ef5 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu11/vangogh_ppt.c
@@ -273,15 +273,11 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
SmuMetrics_legacy_t *metrics = (SmuMetrics_legacy_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -335,8 +331,6 @@ static int vangogh_get_legacy_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -348,15 +342,11 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -410,8 +400,6 @@ static int vangogh_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -512,21 +500,17 @@ static bool vangogh_is_dpm_running(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
/* we need to re-init after suspend so return false */
if (adev->in_suspend)
return false;
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
@@ -1400,7 +1384,7 @@ static int vangogh_set_peak_clock_by_device(struct smu_context *smu)
static int vangogh_set_performance_level(struct smu_context *smu,
enum amd_dpm_forced_level level)
{
- int ret = 0;
+ int ret = 0, i;
uint32_t soc_mask, mclk_mask, fclk_mask;
uint32_t vclk_mask = 0, dclk_mask = 0;
@@ -1494,6 +1478,24 @@ static int vangogh_set_performance_level(struct smu_context *smu,
if (ret)
return ret;
+ if (smu->adev->pm.fw_version >= 0x43f1b00) {
+ for (i = 0; i < smu->cpu_core_num; i++) {
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMinCclk,
+ ((i << 20)
+ | smu->cpu_actual_soft_min_freq),
+ NULL);
+ if (ret)
+ return ret;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxCclk,
+ ((i << 20)
+ | smu->cpu_actual_soft_max_freq),
+ NULL);
+ if (ret)
+ return ret;
+ }
+ }
+
return ret;
}
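Each CCLK message packs the CPU core index into the upper bits of its 32-bit parameter, with the soft frequency in the low bits. A sketch of the encoding; the 20-bit frequency field width is inferred from the shift and should be treated as an assumption:

    /* bits 31..20: core index; bits 19..0: soft min/max freq (assumed) */
    static inline uint32_t vangogh_cclk_param(unsigned int core, uint32_t freq)
    {
            return (core << 20) | (freq & 0xfffff);
    }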
@@ -1506,7 +1508,6 @@ static int vangogh_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = vangogh_common_get_smu_metrics_data(smu,
@@ -1568,7 +1569,6 @@ static int vangogh_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1965,30 +1965,13 @@ static int vangogh_get_dpm_clock_table(struct smu_context *smu, struct dpm_clock
static int vangogh_system_features_control(struct smu_context *smu, bool en)
{
struct amdgpu_device *adev = smu->adev;
- struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_mask[2];
int ret = 0;
if (adev->pm.fw_version >= 0x43f1700 && !en)
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_RlcPowerNotify,
RLC_STATUS_OFF, NULL);
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
-
- if (!en)
- return ret;
-
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
-
- return 0;
+ return ret;
}
static int vangogh_post_smu_init(struct smu_context *smu)
@@ -2003,7 +1986,7 @@ static int vangogh_post_smu_init(struct smu_context *smu)
adev->gfx.config.max_sh_per_se * adev->gfx.config.max_shader_engines;
/* the allow message can only be sent after the enable message on Vangogh */
- if (smu_cmn_feature_is_supported(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
+ if (smu_cmn_feature_is_enabled(smu, SMU_FEATURE_DPM_GFXCLK_BIT) &&
(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_EnableGfxOff, NULL);
if (ret) {
@@ -2196,7 +2179,7 @@ static const struct pptable_funcs vangogh_ppt_funcs = {
.dpm_set_jpeg_enable = vangogh_dpm_set_jpeg_enable,
.is_dpm_running = vangogh_is_dpm_running,
.read_sensor = vangogh_read_sensor,
- .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_watermarks_table = vangogh_set_watermarks_table,
.set_driver_table_location = smu_v11_0_set_driver_table_location,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
index 25c4b135f830..fd6c44ece168 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/renoir_ppt.c
@@ -1128,15 +1128,11 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_AVERAGE_GFXCLK:
@@ -1201,8 +1197,6 @@ static int renoir_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1215,7 +1209,6 @@ static int renoir_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = renoir_get_smu_metrics_data(smu,
@@ -1283,7 +1276,6 @@ static int renoir_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1374,6 +1366,16 @@ static int renoir_gfx_state_change_set(struct smu_context *smu, uint32_t state)
return 0;
}
+static int renoir_get_enabled_mask(struct smu_context *smu,
+ uint64_t *feature_mask)
+{
+ if (!feature_mask)
+ return -EINVAL;
+ memset(feature_mask, 0xff, sizeof(*feature_mask));
+
+ return 0;
+}
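Renoir's firmware exposes no enabled-feature query, so the replacement hook reports every bit set and feature checks always pass. The observable effect, as a caller sketch:

    uint64_t mask = 0;

    renoir_get_enabled_mask(smu, &mask);
    /* mask is now 0xffffffffffffffff: every feature reads as enabled */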
+
static const struct pptable_funcs renoir_ppt_funcs = {
.set_power_state = NULL,
.print_clk_levels = renoir_print_clk_levels,
@@ -1398,7 +1400,7 @@ static const struct pptable_funcs renoir_ppt_funcs = {
.init_smc_tables = renoir_init_smc_tables,
.fini_smc_tables = smu_v12_0_fini_smc_tables,
.set_default_dpm_table = smu_v12_0_set_default_dpm_tables,
- .get_enabled_mask = smu_cmn_get_enabled_mask,
+ .get_enabled_mask = renoir_get_enabled_mask,
.feature_is_enabled = smu_cmn_feature_is_enabled,
.disable_all_features_with_exception = smu_cmn_disable_all_features_with_exception,
.get_dpm_ultimate_freq = renoir_get_dpm_ultimate_freq,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
index 9c91e79c955f..56a02bc60cee 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu12/smu_v12_0.c
@@ -74,15 +74,15 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
- uint16_t smu_major;
- uint8_t smu_minor, smu_debug;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret = 0;
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
- smu_major = (smu_version >> 16) & 0xffff;
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu)
@@ -98,9 +98,9 @@ int smu_v12_0_check_fw_version(struct smu_context *smu)
*/
if (if_version != smu->smc_driver_if_version) {
dev_info(smu->adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw version = 0x%08x (%d.%d.%d)\n",
+ "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
- smu_version, smu_major, smu_minor, smu_debug);
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_warn(smu->adev->dev, "SMU driver if version not matched\n");
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
index d4c4c495762c..613cdd0d8e83 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/Makefile
@@ -23,7 +23,7 @@
# Makefile for the 'smu manager' sub-component of powerplay.
# It provides the smu management services for the driver.
-SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o
+SMU13_MGR = smu_v13_0.o aldebaran_ppt.o yellow_carp_ppt.o smu_v13_0_5_ppt.o
AMD_SWSMU_SMU13MGR = $(addprefix $(AMD_SWSMU_PATH)/smu13/,$(SMU13_MGR))
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
index 4885c4ae78b7..cd81f848d45a 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/aldebaran_ppt.c
@@ -25,6 +25,7 @@
#include <linux/firmware.h>
#include "amdgpu.h"
+#include "amdgpu_dpm.h"
#include "amdgpu_smu.h"
#include "atomfirmware.h"
#include "amdgpu_atomfirmware.h"
@@ -33,7 +34,6 @@
#include "smu13_driver_if_aldebaran.h"
#include "soc15_common.h"
#include "atom.h"
-#include "power_state.h"
#include "aldebaran_ppt.h"
#include "smu_v13_0_pptable.h"
#include "aldebaran_ppsmc.h"
@@ -57,8 +57,6 @@
#undef pr_info
#undef pr_debug
-#define to_amdgpu_device(x) (container_of(x, struct amdgpu_device, pm.smu_i2c))
-
#define ALDEBARAN_FEA_MAP(smu_feature, aldebaran_feature) \
[smu_feature] = {1, (aldebaran_feature)}
@@ -84,6 +82,12 @@
*/
#define SUPPORT_ECCTABLE_SMU_VERSION 0x00442a00
+/*
+ * SMU supports the BAD CHANNEL info message since version 68.51.00;
+ * use this to check whether bad channel info reporting is supported.
+ */
+#define SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION 0x00443300
+
static const struct smu_temperature_range smu13_thermal_policy[] =
{
{-273150, 99000, 99000, -273150, 99000, 99000, -273150, 99000, 99000},
@@ -142,6 +146,7 @@ static const struct cmn2asic_msg_mapping aldebaran_message_map[SMU_MSG_MAX_COUNT
MSG_MAP(GfxDriverResetRecovery, PPSMC_MSG_GfxDriverResetRecovery, 0),
MSG_MAP(BoardPowerCalibration, PPSMC_MSG_BoardPowerCalibration, 0),
MSG_MAP(HeavySBR, PPSMC_MSG_HeavySBR, 0),
+ MSG_MAP(SetBadHBMPagesRetiredFlagsPerChannel, PPSMC_MSG_SetBadHBMPagesRetiredFlagsPerChannel, 0),
};
static const struct cmn2asic_mapping aldebaran_clk_map[SMU_CLK_COUNT] = {
@@ -572,15 +577,11 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu,
- NULL,
- false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu,
+ NULL,
+ false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_CURR_GFXCLK:
@@ -654,8 +655,6 @@ static int aldebaran_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -1148,7 +1147,6 @@ static int aldebaran_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_MEM_LOAD:
case AMDGPU_PP_SENSOR_GPU_LOAD:
@@ -1187,7 +1185,6 @@ static int aldebaran_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1460,32 +1457,34 @@ static int aldebaran_usr_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_
static bool aldebaran_is_dpm_running(struct smu_context *smu)
{
int ret;
- uint32_t feature_mask[2];
- unsigned long feature_enabled;
+ uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (unsigned long)((uint64_t)feature_mask[0] |
- ((uint64_t)feature_mask[1] << 32));
return !!(feature_enabled & SMC_DPM_FEATURE);
}
static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
struct i2c_msg *msg, int num_msgs)
{
- struct amdgpu_device *adev = to_amdgpu_device(i2c_adap);
- struct smu_table_context *smu_table = &adev->smu.smu_table;
+ struct amdgpu_smu_i2c_bus *smu_i2c = i2c_get_adapdata(i2c_adap);
+ struct amdgpu_device *adev = smu_i2c->adev;
+ struct smu_context *smu = adev->powerplay.pp_handle;
+ struct smu_table_context *smu_table = &smu->smu_table;
struct smu_table *table = &smu_table->driver_table;
SwI2cRequest_t *req, *res = (SwI2cRequest_t *)table->cpu_addr;
int i, j, r, c;
u16 dir;
+ if (!adev->pm.dpm_enabled)
+ return -EBUSY;
+
req = kzalloc(sizeof(*req), GFP_KERNEL);
if (!req)
return -ENOMEM;
- req->I2CcontrollerPort = 0;
+ req->I2CcontrollerPort = smu_i2c->port;
req->I2CSpeed = I2C_SPEED_FAST_400K;
req->SlaveAddress = msg[0].addr << 1; /* wants an 8-bit address */
dir = msg[0].flags & I2C_M_RD;
@@ -1521,9 +1520,9 @@ static int aldebaran_i2c_xfer(struct i2c_adapter *i2c_adap,
}
}
}
- mutex_lock(&adev->smu.mutex);
- r = smu_cmn_update_table(&adev->smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
- mutex_unlock(&adev->smu.mutex);
+ mutex_lock(&adev->pm.mutex);
+ r = smu_cmn_update_table(smu, SMU_TABLE_I2C_COMMANDS, 0, req, true);
+ mutex_unlock(&adev->pm.mutex);
if (r)
goto fail;
@@ -1563,28 +1562,53 @@ static const struct i2c_adapter_quirks aldebaran_i2c_control_quirks = {
.max_comb_2nd_msg_len = MAX_SW_I2C_COMMANDS - 2,
};
-static int aldebaran_i2c_control_init(struct smu_context *smu, struct i2c_adapter *control)
+static int aldebaran_i2c_control_init(struct smu_context *smu)
{
- struct amdgpu_device *adev = to_amdgpu_device(control);
+ struct amdgpu_device *adev = smu->adev;
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[0];
+ struct i2c_adapter *control = &smu_i2c->adapter;
int res;
+ smu_i2c->adev = adev;
+ smu_i2c->port = 0;
+ mutex_init(&smu_i2c->mutex);
control->owner = THIS_MODULE;
control->class = I2C_CLASS_SPD;
control->dev.parent = &adev->pdev->dev;
control->algo = &aldebaran_i2c_algo;
- snprintf(control->name, sizeof(control->name), "AMDGPU SMU");
+ snprintf(control->name, sizeof(control->name), "AMDGPU SMU 0");
control->quirks = &aldebaran_i2c_control_quirks;
+ i2c_set_adapdata(control, smu_i2c);
res = i2c_add_adapter(control);
- if (res)
+ if (res) {
DRM_ERROR("Failed to register hw i2c, err: %d\n", res);
+ goto Out_err;
+ }
+
+ adev->pm.ras_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+ adev->pm.fru_eeprom_i2c_bus = &adev->pm.smu_i2c[0].adapter;
+
+ return 0;
+Out_err:
+ i2c_del_adapter(control);
return res;
}
-static void aldebaran_i2c_control_fini(struct smu_context *smu, struct i2c_adapter *control)
+static void aldebaran_i2c_control_fini(struct smu_context *smu)
{
- i2c_del_adapter(control);
+ struct amdgpu_device *adev = smu->adev;
+ int i;
+
+ for (i = 0; i < MAX_SMU_I2C_BUSES; i++) {
+ struct amdgpu_smu_i2c_bus *smu_i2c = &adev->pm.smu_i2c[i];
+ struct i2c_adapter *control = &smu_i2c->adapter;
+
+ i2c_del_adapter(control);
+ }
+ adev->pm.ras_eeprom_i2c_bus = NULL;
+ adev->pm.fru_eeprom_i2c_bus = NULL;
}
static void aldebaran_get_unique_id(struct smu_context *smu)
@@ -1594,17 +1618,14 @@ static void aldebaran_get_unique_id(struct smu_context *smu)
uint32_t upper32 = 0, lower32 = 0;
int ret;
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
if (ret)
- goto out_unlock;
+ goto out;
upper32 = metrics->PublicSerialNumUpper32;
lower32 = metrics->PublicSerialNumLower32;
-out_unlock:
- mutex_unlock(&smu->metrics_lock);
-
+out:
adev->unique_id = ((uint64_t)upper32 << 32) | lower32;
if (adev->serial[0] == '\0')
sprintf(adev->serial, "%016llx", adev->unique_id);
@@ -1983,6 +2004,41 @@ static int aldebaran_smu_send_hbm_bad_page_num(struct smu_context *smu,
return ret;
}
+static int aldebaran_check_bad_channel_info_support(struct smu_context *smu)
+{
+ uint32_t if_version = 0xff, smu_version = 0xff;
+ int ret = 0;
+
+ ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
+ if (ret) {
+ /* report not supported if we failed to get the smu version */
+ ret = -EOPNOTSUPP;
+ }
+
+ if (smu_version < SUPPORT_BAD_CHANNEL_INFO_MSG_VERSION)
+ ret = -EOPNOTSUPP;
+
+ return ret;
+}
+
+static int aldebaran_send_hbm_bad_channel_flag(struct smu_context *smu,
+ uint32_t size)
+{
+ int ret = 0;
+
+ ret = aldebaran_check_bad_channel_info_support(smu);
+ if (ret)
+ return ret;
+
+ /* message SMU to update the bad channel info on SMUBUS */
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetBadHBMPagesRetiredFlagsPerChannel, size, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "[%s] failed to send message to SMU to update HBM bad channel info\n",
+ __func__);
+
+ return ret;
+}
+
static const struct pptable_funcs aldebaran_ppt_funcs = {
/* init dpm */
.get_allowed_feature_mask = aldebaran_get_allowed_feature_mask,
@@ -2048,6 +2104,7 @@ static const struct pptable_funcs aldebaran_ppt_funcs = {
.i2c_fini = aldebaran_i2c_control_fini,
.send_hbm_bad_pages_num = aldebaran_smu_send_hbm_bad_page_num,
.get_ecc_info = aldebaran_get_ecc_info,
+ .send_hbm_bad_channel_flag = aldebaran_send_hbm_bad_channel_flag,
};
void aldebaran_set_ppt_funcs(struct smu_context *smu)
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
index b54790d3483e..cf09e30bdfe0 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0.c
@@ -198,15 +198,15 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
{
struct amdgpu_device *adev = smu->adev;
uint32_t if_version = 0xff, smu_version = 0xff;
- uint16_t smu_major;
- uint8_t smu_minor, smu_debug;
+ uint8_t smu_program, smu_major, smu_minor, smu_debug;
int ret = 0;
ret = smu_cmn_get_smc_version(smu, &if_version, &smu_version);
if (ret)
return ret;
- smu_major = (smu_version >> 16) & 0xffff;
+ smu_program = (smu_version >> 24) & 0xff;
+ smu_major = (smu_version >> 16) & 0xff;
smu_minor = (smu_version >> 8) & 0xff;
smu_debug = (smu_version >> 0) & 0xff;
if (smu->is_apu)
@@ -218,8 +218,12 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
break;
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 8):
smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_YELLOW_CARP;
break;
+ case IP_VERSION(13, 0, 5):
+ smu->smc_driver_if_version = SMU13_DRIVER_IF_VERSION_SMU_V13_0_5;
+ break;
default:
dev_err(adev->dev, "smu unsupported IP version: 0x%x.\n",
adev->ip_versions[MP1_HWIP][0]);
@@ -229,8 +233,8 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
/* only for dGPU w/ SMU13*/
if (adev->pm.fw)
- dev_dbg(adev->dev, "smu fw reported version = 0x%08x (%d.%d.%d)\n",
- smu_version, smu_major, smu_minor, smu_debug);
+ dev_dbg(smu->adev->dev, "smu fw reported program %d, version = 0x%08x (%d.%d.%d)\n",
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
/*
* 1. if_version mismatch is not critical as our fw is designed
@@ -242,9 +246,9 @@ int smu_v13_0_check_fw_version(struct smu_context *smu)
*/
if (if_version != smu->smc_driver_if_version) {
dev_info(adev->dev, "smu driver if version = 0x%08x, smu fw if version = 0x%08x, "
- "smu fw version = 0x%08x (%d.%d.%d)\n",
+ "smu fw program = %d, smu fw version = 0x%08x (%d.%d.%d)\n",
smu->smc_driver_if_version, if_version,
- smu_version, smu_major, smu_minor, smu_debug);
+ smu_program, smu_version, smu_major, smu_minor, smu_debug);
dev_warn(adev->dev, "SMU driver if version not matched\n");
}
@@ -722,25 +726,21 @@ int smu_v13_0_set_allowed_mask(struct smu_context *smu)
int ret = 0;
uint32_t feature_mask[2];
- mutex_lock(&feature->mutex);
- if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) || feature->feature_num < 64)
- goto failed;
+ if (bitmap_empty(feature->allowed, SMU_FEATURE_MAX) ||
+ feature->feature_num < 64)
+ return -EINVAL;
bitmap_copy((unsigned long *)feature_mask, feature->allowed, 64);
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskHigh,
feature_mask[1], NULL);
if (ret)
- goto failed;
-
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetAllowedFeaturesMaskLow,
- feature_mask[0], NULL);
- if (ret)
- goto failed;
+ return ret;
-failed:
- mutex_unlock(&feature->mutex);
- return ret;
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_SetAllowedFeaturesMaskLow,
+ feature_mask[0],
+ NULL);
}
int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
@@ -751,6 +751,8 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
switch (adev->ip_versions[MP1_HWIP][0]) {
case IP_VERSION(13, 0, 1):
case IP_VERSION(13, 0, 3):
+ case IP_VERSION(13, 0, 5):
+ case IP_VERSION(13, 0, 8):
if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
return 0;
if (enable)
@@ -768,30 +770,8 @@ int smu_v13_0_gfx_off_control(struct smu_context *smu, bool enable)
int smu_v13_0_system_features_control(struct smu_context *smu,
bool en)
{
- struct smu_feature *feature = &smu->smu_feature;
- uint32_t feature_mask[2];
- int ret = 0;
-
- ret = smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
- SMU_MSG_DisableAllSmuFeatures), NULL);
- if (ret)
- return ret;
-
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
-
- if (en) {
- ret = smu_cmn_get_enabled_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
- }
-
- return ret;
+ return smu_cmn_send_smc_msg(smu, (en ? SMU_MSG_EnableAllSmuFeatures :
+ SMU_MSG_DisableAllSmuFeatures), NULL);
}
int smu_v13_0_notify_display_change(struct smu_context *smu)
@@ -1200,7 +1180,7 @@ static int smu_v13_0_set_irq_state(struct amdgpu_device *adev,
unsigned type,
enum amdgpu_interrupt_state state)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t low, high;
uint32_t val = 0;
@@ -1275,7 +1255,7 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- struct smu_context *smu = &adev->smu;
+ struct smu_context *smu = adev->powerplay.pp_handle;
uint32_t client_id = entry->client_id;
uint32_t src_id = entry->src_id;
/*
@@ -1321,11 +1301,11 @@ static int smu_v13_0_irq_process(struct amdgpu_device *adev,
switch (ctxid) {
case 0x3:
dev_dbg(adev->dev, "Switched to AC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
+ smu_v13_0_ack_ac_dc_interrupt(smu);
break;
case 0x4:
dev_dbg(adev->dev, "Switched to DC mode!\n");
- smu_v13_0_ack_ac_dc_interrupt(&adev->smu);
+ smu_v13_0_ack_ac_dc_interrupt(smu);
break;
case 0x7:
/*
@@ -1533,7 +1513,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
uint32_t min,
uint32_t max)
{
- struct amdgpu_device *adev = smu->adev;
int ret = 0, clk_id = 0;
uint32_t param;
@@ -1546,9 +1525,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
if (clk_id < 0)
return clk_id;
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, false);
-
if (max > 0) {
param = (uint32_t)((clk_id << 16) | (max & 0xffff));
ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxByFreq,
@@ -1566,9 +1542,6 @@ int smu_v13_0_set_soft_freq_limited_range(struct smu_context *smu,
}
out:
- if (clk_type == SMU_GFXCLK)
- amdgpu_gfx_off_ctrl(adev, true);
-
return ret;
}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
new file mode 100644
index 000000000000..7bfac029e513
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.c
@@ -0,0 +1,1057 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+
+#define SWSMU_CODE_LAYER_L2
+
+#include "amdgpu.h"
+#include "amdgpu_smu.h"
+#include "smu_v13_0.h"
+#include "smu13_driver_if_v13_0_5.h"
+#include "smu_v13_0_5_ppt.h"
+#include "smu_v13_0_5_ppsmc.h"
+#include "smu_v13_0_5_pmfw.h"
+#include "smu_cmn.h"
+
+/*
+ * DO NOT use these for err/warn/info/debug messages.
+ * Use dev_err, dev_warn, dev_info and dev_dbg instead.
+ * They are more MGPU friendly.
+ */
+#undef pr_err
+#undef pr_warn
+#undef pr_info
+#undef pr_debug
+
+#define FEATURE_MASK(feature) (1ULL << feature)
+#define SMC_DPM_FEATURE ( \
+ FEATURE_MASK(FEATURE_CCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_FCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_LCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_GFX_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_VCN_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_DCFCLK_DPM_BIT) | \
+ FEATURE_MASK(FEATURE_SOCCLK_DPM_BIT)| \
+ FEATURE_MASK(FEATURE_MP0CLK_DPM_BIT)| \
+ FEATURE_MASK(FEATURE_SHUBCLK_DPM_BIT))
+
+static struct cmn2asic_msg_mapping smu_v13_0_5_message_map[SMU_MSG_MAX_COUNT] = {
+ MSG_MAP(TestMessage, PPSMC_MSG_TestMessage, 1),
+ MSG_MAP(GetSmuVersion, PPSMC_MSG_GetSmuVersion, 1),
+ MSG_MAP(PowerDownVcn, PPSMC_MSG_PowerDownVcn, 1),
+ MSG_MAP(PowerUpVcn, PPSMC_MSG_PowerUpVcn, 1),
+ MSG_MAP(SetHardMinVcn, PPSMC_MSG_SetHardMinVcn, 1),
+ MSG_MAP(SetSoftMinGfxclk, PPSMC_MSG_SetSoftMinGfxclk, 1),
+ MSG_MAP(Spare0, PPSMC_MSG_Spare0, 1),
+ MSG_MAP(GfxDeviceDriverReset, PPSMC_MSG_GfxDeviceDriverReset, 1),
+ MSG_MAP(SetDriverDramAddrHigh, PPSMC_MSG_SetDriverDramAddrHigh, 1),
+ MSG_MAP(SetDriverDramAddrLow, PPSMC_MSG_SetDriverDramAddrLow, 1),
+ MSG_MAP(TransferTableSmu2Dram, PPSMC_MSG_TransferTableSmu2Dram, 1),
+ MSG_MAP(TransferTableDram2Smu, PPSMC_MSG_TransferTableDram2Smu, 1),
+ MSG_MAP(GetGfxclkFrequency, PPSMC_MSG_GetGfxclkFrequency, 1),
+ MSG_MAP(GetEnabledSmuFeatures, PPSMC_MSG_GetEnabledSmuFeatures, 1),
+ MSG_MAP(SetSoftMaxVcn, PPSMC_MSG_SetSoftMaxVcn, 1),
+ MSG_MAP(PowerDownJpeg, PPSMC_MSG_PowerDownJpeg, 1),
+ MSG_MAP(PowerUpJpeg, PPSMC_MSG_PowerUpJpeg, 1),
+ MSG_MAP(SetSoftMaxGfxClk, PPSMC_MSG_SetSoftMaxGfxClk, 1),
+ MSG_MAP(SetHardMinGfxClk, PPSMC_MSG_SetHardMinGfxClk, 1),
+ MSG_MAP(AllowGfxOff, PPSMC_MSG_AllowGfxOff, 1),
+ MSG_MAP(DisallowGfxOff, PPSMC_MSG_DisallowGfxOff, 1),
+ MSG_MAP(SetSoftMinVcn, PPSMC_MSG_SetSoftMinVcn, 1),
+ MSG_MAP(GetDriverIfVersion, PPSMC_MSG_GetDriverIfVersion, 1),
+ MSG_MAP(PrepareMp1ForUnload, PPSMC_MSG_PrepareMp1ForUnload, 1),
+};
+
+static struct cmn2asic_mapping smu_v13_0_5_feature_mask_map[SMU_FEATURE_COUNT] = {
+ FEA_MAP(DATA_CALCULATION),
+ FEA_MAP(PPT),
+ FEA_MAP(TDC),
+ FEA_MAP(THERMAL),
+ FEA_MAP(PROCHOT),
+ FEA_MAP(CCLK_DPM),
+ FEA_MAP_REVERSE(FCLK),
+ FEA_MAP(LCLK_DPM),
+ FEA_MAP(DF_CSTATES),
+ FEA_MAP(FAN_CONTROLLER),
+ FEA_MAP(CPPC),
+ FEA_MAP_HALF_REVERSE(GFX),
+ FEA_MAP(DS_GFXCLK),
+ FEA_MAP(S0I3),
+ FEA_MAP(VCN_DPM),
+ FEA_MAP(DS_VCN),
+ FEA_MAP(DCFCLK_DPM),
+ FEA_MAP(ATHUB_PG),
+ FEA_MAP_REVERSE(SOCCLK),
+ FEA_MAP(SHUBCLK_DPM),
+ FEA_MAP(GFXOFF),
+};
+
+static struct cmn2asic_mapping smu_v13_0_5_table_map[SMU_TABLE_COUNT] = {
+ TAB_MAP_VALID(WATERMARKS),
+ TAB_MAP_VALID(SMU_METRICS),
+ TAB_MAP_VALID(CUSTOM_DPM),
+ TAB_MAP_VALID(DPMCLOCKS),
+};
+
+static int smu_v13_0_5_init_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct smu_table *tables = smu_table->tables;
+
+ SMU_TABLE_INIT(tables, SMU_TABLE_WATERMARKS, sizeof(Watermarks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_DPMCLOCKS, sizeof(DpmClocks_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+ SMU_TABLE_INIT(tables, SMU_TABLE_SMU_METRICS, sizeof(SmuMetrics_t),
+ PAGE_SIZE, AMDGPU_GEM_DOMAIN_VRAM);
+
+ smu_table->clocks_table = kzalloc(sizeof(DpmClocks_t), GFP_KERNEL);
+ if (!smu_table->clocks_table)
+ goto err0_out;
+
+ smu_table->metrics_table = kzalloc(sizeof(SmuMetrics_t), GFP_KERNEL);
+ if (!smu_table->metrics_table)
+ goto err1_out;
+ smu_table->metrics_time = 0;
+
+ smu_table->watermarks_table = kzalloc(sizeof(Watermarks_t), GFP_KERNEL);
+ if (!smu_table->watermarks_table)
+ goto err2_out;
+
+ smu_table->gpu_metrics_table_size = sizeof(struct gpu_metrics_v2_1);
+ smu_table->gpu_metrics_table = kzalloc(smu_table->gpu_metrics_table_size, GFP_KERNEL);
+ if (!smu_table->gpu_metrics_table)
+ goto err3_out;
+
+ return 0;
+
+err3_out:
+ kfree(smu_table->watermarks_table);
+err2_out:
+ kfree(smu_table->metrics_table);
+err1_out:
+ kfree(smu_table->clocks_table);
+err0_out:
+ return -ENOMEM;
+}
+
+static int smu_v13_0_5_fini_smc_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ kfree(smu_table->clocks_table);
+ smu_table->clocks_table = NULL;
+
+ kfree(smu_table->metrics_table);
+ smu_table->metrics_table = NULL;
+
+ kfree(smu_table->watermarks_table);
+ smu_table->watermarks_table = NULL;
+
+ return 0;
+}
+
+static int smu_v13_0_5_system_features_control(struct smu_context *smu, bool en)
+{
+ struct amdgpu_device *adev = smu->adev;
+ int ret = 0;
+
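+ /* tell MP1 to prepare for unload only on a real disable, not on s0ix entry */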
+ if (!en && !adev->in_s0ix)
+ ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
+
+ return ret;
+}
+
+static int smu_v13_0_5_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+
+ /* VCN DPM must be enabled before the VCN power-gate messages are valid */
+ if (enable)
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpVcn,
+ 0, NULL);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerDownVcn,
+ 0, NULL);
+
+ return ret;
+}
+
+static int smu_v13_0_5_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
+{
+ int ret = 0;
+
+ if (enable)
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_PowerUpJpeg,
+ 0, NULL);
+ else
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_PowerDownJpeg, 0,
+ NULL);
+
+ return ret;
+}
+
+static bool smu_v13_0_5_is_dpm_running(struct smu_context *smu)
+{
+ int ret = 0;
+ uint64_t feature_enabled;
+
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
+ if (ret)
+ return false;
+
+ return !!(feature_enabled & SMC_DPM_FEATURE);
+}
+
+static int smu_v13_0_5_mode_reset(struct smu_context *smu, int type)
+{
+ int ret = 0;
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
+ if (ret)
+ dev_err(smu->adev->dev, "Failed to mode reset!\n");
+
+ return ret;
+}
+
+static int smu_v13_0_5_mode2_reset(struct smu_context *smu)
+{
+ return smu_v13_0_5_mode_reset(smu, SMU_RESET_MODE_2);
+}
+
+static int smu_v13_0_5_get_smu_metrics_data(struct smu_context *smu,
+ MetricsMember_t member,
+ uint32_t *value)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
+ return ret;
+
+ switch (member) {
+ case METRICS_AVERAGE_GFXCLK:
+ *value = metrics->GfxclkFrequency;
+ break;
+ case METRICS_AVERAGE_SOCCLK:
+ *value = metrics->SocclkFrequency;
+ break;
+ case METRICS_AVERAGE_VCLK:
+ *value = metrics->VclkFrequency;
+ break;
+ case METRICS_AVERAGE_DCLK:
+ *value = metrics->DclkFrequency;
+ break;
+ case METRICS_AVERAGE_UCLK:
+ *value = metrics->MemclkFrequency;
+ break;
+ case METRICS_AVERAGE_GFXACTIVITY:
+ *value = metrics->GfxActivity / 100;
+ break;
+ case METRICS_AVERAGE_VCNACTIVITY:
+ *value = metrics->UvdActivity;
+ break;
+ case METRICS_AVERAGE_SOCKETPOWER:
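+ /* CurrentSocketPower is in mW; convert to the 8.8 fixed-point watts the sensor interface expects */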
+ *value = (metrics->CurrentSocketPower << 8) / 1000;
+ break;
+ case METRICS_TEMPERATURE_EDGE:
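+ /* raw temperature is in units of 0.01 degC; rescale to driver temperature units */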
+ *value = metrics->GfxTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_TEMPERATURE_HOTSPOT:
+ *value = metrics->SocTemperature / 100 *
+ SMU_TEMPERATURE_UNITS_PER_CENTIGRADES;
+ break;
+ case METRICS_THROTTLER_STATUS:
+ *value = metrics->ThrottlerStatus;
+ break;
+ case METRICS_VOLTAGE_VDDGFX:
+ *value = metrics->Voltage[0];
+ break;
+ case METRICS_VOLTAGE_VDDSOC:
+ *value = metrics->Voltage[1];
+ break;
+ default:
+ *value = UINT_MAX;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_5_read_sensor(struct smu_context *smu,
+ enum amd_pp_sensors sensor,
+ void *data, uint32_t *size)
+{
+ int ret = 0;
+
+ if (!data || !size)
+ return -EINVAL;
+
+ switch (sensor) {
+ case AMDGPU_PP_SENSOR_GPU_LOAD:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXACTIVITY,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GPU_POWER:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_SOCKETPOWER,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_EDGE_TEMP:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_EDGE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_HOTSPOT_TEMP:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_TEMPERATURE_HOTSPOT,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_MCLK:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_UCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_GFX_SCLK:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_AVERAGE_GFXCLK,
+ (uint32_t *)data);
+ *(uint32_t *)data *= 100;
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDGFX:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDGFX,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_VDDNB:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_VOLTAGE_VDDSOC,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_SS_APU_SHARE:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_SS_APU_SHARE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ case AMDGPU_PP_SENSOR_SS_DGPU_SHARE:
+ ret = smu_v13_0_5_get_smu_metrics_data(smu,
+ METRICS_SS_DGPU_SHARE,
+ (uint32_t *)data);
+ *size = 4;
+ break;
+ default:
+ ret = -EOPNOTSUPP;
+ break;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_5_set_watermarks_table(struct smu_context *smu,
+ struct pp_smu_wm_range_sets *clock_ranges)
+{
+ int i;
+ int ret = 0;
+ Watermarks_t *table = smu->smu_table.watermarks_table;
+
+ if (!table || !clock_ranges)
+ return -EINVAL;
+
+ if (clock_ranges->num_reader_wm_sets > NUM_WM_RANGES ||
+ clock_ranges->num_writer_wm_sets > NUM_WM_RANGES)
+ return -EINVAL;
+
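+ /* reader (drain) watermark ranges fill the DCFCLK rows, writer (fill) ranges the SOCCLK rows */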
+ for (i = 0; i < clock_ranges->num_reader_wm_sets; i++) {
+ table->WatermarkRow[WM_DCFCLK][i].MinClock =
+ clock_ranges->reader_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MaxClock =
+ clock_ranges->reader_wm_sets[i].max_drain_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MinMclk =
+ clock_ranges->reader_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_DCFCLK][i].MaxMclk =
+ clock_ranges->reader_wm_sets[i].max_fill_clk_mhz;
+
+ table->WatermarkRow[WM_DCFCLK][i].WmSetting =
+ clock_ranges->reader_wm_sets[i].wm_inst;
+ }
+
+ for (i = 0; i < clock_ranges->num_writer_wm_sets; i++) {
+ table->WatermarkRow[WM_SOCCLK][i].MinClock =
+ clock_ranges->writer_wm_sets[i].min_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxClock =
+ clock_ranges->writer_wm_sets[i].max_fill_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MinMclk =
+ clock_ranges->writer_wm_sets[i].min_drain_clk_mhz;
+ table->WatermarkRow[WM_SOCCLK][i].MaxMclk =
+ clock_ranges->writer_wm_sets[i].max_drain_clk_mhz;
+
+ table->WatermarkRow[WM_SOCCLK][i].WmSetting =
+ clock_ranges->writer_wm_sets[i].wm_inst;
+ }
+
+ smu->watermarks_bitmap |= WATERMARKS_EXIST;
+
+ /* pass data to smu controller */
+ if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
+ !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+ ret = smu_cmn_write_watermarks_table(smu);
+ if (ret) {
+ dev_err(smu->adev->dev, "Failed to update WMTABLE!");
+ return ret;
+ }
+ smu->watermarks_bitmap |= WATERMARKS_LOADED;
+ }
+
+ return 0;
+}
+
+static ssize_t smu_v13_0_5_get_gpu_metrics(struct smu_context *smu,
+ void **table)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+ struct gpu_metrics_v2_1 *gpu_metrics =
+ (struct gpu_metrics_v2_1 *)smu_table->gpu_metrics_table;
+ SmuMetrics_t metrics;
+ int ret = 0;
+
+ ret = smu_cmn_get_metrics_table(smu, &metrics, true);
+ if (ret)
+ return ret;
+
+ smu_cmn_init_soft_gpu_metrics(gpu_metrics, 2, 1);
+
+ gpu_metrics->temperature_gfx = metrics.GfxTemperature;
+ gpu_metrics->temperature_soc = metrics.SocTemperature;
+
+ gpu_metrics->average_gfx_activity = metrics.GfxActivity;
+ gpu_metrics->average_mm_activity = metrics.UvdActivity;
+
+ gpu_metrics->average_socket_power = metrics.CurrentSocketPower;
+ gpu_metrics->average_gfx_power = metrics.Power[0];
+ gpu_metrics->average_soc_power = metrics.Power[1];
+ gpu_metrics->average_gfxclk_frequency = metrics.GfxclkFrequency;
+ gpu_metrics->average_socclk_frequency = metrics.SocclkFrequency;
+ gpu_metrics->average_uclk_frequency = metrics.MemclkFrequency;
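+ /* SmuMetrics_t carries no separate fclk sample, so memclk stands in for fclk here */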
+ gpu_metrics->average_fclk_frequency = metrics.MemclkFrequency;
+ gpu_metrics->average_vclk_frequency = metrics.VclkFrequency;
+ gpu_metrics->average_dclk_frequency = metrics.DclkFrequency;
+ gpu_metrics->throttle_status = metrics.ThrottlerStatus;
+ gpu_metrics->system_clock_counter = ktime_get_boottime_ns();
+
+ *table = (void *)gpu_metrics;
+
+ return sizeof(struct gpu_metrics_v2_1);
+}
+
+static int smu_v13_0_5_set_default_dpm_tables(struct smu_context *smu)
+{
+ struct smu_table_context *smu_table = &smu->smu_table;
+
+ return smu_cmn_update_table(smu, SMU_TABLE_DPMCLOCKS, 0, smu_table->clocks_table, false);
+}
+
+static int smu_v13_0_5_od_edit_dpm_table(struct smu_context *smu, enum PP_OD_DPM_TABLE_COMMAND type,
+ long input[], uint32_t size)
+{
+ struct smu_dpm_context *smu_dpm = &(smu->smu_dpm);
+ int ret = 0;
+
+ /* Only allowed in manual mode */
+ if (smu_dpm->dpm_level != AMD_DPM_FORCED_LEVEL_MANUAL)
+ return -EINVAL;
+
+ switch (type) {
+ case PP_OD_EDIT_SCLK_VDDC_TABLE:
+ if (size != 2) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ }
+
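+ /* input[0] selects the knob (0 = hard min, 1 = soft max); input[1] is the frequency in MHz */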
+ if (input[0] == 0) {
+ if (input[1] < smu->gfx_default_hard_min_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting minimum sclk (%ld) MHz is less than the minimum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_hard_min_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_hard_min_freq = input[1];
+ } else if (input[0] == 1) {
+ if (input[1] > smu->gfx_default_soft_max_freq) {
+ dev_warn(smu->adev->dev,
+ "Fine grain setting maximum sclk (%ld) MHz is greater than the maximum allowed (%d) MHz\n",
+ input[1], smu->gfx_default_soft_max_freq);
+ return -EINVAL;
+ }
+ smu->gfx_actual_soft_max_freq = input[1];
+ } else {
+ return -EINVAL;
+ }
+ break;
+ case PP_OD_RESTORE_DEFAULT_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ smu->gfx_actual_hard_min_freq = smu->gfx_default_hard_min_freq;
+ smu->gfx_actual_soft_max_freq = smu->gfx_default_soft_max_freq;
+ }
+ break;
+ case PP_OD_COMMIT_DPM_TABLE:
+ if (size != 0) {
+ dev_err(smu->adev->dev, "Input parameter number not correct\n");
+ return -EINVAL;
+ } else {
+ if (smu->gfx_actual_hard_min_freq > smu->gfx_actual_soft_max_freq) {
+ dev_err(smu->adev->dev,
+ "The setting minimum sclk (%d) MHz is greater than the setting maximum sclk (%d) MHz\n",
+ smu->gfx_actual_hard_min_freq,
+ smu->gfx_actual_soft_max_freq);
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetHardMinGfxClk,
+ smu->gfx_actual_hard_min_freq, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set hard min sclk failed!");
+ return ret;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_SetSoftMaxGfxClk,
+ smu->gfx_actual_soft_max_freq, NULL);
+ if (ret) {
+ dev_err(smu->adev->dev, "Set soft max sclk failed!");
+ return ret;
+ }
+ }
+ break;
+ default:
+ return -ENOSYS;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_5_get_current_clk_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *value)
+{
+ MetricsMember_t member_type;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ member_type = METRICS_AVERAGE_SOCCLK;
+ break;
+ case SMU_VCLK:
+ member_type = METRICS_AVERAGE_VCLK;
+ break;
+ case SMU_DCLK:
+ member_type = METRICS_AVERAGE_DCLK;
+ break;
+ case SMU_MCLK:
+ member_type = METRICS_AVERAGE_UCLK;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ return smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetGfxclkFrequency, 0, value);
+ default:
+ return -EINVAL;
+ }
+
+ return smu_v13_0_5_get_smu_metrics_data(smu, member_type, value);
+}
+
+static int smu_v13_0_5_get_dpm_level_count(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *count)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ *count = clk_table->NumSocClkLevelsEnabled;
+ break;
+ case SMU_VCLK:
+ *count = clk_table->VcnClkLevelsEnabled;
+ break;
+ case SMU_DCLK:
+ *count = clk_table->VcnClkLevelsEnabled;
+ break;
+ case SMU_MCLK:
+ *count = clk_table->NumDfPstatesEnabled;
+ break;
+ case SMU_FCLK:
+ *count = clk_table->NumDfPstatesEnabled;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+static int smu_v13_0_5_get_dpm_freq_by_index(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t dpm_level,
+ uint32_t *freq)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ if (!clk_table || clk_type >= SMU_CLK_COUNT)
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_SOCCLK:
+ if (dpm_level >= clk_table->NumSocClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->SocClocks[dpm_level];
+ break;
+ case SMU_VCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->VClocks[dpm_level];
+ break;
+ case SMU_DCLK:
+ if (dpm_level >= clk_table->VcnClkLevelsEnabled)
+ return -EINVAL;
+ *freq = clk_table->DClocks[dpm_level];
+ break;
+ case SMU_UCLK:
+ case SMU_MCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].MemClk;
+ break;
+ case SMU_FCLK:
+ if (dpm_level >= clk_table->NumDfPstatesEnabled)
+ return -EINVAL;
+ *freq = clk_table->DfPstateTable[dpm_level].FClk;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static bool smu_v13_0_5_clk_dpm_is_enabled(struct smu_context *smu,
+ enum smu_clk_type clk_type)
+{
+ enum smu_feature_mask feature_id = 0;
+
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
+ feature_id = SMU_FEATURE_DPM_FCLK_BIT;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ feature_id = SMU_FEATURE_DPM_GFXCLK_BIT;
+ break;
+ case SMU_SOCCLK:
+ feature_id = SMU_FEATURE_DPM_SOCCLK_BIT;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ feature_id = SMU_FEATURE_VCN_DPM_BIT;
+ break;
+ default:
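+ /* clocks without a DPM feature mapping are treated as always enabled */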
+ return true;
+ }
+
+ return smu_cmn_feature_is_enabled(smu, feature_id);
+}
+
+static int smu_v13_0_5_get_dpm_ultimate_freq(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t *min,
+ uint32_t *max)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+ uint32_t clock_limit;
+ uint32_t max_dpm_level, min_dpm_level;
+ int ret = 0;
+
+ if (!smu_v13_0_5_clk_dpm_is_enabled(smu, clk_type)) {
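+ /* DPM for this clock is disabled: report the fixed bootup frequency instead */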
+ switch (clk_type) {
+ case SMU_MCLK:
+ case SMU_UCLK:
+ clock_limit = smu->smu_table.boot_values.uclk;
+ break;
+ case SMU_FCLK:
+ clock_limit = smu->smu_table.boot_values.fclk;
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ clock_limit = smu->smu_table.boot_values.gfxclk;
+ break;
+ case SMU_SOCCLK:
+ clock_limit = smu->smu_table.boot_values.socclk;
+ break;
+ case SMU_VCLK:
+ clock_limit = smu->smu_table.boot_values.vclk;
+ break;
+ case SMU_DCLK:
+ clock_limit = smu->smu_table.boot_values.dclk;
+ break;
+ default:
+ clock_limit = 0;
+ break;
+ }
+
+ /* bootup clock values are in 10 kHz units; convert to MHz */
+ if (min)
+ *min = clock_limit / 100;
+ if (max)
+ *max = clock_limit / 100;
+
+ return 0;
+ }
+
+ if (max) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *max = clk_table->MaxGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
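+ /* DfPstateTable runs from highest to lowest clock, so entry 0 holds the max */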
+ max_dpm_level = 0;
+ break;
+ case SMU_SOCCLK:
+ max_dpm_level = clk_table->NumSocClkLevelsEnabled - 1;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ max_dpm_level = clk_table->VcnClkLevelsEnabled - 1;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, max_dpm_level, max);
+ if (ret)
+ goto failed;
+ }
+ }
+
+ if (min) {
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ *min = clk_table->MinGfxClk;
+ break;
+ case SMU_MCLK:
+ case SMU_UCLK:
+ case SMU_FCLK:
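+ /* the last DF P-state entry is the slowest, hence the min */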
+ min_dpm_level = clk_table->NumDfPstatesEnabled - 1;
+ break;
+ case SMU_SOCCLK:
+ min_dpm_level = 0;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ min_dpm_level = 0;
+ break;
+ default:
+ ret = -EINVAL;
+ goto failed;
+ }
+
+ if (clk_type != SMU_GFXCLK && clk_type != SMU_SCLK) {
+ ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, min_dpm_level, min);
+ if (ret)
+ goto failed;
+ }
+ }
+
+failed:
+ return ret;
+}
+
+static int smu_v13_0_5_set_soft_freq_limited_range(struct smu_context *smu,
+ enum smu_clk_type clk_type,
+ uint32_t min,
+ uint32_t max)
+{
+ enum smu_message_type msg_set_min, msg_set_max;
+ int ret = 0;
+
+ if (!smu_v13_0_5_clk_dpm_is_enabled(smu, clk_type))
+ return -EINVAL;
+
+ switch (clk_type) {
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ msg_set_min = SMU_MSG_SetHardMinGfxClk;
+ msg_set_max = SMU_MSG_SetSoftMaxGfxClk;
+ break;
+ case SMU_VCLK:
+ case SMU_DCLK:
+ msg_set_min = SMU_MSG_SetHardMinVcn;
+ msg_set_max = SMU_MSG_SetSoftMaxVcn;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ ret = smu_cmn_send_smc_msg_with_param(smu, msg_set_min, min, NULL);
+ if (ret)
+ return ret;
+
+ return smu_cmn_send_smc_msg_with_param(smu, msg_set_max, max, NULL);
+}
+
+static int smu_v13_0_5_print_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, char *buf)
+{
+ int i, size = 0, ret = 0;
+ uint32_t cur_value = 0, value = 0, count = 0;
+ uint32_t min = 0, max = 0;
+
+ smu_cmn_get_sysfs_buf(&buf, &size);
+
+ switch (clk_type) {
+ case SMU_OD_SCLK:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_SCLK");
+ size += sysfs_emit_at(buf, size, "0: %10uMhz\n",
+ (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq);
+ size += sysfs_emit_at(buf, size, "1: %10uMhz\n",
+ (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq);
+ break;
+ case SMU_OD_RANGE:
+ size += sysfs_emit_at(buf, size, "%s:\n", "OD_RANGE");
+ size += sysfs_emit_at(buf, size, "SCLK: %7uMhz %10uMhz\n",
+ smu->gfx_default_hard_min_freq, smu->gfx_default_soft_max_freq);
+ break;
+ case SMU_SOCCLK:
+ case SMU_VCLK:
+ case SMU_DCLK:
+ case SMU_MCLK:
+ ret = smu_v13_0_5_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ goto print_clk_out;
+
+ ret = smu_v13_0_5_get_dpm_level_count(smu, clk_type, &count);
+ if (ret)
+ goto print_clk_out;
+
+ for (i = 0; i < count; i++) {
+ ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, i, &value);
+ if (ret)
+ goto print_clk_out;
+
+ size += sysfs_emit_at(buf, size, "%d: %uMhz %s\n", i, value,
+ cur_value == value ? "*" : "");
+ }
+ break;
+ case SMU_GFXCLK:
+ case SMU_SCLK:
+ ret = smu_v13_0_5_get_current_clk_freq(smu, clk_type, &cur_value);
+ if (ret)
+ goto print_clk_out;
+ min = (smu->gfx_actual_hard_min_freq > 0) ? smu->gfx_actual_hard_min_freq : smu->gfx_default_hard_min_freq;
+ max = (smu->gfx_actual_soft_max_freq > 0) ? smu->gfx_actual_soft_max_freq : smu->gfx_default_soft_max_freq;
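+ /* map the current frequency onto three synthetic levels: 0 = min, 1 = intermediate, 2 = max */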
+ if (cur_value == max)
+ i = 2;
+ else if (cur_value == min)
+ i = 0;
+ else
+ i = 1;
+ size += sysfs_emit_at(buf, size, "0: %uMhz %s\n", min,
+ i == 0 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "1: %uMhz %s\n",
+ i == 1 ? cur_value : SMU_13_0_5_UMD_PSTATE_GFXCLK,
+ i == 1 ? "*" : "");
+ size += sysfs_emit_at(buf, size, "2: %uMhz %s\n", max,
+ i == 2 ? "*" : "");
+ break;
+ default:
+ break;
+ }
+
+print_clk_out:
+ return size;
+}
+
+static int smu_v13_0_5_force_clk_levels(struct smu_context *smu,
+ enum smu_clk_type clk_type, uint32_t mask)
+{
+ uint32_t soft_min_level = 0, soft_max_level = 0;
+ uint32_t min_freq = 0, max_freq = 0;
+ int ret = 0;
+
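+ /* the lowest and highest set bits of mask pick the soft min/max DPM levels */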
+ soft_min_level = mask ? (ffs(mask) - 1) : 0;
+ soft_max_level = mask ? (fls(mask) - 1) : 0;
+
+ switch (clk_type) {
+ case SMU_VCLK:
+ case SMU_DCLK:
+ ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, soft_min_level, &min_freq);
+ if (ret)
+ goto force_level_out;
+
+ ret = smu_v13_0_5_get_dpm_freq_by_index(smu, clk_type, soft_max_level, &max_freq);
+ if (ret)
+ goto force_level_out;
+
+ ret = smu_v13_0_5_set_soft_freq_limited_range(smu, clk_type, min_freq, max_freq);
+ if (ret)
+ goto force_level_out;
+ break;
+ default:
+ ret = -EINVAL;
+ break;
+ }
+
+force_level_out:
+ return ret;
+}
+
+static int smu_v13_0_5_set_performance_level(struct smu_context *smu,
+ enum amd_dpm_forced_level level)
+{
+ struct amdgpu_device *adev = smu->adev;
+ uint32_t sclk_min = 0, sclk_max = 0;
+ int ret = 0;
+
+ switch (level) {
+ case AMD_DPM_FORCED_LEVEL_HIGH:
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_SCLK, NULL, &sclk_max);
+ sclk_min = sclk_max;
+ break;
+ case AMD_DPM_FORCED_LEVEL_LOW:
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, NULL);
+ sclk_max = sclk_min;
+ break;
+ case AMD_DPM_FORCED_LEVEL_AUTO:
+ smu_v13_0_5_get_dpm_ultimate_freq(smu, SMU_SCLK, &sclk_min, &sclk_max);
+ break;
+ case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
+ /* Temporarily do nothing since the optimal clocks haven't been provided yet */
+ break;
+ case AMD_DPM_FORCED_LEVEL_MANUAL:
+ case AMD_DPM_FORCED_LEVEL_PROFILE_EXIT:
+ return 0;
+ default:
+ dev_err(adev->dev, "Invalid performance level %d\n", level);
+ return -EINVAL;
+ }
+
+ if (sclk_min && sclk_max) {
+ ret = smu_v13_0_5_set_soft_freq_limited_range(smu,
+ SMU_SCLK,
+ sclk_min,
+ sclk_max);
+ if (ret)
+ return ret;
+
+ smu->gfx_actual_hard_min_freq = sclk_min;
+ smu->gfx_actual_soft_max_freq = sclk_max;
+ }
+
+ return ret;
+}
+
+static int smu_v13_0_5_set_fine_grain_gfx_freq_parameters(struct smu_context *smu)
+{
+ DpmClocks_t *clk_table = smu->smu_table.clocks_table;
+
+ smu->gfx_default_hard_min_freq = clk_table->MinGfxClk;
+ smu->gfx_default_soft_max_freq = clk_table->MaxGfxClk;
+ smu->gfx_actual_hard_min_freq = 0;
+ smu->gfx_actual_soft_max_freq = 0;
+
+ return 0;
+}
+
+static const struct pptable_funcs smu_v13_0_5_ppt_funcs = {
+ .check_fw_status = smu_v13_0_check_fw_status,
+ .check_fw_version = smu_v13_0_check_fw_version,
+ .init_smc_tables = smu_v13_0_5_init_smc_tables,
+ .fini_smc_tables = smu_v13_0_5_fini_smc_tables,
+ .get_vbios_bootup_values = smu_v13_0_get_vbios_bootup_values,
+ .system_features_control = smu_v13_0_5_system_features_control,
+ .send_smc_msg_with_param = smu_cmn_send_smc_msg_with_param,
+ .send_smc_msg = smu_cmn_send_smc_msg,
+ .dpm_set_vcn_enable = smu_v13_0_5_dpm_set_vcn_enable,
+ .dpm_set_jpeg_enable = smu_v13_0_5_dpm_set_jpeg_enable,
+ .set_default_dpm_table = smu_v13_0_5_set_default_dpm_tables,
+ .read_sensor = smu_v13_0_5_read_sensor,
+ .is_dpm_running = smu_v13_0_5_is_dpm_running,
+ .set_watermarks_table = smu_v13_0_5_set_watermarks_table,
+ .get_gpu_metrics = smu_v13_0_5_get_gpu_metrics,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
+ .get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
+ .set_driver_table_location = smu_v13_0_set_driver_table_location,
+ .gfx_off_control = smu_v13_0_gfx_off_control,
+ .mode2_reset = smu_v13_0_5_mode2_reset,
+ .get_dpm_ultimate_freq = smu_v13_0_5_get_dpm_ultimate_freq,
+ .od_edit_dpm_table = smu_v13_0_5_od_edit_dpm_table,
+ .print_clk_levels = smu_v13_0_5_print_clk_levels,
+ .force_clk_levels = smu_v13_0_5_force_clk_levels,
+ .set_performance_level = smu_v13_0_5_set_performance_level,
+ .set_fine_grain_gfx_freq_parameters = smu_v13_0_5_set_fine_grain_gfx_freq_parameters,
+};
+
+void smu_v13_0_5_set_ppt_funcs(struct smu_context *smu)
+{
+ smu->ppt_funcs = &smu_v13_0_5_ppt_funcs;
+ smu->message_map = smu_v13_0_5_message_map;
+ smu->feature_map = smu_v13_0_5_feature_mask_map;
+ smu->table_map = smu_v13_0_5_table_map;
+ smu->is_apu = true;
+}
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h
new file mode 100644
index 000000000000..40bc0f8e6d61
--- /dev/null
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/smu_v13_0_5_ppt.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright 2020 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __SMU_V13_0_5_PPT_H__
+#define __SMU_V13_0_5_PPT_H__
+
+extern void smu_v13_0_5_set_ppt_funcs(struct smu_context *smu);
+#define SMU_13_0_5_UMD_PSTATE_GFXCLK 1100
+
+#endif
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
index caf1775d48ef..e2d099409123 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu13/yellow_carp_ppt.c
@@ -195,30 +195,13 @@ static int yellow_carp_fini_smc_tables(struct smu_context *smu)
static int yellow_carp_system_features_control(struct smu_context *smu, bool en)
{
- struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
- uint32_t feature_mask[2];
int ret = 0;
if (!en && !adev->in_s0ix)
ret = smu_cmn_send_smc_msg(smu, SMU_MSG_PrepareMp1ForUnload, NULL);
- bitmap_zero(feature->enabled, feature->feature_num);
- bitmap_zero(feature->supported, feature->feature_num);
-
- if (!en)
- return ret;
-
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
- if (ret)
- return ret;
-
- bitmap_copy(feature->enabled, (unsigned long *)&feature_mask,
- feature->feature_num);
- bitmap_copy(feature->supported, (unsigned long *)&feature_mask,
- feature->feature_num);
-
- return 0;
+ return ret;
}
static int yellow_carp_dpm_set_vcn_enable(struct smu_context *smu, bool enable)
@@ -255,16 +238,13 @@ static int yellow_carp_dpm_set_jpeg_enable(struct smu_context *smu, bool enable)
static bool yellow_carp_is_dpm_running(struct smu_context *smu)
{
int ret = 0;
- uint32_t feature_mask[2];
uint64_t feature_enabled;
- ret = smu_cmn_get_enabled_32_bits_mask(smu, feature_mask, 2);
+ ret = smu_cmn_get_enabled_mask(smu, &feature_enabled);
if (ret)
return false;
- feature_enabled = (uint64_t)feature_mask[1] << 32 | feature_mask[0];
-
return !!(feature_enabled & SMC_DPM_FEATURE);
}
@@ -282,14 +262,9 @@ static int yellow_carp_post_smu_init(struct smu_context *smu)
static int yellow_carp_mode_reset(struct smu_context *smu, int type)
{
- int ret = 0, index = 0;
-
- index = smu_cmn_to_asic_specific_index(smu, CMN2ASIC_MAPPING_MSG,
- SMU_MSG_GfxDeviceDriverReset);
- if (index < 0)
- return index == -EACCES ? 0 : index;
+ int ret = 0;
- ret = smu_cmn_send_smc_msg_with_param(smu, (uint16_t)index, type, NULL);
+ ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GfxDeviceDriverReset, type, NULL);
if (ret)
dev_err(smu->adev->dev, "Failed to mode reset!\n");
@@ -310,13 +285,9 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
SmuMetrics_t *metrics = (SmuMetrics_t *)smu_table->metrics_table;
int ret = 0;
- mutex_lock(&smu->metrics_lock);
-
- ret = smu_cmn_get_metrics_table_locked(smu, NULL, false);
- if (ret) {
- mutex_unlock(&smu->metrics_lock);
+ ret = smu_cmn_get_metrics_table(smu, NULL, false);
+ if (ret)
return ret;
- }
switch (member) {
case METRICS_AVERAGE_GFXCLK:
@@ -387,8 +358,6 @@ static int yellow_carp_get_smu_metrics_data(struct smu_context *smu,
break;
}
- mutex_unlock(&smu->metrics_lock);
-
return ret;
}
@@ -401,7 +370,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
if (!data || !size)
return -EINVAL;
- mutex_lock(&smu->sensor_lock);
switch (sensor) {
case AMDGPU_PP_SENSOR_GPU_LOAD:
ret = yellow_carp_get_smu_metrics_data(smu,
@@ -469,7 +437,6 @@ static int yellow_carp_read_sensor(struct smu_context *smu,
ret = -EOPNOTSUPP;
break;
}
- mutex_unlock(&smu->sensor_lock);
return ret;
}
@@ -1182,7 +1149,7 @@ static const struct pptable_funcs yellow_carp_ppt_funcs = {
.is_dpm_running = yellow_carp_is_dpm_running,
.set_watermarks_table = yellow_carp_set_watermarks_table,
.get_gpu_metrics = yellow_carp_get_gpu_metrics,
- .get_enabled_mask = smu_cmn_get_enabled_32_bits_mask,
+ .get_enabled_mask = smu_cmn_get_enabled_mask,
.get_pp_feature_mask = smu_cmn_get_pp_feature_mask,
.set_driver_table_location = smu_v13_0_set_driver_table_location,
.gfx_off_control = smu_v13_0_gfx_off_control,
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
index ee1a312fd497..b8d0c70ff668 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.c
@@ -51,6 +51,17 @@
#define mmMP1_SMN_C2PMSG_90 0x029a
#define mmMP1_SMN_C2PMSG_90_BASE_IDX 0
+/* SMU 13.0.5 has its own dedicated mailbox messaging registers */
+
+#define mmMP1_C2PMSG_2 (0xbee142 + 0xb00000 / 4)
+#define mmMP1_C2PMSG_2_BASE_IDX 0
+
+#define mmMP1_C2PMSG_34 (0xbee262 + 0xb00000 / 4)
+#define mmMP1_C2PMSG_34_BASE_IDX 0
+
+#define mmMP1_C2PMSG_33 (0xbee261 + 0xb00000 / 4)
+#define mmMP1_C2PMSG_33_BASE_IDX 0
+
#define MP1_C2PMSG_90__CONTENT_MASK 0xFFFFFFFFL
#undef __SMU_DUMMY_MAP
@@ -59,6 +70,12 @@ static const char * const __smu_message_names[] = {
SMU_MESSAGE_TYPES
};
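+
+/* Invoke an ASIC-specific ppt_funcs callback when it is implemented:
+ * -EINVAL when ppt_funcs itself is missing, -ENOTSUPP when only the
+ * requested callback is absent.
+ */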
+#define smu_cmn_call_asic_func(intf, smu, args...) \
+ ((smu)->ppt_funcs ? ((smu)->ppt_funcs->intf ? \
+ (smu)->ppt_funcs->intf(smu, ##args) : \
+ -ENOTSUPP) : \
+ -EINVAL)
+
static const char *smu_get_message_name(struct smu_context *smu,
enum smu_message_type type)
{
@@ -73,7 +90,10 @@ static void smu_cmn_read_arg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
+ *arg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
+ else
+ *arg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
}
/* Redefine the SMU error codes here.
@@ -119,7 +139,10 @@ static u32 __smu_cmn_poll_stat(struct smu_context *smu)
u32 reg;
for ( ; timeout > 0; timeout--) {
- reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5))
+ reg = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33);
+ else
+ reg = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90);
if ((reg & MP1_C2PMSG_90__CONTENT_MASK) != 0)
break;
@@ -137,15 +160,21 @@ static void __smu_cmn_reg_print_error(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
const char *message = smu_get_message_name(smu, msg);
+ u32 msg_idx, prm;
switch (reg_c2pmsg_90) {
case SMU_RESP_NONE: {
- u32 msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
- u32 prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
+ msg_idx = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2);
+ prm = RREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34);
+ } else {
+ msg_idx = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66);
+ prm = RREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82);
+ }
dev_err_ratelimited(adev->dev,
"SMU: I'm not done with your previous command: SMN_C2PMSG_66:0x%08X SMN_C2PMSG_82:0x%08X",
msg_idx, prm);
- }
+ }
break;
case SMU_RESP_OK:
/* The SMU executed the command. It completed with a
@@ -235,9 +264,16 @@ static void __smu_cmn_send_msg(struct smu_context *smu,
{
struct amdgpu_device *adev = smu->adev;
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
- WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ if (adev->ip_versions[MP1_HWIP][0] == IP_VERSION(13, 0, 5)) {
+ WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_33, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_34, param);
+ WREG32_SOC15(MP1, 0, mmMP1_C2PMSG_2, msg);
+ } else {
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_90, 0);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_82, param);
+ WREG32_SOC15(MP1, 0, mmMP1_SMN_C2PMSG_66, msg);
+ }
}
/**
@@ -267,7 +303,6 @@ int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
reg = __smu_cmn_poll_stat(smu);
res = __smu_cmn_reg2errno(smu, reg);
if (reg == SMU_RESP_NONE ||
- reg == SMU_RESP_BUSY_OTHER ||
res == -EREMOTEIO)
goto Out;
__smu_cmn_send_msg(smu, msg_index, param);
@@ -361,7 +396,6 @@ int smu_cmn_send_smc_msg_with_param(struct smu_context *smu,
reg = __smu_cmn_poll_stat(smu);
res = __smu_cmn_reg2errno(smu, reg);
if (reg == SMU_RESP_NONE ||
- reg == SMU_RESP_BUSY_OTHER ||
res == -EREMOTEIO) {
__smu_cmn_reg_print_error(smu, reg, index, param, msg);
goto Out;
@@ -481,7 +515,6 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
{
struct smu_feature *feature = &smu->smu_feature;
int feature_id;
- int ret = 0;
feature_id = smu_cmn_to_asic_specific_index(smu,
CMN2ASIC_MAPPING_FEATURE,
@@ -491,22 +524,33 @@ int smu_cmn_feature_is_supported(struct smu_context *smu,
WARN_ON(feature_id > feature->feature_num);
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->supported);
- mutex_unlock(&feature->mutex);
+ return test_bit(feature_id, feature->supported);
+}
- return ret;
+static int __smu_get_enabled_features(struct smu_context *smu,
+ uint64_t *enabled_features)
+{
+ return smu_cmn_call_asic_func(get_enabled_mask, smu, enabled_features);
}
int smu_cmn_feature_is_enabled(struct smu_context *smu,
enum smu_feature_mask mask)
{
- struct smu_feature *feature = &smu->smu_feature;
struct amdgpu_device *adev = smu->adev;
+ uint64_t enabled_features;
int feature_id;
- int ret = 0;
- if (smu->is_apu && adev->family < AMDGPU_FAMILY_VGH)
+ if (__smu_get_enabled_features(smu, &enabled_features)) {
+ dev_err(adev->dev, "Failed to retrieve enabled ppfeatures!\n");
+ return 0;
+ }
+
+ /*
+ * Renoir and Cyan Skillfish are assumed to have all features
+ * enabled. Since they also provide no feature_map, returning
+ * here avoids the pointless feature_map lookup below.
+ */
+ if (enabled_features == ULLONG_MAX)
return 1;
feature_id = smu_cmn_to_asic_specific_index(smu,
@@ -515,13 +559,7 @@ int smu_cmn_feature_is_enabled(struct smu_context *smu,
if (feature_id < 0)
return 0;
- WARN_ON(feature_id > feature->feature_num);
-
- mutex_lock(&feature->mutex);
- ret = test_bit(feature_id, feature->enabled);
- mutex_unlock(&feature->mutex);
-
- return ret;
+ return test_bit(feature_id, (unsigned long *)&enabled_features);
}
bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
@@ -552,70 +590,46 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
}
int smu_cmn_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
+ uint64_t *feature_mask)
{
- uint32_t feature_mask_high = 0, feature_mask_low = 0;
- struct smu_feature *feature = &smu->smu_feature;
- int ret = 0;
+ uint32_t *feature_mask_high;
+ uint32_t *feature_mask_low;
+ int ret = 0, index = 0;
- if (!feature_mask || num < 2)
+ if (!feature_mask)
return -EINVAL;
- if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesHigh, &feature_mask_high);
- if (ret)
- return ret;
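+ /* expose the 64-bit mask as two 32-bit halves for the per-half SMU queries (little-endian layout assumed) */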
+ feature_mask_low = &((uint32_t *)feature_mask)[0];
+ feature_mask_high = &((uint32_t *)feature_mask)[1];
- ret = smu_cmn_send_smc_msg(smu, SMU_MSG_GetEnabledSmuFeaturesLow, &feature_mask_low);
+ index = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_MSG,
+ SMU_MSG_GetEnabledSmuFeatures);
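+ /* ASICs that map the parameterized GetEnabledSmuFeatures message return each half by argument; otherwise fall back to the split High/Low messages */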
+ if (index > 0) {
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetEnabledSmuFeatures,
+ 0,
+ feature_mask_low);
if (ret)
return ret;
- feature_mask[0] = feature_mask_low;
- feature_mask[1] = feature_mask_high;
+ ret = smu_cmn_send_smc_msg_with_param(smu,
+ SMU_MSG_GetEnabledSmuFeatures,
+ 1,
+ feature_mask_high);
} else {
- bitmap_copy((unsigned long *)feature_mask, feature->enabled,
- feature->feature_num);
- }
-
- return ret;
-}
-
-int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num)
-{
- uint32_t feature_mask_en_low = 0;
- uint32_t feature_mask_en_high = 0;
- struct smu_feature *feature = &smu->smu_feature;
- int ret = 0;
-
- if (!feature_mask || num < 2)
- return -EINVAL;
-
- if (bitmap_empty(feature->enabled, feature->feature_num)) {
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 0,
- &feature_mask_en_low);
-
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_GetEnabledSmuFeaturesHigh,
+ feature_mask_high);
if (ret)
return ret;
- ret = smu_cmn_send_smc_msg_with_param(smu, SMU_MSG_GetEnabledSmuFeatures, 1,
- &feature_mask_en_high);
-
- if (ret)
- return ret;
-
- feature_mask[0] = feature_mask_en_low;
- feature_mask[1] = feature_mask_en_high;
-
- } else {
- bitmap_copy((unsigned long *)feature_mask, feature->enabled,
- feature->feature_num);
+ ret = smu_cmn_send_smc_msg(smu,
+ SMU_MSG_GetEnabledSmuFeaturesLow,
+ feature_mask_low);
}
return ret;
-
}
uint64_t smu_cmn_get_indep_throttler_status(
@@ -635,7 +649,6 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
uint64_t feature_mask,
bool enabled)
{
- struct smu_feature *feature = &smu->smu_feature;
int ret = 0;
if (enabled) {
@@ -649,8 +662,6 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
SMU_MSG_EnableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
- if (ret)
- return ret;
} else {
ret = smu_cmn_send_smc_msg_with_param(smu,
SMU_MSG_DisableSmuFeaturesLow,
@@ -662,19 +673,8 @@ int smu_cmn_feature_update_enable_state(struct smu_context *smu,
SMU_MSG_DisableSmuFeaturesHigh,
upper_32_bits(feature_mask),
NULL);
- if (ret)
- return ret;
}
- mutex_lock(&feature->mutex);
- if (enabled)
- bitmap_or(feature->enabled, feature->enabled,
- (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
- else
- bitmap_andnot(feature->enabled, feature->enabled,
- (unsigned long *)(&feature_mask), SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
-
return ret;
}
@@ -682,7 +682,6 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
enum smu_feature_mask mask,
bool enable)
{
- struct smu_feature *feature = &smu->smu_feature;
int feature_id;
feature_id = smu_cmn_to_asic_specific_index(smu,
@@ -691,8 +690,6 @@ int smu_cmn_feature_set_enabled(struct smu_context *smu,
if (feature_id < 0)
return -EINVAL;
- WARN_ON(feature_id > feature->feature_num);
-
return smu_cmn_feature_update_enable_state(smu,
1ULL << feature_id,
enable);
@@ -715,29 +712,20 @@ static const char *smu_get_feature_name(struct smu_context *smu,
size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
char *buf)
{
- uint32_t feature_mask[2] = { 0 };
+ uint64_t feature_mask;
int feature_index = 0;
uint32_t count = 0;
int8_t sort_feature[SMU_FEATURE_COUNT];
size_t size = 0;
int ret = 0, i;
+ int feature_id;
- if (!smu->is_apu) {
- ret = smu_cmn_get_enabled_mask(smu,
- feature_mask,
- 2);
- if (ret)
- return 0;
- } else {
- ret = smu_cmn_get_enabled_32_bits_mask(smu,
- feature_mask,
- 2);
- if (ret)
- return 0;
- }
+ ret = __smu_get_enabled_features(smu, &feature_mask);
+ if (ret)
+ return 0;
size = sysfs_emit_at(buf, size, "features high: 0x%08x low: 0x%08x\n",
- feature_mask[1], feature_mask[0]);
+ upper_32_bits(feature_mask), lower_32_bits(feature_mask));
memset(sort_feature, -1, sizeof(sort_feature));
@@ -758,11 +746,18 @@ size_t smu_cmn_get_pp_feature_mask(struct smu_context *smu,
if (sort_feature[i] < 0)
continue;
+ /* convert to ASIC-specific feature ID */
+ feature_id = smu_cmn_to_asic_specific_index(smu,
+ CMN2ASIC_MAPPING_FEATURE,
+ sort_feature[i]);
+ if (feature_id < 0)
+ continue;
+
size += sysfs_emit_at(buf, size, "%02d. %-20s (%2d) : %s\n",
count++,
smu_get_feature_name(smu, sort_feature[i]),
i,
- !!smu_cmn_feature_is_enabled(smu, sort_feature[i]) ?
+ !!test_bit(feature_id, (unsigned long *)&feature_mask) ?
"enabled" : "disabled");
}
@@ -773,22 +768,16 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
uint64_t new_mask)
{
int ret = 0;
- uint32_t feature_mask[2] = { 0 };
+ uint64_t feature_mask;
uint64_t feature_2_enabled = 0;
uint64_t feature_2_disabled = 0;
- uint64_t feature_enables = 0;
- ret = smu_cmn_get_enabled_mask(smu,
- feature_mask,
- 2);
+ ret = __smu_get_enabled_features(smu, &feature_mask);
if (ret)
return ret;
- feature_enables = ((uint64_t)feature_mask[1] << 32 |
- (uint64_t)feature_mask[0]);
-
- feature_2_enabled = ~feature_enables & new_mask;
- feature_2_disabled = feature_enables & ~new_mask;
+ feature_2_enabled = ~feature_mask & new_mask;
+ feature_2_disabled = feature_mask & ~new_mask;
if (feature_2_enabled) {
ret = smu_cmn_feature_update_enable_state(smu,
@@ -814,9 +803,6 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
* @mask
*
* @smu: smu_context pointer
- * @no_hw_disablement: whether real dpm disablement should be performed
- * true: update the cache(about dpm enablement state) only
- * false: real dpm disablement plus cache update
* @mask: the dpm feature which should not be disabled
* SMU_FEATURE_COUNT: no exception, all dpm features
* to disable
@@ -825,10 +811,8 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
* 0 on success or a negative error code on failure.
*/
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
- bool no_hw_disablement,
enum smu_feature_mask mask)
{
- struct smu_feature *feature = &smu->smu_feature;
uint64_t features_to_disable = U64_MAX;
int skipped_feature_id;
@@ -842,18 +826,9 @@ int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
features_to_disable &= ~(1ULL << skipped_feature_id);
}
- if (no_hw_disablement) {
- mutex_lock(&feature->mutex);
- bitmap_andnot(feature->enabled, feature->enabled,
- (unsigned long *)(&features_to_disable), SMU_FEATURE_MAX);
- mutex_unlock(&feature->mutex);
-
- return 0;
- } else {
- return smu_cmn_feature_update_enable_state(smu,
- features_to_disable,
- 0);
- }
+ return smu_cmn_feature_update_enable_state(smu,
+ features_to_disable,
+ 0);
}
int smu_cmn_get_smc_version(struct smu_context *smu,
@@ -964,9 +939,9 @@ int smu_cmn_write_pptable(struct smu_context *smu)
true);
}
-int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache)
+int smu_cmn_get_metrics_table(struct smu_context *smu,
+ void *metrics_table,
+ bool bypass_cache)
{
struct smu_table_context *smu_table= &smu->smu_table;
uint32_t table_size =
@@ -994,21 +969,6 @@ int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
return 0;
}
-int smu_cmn_get_metrics_table(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache)
-{
- int ret = 0;
-
- mutex_lock(&smu->metrics_lock);
- ret = smu_cmn_get_metrics_table_locked(smu,
- metrics_table,
- bypass_cache);
- mutex_unlock(&smu->metrics_lock);
-
- return ret;
-}
-
void smu_cmn_init_soft_gpu_metrics(void *table, uint8_t frev, uint8_t crev)
{
struct metrics_table_header *header = (struct metrics_table_header *)table;
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
index beea03810bca..a4c593ed8b03 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_cmn.h
@@ -26,6 +26,10 @@
#include "amdgpu_smu.h"
#if defined(SWSMU_CODE_LAYER_L2) || defined(SWSMU_CODE_LAYER_L3) || defined(SWSMU_CODE_LAYER_L4)
+
+#define FDO_PWM_MODE_STATIC 1
+#define FDO_PWM_MODE_STATIC_RPM 5
+
int smu_cmn_send_msg_without_waiting(struct smu_context *smu,
uint16_t msg_index,
uint32_t param);
@@ -54,12 +58,7 @@ bool smu_cmn_clk_dpm_is_enabled(struct smu_context *smu,
enum smu_clk_type clk_type);
int smu_cmn_get_enabled_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num);
-
-int smu_cmn_get_enabled_32_bits_mask(struct smu_context *smu,
- uint32_t *feature_mask,
- uint32_t num);
+ uint64_t *feature_mask);
uint64_t smu_cmn_get_indep_throttler_status(
const unsigned long dep_status,
@@ -80,7 +79,6 @@ int smu_cmn_set_pp_feature_mask(struct smu_context *smu,
uint64_t new_mask);
int smu_cmn_disable_all_features_with_exception(struct smu_context *smu,
- bool no_hw_disablement,
enum smu_feature_mask mask);
int smu_cmn_get_smc_version(struct smu_context *smu,
@@ -97,10 +95,6 @@ int smu_cmn_write_watermarks_table(struct smu_context *smu);
int smu_cmn_write_pptable(struct smu_context *smu);
-int smu_cmn_get_metrics_table_locked(struct smu_context *smu,
- void *metrics_table,
- bool bypass_cache);
-
int smu_cmn_get_metrics_table(struct smu_context *smu,
void *metrics_table,
bool bypass_cache);
diff --git a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
index 59f9cfff3d61..5f21ead860f9 100644
--- a/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
+++ b/drivers/gpu/drm/amd/pm/swsmu/smu_internal.h
@@ -55,9 +55,9 @@
#define smu_send_smc_msg(smu, msg, read_arg) smu_ppt_funcs(send_smc_msg, 0, smu, msg, read_arg)
#define smu_init_display_count(smu, count) smu_ppt_funcs(init_display_count, 0, smu, count)
#define smu_feature_set_allowed_mask(smu) smu_ppt_funcs(set_allowed_mask, 0, smu)
-#define smu_feature_get_enabled_mask(smu, mask, num) smu_ppt_funcs(get_enabled_mask, 0, smu, mask, num)
+#define smu_feature_get_enabled_mask(smu, mask) smu_ppt_funcs(get_enabled_mask, -EOPNOTSUPP, smu, mask)
#define smu_feature_is_enabled(smu, mask) smu_ppt_funcs(feature_is_enabled, 0, smu, mask)
-#define smu_disable_all_features_with_exception(smu, no_hw_disablement, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, no_hw_disablement, mask)
+#define smu_disable_all_features_with_exception(smu, mask) smu_ppt_funcs(disable_all_features_with_exception, 0, smu, mask)
#define smu_is_dpm_running(smu) smu_ppt_funcs(is_dpm_running, 0 , smu)
#define smu_notify_display_change(smu) smu_ppt_funcs(notify_display_change, 0, smu)
#define smu_populate_umd_state_clk(smu) smu_ppt_funcs(populate_umd_state_clk, 0, smu)
@@ -78,8 +78,8 @@
#define smu_dump_pptable(smu) smu_ppt_funcs(dump_pptable, 0, smu)
#define smu_update_pcie_parameters(smu, pcie_gen_cap, pcie_width_cap) smu_ppt_funcs(update_pcie_parameters, 0, smu, pcie_gen_cap, pcie_width_cap)
#define smu_set_power_source(smu, power_src) smu_ppt_funcs(set_power_source, 0, smu, power_src)
-#define smu_i2c_init(smu, control) smu_ppt_funcs(i2c_init, 0, smu, control)
-#define smu_i2c_fini(smu, control) smu_ppt_funcs(i2c_fini, 0, smu, control)
+#define smu_i2c_init(smu) smu_ppt_funcs(i2c_init, 0, smu)
+#define smu_i2c_fini(smu) smu_ppt_funcs(i2c_fini, 0, smu)
#define smu_get_unique_id(smu) smu_ppt_funcs(get_unique_id, 0, smu)
#define smu_log_thermal_throttling(smu) smu_ppt_funcs(log_thermal_throttling_event, 0, smu)
#define smu_get_asic_power_limits(smu, current, default, max) smu_ppt_funcs(get_power_limit, 0, smu, current, default, max)
@@ -91,6 +91,8 @@
#define smu_post_init(smu) smu_ppt_funcs(post_init, 0, smu)
#define smu_gpo_control(smu, enablement) smu_ppt_funcs(gpo_control, 0, smu, enablement)
#define smu_set_fine_grain_gfx_freq_parameters(smu) smu_ppt_funcs(set_fine_grain_gfx_freq_parameters, 0, smu)
+#define smu_get_default_config_table_settings(smu, config_table) smu_ppt_funcs(get_default_config_table_settings, -EOPNOTSUPP, smu, config_table)
+#define smu_set_config_table(smu, config_table) smu_ppt_funcs(set_config_table, -EOPNOTSUPP, smu, config_table)
#endif
#endif