Diffstat (limited to 'drivers/gpu/drm/amd/pm/amdgpu_pm.c')
-rw-r--r--   drivers/gpu/drm/amd/pm/amdgpu_pm.c | 233
1 file changed, 161 insertions(+), 72 deletions(-)
diff --git a/drivers/gpu/drm/amd/pm/amdgpu_pm.c b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
index f09b9d49297e..c11952a4389b 100644
--- a/drivers/gpu/drm/amd/pm/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/pm/amdgpu_pm.c
@@ -38,6 +38,8 @@
 #define MAX_NUM_OF_FEATURES_PER_SUBSET		8
 #define MAX_NUM_OF_SUBSETS			8
 
+#define DEVICE_ATTR_IS(_name)		(attr_id == device_attr_id__##_name)
+
 struct od_attribute {
 	struct kobj_attribute	attribute;
 	struct list_head	entry;
@@ -1582,6 +1584,30 @@ static ssize_t amdgpu_get_mem_busy_percent(struct device *dev,
 }
 
 /**
+ * DOC: vcn_busy_percent
+ *
+ * The amdgpu driver provides a sysfs API for reading how busy the VCN
+ * is as a percentage.  The file vcn_busy_percent is used for this.
+ * The SMU firmware computes a percentage of load based on the
+ * aggregate activity level in the IP cores.
+ */
+static ssize_t amdgpu_get_vcn_busy_percent(struct device *dev,
+						  struct device_attribute *attr,
+						  char *buf)
+{
+	struct drm_device *ddev = dev_get_drvdata(dev);
+	struct amdgpu_device *adev = drm_to_adev(ddev);
+	unsigned int value;
+	int r;
+
+	r = amdgpu_hwmon_get_sensor_generic(adev, AMDGPU_PP_SENSOR_VCN_LOAD, &value);
+	if (r)
+		return r;
+
+	return sysfs_emit(buf, "%d\n", value);
+}
+
+/**
  * DOC: pcie_bw
  *
  * The amdgpu driver provides a sysfs API for estimating how much data
@@ -2091,6 +2117,99 @@ static int pp_dpm_dcefclk_attr_update(struct amdgpu_device *adev, struct amdgpu_
 	return 0;
 }
 
+static int pp_dpm_clk_default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_attr *attr,
+					  uint32_t mask, enum amdgpu_device_attr_states *states)
+{
+	struct device_attribute *dev_attr = &attr->dev_attr;
+	enum amdgpu_device_attr_id attr_id = attr->attr_id;
+	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
+
+	*states = ATTR_STATE_SUPPORTED;
+
+	if (!(attr->flags & mask)) {
+		*states = ATTR_STATE_UNSUPPORTED;
+		return 0;
+	}
+
+	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
+		if (gc_ver < IP_VERSION(9, 0, 0))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
+		if (mp1_ver < IP_VERSION(10, 0, 0))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
+		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+		      gc_ver == IP_VERSION(10, 3, 3) ||
+		      gc_ver == IP_VERSION(10, 3, 6) ||
+		      gc_ver == IP_VERSION(10, 3, 7) ||
+		      gc_ver == IP_VERSION(10, 3, 0) ||
+		      gc_ver == IP_VERSION(10, 1, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 1) ||
+		      gc_ver == IP_VERSION(11, 0, 4) ||
+		      gc_ver == IP_VERSION(11, 5, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 3)))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
+		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+		       gc_ver == IP_VERSION(10, 3, 0) ||
+		       gc_ver == IP_VERSION(11, 0, 2) ||
+		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
+		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+		      gc_ver == IP_VERSION(10, 3, 3) ||
+		      gc_ver == IP_VERSION(10, 3, 6) ||
+		      gc_ver == IP_VERSION(10, 3, 7) ||
+		      gc_ver == IP_VERSION(10, 3, 0) ||
+		      gc_ver == IP_VERSION(10, 1, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 1) ||
+		      gc_ver == IP_VERSION(11, 0, 4) ||
+		      gc_ver == IP_VERSION(11, 5, 0) ||
+		      gc_ver == IP_VERSION(11, 0, 2) ||
+		      gc_ver == IP_VERSION(11, 0, 3) ||
+		      gc_ver == IP_VERSION(9, 4, 3)))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
+		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
+		       gc_ver == IP_VERSION(10, 3, 0) ||
+		       gc_ver == IP_VERSION(11, 0, 2) ||
+		       gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
+			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
+		if (gc_ver == IP_VERSION(9, 4, 2) ||
+		    gc_ver == IP_VERSION(9, 4, 3))
+			*states = ATTR_STATE_UNSUPPORTED;
+	}
+
+	switch (gc_ver) {
+	case IP_VERSION(9, 4, 1):
+	case IP_VERSION(9, 4, 2):
+		/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
+		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
+		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
+			dev_attr->attr.mode &= ~S_IWUGO;
+			dev_attr->store = NULL;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* setting should not be allowed from VF if not in one VF mode */
+	if (amdgpu_sriov_vf(adev) && amdgpu_sriov_is_pp_one_vf(adev)) {
+		dev_attr->attr.mode &= ~S_IWUGO;
+		dev_attr->store = NULL;
+	}
+
+	return 0;
+}
+
 /* Following items will be read out to indicate current plpd policy:
  *  - -1: none
  *  - 0: disallow
@@ -2162,17 +2281,26 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 	AMDGPU_DEVICE_ATTR_RO(pp_cur_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_force_state,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RW(pp_table,					ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_sclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_mclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_socclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_fclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_vclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dclk1,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
 	AMDGPU_DEVICE_ATTR_RW(pp_dpm_dcefclk,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
 			      .attr_update = pp_dpm_dcefclk_attr_update),
-	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RW(pp_dpm_pcie,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF,
+			      .attr_update = pp_dpm_clk_default_attr_update),
 	AMDGPU_DEVICE_ATTR_RW(pp_sclk_od,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_mclk_od,				ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_power_profile_mode,			ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2180,6 +2308,7 @@ static struct amdgpu_device_attr amdgpu_device_attrs[] = {
 			      .attr_update = pp_od_clk_voltage_attr_update),
 	AMDGPU_DEVICE_ATTR_RO(gpu_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(mem_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
+	AMDGPU_DEVICE_ATTR_RO(vcn_busy_percent,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(pcie_bw,					ATTR_FLAG_BASIC),
 	AMDGPU_DEVICE_ATTR_RW(pp_features,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
 	AMDGPU_DEVICE_ATTR_RO(unique_id,				ATTR_FLAG_BASIC|ATTR_FLAG_ONEVF),
@@ -2201,28 +2330,28 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 			       uint32_t mask, enum amdgpu_device_attr_states *states)
 {
 	struct device_attribute *dev_attr = &attr->dev_attr;
-	uint32_t mp1_ver = amdgpu_ip_version(adev, MP1_HWIP, 0);
+	enum amdgpu_device_attr_id attr_id = attr->attr_id;
 	uint32_t gc_ver = amdgpu_ip_version(adev, GC_HWIP, 0);
-	const char *attr_name = dev_attr->attr.name;
 
 	if (!(attr->flags & mask)) {
 		*states = ATTR_STATE_UNSUPPORTED;
 		return 0;
 	}
 
-#define DEVICE_ATTR_IS(_name)	(!strcmp(attr_name, #_name))
-
-	if (DEVICE_ATTR_IS(pp_dpm_socclk)) {
-		if (gc_ver < IP_VERSION(9, 0, 0))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_fclk)) {
-		if (mp1_ver < IP_VERSION(10, 0, 0))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(mem_busy_percent)) {
+	if (DEVICE_ATTR_IS(mem_busy_percent)) {
 		if ((adev->flags & AMD_IS_APU &&
 		     gc_ver != IP_VERSION(9, 4, 3)) ||
 		    gc_ver == IP_VERSION(9, 0, 1))
 			*states = ATTR_STATE_UNSUPPORTED;
+	} else if (DEVICE_ATTR_IS(vcn_busy_percent)) {
+		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
+			  gc_ver == IP_VERSION(10, 3, 3) ||
+			  gc_ver == IP_VERSION(10, 3, 6) ||
+			  gc_ver == IP_VERSION(10, 3, 7) ||
+			  gc_ver == IP_VERSION(11, 0, 1) ||
+			  gc_ver == IP_VERSION(11, 0, 4) ||
+			  gc_ver == IP_VERSION(11, 5, 0)))
+			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pcie_bw)) {
 		/* PCIe Perf counters won't work on APU nodes */
 		if (adev->flags & AMD_IS_APU ||
@@ -2253,36 +2382,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 	} else if (DEVICE_ATTR_IS(gpu_metrics)) {
 		if (gc_ver < IP_VERSION(9, 1, 0))
 			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_vclk)) {
-		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
-		      gc_ver == IP_VERSION(10, 3, 0) ||
-		      gc_ver == IP_VERSION(10, 1, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 0) ||
-		      gc_ver == IP_VERSION(11, 0, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 3) ||
-		      gc_ver == IP_VERSION(9, 4, 3)))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_vclk1)) {
-		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
-			   gc_ver == IP_VERSION(10, 3, 0) ||
-			   gc_ver == IP_VERSION(11, 0, 2) ||
-			   gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_dclk)) {
-		if (!(gc_ver == IP_VERSION(10, 3, 1) ||
-		      gc_ver == IP_VERSION(10, 3, 0) ||
-		      gc_ver == IP_VERSION(10, 1, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 0) ||
-		      gc_ver == IP_VERSION(11, 0, 2) ||
-		      gc_ver == IP_VERSION(11, 0, 3) ||
-		      gc_ver == IP_VERSION(9, 4, 3)))
-			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_dclk1)) {
-		if (!((gc_ver == IP_VERSION(10, 3, 1) ||
-			   gc_ver == IP_VERSION(10, 3, 0) ||
-			   gc_ver == IP_VERSION(11, 0, 2) ||
-			   gc_ver == IP_VERSION(11, 0, 3)) && adev->vcn.num_vcn_inst >= 2))
-			*states = ATTR_STATE_UNSUPPORTED;
 	} else if (DEVICE_ATTR_IS(pp_power_profile_mode)) {
 		if (amdgpu_dpm_get_power_profile_mode(adev, NULL) == -EOPNOTSUPP)
 			*states = ATTR_STATE_UNSUPPORTED;
@@ -2304,23 +2403,9 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		if (amdgpu_dpm_get_apu_thermal_limit(adev, &limit) ==
 		    -EOPNOTSUPP)
 			*states = ATTR_STATE_UNSUPPORTED;
-	} else if (DEVICE_ATTR_IS(pp_dpm_pcie)) {
-		if (gc_ver == IP_VERSION(9, 4, 2) ||
-		    gc_ver == IP_VERSION(9, 4, 3))
-			*states = ATTR_STATE_UNSUPPORTED;
 	}
 
 	switch (gc_ver) {
-	case IP_VERSION(9, 4, 1):
-	case IP_VERSION(9, 4, 2):
-		/* the Mi series card does not support standalone mclk/socclk/fclk level setting */
-		if (DEVICE_ATTR_IS(pp_dpm_mclk) ||
-		    DEVICE_ATTR_IS(pp_dpm_socclk) ||
-		    DEVICE_ATTR_IS(pp_dpm_fclk)) {
-			dev_attr->attr.mode &= ~S_IWUGO;
-			dev_attr->store = NULL;
-		}
-		break;
 	case IP_VERSION(10, 3, 0):
 		if (DEVICE_ATTR_IS(power_dpm_force_performance_level) &&
 		    amdgpu_sriov_vf(adev)) {
@@ -2332,14 +2417,6 @@ static int default_attr_update(struct amdgpu_device *adev, struct amdgpu_device_
 		break;
 	}
 
-	/* setting should not be allowed from VF if not in one VF mode */
-	if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) {
-		dev_attr->attr.mode &= ~S_IWUGO;
-		dev_attr->store = NULL;
-	}
-
-#undef DEVICE_ATTR_IS
-
 	return 0;
 }
 
@@ -4261,6 +4338,13 @@ static int amdgpu_od_set_init(struct amdgpu_device *adev)
 		}
 	}
 
+	/*
+	 * If gpu_od is the only member in the list, that means gpu_od is an
+	 * empty directory, so remove it.
+	 */
+	if (list_is_singular(&adev->pm.od_kobj_list))
+		goto err_out;
+
 	return 0;
 
 err_out:
@@ -4322,6 +4406,8 @@ int amdgpu_pm_sysfs_init(struct amdgpu_device *adev)
 		ret = amdgpu_od_set_init(adev);
 		if (ret)
 			goto err_out1;
+	} else if (adev->pm.pp_feature & PP_OVERDRIVE_MASK) {
+		dev_info(adev->dev, "overdrive feature is not supported\n");
 	}
 
 	adev->pm.sysfs_initialized = true;
@@ -4429,6 +4515,9 @@ static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *a
 	/* MEM Load */
 	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size))
 		seq_printf(m, "MEM Load: %u %%\n", value);
+	/* VCN Load */
+	if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_LOAD, (void *)&value, &size))
+		seq_printf(m, "VCN Load: %u %%\n", value);
 	seq_printf(m, "\n");
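The new vcn_busy_percent attribute simply exposes the AMDGPU_PP_SENSOR_VCN_LOAD sensor through sysfs, next to the existing gpu_busy_percent and mem_busy_percent files. A minimal userspace sketch for reading it is below; the card0 path is an assumption (it depends on which DRM node the GPU enumerates as), and the file is only created on the GC IP versions whitelisted in default_attr_update() above.

/* Sketch: read the new vcn_busy_percent sysfs file from userspace.
 * The card0 path is an assumption; pick the DRM node that matches your GPU.
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	const char *path = "/sys/class/drm/card0/device/vcn_busy_percent";
	FILE *f = fopen(path, "r");
	unsigned int load;

	if (!f) {
		perror("fopen");
		return EXIT_FAILURE;
	}
	if (fscanf(f, "%u", &load) == 1)
		printf("VCN load: %u %%\n", load);
	fclose(f);
	return 0;
}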
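The core of the refactor is the new DEVICE_ATTR_IS() definition: instead of a per-callback #define that strcmp()s the sysfs name, attributes are matched by a pre-assigned enum amdgpu_device_attr_id, which lets default_attr_update() and the new pp_dpm_clk_default_attr_update() share one macro. The standalone sketch below illustrates the pattern with simplified, made-up types and enum values rather than the driver's own:

/* Standalone illustration of the DEVICE_ATTR_IS() change: matching attributes
 * by a pre-assigned enum ID instead of strcmp() on the sysfs name.
 * The enum, struct and macro names here are simplified stand-ins.
 */
#include <stdio.h>
#include <string.h>

enum demo_attr_id {
	demo_attr_id__pp_dpm_sclk,
	demo_attr_id__pp_dpm_mclk,
	demo_attr_id__vcn_busy_percent,
};

struct demo_attr {
	const char *name;
	enum demo_attr_id attr_id;
};

/* Old style: string comparison on every check. */
#define DEMO_ATTR_IS_BY_NAME(attr, _name)	(!strcmp((attr)->name, #_name))

/* New style: integer comparison against the token-pasted enum value. */
#define DEMO_ATTR_IS(attr, _name)		((attr)->attr_id == demo_attr_id__##_name)

int main(void)
{
	struct demo_attr a = { "vcn_busy_percent", demo_attr_id__vcn_busy_percent };

	printf("by name: %d, by id: %d\n",
	       DEMO_ATTR_IS_BY_NAME(&a, vcn_busy_percent),
	       DEMO_ATTR_IS(&a, vcn_busy_percent));
	return 0;
}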
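The amdgpu_od_set_init() change leans on list_is_singular(): the gpu_od kobject itself is the first entry on adev->pm.od_kobj_list, so a list that still holds exactly one entry after initialization means no overdrive sub-directories were populated and the empty directory can be torn down through the existing err_out path. The sketch below mimics that predicate with a hand-rolled circular list (not the kernel's <linux/list.h>) and illustrative entry names:

/* Standalone sketch of the list_is_singular() check used to drop an empty
 * gpu_od directory: the predicate is true when exactly one node hangs off
 * the list head.
 */
#include <stdbool.h>
#include <stdio.h>

struct list_head {
	struct list_head *next, *prev;
};

static void list_init(struct list_head *h)
{
	h->next = h;
	h->prev = h;
}

static void list_add_tail(struct list_head *n, struct list_head *h)
{
	n->prev = h->prev;
	n->next = h;
	h->prev->next = n;
	h->prev = n;
}

static bool list_is_singular(const struct list_head *h)
{
	return h->next != h && h->next == h->prev;
}

int main(void)
{
	struct list_head od_list, gpu_od, sub_dir;

	list_init(&od_list);
	list_add_tail(&gpu_od, &od_list);	/* the directory kobject itself */
	printf("only gpu_od: singular=%d\n", list_is_singular(&od_list));	/* 1 -> empty, remove */

	list_add_tail(&sub_dir, &od_list);	/* a populated sub-directory */
	printf("with sub-entry: singular=%d\n", list_is_singular(&od_list));	/* 0 -> keep */
	return 0;
}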