Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 112
1 file changed, 98 insertions(+), 14 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index 16807ff96dc9..0867fd9e15ba 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -556,6 +556,8 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	default:
 		if (!load_type)
 			return AMDGPU_FW_LOAD_DIRECT;
+		else if (load_type == 3)
+			return AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO;
 		else
 			return AMDGPU_FW_LOAD_PSP;
 	}
@@ -642,6 +644,8 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
 		return "SMC";
 	case AMDGPU_UCODE_ID_PPTABLE:
 		return "PPTABLE";
+	case AMDGPU_UCODE_ID_P2S_TABLE:
+		return "P2STABLE";
 	case AMDGPU_UCODE_ID_UVD:
 		return "UVD";
 	case AMDGPU_UCODE_ID_UVD1:
@@ -664,20 +668,44 @@ const char *amdgpu_ucode_name(enum AMDGPU_UCODE_ID ucode_id)
 		return "DMCUB";
 	case AMDGPU_UCODE_ID_CAP:
 		return "CAP";
+	case AMDGPU_UCODE_ID_VPE_CTX:
+		return "VPE_CTX";
+	case AMDGPU_UCODE_ID_VPE_CTL:
+		return "VPE_CTL";
+	case AMDGPU_UCODE_ID_VPE:
+		return "VPE";
+	case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
+		return "UMSCH_MM_UCODE";
+	case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
+		return "UMSCH_MM_DATA";
+	case AMDGPU_UCODE_ID_UMSCH_MM_CMD_BUFFER:
+		return "UMSCH_MM_CMD_BUFFER";
+	case AMDGPU_UCODE_ID_JPEG_RAM:
+		return "JPEG";
 	default:
 		return "UNKNOWN UCODE";
 	}
 }
 
+static inline int amdgpu_ucode_is_valid(uint32_t fw_version)
+{
+	if (!fw_version)
+		return -EINVAL;
+
+	return 0;
+}
+
 #define FW_VERSION_ATTR(name, mode, field)				\
 static ssize_t show_##name(struct device *dev,				\
-			  struct device_attribute *attr,		\
-			  char *buf)					\
+			   struct device_attribute *attr, char *buf)	\
 {									\
 	struct drm_device *ddev = dev_get_drvdata(dev);			\
 	struct amdgpu_device *adev = drm_to_adev(ddev);			\
 									\
-	return sysfs_emit(buf, "0x%08x\n", adev->field);	\
+	if (!buf)							\
+		return amdgpu_ucode_is_valid(adev->field);		\
+									\
+	return sysfs_emit(buf, "0x%08x\n", adev->field);		\
 }									\
 static DEVICE_ATTR(name, mode, show_##name, NULL)
 
@@ -703,6 +731,8 @@ FW_VERSION_ATTR(sdma_fw_version, 0444, sdma.instance[0].fw_version);
 FW_VERSION_ATTR(sdma2_fw_version, 0444, sdma.instance[1].fw_version);
 FW_VERSION_ATTR(vcn_fw_version, 0444, vcn.fw_version);
 FW_VERSION_ATTR(dmcu_fw_version, 0444, dm.dmcu_fw_version);
+FW_VERSION_ATTR(mes_fw_version, 0444, mes.sched_version & AMDGPU_MES_VERSION_MASK);
+FW_VERSION_ATTR(mes_kiq_fw_version, 0444, mes.kiq_version & AMDGPU_MES_VERSION_MASK);
 
 static struct attribute *fw_attrs[] = {
 	&dev_attr_vce_fw_version.attr, &dev_attr_uvd_fw_version.attr,
@@ -716,12 +746,28 @@ static struct attribute *fw_attrs[] = {
 	&dev_attr_smc_fw_version.attr, &dev_attr_sdma_fw_version.attr,
 	&dev_attr_sdma2_fw_version.attr, &dev_attr_vcn_fw_version.attr,
 	&dev_attr_dmcu_fw_version.attr, &dev_attr_imu_fw_version.attr,
+	&dev_attr_mes_fw_version.attr, &dev_attr_mes_kiq_fw_version.attr,
 	NULL
 };
 
+#define to_dev_attr(x) container_of(x, struct device_attribute, attr)
+
+static umode_t amdgpu_ucode_sys_visible(struct kobject *kobj,
+					struct attribute *attr, int idx)
+{
+	struct device_attribute *dev_attr = to_dev_attr(attr);
+	struct device *dev = kobj_to_dev(kobj);
+
+	if (dev_attr->show(dev, dev_attr, NULL) == -EINVAL)
+		return 0;
+
+	return attr->mode;
+}
+
 static const struct attribute_group fw_attr_group = {
 	.name = "fw_version",
-	.attrs = fw_attrs
+	.attrs = fw_attrs,
+	.is_visible = amdgpu_ucode_sys_visible
 };
 
 int amdgpu_ucode_sysfs_init(struct amdgpu_device *adev)
@@ -746,6 +792,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 	const struct mes_firmware_header_v1_0 *mes_hdr = NULL;
 	const struct sdma_firmware_header_v2_0 *sdma_hdr = NULL;
 	const struct imu_firmware_header_v1_0 *imu_hdr = NULL;
+	const struct vpe_firmware_header_v1_0 *vpe_hdr = NULL;
+	const struct umsch_mm_firmware_header_v1_0 *umsch_mm_hdr = NULL;
 	u8 *ucode_addr;
 
 	if (!ucode->fw)
@@ -765,6 +813,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 	mes_hdr = (const struct mes_firmware_header_v1_0 *)ucode->fw->data;
 	sdma_hdr = (const struct sdma_firmware_header_v2_0 *)ucode->fw->data;
 	imu_hdr = (const struct imu_firmware_header_v1_0 *)ucode->fw->data;
+	vpe_hdr = (const struct vpe_firmware_header_v1_0 *)ucode->fw->data;
+	umsch_mm_hdr = (const struct umsch_mm_firmware_header_v1_0 *)ucode->fw->data;
 
 	if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
 		switch (ucode->ucode_id) {
@@ -881,6 +931,10 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 			ucode->ucode_size = ucode->fw->size;
 			ucode_addr = (u8 *)ucode->fw->data;
 			break;
+		case AMDGPU_UCODE_ID_P2S_TABLE:
+			ucode->ucode_size = ucode->fw->size;
+			ucode_addr = (u8 *)ucode->fw->data;
+			break;
 		case AMDGPU_UCODE_ID_IMU_I:
 			ucode->ucode_size = le32_to_cpu(imu_hdr->imu_iram_ucode_size_bytes);
 			ucode_addr = (u8 *)ucode->fw->data +
@@ -947,6 +1001,26 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 			ucode_addr = (u8 *)ucode->fw->data +
 				le32_to_cpu(cpv2_hdr->data_offset_bytes);
 			break;
+		case AMDGPU_UCODE_ID_VPE_CTX:
+			ucode->ucode_size = le32_to_cpu(vpe_hdr->ctx_ucode_size_bytes);
+			ucode_addr = (u8 *)ucode->fw->data +
+				le32_to_cpu(vpe_hdr->header.ucode_array_offset_bytes);
+			break;
+		case AMDGPU_UCODE_ID_VPE_CTL:
+			ucode->ucode_size = le32_to_cpu(vpe_hdr->ctl_ucode_size_bytes);
+			ucode_addr = (u8 *)ucode->fw->data +
+				le32_to_cpu(vpe_hdr->ctl_ucode_offset);
+			break;
+		case AMDGPU_UCODE_ID_UMSCH_MM_UCODE:
+			ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_size_bytes);
+			ucode_addr = (u8 *)ucode->fw->data +
+				le32_to_cpu(umsch_mm_hdr->header.ucode_array_offset_bytes);
+			break;
+		case AMDGPU_UCODE_ID_UMSCH_MM_DATA:
+			ucode->ucode_size = le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_size_bytes);
+			ucode_addr = (u8 *)ucode->fw->data +
+				le32_to_cpu(umsch_mm_hdr->umsch_mm_ucode_data_offset_bytes);
+			break;
 		default:
 			ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
 			ucode_addr = (u8 *)ucode->fw->data +
@@ -990,9 +1064,11 @@ static int amdgpu_ucode_patch_jt(struct amdgpu_firmware_info *ucode,
 
 int amdgpu_ucode_create_bo(struct amdgpu_device *adev)
 {
-	if (adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) {
+	if ((adev->firmware.load_type != AMDGPU_FW_LOAD_DIRECT) &&
+	    (adev->firmware.load_type != AMDGPU_FW_LOAD_RLC_BACKDOOR_AUTO)) {
 		amdgpu_bo_create_kernel(adev, adev->firmware.fw_size, PAGE_SIZE,
-			amdgpu_sriov_vf(adev) ? AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
+			(amdgpu_sriov_vf(adev) || adev->debug_use_vram_fw_buf) ?
+			AMDGPU_GEM_DOMAIN_VRAM : AMDGPU_GEM_DOMAIN_GTT,
 			&adev->firmware.fw_buf,
 			&adev->firmware.fw_buf_mc,
 			&adev->firmware.fw_buf_ptr);
@@ -1058,7 +1134,7 @@ int amdgpu_ucode_init_bo(struct amdgpu_device *adev)
 static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int block_type)
 {
 	if (block_type == MP0_HWIP) {
-		switch (adev->ip_versions[MP0_HWIP][0]) {
+		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
 		case IP_VERSION(9, 0, 0):
 			switch (adev->asic_type) {
 			case CHIP_VEGA10:
@@ -1109,7 +1185,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
 			return "yellow_carp";
 		}
 	} else if (block_type == MP1_HWIP) {
-		switch (adev->ip_versions[MP1_HWIP][0]) {
+		switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) {
 		case IP_VERSION(9, 0, 0):
 		case IP_VERSION(10, 0, 0):
 		case IP_VERSION(10, 0, 1):
@@ -1135,7 +1211,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
 			return "aldebaran_smc";
 		}
 	} else if (block_type == SDMA0_HWIP) {
-		switch (adev->ip_versions[SDMA0_HWIP][0]) {
+		switch (amdgpu_ip_version(adev, SDMA0_HWIP, 0)) {
 		case IP_VERSION(4, 0, 0):
 			return "vega10_sdma";
 		case IP_VERSION(4, 0, 1):
@@ -1179,7 +1255,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
 			return "vangogh_sdma";
 		}
 	} else if (block_type == UVD_HWIP) {
-		switch (adev->ip_versions[UVD_HWIP][0]) {
+		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) {
 		case IP_VERSION(1, 0, 0):
 		case IP_VERSION(1, 0, 1):
 			if (adev->apu_flags & AMD_APU_IS_RAVEN2)
@@ -1204,7 +1280,8 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
 		case IP_VERSION(3, 0, 0):
 		case IP_VERSION(3, 0, 64):
 		case IP_VERSION(3, 0, 192):
-			if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 0))
+			if (amdgpu_ip_version(adev, GC_HWIP, 0) ==
+			    IP_VERSION(10, 3, 0))
 				return "sienna_cichlid_vcn";
 			return "navy_flounder_vcn";
 		case IP_VERSION(3, 0, 2):
@@ -1217,7 +1294,7 @@ static const char *amdgpu_ucode_legacy_naming(struct amdgpu_device *adev, int bl
 			return "yellow_carp_vcn";
 		}
 	} else if (block_type == GC_HWIP) {
-		switch (adev->ip_versions[GC_HWIP][0]) {
+		switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
 		case IP_VERSION(9, 0, 1):
 			return "vega10";
 		case IP_VERSION(9, 2, 1):
@@ -1270,7 +1347,7 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
 	int maj, min, rev;
 	char *ip_name;
 	const char *legacy;
-	uint32_t version = adev->ip_versions[block_type][0];
+	uint32_t version = amdgpu_ip_version(adev, block_type, 0);
 
 	legacy = amdgpu_ucode_legacy_naming(adev, block_type);
 	if (legacy) {
@@ -1294,6 +1371,9 @@ void amdgpu_ucode_ip_version_decode(struct amdgpu_device *adev, int block_type,
 	case UVD_HWIP:
 		ip_name = "vcn";
 		break;
+	case VPE_HWIP:
+		ip_name = "vpe";
+		break;
	default:
 		BUG();
 	}
@@ -1323,9 +1403,13 @@ int amdgpu_ucode_request(struct amdgpu_device *adev, const struct firmware **fw,
 
 	if (err)
 		return -ENODEV;
+
 	err = amdgpu_ucode_validate(*fw);
-	if (err)
+	if (err) {
 		dev_dbg(adev->dev, "\"%s\" failed to validate\n", fw_name);
+		release_firmware(*fw);
+		*fw = NULL;
+	}
 
 	return err;
 }
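
The sysfs portion of this diff uses the standard kernel attribute-group pattern: an .is_visible callback decides, per attribute, whether the file is created at all, so firmware nodes whose version is unknown simply never appear under fw_version. Below is a minimal standalone sketch of that pattern; the demo_fw_version variable and all demo_* names are hypothetical and not part of amdgpu.

#include <linux/device.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>

static u32 demo_fw_version;	/* hypothetical backing value, 0 = unknown */

static ssize_t demo_fw_version_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "0x%08x\n", demo_fw_version);
}
static DEVICE_ATTR_RO(demo_fw_version);

static struct attribute *demo_attrs[] = {
	&dev_attr_demo_fw_version.attr,
	NULL
};

static umode_t demo_attr_is_visible(struct kobject *kobj,
				    struct attribute *attr, int idx)
{
	/* Returning 0 hides the file entirely; attr->mode keeps it. */
	return demo_fw_version ? attr->mode : 0;
}

static const struct attribute_group demo_attr_group = {
	.name		= "fw_version",
	.attrs		= demo_attrs,
	.is_visible	= demo_attr_is_visible,
};

/* The group would be registered with sysfs_create_group(&dev->kobj, &demo_attr_group). */

The patch follows the same idea but keeps a single FW_VERSION_ATTR show routine: when called with a NULL buffer from the visibility callback it only reports whether the backing version field is non-zero, and when called normally it prints the value.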