Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
 drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 146
 1 file changed, 111 insertions(+), 35 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 34af664b9f93..2c5f093e79e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -83,8 +83,10 @@ static const char *amdgpu_asic_name[] = {
 	"POLARIS10",
 	"POLARIS11",
 	"POLARIS12",
+	"VEGAM",
 	"VEGA10",
 	"VEGA12",
+	"VEGA20",
 	"RAVEN",
 	"LAST",
 };
@@ -690,6 +692,8 @@ void amdgpu_device_gart_location(struct amdgpu_device *adev,
 {
 	u64 size_af, size_bf;
 
+	mc->gart_size += adev->pm.smu_prv_buffer_size;
+
 	size_af = adev->gmc.mc_mask - mc->vram_end;
 	size_bf = mc->vram_start;
 	if (size_bf > size_af) {
@@ -907,6 +911,46 @@ static void amdgpu_device_check_vm_size(struct amdgpu_device *adev)
 	}
 }
 
+static void amdgpu_device_check_smu_prv_buffer_size(struct amdgpu_device *adev)
+{
+	struct sysinfo si;
+	bool is_os_64 = (sizeof(void *) == 8) ? true : false;
+	uint64_t total_memory;
+	uint64_t dram_size_seven_GB = 0x1B8000000;
+	uint64_t dram_size_three_GB = 0xB8000000;
+
+	if (amdgpu_smu_memory_pool_size == 0)
+		return;
+
+	if (!is_os_64) {
+		DRM_WARN("Not 64-bit OS, feature not supported\n");
+		goto def_value;
+	}
+	si_meminfo(&si);
+	total_memory = (uint64_t)si.totalram * si.mem_unit;
+
+	if ((amdgpu_smu_memory_pool_size == 1) ||
+		(amdgpu_smu_memory_pool_size == 2)) {
+		if (total_memory < dram_size_three_GB)
+			goto def_value1;
+	} else if ((amdgpu_smu_memory_pool_size == 4) ||
+		(amdgpu_smu_memory_pool_size == 8)) {
+		if (total_memory < dram_size_seven_GB)
+			goto def_value1;
+	} else {
+		DRM_WARN("Smu memory pool size not supported\n");
+		goto def_value;
+	}
+	adev->pm.smu_prv_buffer_size = amdgpu_smu_memory_pool_size << 28;
+
+	return;
+
+def_value1:
+	DRM_WARN("No enough system memory\n");
+def_value:
+	adev->pm.smu_prv_buffer_size = 0;
+}
+
 /**
  * amdgpu_device_check_arguments - validate module params
  *
@@ -948,6 +992,8 @@ static void amdgpu_device_check_arguments(struct amdgpu_device *adev)
 		amdgpu_vm_fragment_size = -1;
 	}
 
+	amdgpu_device_check_smu_prv_buffer_size(adev);
+
 	amdgpu_device_check_vm_size(adev);
 
 	amdgpu_device_check_block_size(adev);
@@ -1039,10 +1085,11 @@ static const struct vga_switcheroo_client_ops amdgpu_switcheroo_ops = {
  * the hardware IP specified.
  * Returns the error code from the last instance.
  */
-int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_clockgating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_clockgating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1072,10 +1119,11 @@ int amdgpu_device_ip_set_clockgating_state(struct amdgpu_device *adev,
  * the hardware IP specified.
  * Returns the error code from the last instance.
  */
-int amdgpu_device_ip_set_powergating_state(struct amdgpu_device *adev,
+int amdgpu_device_ip_set_powergating_state(void *dev,
 					   enum amd_ip_block_type block_type,
 					   enum amd_powergating_state state)
 {
+	struct amdgpu_device *adev = dev;
 	int i, r = 0;
 
 	for (i = 0; i < adev->num_ip_blocks; i++) {
@@ -1320,9 +1368,10 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 #ifdef CONFIG_DRM_AMDGPU_SI
@@ -1339,6 +1388,7 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev)
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
 #endif
+	case CHIP_VEGA20:
 	default:
 		return 0;
 	case CHIP_VEGA10:
@@ -1428,9 +1478,10 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 	case CHIP_TOPAZ:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
 		if (adev->asic_type == CHIP_CARRIZO || adev->asic_type == CHIP_STONEY)
@@ -1472,6 +1523,7 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 #endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 	case CHIP_RAVEN:
 		if (adev->asic_type == CHIP_RAVEN)
 			adev->family = AMDGPU_FAMILY_RV;
@@ -1499,6 +1551,8 @@ static int amdgpu_device_ip_early_init(struct amdgpu_device *adev)
 			return -EAGAIN;
 	}
 
+	adev->powerplay.pp_feature = amdgpu_pp_feature_mask;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if ((amdgpu_ip_block_mask & (1 << i)) == 0) {
 			DRM_ERROR("disabled ip block: %d <%s>\n",
@@ -1654,12 +1708,17 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 	if (amdgpu_emu_mode == 1)
 		return 0;
 
+	r = amdgpu_ib_ring_tests(adev);
+	if (r)
+		DRM_ERROR("ib ring test failed (%d).\n", r);
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
 		/* skip CG for VCE/UVD, it's handled specially */
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+		    adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
 		    adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* enable clockgating to save power */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1671,6 +1730,18 @@ static int amdgpu_device_ip_late_set_cg_state(struct amdgpu_device *adev)
 			}
 		}
 	}
+
+	if (adev->powerplay.pp_feature & PP_GFXOFF_MASK) {
+		/* enable gfx powergating */
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_GFX,
+						       AMD_PG_STATE_GATE);
+		/* enable gfxoff */
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_SMC,
+						       AMD_PG_STATE_GATE);
+	}
+
 	return 0;
 }
 
@@ -1704,8 +1775,8 @@ static int amdgpu_device_ip_late_init(struct amdgpu_device *adev)
 		}
 	}
 
-	mod_delayed_work(system_wq, &adev->late_init_work,
-			msecs_to_jiffies(AMDGPU_RESUME_MS));
+	queue_delayed_work(system_wq, &adev->late_init_work,
+			   msecs_to_jiffies(AMDGPU_RESUME_MS));
 
 	amdgpu_device_fill_reset_magic(adev);
 
@@ -1759,6 +1830,7 @@ static int amdgpu_device_ip_fini(struct amdgpu_device *adev)
 		if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_UVD &&
 			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCE &&
+			adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_VCN &&
 			adev->ip_blocks[i].version->funcs->set_clockgating_state) {
 			/* ungate blocks before hw fini so that we can shutdown the blocks safely */
 			r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
@@ -1850,6 +1922,12 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_request_full_gpu(adev, false);
 
+	/* ungate SMC block powergating */
+	if (adev->powerplay.pp_feature & PP_GFXOFF_MASK)
+		amdgpu_device_ip_set_powergating_state(adev,
+						       AMD_IP_BLOCK_TYPE_SMC,
+						       AMD_CG_STATE_UNGATE);
+
 	/* ungate SMC block first */
 	r = amdgpu_device_ip_set_clockgating_state(adev, AMD_IP_BLOCK_TYPE_SMC,
 						   AMD_CG_STATE_UNGATE);
@@ -2080,22 +2158,29 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
 	switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
 	case CHIP_BONAIRE:
-	case CHIP_HAWAII:
 	case CHIP_KAVERI:
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
+		/*
+		 * We have systems in the wild with these ASICs that require
+		 * LVDS and VGA support which is not supported with DC.
+		 *
+		 * Fallback to the non-DC driver here by default so as not to
+		 * cause regressions.
+		 */
+		return amdgpu_dc > 0;
+	case CHIP_HAWAII:
 	case CHIP_CARRIZO:
 	case CHIP_STONEY:
-	case CHIP_POLARIS11:
 	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
 	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
 	case CHIP_TONGA:
 	case CHIP_FIJI:
-#if defined(CONFIG_DRM_AMD_DC_PRE_VEGA)
-		return amdgpu_dc != 0;
-#endif
 	case CHIP_VEGA10:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
 	case CHIP_RAVEN:
#endif
@@ -2375,10 +2460,6 @@ fence_driver_init:
 		goto failed;
 	}
 
-	r = amdgpu_ib_ring_tests(adev);
-	if (r)
-		DRM_ERROR("ib ring test failed (%d).\n", r);
-
 	if (amdgpu_sriov_vf(adev))
 		amdgpu_virt_init_data_exchange(adev);
 
@@ -2539,7 +2620,7 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 	/* unpin the front buffers and cursors */
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
 		struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-		struct amdgpu_framebuffer *rfb = to_amdgpu_framebuffer(crtc->primary->fb);
+		struct drm_framebuffer *fb = crtc->primary->fb;
 		struct amdgpu_bo *robj;
 
 		if (amdgpu_crtc->cursor_bo) {
@@ -2551,10 +2632,10 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
 			}
 		}
 
-		if (rfb == NULL || rfb->obj == NULL) {
+		if (fb == NULL || fb->obj[0] == NULL) {
 			continue;
 		}
-		robj = gem_to_amdgpu_bo(rfb->obj);
+		robj = gem_to_amdgpu_bo(fb->obj[0]);
 		/* don't unpin kernel fb objects */
 		if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
 			r = amdgpu_bo_reserve(robj, true);
@@ -2640,11 +2721,6 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	}
 	amdgpu_fence_driver_resume(adev);
 
-	if (resume) {
-		r = amdgpu_ib_ring_tests(adev);
-		if (r)
-			DRM_ERROR("ib ring test failed (%d).\n", r);
-	}
 
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)
@@ -2671,6 +2747,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
 	if (r)
 		return r;
 
+	/* Make sure IB tests flushed */
+	flush_delayed_work(&adev->late_init_work);
+
 	/* blat the mode back in */
 	if (fbcon) {
 		if (!amdgpu_device_has_dc_support(adev)) {
@@ -2736,6 +2815,9 @@ static bool amdgpu_device_ip_check_soft_reset(struct amdgpu_device *adev)
 	if (amdgpu_sriov_vf(adev))
 		return true;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -2792,6 +2874,9 @@ static bool amdgpu_device_ip_need_full_reset(struct amdgpu_device *adev)
 {
 	int i;
 
+	if (amdgpu_asic_need_full_reset(adev))
+		return true;
+
 	for (i = 0; i < adev->num_ip_blocks; i++) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -3087,20 +3172,19 @@ static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
 	/* now we are okay to resume SMC/CP/SDMA */
 	r = amdgpu_device_ip_reinit_late_sriov(adev);
-	amdgpu_virt_release_full_gpu(adev, true);
 	if (r)
 		goto error;
 
 	amdgpu_irq_gpu_reset_resume_helper(adev);
 	r = amdgpu_ib_ring_tests(adev);
 
+error:
+	amdgpu_virt_release_full_gpu(adev, true);
 	if (!r && adev->virt.gim_feature & AMDGIM_FEATURE_GIM_FLR_VRAMLOST) {
 		atomic_inc(&adev->vram_lost_counter);
 		r = amdgpu_device_handle_vram_lost(adev);
 	}
 
-error:
-
 	return r;
 }
 
@@ -3117,7 +3201,6 @@ error:
 int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 			      struct amdgpu_job *job, bool force)
 {
-	struct drm_atomic_state *state = NULL;
 	int i, r, resched;
 
 	if (!force && !amdgpu_device_ip_check_soft_reset(adev)) {
@@ -3140,10 +3223,6 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	/* block TTM */
 	resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
 
-	/* store modesetting */
-	if (amdgpu_device_has_dc_support(adev))
-		state = drm_atomic_helper_suspend(adev->ddev);
-
 	/* block all schedulers and reset given job's ring */
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		struct amdgpu_ring *ring = adev->rings[i];
@@ -3183,10 +3262,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 		kthread_unpark(ring->sched.thread);
 	}
 
-	if (amdgpu_device_has_dc_support(adev)) {
-		if (drm_atomic_helper_resume(adev->ddev, state))
-			dev_info(adev->dev, "drm resume failed:%d\n", r);
-	} else {
+	if (!amdgpu_device_has_dc_support(adev)) {
 		drm_helper_resume_force_mode(adev->ddev);
 	}
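
The largest functional addition in this diff is amdgpu_device_check_smu_prv_buffer_size(), which validates the new amdgpu_smu_memory_pool_size module parameter (0 disables the pool; 1, 2, 4 or 8 select the size in 256 MB units) against total system RAM before the result is folded into the GART size in amdgpu_device_gart_location(). The fragment below is a minimal user-space sketch of that sizing rule, reusing the thresholds from the hunk above; the helper name and the standalone program are illustrative only, not kernel API.

/*
 * Standalone illustration of the pool-size rule from
 * amdgpu_device_check_smu_prv_buffer_size(): the parameter selects the
 * pool size as (value << 28) bytes, i.e. 256 MB units, and the request
 * is dropped when system RAM is below the ~3 GB (for 1/2) or ~7 GB
 * (for 4/8) thresholds.  smu_pool_bytes() is a made-up name.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t smu_pool_bytes(unsigned int pool_setting, uint64_t total_ram)
{
	const uint64_t three_gb = 0xB8000000ULL;	/* threshold for settings 1 and 2 */
	const uint64_t seven_gb = 0x1B8000000ULL;	/* threshold for settings 4 and 8 */

	if (pool_setting == 0)
		return 0;				/* feature disabled */

	if (pool_setting == 1 || pool_setting == 2) {
		if (total_ram < three_gb)
			return 0;			/* not enough system memory */
	} else if (pool_setting == 4 || pool_setting == 8) {
		if (total_ram < seven_gb)
			return 0;
	} else {
		return 0;				/* unsupported value */
	}

	return (uint64_t)pool_setting << 28;		/* 256 MB units */
}

int main(void)
{
	/* 16 GB of RAM with amdgpu.smu_memory_pool_size=2 -> 512 MB reservation */
	printf("%llu MB\n",
	       (unsigned long long)(smu_pool_bytes(2, 16ULL << 30) >> 20));
	return 0;
}

With 16 GB of RAM and a setting of 2 the sketch prints "512 MB", matching the 2 << 28 computation in the patch; in the kernel path the same value then grows mc->gart_size.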