Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/si.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/si.c	111
1 file changed, 108 insertions, 3 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/si.c b/drivers/gpu/drm/amd/amdgpu/si.c
index 1b449291f068..e5e336fd9e94 100644
--- a/drivers/gpu/drm/amd/amdgpu/si.c
+++ b/drivers/gpu/drm/amd/amdgpu/si.c
@@ -52,6 +52,8 @@
 #include "bif/bif_3_0_d.h"
 #include "bif/bif_3_0_sh_mask.h"
 
+#include "amdgpu_dm.h"
+
 static const u32 tahiti_golden_registers[] =
 {
 	mmAZALIA_SCLK_CONTROL, 0x00000030, 0x00000011,
@@ -1215,10 +1217,100 @@ static bool si_read_bios_from_rom(struct amdgpu_device *adev,
 	return true;
 }
 
-//xxx: not implemented
+static void si_set_clk_bypass_mode(struct amdgpu_device *adev)
+{
+	u32 tmp, i;
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_BYPASS_EN;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp |= SPLL_CTLREQ_CHG;
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(SPLL_STATUS) & SPLL_CHG_STATUS)
+			break;
+		udelay(1);
+	}
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL_2);
+	tmp &= ~(SPLL_CTLREQ_CHG | SCLK_MUX_UPDATE);
+	WREG32(CG_SPLL_FUNC_CNTL_2, tmp);
+
+	tmp = RREG32(MPLL_CNTL_MODE);
+	tmp &= ~MPLL_MCLK_SEL;
+	WREG32(MPLL_CNTL_MODE, tmp);
+}
+
+static void si_spll_powerdown(struct amdgpu_device *adev)
+{
+	u32 tmp;
+
+	tmp = RREG32(SPLL_CNTL_MODE);
+	tmp |= SPLL_SW_DIR_CONTROL;
+	WREG32(SPLL_CNTL_MODE, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_RESET;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(CG_SPLL_FUNC_CNTL);
+	tmp |= SPLL_SLEEP;
+	WREG32(CG_SPLL_FUNC_CNTL, tmp);
+
+	tmp = RREG32(SPLL_CNTL_MODE);
+	tmp &= ~SPLL_SW_DIR_CONTROL;
+	WREG32(SPLL_CNTL_MODE, tmp);
+}
+
+static int si_gpu_pci_config_reset(struct amdgpu_device *adev)
+{
+	u32 i;
+	int r = -EINVAL;
+
+	dev_info(adev->dev, "GPU pci config reset\n");
+
+	/* set mclk/sclk to bypass */
+	si_set_clk_bypass_mode(adev);
+	/* powerdown spll */
+	si_spll_powerdown(adev);
+	/* disable BM */
+	pci_clear_master(adev->pdev);
+	/* reset */
+	amdgpu_device_pci_config_reset(adev);
+
+	udelay(100);
+
+	/* wait for asic to come out of reset */
+	for (i = 0; i < adev->usec_timeout; i++) {
+		if (RREG32(mmCONFIG_MEMSIZE) != 0xffffffff) {
+			/* enable BM */
+			pci_set_master(adev->pdev);
+			adev->has_hw_reset = true;
+			r = 0;
+			break;
+		}
+		udelay(1);
+	}
+
+	return r;
+}
+
 static int si_asic_reset(struct amdgpu_device *adev)
 {
-	return 0;
+	int r;
+
+	dev_info(adev->dev, "PCI CONFIG reset\n");
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, true);
+
+	r = si_gpu_pci_config_reset(adev);
+
+	amdgpu_atombios_scratch_regs_engine_hung(adev, false);
+
+	return r;
 }
 
 static bool si_asic_supports_baco(struct amdgpu_device *adev)
@@ -1247,7 +1339,7 @@ static void si_vga_set_state(struct amdgpu_device *adev, bool state)
 	uint32_t temp;
 
 	temp = RREG32(CONFIG_CNTL);
-	if (state == false) {
+	if (!state) {
 		temp &= ~(1<<0);
 		temp |= (1<<1);
 	} else {
@@ -1779,6 +1871,10 @@ static int si_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk)
 	return 0;
 }
 
+static void si_pre_asic_init(struct amdgpu_device *adev)
+{
+}
+
 static const struct amdgpu_asic_funcs si_asic_funcs =
 {
 	.read_disabled_bios = &si_read_disabled_bios,
@@ -1800,6 +1896,7 @@ static const struct amdgpu_asic_funcs si_asic_funcs =
 	.need_reset_on_init = &si_need_reset_on_init,
 	.get_pcie_replay_count = &si_get_pcie_replay_count,
 	.supports_baco = &si_asic_supports_baco,
+	.pre_asic_init = &si_pre_asic_init,
 };
 
 static uint32_t si_get_rev_id(struct amdgpu_device *adev)
@@ -2546,6 +2643,10 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v6_0_ip_block);
 		amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
@@ -2560,6 +2661,10 @@ int si_set_ip_blocks(struct amdgpu_device *adev)
 		amdgpu_device_ip_block_add(adev, &si_smu_ip_block);
 		if (adev->enable_virtual_display)
 			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
+#if defined(CONFIG_DRM_AMD_DC) && defined(CONFIG_DRM_AMD_DC_SI)
+		else if (amdgpu_device_has_dc_support(adev))
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
+#endif
 		else
 			amdgpu_device_ip_block_add(adev, &dce_v6_4_ip_block);
 		amdgpu_device_ip_block_add(adev, &uvd_v3_1_ip_block);
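
For readers unfamiliar with the two patterns this patch leans on, the sketch below shows them in a self-contained form: the per-ASIC function-pointer table that the new .pre_asic_init hook and the filled-in si_asic_reset() plug into, and the poll-until-the-ASIC-answers loop used by si_gpu_pci_config_reset(). Everything prefixed with "demo_" is an illustrative stand-in, not actual amdgpu code.

/*
 * Minimal sketch of an asic_funcs-style dispatch table and a bounded
 * poll-for-reset-completion loop.  Names prefixed "demo_" are assumptions
 * made for illustration only; they do not exist in the amdgpu driver.
 */
#include <stdio.h>

struct demo_device;

struct demo_asic_funcs {
	int  (*reset)(struct demo_device *dev);          /* cf. si_asic_reset */
	void (*pre_asic_init)(struct demo_device *dev);  /* cf. si_pre_asic_init */
};

struct demo_device {
	const struct demo_asic_funcs *asic_funcs;
	unsigned int usec_timeout;
};

/* Pretend register read; the real driver uses RREG32(mmCONFIG_MEMSIZE). */
static unsigned int demo_read_memsize(struct demo_device *dev)
{
	(void)dev;
	return 0x1000;	/* ASIC responds immediately in this sketch */
}

static int demo_si_reset(struct demo_device *dev)
{
	unsigned int i;

	printf("PCI CONFIG reset\n");
	/* Same idea as the patch: poll until the register no longer reads
	 * all-ones (the "device not responding" value), bounded by a timeout. */
	for (i = 0; i < dev->usec_timeout; i++) {
		if (demo_read_memsize(dev) != 0xffffffffu)
			return 0;	/* ASIC came back, reset succeeded */
	}
	return -1;	/* timed out, report failure */
}

static void demo_si_pre_asic_init(struct demo_device *dev)
{
	(void)dev;	/* SI needs no pre-init work, hence the empty stub above */
}

static const struct demo_asic_funcs demo_si_asic_funcs = {
	.reset         = demo_si_reset,
	.pre_asic_init = demo_si_pre_asic_init,
};

int main(void)
{
	struct demo_device dev = {
		.asic_funcs   = &demo_si_asic_funcs,
		.usec_timeout = 100000,
	};

	/* Callers go through the per-ASIC table rather than naming
	 * si_asic_reset() directly, which is why the patch only has to
	 * fill in table entries to change reset behaviour. */
	dev.asic_funcs->pre_asic_init(&dev);
	return dev.asic_funcs->reset(&dev);
}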