Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/cik.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/cik.c | 323
1 file changed, 201 insertions(+), 122 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 793b1470284d..8e59e65efd44 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -755,74 +755,74 @@ static void cik_init_golden_registers(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
-		amdgpu_program_register_sequence(adev,
-						 bonaire_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(bonaire_mgcg_cgcg_init));
-		amdgpu_program_register_sequence(adev,
-						 bonaire_golden_registers,
-						 (const u32)ARRAY_SIZE(bonaire_golden_registers));
-		amdgpu_program_register_sequence(adev,
-						 bonaire_golden_common_registers,
-						 (const u32)ARRAY_SIZE(bonaire_golden_common_registers));
-		amdgpu_program_register_sequence(adev,
-						 bonaire_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(bonaire_golden_spm_registers));
+		amdgpu_device_program_register_sequence(adev,
+							bonaire_mgcg_cgcg_init,
+							ARRAY_SIZE(bonaire_mgcg_cgcg_init));
+		amdgpu_device_program_register_sequence(adev,
+							bonaire_golden_registers,
+							ARRAY_SIZE(bonaire_golden_registers));
+		amdgpu_device_program_register_sequence(adev,
+							bonaire_golden_common_registers,
+							ARRAY_SIZE(bonaire_golden_common_registers));
+		amdgpu_device_program_register_sequence(adev,
+							bonaire_golden_spm_registers,
+							ARRAY_SIZE(bonaire_golden_spm_registers));
 		break;
 	case CHIP_KABINI:
-		amdgpu_program_register_sequence(adev,
-						 kalindi_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
-		amdgpu_program_register_sequence(adev,
-						 kalindi_golden_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_registers));
-		amdgpu_program_register_sequence(adev,
-						 kalindi_golden_common_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
-		amdgpu_program_register_sequence(adev,
-						 kalindi_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+		amdgpu_device_program_register_sequence(adev,
+							kalindi_mgcg_cgcg_init,
+							ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+		amdgpu_device_program_register_sequence(adev,
+							kalindi_golden_registers,
+							ARRAY_SIZE(kalindi_golden_registers));
+		amdgpu_device_program_register_sequence(adev,
+							kalindi_golden_common_registers,
+							ARRAY_SIZE(kalindi_golden_common_registers));
+		amdgpu_device_program_register_sequence(adev,
+							kalindi_golden_spm_registers,
+							ARRAY_SIZE(kalindi_golden_spm_registers));
 		break;
 	case CHIP_MULLINS:
-		amdgpu_program_register_sequence(adev,
-						 kalindi_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(kalindi_mgcg_cgcg_init));
-		amdgpu_program_register_sequence(adev,
-						 godavari_golden_registers,
-						 (const u32)ARRAY_SIZE(godavari_golden_registers));
-		amdgpu_program_register_sequence(adev,
-						 kalindi_golden_common_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_common_registers));
-		amdgpu_program_register_sequence(adev,
-						 kalindi_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(kalindi_golden_spm_registers));
+		amdgpu_device_program_register_sequence(adev,
+							kalindi_mgcg_cgcg_init,
+							ARRAY_SIZE(kalindi_mgcg_cgcg_init));
+		amdgpu_device_program_register_sequence(adev,
+							godavari_golden_registers,
+							ARRAY_SIZE(godavari_golden_registers));
+		amdgpu_device_program_register_sequence(adev,
+							kalindi_golden_common_registers,
+							ARRAY_SIZE(kalindi_golden_common_registers));
+		amdgpu_device_program_register_sequence(adev,
+							kalindi_golden_spm_registers,
+							ARRAY_SIZE(kalindi_golden_spm_registers));
 		break;
 	case CHIP_KAVERI:
-		amdgpu_program_register_sequence(adev,
-						 spectre_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(spectre_mgcg_cgcg_init));
-		amdgpu_program_register_sequence(adev,
-						 spectre_golden_registers,
-						 (const u32)ARRAY_SIZE(spectre_golden_registers));
-		amdgpu_program_register_sequence(adev,
-						 spectre_golden_common_registers,
-						 (const u32)ARRAY_SIZE(spectre_golden_common_registers));
-		amdgpu_program_register_sequence(adev,
-						 spectre_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(spectre_golden_spm_registers));
+		amdgpu_device_program_register_sequence(adev,
+							spectre_mgcg_cgcg_init,
+							ARRAY_SIZE(spectre_mgcg_cgcg_init));
+		amdgpu_device_program_register_sequence(adev,
+							spectre_golden_registers,
+							ARRAY_SIZE(spectre_golden_registers));
+		amdgpu_device_program_register_sequence(adev,
+							spectre_golden_common_registers,
+							ARRAY_SIZE(spectre_golden_common_registers));
+		amdgpu_device_program_register_sequence(adev,
+							spectre_golden_spm_registers,
+							ARRAY_SIZE(spectre_golden_spm_registers));
 		break;
 	case CHIP_HAWAII:
-		amdgpu_program_register_sequence(adev,
-						 hawaii_mgcg_cgcg_init,
-						 (const u32)ARRAY_SIZE(hawaii_mgcg_cgcg_init));
-		amdgpu_program_register_sequence(adev,
-						 hawaii_golden_registers,
-						 (const u32)ARRAY_SIZE(hawaii_golden_registers));
-		amdgpu_program_register_sequence(adev,
-						 hawaii_golden_common_registers,
-						 (const u32)ARRAY_SIZE(hawaii_golden_common_registers));
-		amdgpu_program_register_sequence(adev,
-						 hawaii_golden_spm_registers,
-						 (const u32)ARRAY_SIZE(hawaii_golden_spm_registers));
+		amdgpu_device_program_register_sequence(adev,
+							hawaii_mgcg_cgcg_init,
+							ARRAY_SIZE(hawaii_mgcg_cgcg_init));
+		amdgpu_device_program_register_sequence(adev,
+							hawaii_golden_registers,
+							ARRAY_SIZE(hawaii_golden_registers));
+		amdgpu_device_program_register_sequence(adev,
+							hawaii_golden_common_registers,
+							ARRAY_SIZE(hawaii_golden_common_registers));
+		amdgpu_device_program_register_sequence(adev,
+							hawaii_golden_spm_registers,
+							ARRAY_SIZE(hawaii_golden_spm_registers));
 		break;
 	default:
 		break;
@@ -1023,22 +1023,101 @@ static const struct amdgpu_allowed_register_entry cik_allowed_read_registers[] =
 	{mmPA_SC_RASTER_CONFIG_1, true},
 };
 
-static uint32_t cik_read_indexed_register(struct amdgpu_device *adev,
-					  u32 se_num, u32 sh_num,
-					  u32 reg_offset)
+
+static uint32_t cik_get_register_value(struct amdgpu_device *adev,
+				       bool indexed, u32 se_num,
+				       u32 sh_num, u32 reg_offset)
 {
-	uint32_t val;
+	if (indexed) {
+		uint32_t val;
+		unsigned se_idx = (se_num == 0xffffffff) ? 0 : se_num;
+		unsigned sh_idx = (sh_num == 0xffffffff) ? 0 : sh_num;
+
+		switch (reg_offset) {
+		case mmCC_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].rb_backend_disable;
+		case mmGC_USER_RB_BACKEND_DISABLE:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].user_rb_backend_disable;
+		case mmPA_SC_RASTER_CONFIG:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config;
+		case mmPA_SC_RASTER_CONFIG_1:
+			return adev->gfx.config.rb_config[se_idx][sh_idx].raster_config_1;
+		}
 
-	mutex_lock(&adev->grbm_idx_mutex);
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
+		mutex_lock(&adev->grbm_idx_mutex);
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff);
 
-	val = RREG32(reg_offset);
+		val = RREG32(reg_offset);
 
-	if (se_num != 0xffffffff || sh_num != 0xffffffff)
-		amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
-	mutex_unlock(&adev->grbm_idx_mutex);
-	return val;
+		if (se_num != 0xffffffff || sh_num != 0xffffffff)
+			amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff);
+		mutex_unlock(&adev->grbm_idx_mutex);
+		return val;
+	} else {
+		unsigned idx;
+
+		switch (reg_offset) {
+		case mmGB_ADDR_CONFIG:
+			return adev->gfx.config.gb_addr_config;
+		case mmMC_ARB_RAMCFG:
+			return adev->gfx.config.mc_arb_ramcfg;
+		case mmGB_TILE_MODE0:
+		case mmGB_TILE_MODE1:
+		case mmGB_TILE_MODE2:
+		case mmGB_TILE_MODE3:
+		case mmGB_TILE_MODE4:
+		case mmGB_TILE_MODE5:
+		case mmGB_TILE_MODE6:
+		case mmGB_TILE_MODE7:
+		case mmGB_TILE_MODE8:
+		case mmGB_TILE_MODE9:
+		case mmGB_TILE_MODE10:
+		case mmGB_TILE_MODE11:
+		case mmGB_TILE_MODE12:
+		case mmGB_TILE_MODE13:
+		case mmGB_TILE_MODE14:
+		case mmGB_TILE_MODE15:
+		case mmGB_TILE_MODE16:
+		case mmGB_TILE_MODE17:
+		case mmGB_TILE_MODE18:
+		case mmGB_TILE_MODE19:
+		case mmGB_TILE_MODE20:
+		case mmGB_TILE_MODE21:
+		case mmGB_TILE_MODE22:
+		case mmGB_TILE_MODE23:
+		case mmGB_TILE_MODE24:
+		case mmGB_TILE_MODE25:
+		case mmGB_TILE_MODE26:
+		case mmGB_TILE_MODE27:
+		case mmGB_TILE_MODE28:
+		case mmGB_TILE_MODE29:
+		case mmGB_TILE_MODE30:
+		case mmGB_TILE_MODE31:
+			idx = (reg_offset - mmGB_TILE_MODE0);
+			return adev->gfx.config.tile_mode_array[idx];
+		case mmGB_MACROTILE_MODE0:
+		case mmGB_MACROTILE_MODE1:
+		case mmGB_MACROTILE_MODE2:
+		case mmGB_MACROTILE_MODE3:
+		case mmGB_MACROTILE_MODE4:
+		case mmGB_MACROTILE_MODE5:
+		case mmGB_MACROTILE_MODE6:
+		case mmGB_MACROTILE_MODE7:
+		case mmGB_MACROTILE_MODE8:
+		case mmGB_MACROTILE_MODE9:
+		case mmGB_MACROTILE_MODE10:
+		case mmGB_MACROTILE_MODE11:
+		case mmGB_MACROTILE_MODE12:
+		case mmGB_MACROTILE_MODE13:
+		case mmGB_MACROTILE_MODE14:
+		case mmGB_MACROTILE_MODE15:
+			idx = (reg_offset - mmGB_MACROTILE_MODE0);
+			return adev->gfx.config.macrotile_mode_array[idx];
+		default:
+			return RREG32(reg_offset);
+		}
+	}
 }
 
 static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
@@ -1048,13 +1127,13 @@ static int cik_read_register(struct amdgpu_device *adev, u32 se_num,
 	*value = 0;
 	for (i = 0; i < ARRAY_SIZE(cik_allowed_read_registers); i++) {
+		bool indexed = cik_allowed_read_registers[i].grbm_indexed;
+
 		if (reg_offset != cik_allowed_read_registers[i].reg_offset)
 			continue;
 
-		*value = cik_allowed_read_registers[i].grbm_indexed ?
-			cik_read_indexed_register(adev, se_num,
-						  sh_num, reg_offset) :
-			RREG32(reg_offset);
+		*value = cik_get_register_value(adev, indexed, se_num, sh_num,
+						reg_offset);
 		return 0;
 	}
 	return -EINVAL;
@@ -1167,7 +1246,7 @@ static int cik_gpu_pci_config_reset(struct amdgpu_device *adev)
 	/* disable BM */
 	pci_clear_master(adev->pdev);
 	/* reset */
-	amdgpu_pci_config_reset(adev);
+	amdgpu_device_pci_config_reset(adev);
 
 	udelay(100);
@@ -1787,7 +1866,7 @@ static int cik_common_early_init(void *handle)
 	adev->firmware.load_type = amdgpu_ucode_get_load_type(adev,
						amdgpu_fw_load_type);
 
-	amdgpu_get_pcie_info(adev);
+	amdgpu_device_get_pcie_info(adev);
 
 	return 0;
 }
@@ -1895,77 +1974,77 @@ int cik_set_ip_blocks(struct amdgpu_device *adev)
 	switch (adev->asic_type) {
 	case CHIP_BONAIRE:
-		amdgpu_ip_block_add(adev, &cik_common_ip_block);
-		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
-		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
-		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
-			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
 		else if (amdgpu_device_has_dc_support(adev))
-			amdgpu_ip_block_add(adev, &dm_ip_block);
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #endif
 		else
-			amdgpu_ip_block_add(adev, &dce_v8_2_ip_block);
-		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
-		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
-		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
-		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_v8_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
 	case CHIP_HAWAII:
-		amdgpu_ip_block_add(adev, &cik_common_ip_block);
-		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
-		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
-		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
-			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
 		else if (amdgpu_device_has_dc_support(adev))
-			amdgpu_ip_block_add(adev, &dm_ip_block);
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #endif
 		else
-			amdgpu_ip_block_add(adev, &dce_v8_5_ip_block);
-		amdgpu_ip_block_add(adev, &gfx_v7_3_ip_block);
-		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
-		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
-		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_v8_5_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_3_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
 	case CHIP_KAVERI:
-		amdgpu_ip_block_add(adev, &cik_common_ip_block);
-		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
-		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
-		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
-			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
 		else if (amdgpu_device_has_dc_support(adev))
-			amdgpu_ip_block_add(adev, &dm_ip_block);
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #endif
 		else
-			amdgpu_ip_block_add(adev, &dce_v8_1_ip_block);
-		amdgpu_ip_block_add(adev, &gfx_v7_1_ip_block);
-		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
-		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
-		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_v8_1_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_1_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
 	case CHIP_KABINI:
 	case CHIP_MULLINS:
-		amdgpu_ip_block_add(adev, &cik_common_ip_block);
-		amdgpu_ip_block_add(adev, &gmc_v7_0_ip_block);
-		amdgpu_ip_block_add(adev, &cik_ih_ip_block);
-		amdgpu_ip_block_add(adev, &amdgpu_pp_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_common_ip_block);
+		amdgpu_device_ip_block_add(adev, &gmc_v7_0_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_ih_ip_block);
+		amdgpu_device_ip_block_add(adev, &amdgpu_pp_ip_block);
 		if (adev->enable_virtual_display)
-			amdgpu_ip_block_add(adev, &dce_virtual_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_virtual_ip_block);
 #if defined(CONFIG_DRM_AMD_DC)
 		else if (amdgpu_device_has_dc_support(adev))
-			amdgpu_ip_block_add(adev, &dm_ip_block);
+			amdgpu_device_ip_block_add(adev, &dm_ip_block);
 #endif
 		else
-			amdgpu_ip_block_add(adev, &dce_v8_3_ip_block);
-		amdgpu_ip_block_add(adev, &gfx_v7_2_ip_block);
-		amdgpu_ip_block_add(adev, &cik_sdma_ip_block);
-		amdgpu_ip_block_add(adev, &uvd_v4_2_ip_block);
-		amdgpu_ip_block_add(adev, &vce_v2_0_ip_block);
+			amdgpu_device_ip_block_add(adev, &dce_v8_3_ip_block);
+		amdgpu_device_ip_block_add(adev, &gfx_v7_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &cik_sdma_ip_block);
+		amdgpu_device_ip_block_add(adev, &uvd_v4_2_ip_block);
+		amdgpu_device_ip_block_add(adev, &vce_v2_0_ip_block);
 		break;
 	default:
 		/* FIXME: not supported yet */
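
The mechanical part of this patch is a rename: amdgpu_program_register_sequence(), amdgpu_ip_block_add(), amdgpu_pci_config_reset() and amdgpu_get_pcie_info() all gain an amdgpu_device_ prefix, and the (const u32) casts around ARRAY_SIZE() are dropped. The standalone sketch below illustrates why those casts were redundant; the table values and the local program_register_sequence() helper are invented for illustration, and the (offset, and_mask, or_mask) triple layout is an assumption about how these golden-register tables are conventionally organized, not something this diff spells out.

/* Standalone sketch (not driver code): ARRAY_SIZE() already yields a
 * compile-time constant that fits a u32 count parameter, so casting it
 * to (const u32) added nothing. Table contents are made up. */
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* assumed layout: flat u32 array read as (offset, and_mask, or_mask) triples */
static const uint32_t example_golden_registers[] = {
	0x028c, 0xffffffff, 0x00000003,
	0x2440, 0x07f80000, 0x01180000,
};

/* stand-in for amdgpu_device_program_register_sequence() */
static void program_register_sequence(const uint32_t *regs, uint32_t count)
{
	uint32_t i;

	for (i = 0; i < count; i += 3)
		printf("reg 0x%04x: and ~0x%08x, or 0x%08x\n",
		       (unsigned)regs[i], (unsigned)regs[i + 1],
		       (unsigned)regs[i + 2]);
}

int main(void)
{
	program_register_sequence(example_golden_registers,
				  ARRAY_SIZE(example_golden_registers));
	return 0;
}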
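
The substantive change is cik_get_register_value(): whitelisted registers that used to require a GRBM-indexed MMIO read (taking grbm_idx_mutex and calling amdgpu_gfx_select_se_sh() before and after) can now be answered from values cached in adev->gfx.config at init time, and non-indexed registers such as the GB_TILE_MODE*/GB_MACROTILE_MODE* banks are likewise served from the cached tile-mode arrays. The sketch below is a minimal userspace model of that pattern; the struct layout and register offsets are simplified stand-ins rather than the real amdgpu_gfx_config, and the SE/SH selection the driver still performs for uncached indexed reads is only noted in a comment.

/* Minimal model of the "return cached config instead of an MMIO read"
 * pattern. Struct fields and register offsets are illustrative stand-ins. */
#include <stdint.h>
#include <stdio.h>

#define SE_MAX 4
#define SH_MAX 2

/* hypothetical register offsets */
enum {
	mmCC_RB_BACKEND_DISABLE = 0x263d,
	mmPA_SC_RASTER_CONFIG   = 0xa0d4,
};

struct rb_config {
	uint32_t rb_backend_disable;
	uint32_t raster_config;
};

struct gfx_config {
	struct rb_config rb_config[SE_MAX][SH_MAX];
};

/* stand-in for RREG32(); the real driver would also select the requested
 * SE/SH under grbm_idx_mutex before reading an indexed register */
static uint32_t mmio_read(uint32_t reg)
{
	(void)reg;
	return 0xffffffff;
}

static uint32_t get_register_value(const struct gfx_config *cfg, int indexed,
				   uint32_t se_num, uint32_t sh_num,
				   uint32_t reg)
{
	if (indexed) {
		/* 0xffffffff means "broadcast"; fall back to instance 0 */
		uint32_t se = (se_num == 0xffffffff) ? 0 : se_num;
		uint32_t sh = (sh_num == 0xffffffff) ? 0 : sh_num;

		switch (reg) {
		case mmCC_RB_BACKEND_DISABLE:
			return cfg->rb_config[se][sh].rb_backend_disable;
		case mmPA_SC_RASTER_CONFIG:
			return cfg->rb_config[se][sh].raster_config;
		}
	}
	/* anything not cached still goes to the hardware */
	return mmio_read(reg);
}

int main(void)
{
	struct gfx_config cfg = { 0 };

	cfg.rb_config[0][0].raster_config = 0x16000012;
	printf("0x%08x\n",
	       (unsigned)get_register_value(&cfg, 1, 0xffffffff, 0xffffffff,
					    mmPA_SC_RASTER_CONFIG));
	return 0;
}

In the driver itself, cik_read_register() now funnels every whitelisted register through this single helper instead of choosing between cik_read_indexed_register() and a bare RREG32().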