author:     Dave Airlie <airlied@redhat.com>    2018-09-21 09:52:34 +1000
committer:  Dave Airlie <airlied@redhat.com>    2018-09-21 09:52:53 +1000
commit:     36c9c3c91128e2b892c9be0dd9ee9bd82cbe82ad (patch)
tree:       687db2e37b7fdcb4bd756a078812d049da18c804 /drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
parent:     0320ac5188eab5c6e8b92b110d1eae967ac272d2 (diff)
parent:     846311ae68f3c78365ebf3dff505c99e7da861cf (diff)
Merge branch 'drm-next-4.20' of git://people.freedesktop.org/~agd5f/linux into drm-next
This is a new pull for drm-next, on top of last week's, with the following
changes:
- Fixed 64 bit divide
- Fixed vram type on vega20
- Misc vega20 fixes
- Misc DC fixes
- Fix GDS/GWS/OA domain handling
Previous changes from last week:
amdgpu/kfd:
- Picasso (new APU) support
- Raven2 (new APU) support
- Vega20 enablement
- ACP powergating improvements
- Add ABGR/XBGR display support
- VCN JPEG engine support
- Initial xGMI support
- Use load balancing for engine scheduling
- Lots of new documentation
- Rework and clean up i2c and aux handling in DC
- Add DP YCbCr 4:2:0 support in DC
- Add DMCU firmware loading for Raven (used for ABM and PSR)
- New debugfs features in DC
- LVDS support in DC
- Implement wave kill for gfx/compute (lightweight reset for shaders)
- Use AGP aperture to avoid gart mappings when possible
- GPUVM performance improvements
- Bulk moves for more efficient GPUVM LRU handling
- Merge amdgpu and amdkfd into one module
- Enable gfxoff and stutter mode on Raven
- Misc cleanups
Scheduler:
- Load balancing support
- Bug fixes
ttm:
- Bulk move functionality
- Bug fixes
radeon:
- Misc cleanups
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180920150438.12693-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c | 25
1 file changed, 21 insertions(+), 4 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
index f55f72a37ca8..1fa8bc337859 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.c
@@ -277,6 +277,7 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_PITCAIRN:
 	case CHIP_VERDE:
 	case CHIP_OLAND:
+	case CHIP_HAINAN:
 		return AMDGPU_FW_LOAD_DIRECT;
 #endif
 #ifdef CONFIG_DRM_AMDGPU_CIK
@@ -303,12 +304,11 @@ amdgpu_ucode_get_load_type(struct amdgpu_device *adev, int load_type)
 	case CHIP_VEGA10:
 	case CHIP_RAVEN:
 	case CHIP_VEGA12:
+	case CHIP_VEGA20:
 		if (!load_type)
 			return AMDGPU_FW_LOAD_DIRECT;
 		else
 			return AMDGPU_FW_LOAD_PSP;
-	case CHIP_VEGA20:
-		return AMDGPU_FW_LOAD_DIRECT;
 	default:
 		DRM_ERROR("Unknown firmware load type\n");
 	}
@@ -322,6 +322,7 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 {
 	const struct common_firmware_header *header = NULL;
 	const struct gfx_firmware_header_v1_0 *cp_hdr = NULL;
+	const struct dmcu_firmware_header_v1_0 *dmcu_hdr = NULL;
 
 	if (NULL == ucode->fw)
 		return 0;
@@ -333,8 +334,8 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 		return 0;
 
 	header = (const struct common_firmware_header *)ucode->fw->data;
-
 	cp_hdr = (const struct gfx_firmware_header_v1_0 *)ucode->fw->data;
+	dmcu_hdr = (const struct dmcu_firmware_header_v1_0 *)ucode->fw->data;
 
 	if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP ||
 	    (ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC1 &&
@@ -343,7 +344,9 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 	     ucode->ucode_id != AMDGPU_UCODE_ID_CP_MEC2_JT &&
 	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL &&
 	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_GPM_MEM &&
-	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM)) {
+	     ucode->ucode_id != AMDGPU_UCODE_ID_RLC_RESTORE_LIST_SRM_MEM &&
+	     ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_ERAM &&
+	     ucode->ucode_id != AMDGPU_UCODE_ID_DMCU_INTV)) {
 		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes);
 
 		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
@@ -365,6 +368,20 @@ static int amdgpu_ucode_init_single_fw(struct amdgpu_device *adev,
 					      le32_to_cpu(header->ucode_array_offset_bytes) +
 					      le32_to_cpu(cp_hdr->jt_offset) * 4),
 		       ucode->ucode_size);
+	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_ERAM) {
+		ucode->ucode_size = le32_to_cpu(header->ucode_size_bytes) -
+			le32_to_cpu(dmcu_hdr->intv_size_bytes);
+
+		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
+					      le32_to_cpu(header->ucode_array_offset_bytes)),
+		       ucode->ucode_size);
+	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_DMCU_INTV) {
+		ucode->ucode_size = le32_to_cpu(dmcu_hdr->intv_size_bytes);
+
+		memcpy(ucode->kaddr, (void *)((uint8_t *)ucode->fw->data +
+					      le32_to_cpu(header->ucode_array_offset_bytes) +
+					      le32_to_cpu(dmcu_hdr->intv_offset_bytes)),
+		       ucode->ucode_size);
 	} else if (ucode->ucode_id == AMDGPU_UCODE_ID_RLC_RESTORE_LIST_CNTL) {
 		ucode->ucode_size = adev->gfx.rlc.save_restore_list_cntl_size_bytes;
 		memcpy(ucode->kaddr, adev->gfx.rlc.save_restore_list_cntl,
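For context on the DMCU hunks above: the firmware image carries one payload starting at
ucode_array_offset_bytes, and the new code carves it into two regions, the ERAM program
(the payload minus the interrupt vectors) and the INTV block of intv_size_bytes located
intv_offset_bytes into the payload. The stand-alone C sketch below mirrors only that
arithmetic on a fabricated buffer; the struct dmcu_hdr_sketch type, the image layout and
the sizes are illustrative assumptions, and only the four field names are taken from the
diff. It is not the kernel code path.

/*
 * Minimal sketch of the DMCU ERAM/INTV split, under the assumptions stated
 * above. The header struct, image and buffers here are fabricated for
 * illustration; only the field names follow the diff.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical stand-in for the header fields the diff reads. */
struct dmcu_hdr_sketch {
	uint32_t ucode_size_bytes;         /* total payload size                */
	uint32_t ucode_array_offset_bytes; /* payload offset within the image   */
	uint32_t intv_offset_bytes;        /* INTV offset within the payload    */
	uint32_t intv_size_bytes;          /* INTV size in bytes                */
};

int main(void)
{
	/* Fabricated image: 16-byte "header" region, then a 24-byte payload
	 * whose last 8 bytes stand in for the interrupt vectors (INTV). */
	uint8_t image[16 + 24];
	for (size_t i = 0; i < sizeof(image); i++)
		image[i] = (uint8_t)i;

	struct dmcu_hdr_sketch hdr = {
		.ucode_size_bytes = 24,
		.ucode_array_offset_bytes = 16,
		.intv_offset_bytes = 16,  /* vectors at the tail of the payload */
		.intv_size_bytes = 8,
	};

	/* ERAM: everything in the payload except the interrupt vectors. */
	uint32_t eram_size = hdr.ucode_size_bytes - hdr.intv_size_bytes;
	uint8_t *eram = malloc(eram_size);
	/* INTV: intv_size_bytes starting intv_offset_bytes into the payload. */
	uint8_t *intv = malloc(hdr.intv_size_bytes);
	if (!eram || !intv)
		return 1;

	memcpy(eram, image + hdr.ucode_array_offset_bytes, eram_size);
	memcpy(intv, image + hdr.ucode_array_offset_bytes + hdr.intv_offset_bytes,
	       hdr.intv_size_bytes);

	printf("ERAM: %u bytes, first byte 0x%02x\n",
	       (unsigned)eram_size, (unsigned)eram[0]);
	printf("INTV: %u bytes, first byte 0x%02x\n",
	       (unsigned)hdr.intv_size_bytes, (unsigned)intv[0]);

	free(eram);
	free(intv);
	return 0;
}

Note that the ERAM copy in the diff takes ucode_size_bytes - intv_size_bytes from the
front of the payload, so the two regions only avoid overlapping when the interrupt
vectors sit at the tail of the payload (intv_offset_bytes equal to that difference),
which is the layout the sketch assumes.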