author     Dave Airlie <airlied@redhat.com>  2018-09-21 09:52:34 +1000
committer  Dave Airlie <airlied@redhat.com>  2018-09-21 09:52:53 +1000
commit     36c9c3c91128e2b892c9be0dd9ee9bd82cbe82ad (patch)
tree       687db2e37b7fdcb4bd756a078812d049da18c804 /drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
parent     0320ac5188eab5c6e8b92b110d1eae967ac272d2 (diff)
parent     846311ae68f3c78365ebf3dff505c99e7da861cf (diff)
Merge branch 'drm-next-4.20' of git://people.freedesktop.org/~agd5f/linux into drm-next
This is a new pull for drm-next on top of last week's pull, with the following
changes:
- Fixed 64 bit divide
- Fixed vram type on vega20
- Misc vega20 fixes
- Misc DC fixes
- Fix GDS/GWS/OA domain handling
Previous changes from last week:
amdgpu/kfd:
- Picasso (new APU) support
- Raven2 (new APU) support
- Vega20 enablement
- ACP powergating improvements
- Add ABGR/XBGR display support
- VCN JPEG engine support
- Initial xGMI support
- Use load balancing for engine scheduling
- Lots of new documentation
- Rework and clean up i2c and aux handling in DC
- Add DP YCbCr 4:2:0 support in DC
- Add DMCU firmware loading for Raven (used for ABM and PSR)
- New debugfs features in DC
- LVDS support in DC
- Implement wave kill for gfx/compute (lightweight reset for shaders)
- Use AGP aperture to avoid gart mappings when possible
- GPUVM performance improvements
- Bulk moves for more efficient GPUVM LRU handling
- Merge amdgpu and amdkfd into one module
- Enable gfxoff and stutter mode on Raven
- Misc cleanups
Scheduler:
- Load balancing support
- Bug fixes
ttm:
- Bulk move functionality
- Bug fixes
radeon:
- Misc cleanups
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexdeucher@gmail.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20180920150438.12693-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 71
1 file changed, 71 insertions, 0 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index bb5a47a45790..6fa7ef446e46 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -30,6 +30,19 @@
 #include "amdgpu_irq.h"
 
+/* VA hole for 48bit addresses on Vega10 */
+#define AMDGPU_GMC_HOLE_START	0x0000800000000000ULL
+#define AMDGPU_GMC_HOLE_END	0xffff800000000000ULL
+
+/*
+ * Hardware is programmed as if the hole doesn't exist with start and end
+ * address values.
+ *
+ * This mask is used to remove the upper 16bits of the VA and so come up with
+ * the linear addr value.
+ */
+#define AMDGPU_GMC_HOLE_MASK	0x0000ffffffffffffULL
+
 struct firmware;
 
 /*
@@ -74,6 +87,20 @@ struct amdgpu_gmc_funcs {
 			    u64 *dst, u64 *flags);
 };
 
+struct amdgpu_xgmi {
+	/* from psp */
+	u64 device_id;
+	u64 hive_id;
+	/* fixed per family */
+	u64 node_segment_size;
+	/* physical node (0-3) */
+	unsigned physical_node_id;
+	/* number of nodes (0-4) */
+	unsigned num_physical_nodes;
+	/* gpu list in the same hive */
+	struct list_head head;
+};
+
 struct amdgpu_gmc {
 	resource_size_t		aper_size;
 	resource_size_t		aper_base;
@@ -81,11 +108,22 @@ struct amdgpu_gmc {
 	 * about vram size near mc fb location */
 	u64			mc_vram_size;
 	u64			visible_vram_size;
+	u64			agp_size;
+	u64			agp_start;
+	u64			agp_end;
 	u64			gart_size;
 	u64			gart_start;
 	u64			gart_end;
 	u64			vram_start;
 	u64			vram_end;
+	/* FB region: the same as the local vram region on a single GPU. In an
+	 * XGMI configuration this region covers all GPUs in the same hive,
+	 * and each GPU in the hive has the same view of this FB region.
+	 * GPU0's vram starts at offset (0 * segment size),
+	 * GPU1 starts at offset (1 * segment size), etc.
+	 */
+	u64			fb_start;
+	u64			fb_end;
 	unsigned		vram_width;
 	u64			real_vram_size;
 	int			vram_mtrr;
@@ -109,8 +147,17 @@ struct amdgpu_gmc {
 	atomic_t		vm_fault_info_updated;
 
 	const struct amdgpu_gmc_funcs	*gmc_funcs;
+
+	struct amdgpu_xgmi xgmi;
 };
 
+#define amdgpu_gmc_flush_gpu_tlb(adev, vmid) (adev)->gmc.gmc_funcs->flush_gpu_tlb((adev), (vmid))
+#define amdgpu_gmc_emit_flush_gpu_tlb(r, vmid, addr) (r)->adev->gmc.gmc_funcs->emit_flush_gpu_tlb((r), (vmid), (addr))
+#define amdgpu_gmc_emit_pasid_mapping(r, vmid, pasid) (r)->adev->gmc.gmc_funcs->emit_pasid_mapping((r), (vmid), (pasid))
+#define amdgpu_gmc_set_pte_pde(adev, pt, idx, addr, flags) (adev)->gmc.gmc_funcs->set_pte_pde((adev), (pt), (idx), (addr), (flags))
+#define amdgpu_gmc_get_vm_pde(adev, level, dst, flags) (adev)->gmc.gmc_funcs->get_vm_pde((adev), (level), (dst), (flags))
+#define amdgpu_gmc_get_pte_flags(adev, flags) (adev)->gmc.gmc_funcs->get_vm_pte_flags((adev), (flags))
+
 /**
  * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
  *
@@ -126,4 +173,28 @@ static inline bool amdgpu_gmc_vram_full_visible(struct amdgpu_gmc *gmc)
 	return (gmc->real_vram_size == gmc->visible_vram_size);
 }
 
+/**
+ * amdgpu_gmc_sign_extend - sign extend the given gmc address
+ *
+ * @addr: address to extend
+ */
+static inline uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
+{
+	if (addr >= AMDGPU_GMC_HOLE_START)
+		addr |= AMDGPU_GMC_HOLE_END;
+
+	return addr;
+}
+
+void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level,
+			       uint64_t *addr, uint64_t *flags);
+uint64_t amdgpu_gmc_pd_addr(struct amdgpu_bo *bo);
+uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo);
+void amdgpu_gmc_vram_location(struct amdgpu_device *adev, struct amdgpu_gmc *mc,
+			      u64 base);
+void amdgpu_gmc_gart_location(struct amdgpu_device *adev,
+			      struct amdgpu_gmc *mc);
+void amdgpu_gmc_agp_location(struct amdgpu_device *adev,
+			     struct amdgpu_gmc *mc);
+
 #endif
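A quick illustration of the two mechanisms this header introduces may help. The
following standalone sketch is not kernel code: the three hole constants and the
body of amdgpu_gmc_sign_extend() are copied from the hunk above, while the main()
harness, the sample address, and the 32 GiB node_segment_size are made-up values
for demonstration only. It shows how an address at or above AMDGPU_GMC_HOLE_START
is sign-extended into the upper canonical range and how AMDGPU_GMC_HOLE_MASK
recovers the linear value, plus the per-node FB offsets implied by the fb_start
comment.

/*
 * Standalone sketch, not kernel code: demonstrates the VA-hole round
 * trip and the xGMI segment arithmetic described in the diff above.
 */
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define AMDGPU_GMC_HOLE_START 0x0000800000000000ULL
#define AMDGPU_GMC_HOLE_END   0xffff800000000000ULL
#define AMDGPU_GMC_HOLE_MASK  0x0000ffffffffffffULL

/* Same logic as the inline helper added by this patch. */
static uint64_t amdgpu_gmc_sign_extend(uint64_t addr)
{
	if (addr >= AMDGPU_GMC_HOLE_START)
		addr |= AMDGPU_GMC_HOLE_END;
	return addr;
}

int main(void)
{
	/* A 48-bit VA in the upper half of the address space. */
	uint64_t va = 0x0000900000000000ULL;
	uint64_t canonical = amdgpu_gmc_sign_extend(va);

	/* The mask strips the upper 16 bits again, recovering the
	 * linear value the hardware is actually programmed with. */
	uint64_t linear = canonical & AMDGPU_GMC_HOLE_MASK;

	printf("va        0x%016" PRIx64 "\n", va);        /* 0x0000900000000000 */
	printf("canonical 0x%016" PRIx64 "\n", canonical); /* 0xffff900000000000 */
	printf("linear    0x%016" PRIx64 "\n", linear);    /* 0x0000900000000000 */

	/* xGMI: per the fb_start comment, node N's VRAM sits at offset
	 * N * node_segment_size inside the shared FB region. The 32 GiB
	 * segment size here is a hypothetical example value. */
	uint64_t node_segment_size = 32ULL << 30;
	for (unsigned node = 0; node < 4; node++)
		printf("node %u fb offset 0x%010" PRIx64 "\n",
		       node, node * node_segment_size);
	return 0;
}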