author		Dave Airlie <airlied@redhat.com>	2017-10-09 11:00:16 +1000
committer	Dave Airlie <airlied@redhat.com>	2017-10-09 11:00:16 +1000
commit		bb7a9c8d712f37385a706a594d6edf6e6d2669d0 (patch)
tree		701a317ca8ecc2bedc40577b657dcdda3e7428c5 /drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
parent		15438ab06515b093d61e2f35bb27d21e5e7f966e (diff)
parent		d3f04c98ead2b89887e1e3c09b26e4917bacdd9e (diff)
Merge branch 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux into drm-next
More new stuff for 4.15. Highlights:
- Add clock query interface for raven
- Add new FENCE_TO_HANDLE ioctl
- UVD video encode ring support on polaris
- transparent huge page DMA support
- deadlock fixes
- compute pipe lru tweaks
- powerplay cleanups and regression fixes
- fix duplicate symbol issue with radeon and amdgpu
- misc bug fixes

* 'drm-next-4.15' of git://people.freedesktop.org/~agd5f/linux: (72 commits)
  drm/radeon/dp: make radeon_dp_get_dp_link_config static
  drm/radeon: move ci_send_msg_to_smc to where it's used
  drm/amd/sched: fix deadlock caused by unsignaled fences of deleted jobs
  drm/amd/sched: NULL out the s_fence field after run_job
  drm/amd/sched: move adding finish callback to amd_sched_job_begin
  drm/amd/sched: fix an outdated comment
  drm/amd/sched: rename amd_sched_entity_pop_job
  drm/amdgpu: minor coding style fix
  drm/ttm: add transparent huge page support for DMA allocations v2
  drm/ttm: add support for different pool sizes
  drm/ttm: remove unsued options from ttm_mem_global_alloc_page
  drm/amdgpu: add uvd enc irq
  drm/amdgpu: add uvd enc ib test
  drm/amdgpu: add uvd enc ring test
  drm/amdgpu: add uvd enc vm functions (v2)
  drm/amdgpu: add uvd enc into run queue
  drm/amdgpu: add uvd enc rings
  drm/amdgpu: add new uvd enc ring methods
  drm/amdgpu: add uvd enc command in header
  drm/amdgpu: add uvd enc registers in header
  ...
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c	44
1 file changed, 20 insertions(+), 24 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
index 045988b18bc3..904a1bab9b9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
+++ b/drivers/gpu/drm/amd/amdgpu/nbio_v6_1.c
@@ -215,31 +215,27 @@ void nbio_v6_1_get_clockgating_state(struct amdgpu_device *adev, u32 *flags)
*flags |= AMD_CG_SUPPORT_BIF_LS;
}
-struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg;
-struct nbio_pcie_index_data nbio_v6_1_pcie_index_data;
+const struct nbio_hdp_flush_reg nbio_v6_1_hdp_flush_reg = {
+ .hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ),
+ .hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE),
+ .ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK,
+ .ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK,
+ .ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK,
+ .ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK,
+ .ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK,
+ .ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK,
+ .ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK,
+ .ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK,
+ .ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK,
+ .ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK,
+ .ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK,
+ .ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK
+};
-int nbio_v6_1_init(struct amdgpu_device *adev)
-{
- nbio_v6_1_hdp_flush_reg.hdp_flush_req_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_REQ);
- nbio_v6_1_hdp_flush_reg.hdp_flush_done_offset = SOC15_REG_OFFSET(NBIO, 0, mmBIF_BX_PF0_GPU_HDP_FLUSH_DONE);
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP0_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP1_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp2 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP2_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp3 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP3_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp4 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP4_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp5 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP5_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp6 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP6_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp7 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP7_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp8 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP8_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_cp9 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__CP9_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma0 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA0_MASK;
- nbio_v6_1_hdp_flush_reg.ref_and_mask_sdma1 = BIF_BX_PF0_GPU_HDP_FLUSH_DONE__SDMA1_MASK;
-
- nbio_v6_1_pcie_index_data.index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX);
- nbio_v6_1_pcie_index_data.data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA);
-
- return 0;
-}
+const struct nbio_pcie_index_data nbio_v6_1_pcie_index_data = {
+ .index_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX),
+ .data_offset = SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA),
+};
void nbio_v6_1_detect_hw_virt(struct amdgpu_device *adev)
{