From 5c964221003d9a31ad56d4784773c91a291cba97 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 14 Dec 2016 17:14:16 +0800 Subject: drm/amdgpu: refine gfx_v8 pg code. move en/disable GFX CP/SMU_HS PG to function gfx_v8_0_set_powergating_state Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 23 ++++++++++++----------- 1 file changed, 12 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 373374164bd5..5ab53d7b4bad 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -4024,17 +4024,6 @@ static void gfx_v8_0_init_pg(struct amdgpu_device *adev) WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8); gfx_v8_0_init_power_gating(adev); WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask); - if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { - cz_enable_sck_slow_down_on_power_up(adev, true); - cz_enable_sck_slow_down_on_power_down(adev, true); - } else { - cz_enable_sck_slow_down_on_power_up(adev, false); - cz_enable_sck_slow_down_on_power_down(adev, false); - } - if (adev->pg_flags & AMD_PG_SUPPORT_CP) - cz_enable_cp_power_gating(adev, true); - else - cz_enable_cp_power_gating(adev, false); } else if ((adev->asic_type == CHIP_POLARIS11) || (adev->asic_type == CHIP_POLARIS12)) { gfx_v8_0_init_csb(adev); @@ -5360,6 +5349,18 @@ static int gfx_v8_0_set_powergating_state(void *handle, case CHIP_CARRIZO: case CHIP_STONEY: + if (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS) { + cz_enable_sck_slow_down_on_power_up(adev, true); + cz_enable_sck_slow_down_on_power_down(adev, true); + } else { + cz_enable_sck_slow_down_on_power_up(adev, false); + cz_enable_sck_slow_down_on_power_down(adev, false); + } + if (adev->pg_flags & AMD_PG_SUPPORT_CP) + cz_enable_cp_power_gating(adev, true); + else + cz_enable_cp_power_gating(adev, false); + cz_update_gfx_cg_power_gating(adev, enable); if ((adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG) && enable) -- cgit From ae6a58e4090365f0ed6b24e0a67b8a08f6b55856 Mon Sep 17 00:00:00 2001 From: Rex Zhu Date: Wed, 21 Dec 2016 20:32:38 +0800 Subject: drm/amdgpu: use same enter/exit safe mode for gfx_8. 
Signed-off-by: Rex Zhu Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 95 +---------------------------------- 1 file changed, 1 insertion(+), 94 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 5ab53d7b4bad..71ab1eb47909 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5445,68 +5445,6 @@ static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, #define RLC_GPR_REG2__MESSAGE__SHIFT 0x00000001 #define RLC_GPR_REG2__MESSAGE_MASK 0x0000001e -static void cz_enter_rlc_safe_mode(struct amdgpu_device *adev) -{ - u32 data = 0; - unsigned i; - - data = RREG32(mmRLC_CNTL); - if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0) - return; - - if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) || - (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG))) { - data |= RLC_GPR_REG2__REQ_MASK; - data &= ~RLC_GPR_REG2__MESSAGE_MASK; - data |= (MSG_ENTER_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT); - WREG32(mmRLC_GPR_REG2, data); - - for (i = 0; i < adev->usec_timeout; i++) { - if ((RREG32(mmRLC_GPM_STAT) & - (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | - RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) == - (RLC_GPM_STAT__GFX_CLOCK_STATUS_MASK | - RLC_GPM_STAT__GFX_POWER_STATUS_MASK)) - break; - udelay(1); - } - - for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) - break; - udelay(1); - } - adev->gfx.rlc.in_safe_mode = true; - } -} - -static void cz_exit_rlc_safe_mode(struct amdgpu_device *adev) -{ - u32 data; - unsigned i; - - data = RREG32(mmRLC_CNTL); - if ((data & RLC_CNTL__RLC_ENABLE_F32_MASK) == 0) - return; - - if ((adev->cg_flags & (AMD_CG_SUPPORT_GFX_CGCG | AMD_CG_SUPPORT_GFX_MGCG)) || - (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG | AMD_PG_SUPPORT_GFX_SMG | - AMD_PG_SUPPORT_GFX_DMG))) { - data |= RLC_GPR_REG2__REQ_MASK; - data &= ~RLC_GPR_REG2__MESSAGE_MASK; - data |= (MSG_EXIT_RLC_SAFE_MODE << RLC_GPR_REG2__MESSAGE__SHIFT); - WREG32(mmRLC_GPR_REG2, data); - adev->gfx.rlc.in_safe_mode = false; - } - - for (i = 0; i < adev->usec_timeout; i++) { - if (!REG_GET_FIELD(RREG32(mmRLC_GPR_REG2), RLC_GPR_REG2, REQ)) - break; - udelay(1); - } -} - static void iceland_enter_rlc_safe_mode(struct amdgpu_device *adev) { u32 data; @@ -5566,31 +5504,11 @@ static void iceland_exit_rlc_safe_mode(struct amdgpu_device *adev) } } -static void gfx_v8_0_nop_enter_rlc_safe_mode(struct amdgpu_device *adev) -{ - adev->gfx.rlc.in_safe_mode = true; -} - -static void gfx_v8_0_nop_exit_rlc_safe_mode(struct amdgpu_device *adev) -{ - adev->gfx.rlc.in_safe_mode = false; -} - -static const struct amdgpu_rlc_funcs cz_rlc_funcs = { - .enter_safe_mode = cz_enter_rlc_safe_mode, - .exit_safe_mode = cz_exit_rlc_safe_mode -}; - static const struct amdgpu_rlc_funcs iceland_rlc_funcs = { .enter_safe_mode = iceland_enter_rlc_safe_mode, .exit_safe_mode = iceland_exit_rlc_safe_mode }; -static const struct amdgpu_rlc_funcs gfx_v8_0_nop_rlc_funcs = { - .enter_safe_mode = gfx_v8_0_nop_enter_rlc_safe_mode, - .exit_safe_mode = gfx_v8_0_nop_exit_rlc_safe_mode -}; - static void gfx_v8_0_update_medium_grain_clock_gating(struct amdgpu_device *adev, bool enable) { @@ -6526,18 +6444,7 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev) static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev) { - switch (adev->asic_type) 
{ - case CHIP_TOPAZ: - adev->gfx.rlc.funcs = &iceland_rlc_funcs; - break; - case CHIP_STONEY: - case CHIP_CARRIZO: - adev->gfx.rlc.funcs = &cz_rlc_funcs; - break; - default: - adev->gfx.rlc.funcs = &gfx_v8_0_nop_rlc_funcs; - break; - } + adev->gfx.rlc.funcs = &iceland_rlc_funcs; } static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev) -- cgit From 4e638ae9c1e7a2b85155f2dd91c8105ce109ea7e Mon Sep 17 00:00:00 2001 From: Xiangliang Yu Date: Fri, 23 Dec 2016 15:00:01 +0800 Subject: drm/amdgpu/gfx8: add support kernel interface queue(KIQ) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit KIQ is queue-memory based initialization method: setup KIQ queue firstly, then send command to KIQ to setup other queues, without accessing registers. For virtualization, need KIQ to access virtual function registers when running on guest mode. V2: use amdgpu_bo_create/free_kernel to allocate BO. Signed-off-by: Monk Liu Signed-off-by: Xiangliang Yu Reviewed-by: Alex Deucher Reviewed-by: Christian König Reviewed-by: Chunming Zhou Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 13 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 607 +++++++++++++++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/vid.h | 2 + 3 files changed, 620 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index e8fbd7a791d3..b5ad548e2503 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -183,6 +183,11 @@ enum amdgpu_thermal_irq { AMDGPU_THERMAL_IRQ_LAST }; +enum amdgpu_kiq_irq { + AMDGPU_CP_KIQ_IRQ_DRIVER0 = 0, + AMDGPU_CP_KIQ_IRQ_LAST +}; + int amdgpu_set_clockgating_state(struct amdgpu_device *adev, enum amd_ip_block_type block_type, enum amd_clockgating_state state); @@ -775,6 +780,13 @@ struct amdgpu_mec { u32 num_queue; }; +struct amdgpu_kiq { + u64 eop_gpu_addr; + struct amdgpu_bo *eop_obj; + struct amdgpu_ring ring; + struct amdgpu_irq_src irq; +}; + /* * GPU scratch registers structures, functions & helpers */ @@ -850,6 +862,7 @@ struct amdgpu_gfx { struct amdgpu_gca_config config; struct amdgpu_rlc rlc; struct amdgpu_mec mec; + struct amdgpu_kiq kiq; struct amdgpu_scratch scratch; const struct firmware *me_fw; /* ME firmware */ uint32_t me_fw_version; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 71ab1eb47909..d604ba37541f 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1367,6 +1367,42 @@ static void gfx_v8_0_mec_fini(struct amdgpu_device *adev) } } +static int gfx_v8_0_kiq_init_ring(struct amdgpu_device *adev, + struct amdgpu_ring *ring, + struct amdgpu_irq_src *irq) +{ + int r = 0; + + ring->adev = NULL; + ring->ring_obj = NULL; + ring->use_doorbell = true; + ring->doorbell_index = AMDGPU_DOORBELL_KIQ; + if (adev->gfx.mec2_fw) { + ring->me = 2; + ring->pipe = 0; + } else { + ring->me = 1; + ring->pipe = 1; + } + + irq->data = ring; + ring->queue = 0; + sprintf(ring->name, "kiq %d.%d.%d", ring->me, ring->pipe, ring->queue); + r = amdgpu_ring_init(adev, ring, 1024, + irq, AMDGPU_CP_KIQ_IRQ_DRIVER0); + if (r) + dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r); + + return r; +} + +static void gfx_v8_0_kiq_free_ring(struct amdgpu_ring *ring, + struct amdgpu_irq_src *irq) +{ + amdgpu_ring_fini(ring); + irq->data = NULL; +} + #define MEC_HPD_SIZE 2048 static int gfx_v8_0_mec_init(struct amdgpu_device *adev) @@ 
-1421,6 +1457,35 @@ static int gfx_v8_0_mec_init(struct amdgpu_device *adev) return 0; } +static void gfx_v8_0_kiq_fini(struct amdgpu_device *adev) +{ + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + + amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL); + kiq->eop_obj = NULL; +} + +static int gfx_v8_0_kiq_init(struct amdgpu_device *adev) +{ + int r; + u32 *hpd; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + + r = amdgpu_bo_create_kernel(adev, MEC_HPD_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj, + &kiq->eop_gpu_addr, (void **)&hpd); + if (r) { + dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r); + return r; + } + + memset(hpd, 0, MEC_HPD_SIZE); + + amdgpu_bo_kunmap(kiq->eop_obj); + + return 0; +} + static const u32 vgpr_init_compute_shader[] = { 0x7e000209, 0x7e020208, @@ -1997,8 +2062,14 @@ static int gfx_v8_0_sw_init(void *handle) { int i, r; struct amdgpu_ring *ring; + struct amdgpu_kiq *kiq; struct amdgpu_device *adev = (struct amdgpu_device *)handle; + /* KIQ event */ + r = amdgpu_irq_add_id(adev, 178, &adev->gfx.kiq.irq); + if (r) + return r; + /* EOP Event */ r = amdgpu_irq_add_id(adev, 181, &adev->gfx.eop_irq); if (r) @@ -2036,6 +2107,17 @@ static int gfx_v8_0_sw_init(void *handle) return r; } + r = gfx_v8_0_kiq_init(adev); + if (r) { + DRM_ERROR("Failed to init KIQ BOs!\n"); + return r; + } + + kiq = &adev->gfx.kiq; + r = gfx_v8_0_kiq_init_ring(adev, &kiq->ring, &kiq->irq); + if (r) + return r; + /* set up the gfx ring */ for (i = 0; i < adev->gfx.num_gfx_rings; i++) { ring = &adev->gfx.gfx_ring[i]; @@ -2119,7 +2201,9 @@ static int gfx_v8_0_sw_fini(void *handle) amdgpu_ring_fini(&adev->gfx.gfx_ring[i]); for (i = 0; i < adev->gfx.num_compute_rings; i++) amdgpu_ring_fini(&adev->gfx.compute_ring[i]); + gfx_v8_0_kiq_free_ring(&adev->gfx.kiq.ring, &adev->gfx.kiq.irq); + gfx_v8_0_kiq_fini(adev); gfx_v8_0_mec_fini(adev); gfx_v8_0_rlc_fini(adev); gfx_v8_0_free_microcode(adev); @@ -4495,6 +4579,393 @@ static void gfx_v8_0_cp_compute_fini(struct amdgpu_device *adev) } } +/* KIQ functions */ +static void gfx_v8_0_kiq_setting(struct amdgpu_ring *ring) +{ + uint32_t tmp; + struct amdgpu_device *adev = ring->adev; + + /* tell RLC which is KIQ queue */ + tmp = RREG32(mmRLC_CP_SCHEDULERS); + tmp &= 0xffffff00; + tmp |= (ring->me << 5) | (ring->pipe << 3) | (ring->queue); + WREG32(mmRLC_CP_SCHEDULERS, tmp); + tmp |= 0x80; + WREG32(mmRLC_CP_SCHEDULERS, tmp); +} + +static void gfx_v8_0_kiq_enable(struct amdgpu_ring *ring) +{ + amdgpu_ring_alloc(ring, 8); + /* set resources */ + amdgpu_ring_write(ring, PACKET3(PACKET3_SET_RESOURCES, 6)); + amdgpu_ring_write(ring, 0); /* vmid_mask:0 queue_type:0 (KIQ) */ + amdgpu_ring_write(ring, 0x000000FF); /* queue mask lo */ + amdgpu_ring_write(ring, 0); /* queue mask hi */ + amdgpu_ring_write(ring, 0); /* gws mask lo */ + amdgpu_ring_write(ring, 0); /* gws mask hi */ + amdgpu_ring_write(ring, 0); /* oac mask */ + amdgpu_ring_write(ring, 0); /* gds heap base:0, gds heap size:0 */ + amdgpu_ring_commit(ring); + udelay(50); +} + +static void gfx_v8_0_map_queue_enable(struct amdgpu_ring *kiq_ring, + struct amdgpu_ring *ring) +{ + struct amdgpu_device *adev = kiq_ring->adev; + uint64_t mqd_addr, wptr_addr; + + mqd_addr = amdgpu_bo_gpu_offset(ring->mqd_obj); + wptr_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + amdgpu_ring_alloc(kiq_ring, 8); + + amdgpu_ring_write(kiq_ring, PACKET3(PACKET3_MAP_QUEUES, 5)); + /* Q_sel:0, vmid:0, vidmem: 1, engine:0, num_Q:1*/ + amdgpu_ring_write(kiq_ring, 0x21010000); + amdgpu_ring_write(kiq_ring, 
(ring->doorbell_index << 2) | + (ring->queue << 26) | + (ring->pipe << 29) | + ((ring->me == 1 ? 0 : 1) << 31)); /* doorbell */ + amdgpu_ring_write(kiq_ring, lower_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(mqd_addr)); + amdgpu_ring_write(kiq_ring, lower_32_bits(wptr_addr)); + amdgpu_ring_write(kiq_ring, upper_32_bits(wptr_addr)); + amdgpu_ring_commit(kiq_ring); + udelay(50); +} + +static int gfx_v8_0_mqd_init(struct amdgpu_device *adev, + struct vi_mqd *mqd, + uint64_t mqd_gpu_addr, + uint64_t eop_gpu_addr, + struct amdgpu_ring *ring) +{ + uint64_t hqd_gpu_addr, wb_gpu_addr, eop_base_addr; + uint32_t tmp; + + mqd->header = 0xC0310800; + mqd->compute_pipelinestat_enable = 0x00000001; + mqd->compute_static_thread_mgmt_se0 = 0xffffffff; + mqd->compute_static_thread_mgmt_se1 = 0xffffffff; + mqd->compute_static_thread_mgmt_se2 = 0xffffffff; + mqd->compute_static_thread_mgmt_se3 = 0xffffffff; + mqd->compute_misc_reserved = 0x00000003; + + eop_base_addr = eop_gpu_addr >> 8; + mqd->cp_hqd_eop_base_addr_lo = eop_base_addr; + mqd->cp_hqd_eop_base_addr_hi = upper_32_bits(eop_base_addr); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + tmp = RREG32(mmCP_HQD_EOP_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_EOP_CONTROL, EOP_SIZE, + (order_base_2(MEC_HPD_SIZE / 4) - 1)); + + mqd->cp_hqd_eop_control = tmp; + + /* enable doorbell? */ + tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); + + if (ring->use_doorbell) + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 1); + else + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 0); + + mqd->cp_hqd_pq_doorbell_control = tmp; + + /* disable the queue if it's active */ + mqd->cp_hqd_dequeue_request = 0; + mqd->cp_hqd_pq_rptr = 0; + mqd->cp_hqd_pq_wptr = 0; + + /* set the pointer to the MQD */ + mqd->cp_mqd_base_addr_lo = mqd_gpu_addr & 0xfffffffc; + mqd->cp_mqd_base_addr_hi = upper_32_bits(mqd_gpu_addr); + + /* set MQD vmid to 0 */ + tmp = RREG32(mmCP_MQD_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_MQD_CONTROL, VMID, 0); + mqd->cp_mqd_control = tmp; + + /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ + hqd_gpu_addr = ring->gpu_addr >> 8; + mqd->cp_hqd_pq_base_lo = hqd_gpu_addr; + mqd->cp_hqd_pq_base_hi = upper_32_bits(hqd_gpu_addr); + + /* set up the HQD, this is similar to CP_RB0_CNTL */ + tmp = RREG32(mmCP_HQD_PQ_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, QUEUE_SIZE, + (order_base_2(ring->ring_size / 4) - 1)); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, RPTR_BLOCK_SIZE, + ((order_base_2(AMDGPU_GPU_PAGE_SIZE / 4) - 1) << 8)); +#ifdef __BIG_ENDIAN + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ENDIAN_SWAP, 1); +#endif + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, UNORD_DISPATCH, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, ROQ_PQ_IB_FLIP, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, PRIV_STATE, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_CONTROL, KMD_QUEUE, 1); + mqd->cp_hqd_pq_control = tmp; + + /* set the wb address whether it's enabled or not */ + wb_gpu_addr = adev->wb.gpu_addr + (ring->rptr_offs * 4); + mqd->cp_hqd_pq_rptr_report_addr_lo = wb_gpu_addr & 0xfffffffc; + mqd->cp_hqd_pq_rptr_report_addr_hi = + upper_32_bits(wb_gpu_addr) & 0xffff; + + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ + wb_gpu_addr = adev->wb.gpu_addr + (ring->wptr_offs * 4); + mqd->cp_hqd_pq_wptr_poll_addr_lo = wb_gpu_addr & 0xfffffffc; + mqd->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits(wb_gpu_addr) & 0xffff; + + tmp = 0; + /* enable the doorbell if 
requested */ + if (ring->use_doorbell) { + tmp = RREG32(mmCP_HQD_PQ_DOORBELL_CONTROL); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_OFFSET, ring->doorbell_index); + + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_EN, 1); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_SOURCE, 0); + tmp = REG_SET_FIELD(tmp, CP_HQD_PQ_DOORBELL_CONTROL, + DOORBELL_HIT, 0); + } + + mqd->cp_hqd_pq_doorbell_control = tmp; + + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + ring->wptr = 0; + mqd->cp_hqd_pq_wptr = ring->wptr; + mqd->cp_hqd_pq_rptr = RREG32(mmCP_HQD_PQ_RPTR); + + /* set the vmid for the queue */ + mqd->cp_hqd_vmid = 0; + + tmp = RREG32(mmCP_HQD_PERSISTENT_STATE); + tmp = REG_SET_FIELD(tmp, CP_HQD_PERSISTENT_STATE, PRELOAD_SIZE, 0x53); + mqd->cp_hqd_persistent_state = tmp; + + /* activate the queue */ + mqd->cp_hqd_active = 1; + + return 0; +} + +static int gfx_v8_0_kiq_init_register(struct amdgpu_device *adev, + struct vi_mqd *mqd, + struct amdgpu_ring *ring) +{ + uint32_t tmp; + int j; + + /* disable wptr polling */ + tmp = RREG32(mmCP_PQ_WPTR_POLL_CNTL); + tmp = REG_SET_FIELD(tmp, CP_PQ_WPTR_POLL_CNTL, EN, 0); + WREG32(mmCP_PQ_WPTR_POLL_CNTL, tmp); + + WREG32(mmCP_HQD_EOP_BASE_ADDR, mqd->cp_hqd_eop_base_addr_lo); + WREG32(mmCP_HQD_EOP_BASE_ADDR_HI, mqd->cp_hqd_eop_base_addr_hi); + + /* set the EOP size, register value is 2^(EOP_SIZE+1) dwords */ + WREG32(mmCP_HQD_EOP_CONTROL, mqd->cp_hqd_eop_control); + + /* enable doorbell? */ + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); + + /* disable the queue if it's active */ + if (RREG32(mmCP_HQD_ACTIVE) & 1) { + WREG32(mmCP_HQD_DEQUEUE_REQUEST, 1); + for (j = 0; j < adev->usec_timeout; j++) { + if (!(RREG32(mmCP_HQD_ACTIVE) & 1)) + break; + udelay(1); + } + WREG32(mmCP_HQD_DEQUEUE_REQUEST, mqd->cp_hqd_dequeue_request); + WREG32(mmCP_HQD_PQ_RPTR, mqd->cp_hqd_pq_rptr); + WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr); + } + + /* set the pointer to the MQD */ + WREG32(mmCP_MQD_BASE_ADDR, mqd->cp_mqd_base_addr_lo); + WREG32(mmCP_MQD_BASE_ADDR_HI, mqd->cp_mqd_base_addr_hi); + + /* set MQD vmid to 0 */ + WREG32(mmCP_MQD_CONTROL, mqd->cp_mqd_control); + + /* set the pointer to the HQD, this is similar CP_RB0_BASE/_HI */ + WREG32(mmCP_HQD_PQ_BASE, mqd->cp_hqd_pq_base_lo); + WREG32(mmCP_HQD_PQ_BASE_HI, mqd->cp_hqd_pq_base_hi); + + /* set up the HQD, this is similar to CP_RB0_CNTL */ + WREG32(mmCP_HQD_PQ_CONTROL, mqd->cp_hqd_pq_control); + + /* set the wb address whether it's enabled or not */ + WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR, + mqd->cp_hqd_pq_rptr_report_addr_lo); + WREG32(mmCP_HQD_PQ_RPTR_REPORT_ADDR_HI, + mqd->cp_hqd_pq_rptr_report_addr_hi); + + /* only used if CP_PQ_WPTR_POLL_CNTL.CP_PQ_WPTR_POLL_CNTL__EN_MASK=1 */ + WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR, mqd->cp_hqd_pq_wptr_poll_addr_lo); + WREG32(mmCP_HQD_PQ_WPTR_POLL_ADDR_HI, mqd->cp_hqd_pq_wptr_poll_addr_hi); + + /* enable the doorbell if requested */ + if (ring->use_doorbell) { + if ((adev->asic_type == CHIP_CARRIZO) || + (adev->asic_type == CHIP_FIJI) || + (adev->asic_type == CHIP_STONEY)) { + WREG32(mmCP_MEC_DOORBELL_RANGE_LOWER, + AMDGPU_DOORBELL_KIQ << 2); + WREG32(mmCP_MEC_DOORBELL_RANGE_UPPER, + AMDGPU_DOORBELL_MEC_RING7 << 2); + } + } + WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, mqd->cp_hqd_pq_doorbell_control); + + /* reset read and write pointers, similar to CP_RB0_WPTR/_RPTR */ + WREG32(mmCP_HQD_PQ_WPTR, mqd->cp_hqd_pq_wptr); + + /* set the vmid for the queue */ + WREG32(mmCP_HQD_VMID, 
mqd->cp_hqd_vmid); + + WREG32(mmCP_HQD_PERSISTENT_STATE, mqd->cp_hqd_persistent_state); + + /* activate the queue */ + WREG32(mmCP_HQD_ACTIVE, mqd->cp_hqd_active); + + if (ring->use_doorbell) { + tmp = RREG32(mmCP_PQ_STATUS); + tmp = REG_SET_FIELD(tmp, CP_PQ_STATUS, DOORBELL_ENABLE, 1); + WREG32(mmCP_PQ_STATUS, tmp); + } + + return 0; +} + +static int gfx_v8_0_kiq_init_queue(struct amdgpu_ring *ring, + struct vi_mqd *mqd, + u64 mqd_gpu_addr) +{ + struct amdgpu_device *adev = ring->adev; + struct amdgpu_kiq *kiq = &adev->gfx.kiq; + uint64_t eop_gpu_addr; + bool is_kiq = false; + + if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ) + is_kiq = true; + + if (is_kiq) { + eop_gpu_addr = kiq->eop_gpu_addr; + gfx_v8_0_kiq_setting(&kiq->ring); + } else + eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr + + ring->queue * MEC_HPD_SIZE; + + mutex_lock(&adev->srbm_mutex); + vi_srbm_select(adev, ring->me, ring->pipe, ring->queue, 0); + + gfx_v8_0_mqd_init(adev, mqd, mqd_gpu_addr, eop_gpu_addr, ring); + + if (is_kiq) + gfx_v8_0_kiq_init_register(adev, mqd, ring); + + vi_srbm_select(adev, 0, 0, 0, 0); + mutex_unlock(&adev->srbm_mutex); + + if (is_kiq) + gfx_v8_0_kiq_enable(ring); + else + gfx_v8_0_map_queue_enable(&kiq->ring, ring); + + return 0; +} + +static void gfx_v8_0_kiq_free_queue(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = NULL; + int i; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); + ring->mqd_obj = NULL; + } + + ring = &adev->gfx.kiq.ring; + amdgpu_bo_free_kernel(&ring->mqd_obj, NULL, NULL); + ring->mqd_obj = NULL; +} + +static int gfx_v8_0_kiq_setup_queue(struct amdgpu_device *adev, + struct amdgpu_ring *ring) +{ + struct vi_mqd *mqd; + u64 mqd_gpu_addr; + u32 *buf; + int r = 0; + + r = amdgpu_bo_create_kernel(adev, sizeof(struct vi_mqd), PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj, + &mqd_gpu_addr, (void **)&buf); + if (r) { + dev_warn(adev->dev, "failed to create ring mqd ob (%d)", r); + return r; + } + + /* init the mqd struct */ + memset(buf, 0, sizeof(struct vi_mqd)); + mqd = (struct vi_mqd *)buf; + + r = gfx_v8_0_kiq_init_queue(ring, mqd, mqd_gpu_addr); + if (r) + return r; + + amdgpu_bo_kunmap(ring->mqd_obj); + + return 0; +} + +static int gfx_v8_0_kiq_resume(struct amdgpu_device *adev) +{ + struct amdgpu_ring *ring = NULL; + int r, i; + + ring = &adev->gfx.kiq.ring; + r = gfx_v8_0_kiq_setup_queue(adev, ring); + if (r) + return r; + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + r = gfx_v8_0_kiq_setup_queue(adev, ring); + if (r) + return r; + } + + gfx_v8_0_cp_compute_enable(adev, true); + + for (i = 0; i < adev->gfx.num_compute_rings; i++) { + ring = &adev->gfx.compute_ring[i]; + + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) + ring->ready = false; + } + + ring = &adev->gfx.kiq.ring; + ring->ready = true; + r = amdgpu_ring_test_ring(ring); + if (r) + ring->ready = false; + + return 0; +} + static int gfx_v8_0_cp_compute_resume(struct amdgpu_device *adev) { int r, i, j; @@ -4795,7 +5266,10 @@ static int gfx_v8_0_cp_resume(struct amdgpu_device *adev) if (r) return r; - r = gfx_v8_0_cp_compute_resume(adev); + if (amdgpu_sriov_vf(adev)) + r = gfx_v8_0_kiq_resume(adev); + else + r = gfx_v8_0_cp_compute_resume(adev); if (r) return r; @@ -4834,6 +5308,7 @@ static int gfx_v8_0_hw_fini(void *handle) amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0); amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0); if 
(amdgpu_sriov_vf(adev)) { + gfx_v8_0_kiq_free_queue(adev); pr_debug("For SRIOV client, shouldn't do anything.\n"); return 0; } @@ -5930,7 +6405,8 @@ static void gfx_v8_0_ring_emit_hdp_flush(struct amdgpu_ring *ring) { u32 ref_and_mask, reg_mem_engine; - if (ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) { + if ((ring->funcs->type == AMDGPU_RING_TYPE_COMPUTE) || + (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)) { switch (ring->me) { case 1: ref_and_mask = GPU_HDP_FLUSH_DONE__CP2_MASK << ring->pipe; @@ -6143,6 +6619,32 @@ static void gfx_v8_0_ring_emit_fence_compute(struct amdgpu_ring *ring, amdgpu_ring_write(ring, upper_32_bits(seq)); } +static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, + u64 seq, unsigned int flags) +{ + /* we only allocate 32bit for each seq wb address */ + if (flags & AMDGPU_FENCE_FLAG_64BIT) + BUG(); + + /* write fence seq to the "addr" */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(5) | WR_CONFIRM)); + amdgpu_ring_write(ring, lower_32_bits(addr)); + amdgpu_ring_write(ring, upper_32_bits(addr)); + amdgpu_ring_write(ring, lower_32_bits(seq)); + + if (flags & AMDGPU_FENCE_FLAG_INT) { + /* set register to trigger INT */ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(0) | + WRITE_DATA_DST_SEL(0) | WR_CONFIRM)); + amdgpu_ring_write(ring, mmCPC_INT_STATUS); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, 0x20000000); /* src_id is 178 */ + } +} + static void gfx_v8_ring_emit_sb(struct amdgpu_ring *ring) { amdgpu_ring_write(ring, PACKET3(PACKET3_SWITCH_BUFFER, 0)); @@ -6324,6 +6826,72 @@ static int gfx_v8_0_priv_inst_irq(struct amdgpu_device *adev, return 0; } +static int gfx_v8_0_kiq_set_interrupt_state(struct amdgpu_device *adev, + struct amdgpu_irq_src *src, + unsigned int type, + enum amdgpu_interrupt_state state) +{ + uint32_t tmp, target; + struct amdgpu_ring *ring = (struct amdgpu_ring *)src->data; + + BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)); + + if (ring->me == 1) + target = mmCP_ME1_PIPE0_INT_CNTL; + else + target = mmCP_ME2_PIPE0_INT_CNTL; + target += ring->pipe; + + switch (type) { + case AMDGPU_CP_KIQ_IRQ_DRIVER0: + if (state == AMDGPU_IRQ_STATE_DISABLE) { + tmp = RREG32(mmCPC_INT_CNTL); + tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, + GENERIC2_INT_ENABLE, 0); + WREG32(mmCPC_INT_CNTL, tmp); + + tmp = RREG32(target); + tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, + GENERIC2_INT_ENABLE, 0); + WREG32(target, tmp); + } else { + tmp = RREG32(mmCPC_INT_CNTL); + tmp = REG_SET_FIELD(tmp, CPC_INT_CNTL, + GENERIC2_INT_ENABLE, 1); + WREG32(mmCPC_INT_CNTL, tmp); + + tmp = RREG32(target); + tmp = REG_SET_FIELD(tmp, CP_ME2_PIPE0_INT_CNTL, + GENERIC2_INT_ENABLE, 1); + WREG32(target, tmp); + } + break; + default: + BUG(); /* kiq only support GENERIC2_INT now */ + break; + } + return 0; +} + +static int gfx_v8_0_kiq_irq(struct amdgpu_device *adev, + struct amdgpu_irq_src *source, + struct amdgpu_iv_entry *entry) +{ + u8 me_id, pipe_id, queue_id; + struct amdgpu_ring *ring = (struct amdgpu_ring *)source->data; + + BUG_ON(!ring || (ring->funcs->type != AMDGPU_RING_TYPE_KIQ)); + + me_id = (entry->ring_id & 0x0c) >> 2; + pipe_id = (entry->ring_id & 0x03) >> 0; + queue_id = (entry->ring_id & 0x70) >> 4; + DRM_DEBUG("IH: CPC GENERIC2_INT, me:%d, pipe:%d, queue:%d\n", + me_id, pipe_id, queue_id); + + amdgpu_fence_process(ring); + return 0; +} + static const struct amd_ip_funcs 
gfx_v8_0_ip_funcs = { .name = "gfx_v8_0", .early_init = gfx_v8_0_early_init, @@ -6404,10 +6972,37 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_compute = { .pad_ib = amdgpu_ring_generic_pad_ib, }; +static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { + .type = AMDGPU_RING_TYPE_KIQ, + .align_mask = 0xff, + .nop = PACKET3(PACKET3_NOP, 0x3FFF), + .get_rptr = gfx_v8_0_ring_get_rptr, + .get_wptr = gfx_v8_0_ring_get_wptr_compute, + .set_wptr = gfx_v8_0_ring_set_wptr_compute, + .emit_frame_size = + 20 + /* gfx_v8_0_ring_emit_gds_switch */ + 7 + /* gfx_v8_0_ring_emit_hdp_flush */ + 5 + /* gfx_v8_0_ring_emit_hdp_invalidate */ + 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ + 17 + /* gfx_v8_0_ring_emit_vm_flush */ + 7 + 7 + 7, /* gfx_v8_0_ring_emit_fence_kiq x3 for user fence, vm fence */ + .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_compute */ + .emit_ib = gfx_v8_0_ring_emit_ib_compute, + .emit_fence = gfx_v8_0_ring_emit_fence_kiq, + .emit_hdp_flush = gfx_v8_0_ring_emit_hdp_flush, + .emit_hdp_invalidate = gfx_v8_0_ring_emit_hdp_invalidate, + .test_ring = gfx_v8_0_ring_test_ring, + .test_ib = gfx_v8_0_ring_test_ib, + .insert_nop = amdgpu_ring_insert_nop, + .pad_ib = amdgpu_ring_generic_pad_ib, +}; + static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) { int i; + adev->gfx.kiq.ring.funcs = &gfx_v8_0_ring_funcs_kiq; + for (i = 0; i < adev->gfx.num_gfx_rings; i++) adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx; @@ -6430,6 +7025,11 @@ static const struct amdgpu_irq_src_funcs gfx_v8_0_priv_inst_irq_funcs = { .process = gfx_v8_0_priv_inst_irq, }; +static const struct amdgpu_irq_src_funcs gfx_v8_0_kiq_irq_funcs = { + .set = gfx_v8_0_kiq_set_interrupt_state, + .process = gfx_v8_0_kiq_irq, +}; + static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev) { adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST; @@ -6440,6 +7040,9 @@ static void gfx_v8_0_set_irq_funcs(struct amdgpu_device *adev) adev->gfx.priv_inst_irq.num_types = 1; adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs; + + adev->gfx.kiq.irq.num_types = AMDGPU_CP_KIQ_IRQ_LAST; + adev->gfx.kiq.irq.funcs = &gfx_v8_0_kiq_irq_funcs; } static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev) diff --git a/drivers/gpu/drm/amd/amdgpu/vid.h b/drivers/gpu/drm/amd/amdgpu/vid.h index 11746f22d0c5..7a3863a45f0a 100644 --- a/drivers/gpu/drm/amd/amdgpu/vid.h +++ b/drivers/gpu/drm/amd/amdgpu/vid.h @@ -360,6 +360,8 @@ #define PACKET3_WAIT_ON_CE_COUNTER 0x86 #define PACKET3_WAIT_ON_DE_COUNTER_DIFF 0x88 #define PACKET3_SWITCH_BUFFER 0x8B +#define PACKET3_SET_RESOURCES 0xA0 +#define PACKET3_MAP_QUEUES 0xA2 #define VCE_CMD_NO_OP 0x00000000 #define VCE_CMD_END 0x00000001 -- cgit From ebd843d6ee915394094777c72f01fe55356c8eae Mon Sep 17 00:00:00 2001 From: Huang Rui Date: Thu, 5 Jan 2017 18:48:44 +0800 Subject: drm/amdgpu: add get clockgating_state method for gfx v8 Signed-off-by: Huang Rui Reviewed-by: Alex Deucher Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 40 +++++++++++++++++++++++++++++++++++ 1 file changed, 40 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index d604ba37541f..0907173d9681 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -5872,6 +5872,45 @@ static int gfx_v8_0_set_powergating_state(void *handle, return 0; } +static void gfx_v8_0_get_clockgating_state(void *handle, u32 *flags) +{ + struct amdgpu_device 
*adev = (struct amdgpu_device *)handle; + int data; + + /* AMD_CG_SUPPORT_GFX_MGCG */ + data = RREG32(mmRLC_CGTT_MGCG_OVERRIDE); + if (!(data & RLC_CGTT_MGCG_OVERRIDE__CPF_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_MGCG; + + /* AMD_CG_SUPPORT_GFX_CGLG */ + data = RREG32(mmRLC_CGCG_CGLS_CTRL); + if (data & RLC_CGCG_CGLS_CTRL__CGCG_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CGCG; + + /* AMD_CG_SUPPORT_GFX_CGLS */ + if (data & RLC_CGCG_CGLS_CTRL__CGLS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CGLS; + + /* AMD_CG_SUPPORT_GFX_CGTS */ + data = RREG32(mmCGTS_SM_CTRL_REG); + if (!(data & CGTS_SM_CTRL_REG__OVERRIDE_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_CGTS; + + /* AMD_CG_SUPPORT_GFX_CGTS_LS */ + if (!(data & CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK)) + *flags |= AMD_CG_SUPPORT_GFX_CGTS_LS; + + /* AMD_CG_SUPPORT_GFX_RLC_LS */ + data = RREG32(mmRLC_MEM_SLP_CNTL); + if (data & RLC_MEM_SLP_CNTL__RLC_MEM_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_RLC_LS | AMD_CG_SUPPORT_GFX_MGLS; + + /* AMD_CG_SUPPORT_GFX_CP_LS */ + data = RREG32(mmCP_MEM_SLP_CNTL); + if (data & CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK) + *flags |= AMD_CG_SUPPORT_GFX_CP_LS | AMD_CG_SUPPORT_GFX_MGLS; +} + static void gfx_v8_0_send_serdes_cmd(struct amdgpu_device *adev, uint32_t reg_addr, uint32_t cmd) { @@ -6910,6 +6949,7 @@ static const struct amd_ip_funcs gfx_v8_0_ip_funcs = { .post_soft_reset = gfx_v8_0_post_soft_reset, .set_clockgating_state = gfx_v8_0_set_clockgating_state, .set_powergating_state = gfx_v8_0_set_powergating_state, + .get_clockgating_state = gfx_v8_0_get_clockgating_state, }; static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { -- cgit From 880e87e380985480cc5bee5f389f912554064930 Mon Sep 17 00:00:00 2001 From: Xiangliang Yu Date: Thu, 12 Jan 2017 13:57:48 +0800 Subject: drm/amdgpu/gfx8: implement emit_rreg/wreg function MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Implement emit_rreg/wreg function for kiq ring. 
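As a rough illustration of how these callbacks are meant to be consumed on the guest side (a sketch, not the driver's actual helper): the rreg packet copies the register into the writeback slot reserved at adev->virt.reg_val_offs, and the caller then reads the value back out of CPU-visible writeback memory. Locking, fencing and timeout handling are omitted; real code would wait on a fence rather than assume the copy has landed.

	/* Illustrative only; assumes the KIQ ring is up and reg_val_offs was
	 * allocated as in the hunk below. Error handling omitted. */
	static u32 kiq_rreg_sketch(struct amdgpu_device *adev, u32 reg)
	{
		struct amdgpu_ring *ring = &adev->gfx.kiq.ring;

		amdgpu_ring_alloc(ring, 32);
		ring->funcs->emit_rreg(ring, reg);  /* COPY_DATA: reg -> wb slot */
		amdgpu_ring_commit(ring);

		/* crude settle time; a fence + timeout belongs here instead */
		udelay(50);

		return adev->wb.wb[adev->virt.reg_val_offs];
	}
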
Signed-off-by: Xiangliang Yu Signed-off-by: Monk Liu Reviewed-by: Alex Deucher Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 37 ++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index fa1d569ec479..70ab72cd8fae 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -34,6 +34,7 @@ struct amdgpu_virt { uint32_t caps; struct amdgpu_bo *csa_obj; uint64_t csa_vmid0_addr; + uint32_t reg_val_offs; }; #define AMDGPU_CSA_SIZE (8 * 1024) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 0907173d9681..aa053f628786 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -1373,6 +1373,12 @@ static int gfx_v8_0_kiq_init_ring(struct amdgpu_device *adev, { int r = 0; + if (amdgpu_sriov_vf(adev)) { + r = amdgpu_wb_get(adev, &adev->virt.reg_val_offs); + if (r) + return r; + } + ring->adev = NULL; ring->ring_obj = NULL; ring->use_doorbell = true; @@ -1399,6 +1405,9 @@ static int gfx_v8_0_kiq_init_ring(struct amdgpu_device *adev, static void gfx_v8_0_kiq_free_ring(struct amdgpu_ring *ring, struct amdgpu_irq_src *irq) { + if (amdgpu_sriov_vf(ring->adev)) + amdgpu_wb_free(ring->adev, ring->adev->virt.reg_val_offs); + amdgpu_ring_fini(ring); irq->data = NULL; } @@ -6720,6 +6729,32 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) amdgpu_ring_write(ring, 0); } +static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) +{ + struct amdgpu_device *adev = ring->adev; + + amdgpu_ring_write(ring, PACKET3(PACKET3_COPY_DATA, 4)); + amdgpu_ring_write(ring, 0 | /* src: register*/ + (5 << 8) | /* dst: memory */ + (1 << 20)); /* write confirm */ + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, lower_32_bits(adev->wb.gpu_addr + + adev->virt.reg_val_offs * 4)); + amdgpu_ring_write(ring, upper_32_bits(adev->wb.gpu_addr + + adev->virt.reg_val_offs * 4)); +} + +static void gfx_v8_0_ring_emit_wreg(struct amdgpu_ring *ring, uint32_t reg, + uint32_t val) +{ + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); + amdgpu_ring_write(ring, (1 << 16)); /* no inc addr */ + amdgpu_ring_write(ring, reg); + amdgpu_ring_write(ring, 0); + amdgpu_ring_write(ring, val); +} + static void gfx_v8_0_set_gfx_eop_interrupt_state(struct amdgpu_device *adev, enum amdgpu_interrupt_state state) { @@ -7035,6 +7070,8 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_kiq = { .test_ib = gfx_v8_0_ring_test_ib, .insert_nop = amdgpu_ring_insert_nop, .pad_ib = amdgpu_ring_generic_pad_ib, + .emit_rreg = gfx_v8_0_ring_emit_rreg, + .emit_wreg = gfx_v8_0_ring_emit_wreg, }; static void gfx_v8_0_set_ring_funcs(struct amdgpu_device *adev) -- cgit From f10b478d9e5f70f62d6a7b964922ab91299279eb Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Tue, 10 Jan 2017 06:52:59 +0100 Subject: drm/amdgpu/gfx8: fix bugon.cocci warnings MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use BUG_ON instead of a if condition followed by BUG. 
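The transformation is mechanical; in its general form, using the condition from the hunk below purely as an illustration:

	/* before: an if whose only body is BUG() */
	if (flags & AMDGPU_FENCE_FLAG_64BIT)
		BUG();

	/* after: the same check as a single self-describing statement */
	BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT);
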
Generated by: scripts/coccinelle/misc/bugon.cocci Reviewed-by: Christian König Reviewd-by: Xiangliang.Yu CC: Xiangliang Yu Signed-off-by: Julia Lawall Signed-off-by: Fengguang Wu Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index aa053f628786..c8c45ba952c8 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6671,8 +6671,7 @@ static void gfx_v8_0_ring_emit_fence_kiq(struct amdgpu_ring *ring, u64 addr, u64 seq, unsigned int flags) { /* we only allocate 32bit for each seq wb address */ - if (flags & AMDGPU_FENCE_FLAG_64BIT) - BUG(); + BUG_ON(flags & AMDGPU_FENCE_FLAG_64BIT); /* write fence seq to the "addr" */ amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, 3)); -- cgit From 50261151a13176a99ee6117dbbb2e557fd0b608b Mon Sep 17 00:00:00 2001 From: Nils Wallménius Date: Mon, 16 Jan 2017 21:56:48 +0100 Subject: drm/amdgpu: simplify allocation of scratch regs MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The scratch regs are sequential so there's no need to keep them in an array, we can just return the index of the first free register + the base register. Also change the array of bools for keeping track of the free regs to a bitfield. Reviewed-by: Christian König Signed-off-by: Nils Wallménius Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 21 +++++++-------------- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 7 +------ drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 7 +------ drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 +------ 5 files changed, 11 insertions(+), 34 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index ffeda245f38b..953148058bdb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -794,8 +794,7 @@ struct amdgpu_kiq { struct amdgpu_scratch { unsigned num_reg; uint32_t reg_base; - bool free[32]; - uint32_t reg[32]; + uint32_t free_mask; }; /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 01a42b6a69a4..19943356cca7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -42,12 +42,12 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg) { int i; - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - if (adev->gfx.scratch.free[i]) { - adev->gfx.scratch.free[i] = false; - *reg = adev->gfx.scratch.reg[i]; - return 0; - } + i = ffs(adev->gfx.scratch.free_mask); + if (i != 0 && i <= adev->gfx.scratch.num_reg) { + i--; + adev->gfx.scratch.free_mask &= ~(1u << i); + *reg = adev->gfx.scratch.reg_base + i; + return 0; } return -EINVAL; } @@ -62,14 +62,7 @@ int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg) */ void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg) { - int i; - - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - if (adev->gfx.scratch.reg[i] == reg) { - adev->gfx.scratch.free[i] = true; - return; - } - } + adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base); } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c index b323f5ef64d2..e0132436c76f 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c @@ -1794,14 +1794,9 @@ static void gfx_v6_0_gpu_init(struct amdgpu_device *adev) static void gfx_v6_0_scratch_init(struct amdgpu_device *adev) { - int i; - adev->gfx.scratch.num_reg = 7; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - adev->gfx.scratch.free[i] = true; - adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i; - } + adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } static int gfx_v6_0_ring_test_ring(struct amdgpu_ring *ring) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c index c4e14015ec5b..cfed6db69b93 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c @@ -2003,14 +2003,9 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev) */ static void gfx_v7_0_scratch_init(struct amdgpu_device *adev) { - int i; - adev->gfx.scratch.num_reg = 7; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - adev->gfx.scratch.free[i] = true; - adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i; - } + adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index c8c45ba952c8..69543d4eef34 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -749,14 +749,9 @@ static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) static void gfx_v8_0_scratch_init(struct amdgpu_device *adev) { - int i; - adev->gfx.scratch.num_reg = 7; adev->gfx.scratch.reg_base = mmSCRATCH_REG0; - for (i = 0; i < adev->gfx.scratch.num_reg; i++) { - adev->gfx.scratch.free[i] = true; - adev->gfx.scratch.reg[i] = adev->gfx.scratch.reg_base + i; - } + adev->gfx.scratch.free_mask = (1u << adev->gfx.scratch.num_reg) - 1; } static int gfx_v8_0_ring_test_ring(struct amdgpu_ring *ring) -- cgit From ae65a26dd387fec18dd1645fbb4b901b379cd6f5 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Thu, 12 Jan 2017 15:32:44 +0800 Subject: drm/amdgpu:add META_DATA struct for CSA/SRIOV v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit META-DATA is used in GFX cmd submit, we have two format suit for META-DATA-init, one is legacy and another is for chained-ib preempt, which is used in vulkan UMD. 
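As a small sketch of how the two payload blocks inside the 4 KB meta-data page are expected to be located (csa_addr standing for the GPU VA of the per-context save area; the struct layout is the one added to vi.h below, and the follow-up CE/DE init patch computes exactly these offsets):

	uint64_t ce_payload_addr = csa_addr +
		offsetof(struct amdgpu_gfx_meta_data, ce_payload);
	uint64_t de_payload_addr = csa_addr +
		offsetof(struct amdgpu_gfx_meta_data, de_payload);

For the chained-ib format, amdgpu_gfx_meta_data_chained_ib is used instead with the same offsetof pattern.
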
v2: drop use CP version number to judge if chain-ib supports or not, we wait for it mature Signed-off-by: Monk Liu Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h | 1 + drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 7 ++ drivers/gpu/drm/amd/amdgpu/vi.h | 112 +++++++++++++++++++++++++++++++ 3 files changed, 120 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h index 59376fa69eba..675e12c42532 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.h @@ -44,6 +44,7 @@ struct amdgpu_virt { uint32_t caps; struct amdgpu_bo *csa_obj; uint64_t csa_vmid0_addr; + bool chained_ib_support; uint32_t reg_val_offs; struct mutex lock; struct amdgpu_irq_src ack_irq; diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 69543d4eef34..3c613c66d47d 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -936,6 +936,13 @@ static int gfx_v8_0_init_microcode(struct amdgpu_device *adev) goto out; cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data; adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version); + + /* chain ib ucode isn't formal released, just disable it by far + * TODO: when ucod ready we should use ucode version to judge if + * chain-ib support or not. + */ + adev->virt.chained_ib_support = false; + adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version); snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_ce.bin", chip_name); diff --git a/drivers/gpu/drm/amd/amdgpu/vi.h b/drivers/gpu/drm/amd/amdgpu/vi.h index 575d7aed5d32..719587b8b0cb 100644 --- a/drivers/gpu/drm/amd/amdgpu/vi.h +++ b/drivers/gpu/drm/amd/amdgpu/vi.h @@ -28,4 +28,116 @@ void vi_srbm_select(struct amdgpu_device *adev, u32 me, u32 pipe, u32 queue, u32 vmid); int vi_set_ip_blocks(struct amdgpu_device *adev); +struct amdgpu_ce_ib_state +{ + uint32_t ce_ib_completion_status; + uint32_t ce_constegnine_count; + uint32_t ce_ibOffset_ib1; + uint32_t ce_ibOffset_ib2; +}; /* Total of 4 DWORD */ + +struct amdgpu_de_ib_state +{ + uint32_t ib_completion_status; + uint32_t de_constEngine_count; + uint32_t ib_offset_ib1; + uint32_t ib_offset_ib2; + uint32_t preamble_begin_ib1; + uint32_t preamble_begin_ib2; + uint32_t preamble_end_ib1; + uint32_t preamble_end_ib2; + uint32_t draw_indirect_baseLo; + uint32_t draw_indirect_baseHi; + uint32_t disp_indirect_baseLo; + uint32_t disp_indirect_baseHi; + uint32_t gds_backup_addrlo; + uint32_t gds_backup_addrhi; + uint32_t index_base_addrlo; + uint32_t index_base_addrhi; + uint32_t sample_cntl; +}; /* Total of 17 DWORD */ + +struct amdgpu_ce_ib_state_chained_ib +{ + /* section of non chained ib part */ + uint32_t ce_ib_completion_status; + uint32_t ce_constegnine_count; + uint32_t ce_ibOffset_ib1; + uint32_t ce_ibOffset_ib2; + + /* section of chained ib */ + uint32_t ce_chainib_addrlo_ib1; + uint32_t ce_chainib_addrlo_ib2; + uint32_t ce_chainib_addrhi_ib1; + uint32_t ce_chainib_addrhi_ib2; + uint32_t ce_chainib_size_ib1; + uint32_t ce_chainib_size_ib2; +}; /* total 10 DWORD */ + +struct amdgpu_de_ib_state_chained_ib +{ + /* section of non chained ib part */ + uint32_t ib_completion_status; + uint32_t de_constEngine_count; + uint32_t ib_offset_ib1; + uint32_t ib_offset_ib2; + + /* section of chained ib */ + uint32_t chain_ib_addrlo_ib1; + uint32_t chain_ib_addrlo_ib2; + uint32_t 
chain_ib_addrhi_ib1; + uint32_t chain_ib_addrhi_ib2; + uint32_t chain_ib_size_ib1; + uint32_t chain_ib_size_ib2; + + /* section of non chained ib part */ + uint32_t preamble_begin_ib1; + uint32_t preamble_begin_ib2; + uint32_t preamble_end_ib1; + uint32_t preamble_end_ib2; + + /* section of chained ib */ + uint32_t chain_ib_pream_addrlo_ib1; + uint32_t chain_ib_pream_addrlo_ib2; + uint32_t chain_ib_pream_addrhi_ib1; + uint32_t chain_ib_pream_addrhi_ib2; + + /* section of non chained ib part */ + uint32_t draw_indirect_baseLo; + uint32_t draw_indirect_baseHi; + uint32_t disp_indirect_baseLo; + uint32_t disp_indirect_baseHi; + uint32_t gds_backup_addrlo; + uint32_t gds_backup_addrhi; + uint32_t index_base_addrlo; + uint32_t index_base_addrhi; + uint32_t sample_cntl; +}; /* Total of 27 DWORD */ + +struct amdgpu_gfx_meta_data +{ + /* 4 DWORD, address must be 4KB aligned */ + struct amdgpu_ce_ib_state ce_payload; + uint32_t reserved1[60]; + /* 17 DWORD, address must be 64B aligned */ + struct amdgpu_de_ib_state de_payload; + /* PFP IB base address which get pre-empted */ + uint32_t DeIbBaseAddrLo; + uint32_t DeIbBaseAddrHi; + uint32_t reserved2[941]; +}; /* Total of 4K Bytes */ + +struct amdgpu_gfx_meta_data_chained_ib +{ + /* 10 DWORD, address must be 4KB aligned */ + struct amdgpu_ce_ib_state_chained_ib ce_payload; + uint32_t reserved1[54]; + /* 27 DWORD, address must be 64B aligned */ + struct amdgpu_de_ib_state_chained_ib de_payload; + /* PFP IB base address which get pre-empted */ + uint32_t DeIbBaseAddrLo; + uint32_t DeIbBaseAddrHi; + uint32_t reserved2[931]; +}; /* Total of 4K Bytes */ + #endif -- cgit From acad2b2a7b7040b3e9e3a3239f137b7c67dea37b Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 17 Jan 2017 10:52:58 +0800 Subject: drm/amdgpu:implement CE/DE meta-init routines MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit those package need to insert into ring buffer for SRIOV case. they are used to let CP do preemption. 
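Judging from the other PACKET3 emits in this file, the count field of a PACKET3 header is one less than the number of dwords that follow it, so the WRITE_DATA packets emitted here are sized as 3 control dwords plus the payload, minus one. A worked example for the non-chained CE case (struct sizes from vi.h above; illustrative only):

	/*
	 *   payload dwords = sizeof(struct amdgpu_ce_ib_state) >> 2     = 4
	 *   body dwords    = 3 (dst-sel / addr lo / addr hi) + payload  = 7
	 *   PACKET3 count  = body dwords - 1                            = 6
	 *
	 * i.e. cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2 = 6, matching
	 * the expression used in gfx_v8_0_ring_emit_ce_meta_init() below.
	 */
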
Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 61 +++++++++++++++++++++++++++++++++++ 1 file changed, 61 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 3c613c66d47d..869e754b1758 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -657,6 +657,8 @@ static void gfx_v8_0_set_gds_init(struct amdgpu_device *adev); static void gfx_v8_0_set_rlc_funcs(struct amdgpu_device *adev); static u32 gfx_v8_0_get_csb_size(struct amdgpu_device *adev); static void gfx_v8_0_get_cu_info(struct amdgpu_device *adev); +static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t addr); +static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t addr); static void gfx_v8_0_init_golden_registers(struct amdgpu_device *adev) { @@ -7242,3 +7244,62 @@ const struct amdgpu_ip_block_version gfx_v8_1_ip_block = .rev = 0, .funcs = &gfx_v8_0_ip_funcs, }; + +static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t csa_addr) +{ + uint64_t ce_payload_addr; + int cnt_ce; + static union { + struct amdgpu_ce_ib_state regular; + struct amdgpu_ce_ib_state_chained_ib chained; + } ce_payload = {0}; + + if (ring->adev->virt.chained_ib_support) { + ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload); + cnt_ce = (sizeof(ce_payload.chained) >> 2) + 4 - 2; + } else { + ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data, ce_payload); + cnt_ce = (sizeof(ce_payload.regular) >> 2) + 4 - 2; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_ce)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(2) | + WRITE_DATA_DST_SEL(8) | + WR_CONFIRM) | + WRITE_DATA_CACHE_POLICY(0)); + amdgpu_ring_write(ring, lower_32_bits(ce_payload_addr)); + amdgpu_ring_write(ring, upper_32_bits(ce_payload_addr)); + amdgpu_ring_write_multiple(ring, (void *)&ce_payload, cnt_ce - 2); +} + +static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t csa_addr) +{ + uint64_t de_payload_addr, gds_addr; + int cnt_de; + static union { + struct amdgpu_de_ib_state regular; + struct amdgpu_de_ib_state_chained_ib chained; + } de_payload = {0}; + + gds_addr = csa_addr + 4096; + if (ring->adev->virt.chained_ib_support) { + de_payload.chained.gds_backup_addrlo = lower_32_bits(gds_addr); + de_payload.chained.gds_backup_addrhi = upper_32_bits(gds_addr); + de_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, de_payload); + cnt_de = (sizeof(de_payload.chained) >> 2) + 4 - 2; + } else { + de_payload.regular.gds_backup_addrlo = lower_32_bits(gds_addr); + de_payload.regular.gds_backup_addrhi = upper_32_bits(gds_addr); + de_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data, de_payload); + cnt_de = (sizeof(de_payload.regular) >> 2) + 4 - 2; + } + + amdgpu_ring_write(ring, PACKET3(PACKET3_WRITE_DATA, cnt_de)); + amdgpu_ring_write(ring, (WRITE_DATA_ENGINE_SEL(1) | + WRITE_DATA_DST_SEL(8) | + WR_CONFIRM) | + WRITE_DATA_CACHE_POLICY(0)); + amdgpu_ring_write(ring, lower_32_bits(de_payload_addr)); + amdgpu_ring_write(ring, upper_32_bits(de_payload_addr)); + amdgpu_ring_write_multiple(ring, (void *)&de_payload, cnt_de - 2); +} -- cgit From c2ce92fc7912d0eb2f4ae5a40977fa8a0378e796 Mon Sep 17 00:00:00 2001 From: Monk Liu Date: Tue, 17 Jan 2017 10:56:16 +0800 Subject: drm/amdgpu:in 
cntx_ctrl we need insert meta-init for CE/DE(V2) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit to support SRIOV preemption. v2: fix emit_frame_size Signed-off-by: Monk Liu Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c index 869e754b1758..ad8430432cbf 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c @@ -6706,6 +6706,10 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) { uint32_t dw2 = 0; + if (amdgpu_sriov_vf(ring->adev)) + gfx_v8_0_ring_emit_ce_meta_init(ring, + (flags & AMDGPU_VM_DOMAIN) ? AMDGPU_CSA_VADDR : ring->adev->virt.csa_vmid0_addr); + dw2 |= 0x80000000; /* set load_enable otherwise this package is just NOPs */ if (flags & AMDGPU_HAVE_CTX_SWITCH) { gfx_v8_0_ring_emit_vgt_flush(ring); @@ -6730,6 +6734,10 @@ static void gfx_v8_ring_emit_cntxcntl(struct amdgpu_ring *ring, uint32_t flags) amdgpu_ring_write(ring, PACKET3(PACKET3_CONTEXT_CONTROL, 1)); amdgpu_ring_write(ring, dw2); amdgpu_ring_write(ring, 0); + + if (amdgpu_sriov_vf(ring->adev)) + gfx_v8_0_ring_emit_de_meta_init(ring, + (flags & AMDGPU_VM_DOMAIN) ? AMDGPU_CSA_VADDR : ring->adev->virt.csa_vmid0_addr); } static void gfx_v8_0_ring_emit_rreg(struct amdgpu_ring *ring, uint32_t reg) @@ -7005,7 +7013,7 @@ static const struct amdgpu_ring_funcs gfx_v8_0_ring_funcs_gfx = { 7 + /* gfx_v8_0_ring_emit_pipeline_sync */ 128 + 19 + /* gfx_v8_0_ring_emit_vm_flush */ 2 + /* gfx_v8_ring_emit_sb */ - 3 + 4, /* gfx_v8_ring_emit_cntxcntl including vgt flush */ + 3 + 4 + 29, /* gfx_v8_ring_emit_cntxcntl including vgt flush/meta-data */ .emit_ib_size = 4, /* gfx_v8_0_ring_emit_ib_gfx */ .emit_ib = gfx_v8_0_ring_emit_ib_gfx, .emit_fence = gfx_v8_0_ring_emit_fence_gfx, -- cgit From 50ddc75e32bba7cce994d530ec27aec697a372f8 Mon Sep 17 00:00:00 2001 From: Junwei Zhang Date: Mon, 23 Jan 2017 16:30:38 +0800 Subject: drm/amd/amdgpu: remove the uncessary parameter for ib scheduler MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Junwei Zhang Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 3 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/cik_sdma.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v6_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/sdma_v2_4.c | 2 +- drivers/gpu/drm/amd/amdgpu/sdma_v3_0.c | 2 +- drivers/gpu/drm/amd/amdgpu/si_dma.c | 2 +- 13 files changed, 17 insertions(+), 18 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index a0728ff8c80f..179f01d44d14 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -906,8 +906,8 @@ int amdgpu_ib_get(struct amdgpu_device *adev, struct amdgpu_vm *vm, void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, struct dma_fence *f); int amdgpu_ib_schedule(struct amdgpu_ring 
*ring, unsigned num_ibs, - struct amdgpu_ib *ib, struct dma_fence *last_vm_update, - struct amdgpu_job *job, struct dma_fence **f); + struct amdgpu_ib *ibs, struct amdgpu_job *job, + struct dma_fence **f); int amdgpu_ib_pool_init(struct amdgpu_device *adev); void amdgpu_ib_pool_fini(struct amdgpu_device *adev); int amdgpu_ib_ring_tests(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c index dcf1d8aa35ea..e02a70dd37b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c @@ -116,8 +116,8 @@ void amdgpu_ib_free(struct amdgpu_device *adev, struct amdgpu_ib *ib, * to SI there was just a DE IB. */ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs, - struct amdgpu_ib *ibs, struct dma_fence *last_vm_update, - struct amdgpu_job *job, struct dma_fence **f) + struct amdgpu_ib *ibs, struct amdgpu_job *job, + struct dma_fence **f) { struct amdgpu_device *adev = ring->adev; struct amdgpu_ib *ib = &ibs[0]; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index a0de6286c453..86a12424c162 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -170,8 +170,7 @@ static struct dma_fence *amdgpu_job_run(struct amd_sched_job *sched_job) BUG_ON(amdgpu_sync_peek_fence(&job->sync, NULL)); trace_amdgpu_sched_run_job(job); - r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, - job->sync.last_vm_update, job, &fence); + r = amdgpu_ib_schedule(job->ring, job->num_ibs, job->ibs, job, &fence); if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index eebfc1d19b29..10e9b690676e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1294,7 +1294,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, WARN_ON(job->ibs[0].length_dw > num_dw); if (direct_submit) { r = amdgpu_ib_schedule(ring, job->num_ibs, job->ibs, - NULL, NULL, fence); + NULL, fence); job->fence = dma_fence_get(*fence); if (r) DRM_ERROR("Error scheduling IBs (%d)\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 326b7f5a79ff..6f62ac473064 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -976,7 +976,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, ib->length_dw = 16; if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); job->fence = dma_fence_get(f); if (r) goto err_free; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 8fec802d3908..79bc9c7aad45 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -455,7 +455,7 @@ int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, for (i = ib->length_dw; i < ib_size_dw; ++i) ib->ptr[i] = 0x0; - r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); job->fence = dma_fence_get(f); if (r) goto err; @@ -518,7 +518,7 @@ int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, ib->ptr[i] = 0x0; if (direct) { - r = amdgpu_ib_schedule(ring, 1, ib, NULL, NULL, &f); + r = amdgpu_ib_schedule(ring, 1, ib, NULL, &f); job->fence = dma_fence_get(f); if (r) goto err; diff --git 
From d2383267557a6080fadcbe335422c0825325845d Mon Sep 17 00:00:00 2001
From: ozeng
Date: Fri, 10 Feb 2017 17:55:36 -0600
Subject: drm/amdgpu: Initialize pipe priority order on graphic initialization
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Initialize the PIPE_ORDER_TS0/1/2/3 fields of the SPI_ARB_PRIORITY
register to 2. This sets the pipe priority order to:
02 - HP3D, CS_H, GFX, CS_M, CS_L

Signed-off-by: Oak Zeng
Reviewed-by: Alex Deucher
Reviewed-by: Christian König
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c | 8 ++++++++
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 8 ++++++++
 2 files changed, 16 insertions(+)

(limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index e3589b55a1e1..1f9354541f29 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -1983,6 +1983,14 @@ static void gfx_v7_0_gpu_init(struct amdgpu_device *adev)
 	WREG32(mmPA_CL_ENHANCE, PA_CL_ENHANCE__CLIP_VTX_REORDER_ENA_MASK |
 	       (3 << PA_CL_ENHANCE__NUM_CLIP_SEQ__SHIFT));
 	WREG32(mmPA_SC_ENHANCE, PA_SC_ENHANCE__ENABLE_PA_SC_OUT_OF_ORDER_MASK);
+
+	tmp = RREG32(mmSPI_ARB_PRIORITY);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
+	WREG32(mmSPI_ARB_PRIORITY, tmp);
+
 	mutex_unlock(&adev->grbm_idx_mutex);
 
 	udelay(50);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 35f9cd83b821..6ed19e3ab364 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -3898,6 +3898,14 @@ static void gfx_v8_0_gpu_init(struct amdgpu_device *adev)
 			PA_SC_FIFO_SIZE__SC_HIZ_TILE_FIFO_SIZE__SHIFT) |
 		   (adev->gfx.config.sc_earlyz_tile_fifo_size <<
 			PA_SC_FIFO_SIZE__SC_EARLYZ_TILE_FIFO_SIZE__SHIFT));
+
+	tmp = RREG32(mmSPI_ARB_PRIORITY);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS0, 2);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS1, 2);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS2, 2);
+	tmp = REG_SET_FIELD(tmp, SPI_ARB_PRIORITY, PIPE_ORDER_TS3, 2);
+	WREG32(mmSPI_ARB_PRIORITY, tmp);
+
 	mutex_unlock(&adev->grbm_idx_mutex);
 }
 
-- cgit
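REG_SET_FIELD() as used in both hunks above is a read-modify-write helper built from per-field mask and shift macros. The standalone program below re-implements that pattern with a simplified macro so it can be compiled and run on its own; the field names mirror the patch, but the mask/shift values are invented for the example and do not come from the real SPI_ARB_PRIORITY register headers.

/*
 * Standalone illustration of the mask/shift read-modify-write pattern.
 * The field layout below is assumed for demonstration purposes only.
 */
#include <stdint.h>
#include <stdio.h>

#define FIELD_SHIFT(f)	f##__SHIFT
#define FIELD_MASK(f)	f##_MASK
#define SET_FIELD(reg, field, val) \
	(((reg) & ~FIELD_MASK(field)) | \
	 (((uint32_t)(val) << FIELD_SHIFT(field)) & FIELD_MASK(field)))

/* assumed layout: two adjacent 2-bit fields */
#define PIPE_ORDER_TS0__SHIFT	0
#define PIPE_ORDER_TS0_MASK	0x00000003
#define PIPE_ORDER_TS1__SHIFT	2
#define PIPE_ORDER_TS1_MASK	0x0000000c

int main(void)
{
	uint32_t tmp = 0;	/* stands in for RREG32(mmSPI_ARB_PRIORITY) */

	tmp = SET_FIELD(tmp, PIPE_ORDER_TS0, 2);
	tmp = SET_FIELD(tmp, PIPE_ORDER_TS1, 2);
	/* WREG32(mmSPI_ARB_PRIORITY, tmp) would write the result back */
	printf("SPI_ARB_PRIORITY = 0x%08x\n", tmp);
	return 0;
}
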
From e8411302b44b844b4f619e8064735c70b7490ee8 Mon Sep 17 00:00:00 2001
From: Arnd Bergmann
Date: Fri, 3 Feb 2017 17:47:26 +0100
Subject: drm/amdgpu: fix warning on older gcc releases
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

gcc-4.8 warns about '{0}' being used as an initializer for nested
structures:

drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c: In function ‘gfx_v8_0_ring_emit_ce_meta_init’:
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c:7263:2: warning: missing braces around initializer [-Wmissing-braces]
  } ce_payload = {0};
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c: In function ‘gfx_v8_0_ring_emit_de_meta_init’:
drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c:7290:2: warning: missing braces around initializer [-Wmissing-braces]
  } de_payload = {0};

Using an empty {} initializer however has the same effect and works
on all versions.

Fixes: acad2b2a7b70 ("drm/amdgpu:implement CE/DE meta-init routines")
Signed-off-by: Arnd Bergmann
Signed-off-by: Alex Deucher
---
 drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c')

diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6ed19e3ab364..67afc901905c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -7268,7 +7268,7 @@ static void gfx_v8_0_ring_emit_ce_meta_init(struct amdgpu_ring *ring, uint64_t c
 	static union {
 		struct amdgpu_ce_ib_state regular;
 		struct amdgpu_ce_ib_state_chained_ib chained;
-	} ce_payload = {0};
+	} ce_payload = {};
 
 	if (ring->adev->virt.chained_ib_support) {
 		ce_payload_addr = csa_addr + offsetof(struct amdgpu_gfx_meta_data_chained_ib, ce_payload);
@@ -7295,7 +7295,7 @@ static void gfx_v8_0_ring_emit_de_meta_init(struct amdgpu_ring *ring, uint64_t c
 	static union {
 		struct amdgpu_de_ib_state regular;
 		struct amdgpu_de_ib_state_chained_ib chained;
-	} de_payload = {0};
+	} de_payload = {};
 
 	gds_addr = csa_addr + 4096;
 	if (ring->adev->virt.chained_ib_support) {
-- cgit
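The difference between the two initializers is easy to reproduce outside the kernel. The snippet below is a self-contained illustration, not the driver code: with a union whose members are structs, '= {0}' trips -Wmissing-braces on gcc-4.8, while the empty '= {}' (a GNU C extension, later standardized in C23) zero-initializes the object the same way without the warning.

/*
 * Standalone demonstration of the initializer change; the struct/union
 * shapes are placeholders loosely modelled on the payload unions above.
 */
#include <stdio.h>

struct regular_state { unsigned a, b; };
struct chained_state { unsigned a, b, c; };

int main(void)
{
	union {
		struct regular_state regular;
		struct chained_state chained;
	} payload = {};		/* was "= {0}", which warns on gcc-4.8 */

	printf("zeroed: %d\n", payload.chained.a == 0 &&
			       payload.chained.b == 0 &&
			       payload.chained.c == 0);
	return 0;
}
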