Diffstat (limited to 'drivers/gpu/drm/radeon/r600.c')
-rw-r--r-- | drivers/gpu/drm/radeon/r600.c | 197
1 file changed, 160 insertions, 37 deletions
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index e8bf0ea2dade..56b02927cd3d 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -122,6 +122,94 @@ u32 r600_get_xclk(struct radeon_device *rdev)
 
 int r600_set_uvd_clocks(struct radeon_device *rdev, u32 vclk, u32 dclk)
 {
+	unsigned fb_div = 0, ref_div, vclk_div = 0, dclk_div = 0;
+	int r;
+
+	/* bypass vclk and dclk with bclk */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(1) | DCLK_SRC_SEL(1),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	/* assert BYPASS_EN, deassert UPLL_RESET, UPLL_SLEEP and UPLL_CTLREQ */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_BYPASS_EN_MASK, ~(
+		 UPLL_RESET_MASK | UPLL_SLEEP_MASK | UPLL_CTLREQ_MASK));
+
+	if (rdev->family >= CHIP_RS780)
+		WREG32_P(GFX_MACRO_BYPASS_CNTL, UPLL_BYPASS_CNTL,
+			 ~UPLL_BYPASS_CNTL);
+
+	if (!vclk || !dclk) {
+		/* keep the Bypass mode, put PLL to sleep */
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_SLEEP_MASK, ~UPLL_SLEEP_MASK);
+		return 0;
+	}
+
+	if (rdev->clock.spll.reference_freq == 10000)
+		ref_div = 34;
+	else
+		ref_div = 4;
+
+	r = radeon_uvd_calc_upll_dividers(rdev, vclk, dclk, 50000, 160000,
+					  ref_div + 1, 0xFFF, 2, 30, ~0,
+					  &fb_div, &vclk_div, &dclk_div);
+	if (r)
+		return r;
+
+	if (rdev->family >= CHIP_RV670 && rdev->family < CHIP_RS780)
+		fb_div >>= 1;
+	else
+		fb_div |= 1;
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* assert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_RESET_MASK, ~UPLL_RESET_MASK);
+
+	/* For RS780 we have to choose ref clk */
+	if (rdev->family >= CHIP_RS780)
+		WREG32_P(CG_UPLL_FUNC_CNTL, UPLL_REFCLK_SRC_SEL_MASK,
+			 ~UPLL_REFCLK_SRC_SEL_MASK);
+
+	/* set the required fb, ref and post divder values */
+	WREG32_P(CG_UPLL_FUNC_CNTL,
+		 UPLL_FB_DIV(fb_div) |
+		 UPLL_REF_DIV(ref_div),
+		 ~(UPLL_FB_DIV_MASK | UPLL_REF_DIV_MASK));
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 UPLL_SW_HILEN(vclk_div >> 1) |
+		 UPLL_SW_LOLEN((vclk_div >> 1) + (vclk_div & 1)) |
+		 UPLL_SW_HILEN2(dclk_div >> 1) |
+		 UPLL_SW_LOLEN2((dclk_div >> 1) + (dclk_div & 1)) |
+		 UPLL_DIVEN_MASK | UPLL_DIVEN2_MASK,
+		 ~UPLL_SW_MASK);
+
+	/* give the PLL some time to settle */
+	mdelay(15);
+
+	/* deassert PLL_RESET */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_RESET_MASK);
+
+	mdelay(15);
+
+	/* deassert BYPASS EN */
+	WREG32_P(CG_UPLL_FUNC_CNTL, 0, ~UPLL_BYPASS_EN_MASK);
+
+	if (rdev->family >= CHIP_RS780)
+		WREG32_P(GFX_MACRO_BYPASS_CNTL, 0, ~UPLL_BYPASS_CNTL);
+
+	r = radeon_uvd_send_upll_ctlreq(rdev, CG_UPLL_FUNC_CNTL);
+	if (r)
+		return r;
+
+	/* switch VCLK and DCLK selection */
+	WREG32_P(CG_UPLL_FUNC_CNTL_2,
+		 VCLK_SRC_SEL(2) | DCLK_SRC_SEL(2),
+		 ~(VCLK_SRC_SEL_MASK | DCLK_SRC_SEL_MASK));
+
+	mdelay(100);
+
 	return 0;
 }
 
@@ -992,6 +1080,8 @@ static int r600_pcie_gart_enable(struct radeon_device *rdev)
 	WREG32(MC_VM_L1_TLB_MCB_WR_GFX_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_RD_PDMA_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_WR_PDMA_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_RD_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
 	WREG32(MC_VM_L1_TLB_MCB_WR_SEM_CNTL, tmp | ENABLE_SEMAPHORE_MODE);
 	WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR, rdev->mc.gtt_start >> 12);
@@ -1042,6 +1132,8 @@ static void r600_pcie_gart_disable(struct radeon_device *rdev)
 	WREG32(MC_VM_L1_TLB_MCB_WR_SYS_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_RD_HDP_CNTL, tmp);
 	WREG32(MC_VM_L1_TLB_MCB_WR_HDP_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_RD_UVD_CNTL, tmp);
+	WREG32(MC_VM_L1_TLB_MCB_WR_UVD_CNTL, tmp);
 	radeon_gart_table_vram_unpin(rdev);
 }
 
@@ -1338,7 +1430,7 @@ int r600_vram_scratch_init(struct radeon_device *rdev)
 	if (rdev->vram_scratch.robj == NULL) {
 		r = radeon_bo_create(rdev, RADEON_GPU_PAGE_SIZE,
 				     PAGE_SIZE, true, RADEON_GEM_DOMAIN_VRAM,
-				     0, NULL, &rdev->vram_scratch.robj);
+				     0, NULL, NULL, &rdev->vram_scratch.robj);
 		if (r) {
 			return r;
 		}
@@ -1812,7 +1904,6 @@ static void r600_gpu_init(struct radeon_device *rdev)
 {
 	u32 tiling_config;
 	u32 ramcfg;
-	u32 cc_rb_backend_disable;
 	u32 cc_gc_shader_pipe_config;
 	u32 tmp;
 	int i, j;
@@ -1939,29 +2030,20 @@ static void r600_gpu_init(struct radeon_device *rdev)
 	}
 	tiling_config |= BANK_SWAPS(1);
 
-	cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
-	tmp = R6XX_MAX_BACKENDS -
-		r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
-	if (tmp < rdev->config.r600.max_backends) {
-		rdev->config.r600.max_backends = tmp;
-	}
-
 	cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
-	tmp = R6XX_MAX_PIPES -
-		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
-	if (tmp < rdev->config.r600.max_pipes) {
-		rdev->config.r600.max_pipes = tmp;
-	}
-	tmp = R6XX_MAX_SIMDS -
-		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
-	if (tmp < rdev->config.r600.max_simds) {
-		rdev->config.r600.max_simds = tmp;
-	}
 	tmp = rdev->config.r600.max_simds -
 		r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
 	rdev->config.r600.active_simds = tmp;
 
 	disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
+	tmp = 0;
+	for (i = 0; i < rdev->config.r600.max_backends; i++)
+		tmp |= (1 << i);
+	/* if all the backends are disabled, fix it up here */
+	if ((disabled_rb_mask & tmp) == tmp) {
+		for (i = 0; i < rdev->config.r600.max_backends; i++)
+			disabled_rb_mask &= ~(1 << i);
+	}
 	tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
 	tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
 					R6XX_MAX_BACKENDS, disabled_rb_mask);
@@ -2779,8 +2861,8 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
 	radeon_ring_write(ring, lower_32_bits(addr));
 	radeon_ring_write(ring, (upper_32_bits(addr) & 0xff) | sel);
 
-	/* PFP_SYNC_ME packet only exists on 7xx+ */
-	if (emit_wait && (rdev->family >= CHIP_RV770)) {
+	/* PFP_SYNC_ME packet only exists on 7xx+, only enable it on eg+ */
+	if (emit_wait && (rdev->family >= CHIP_CEDAR)) {
 		/* Prevent the PFP from running ahead of the semaphore wait */
 		radeon_ring_write(ring, PACKET3(PACKET3_PFP_SYNC_ME, 0));
 		radeon_ring_write(ring, 0x0);
@@ -2802,12 +2884,13 @@ bool r600_semaphore_ring_emit(struct radeon_device *rdev,
  * Used by the radeon ttm implementation to move pages if
  * registered as the asic copy callback.
  */
-int r600_copy_cpdma(struct radeon_device *rdev,
-		    uint64_t src_offset, uint64_t dst_offset,
-		    unsigned num_gpu_pages,
-		    struct radeon_fence **fence)
+struct radeon_fence *r600_copy_cpdma(struct radeon_device *rdev,
+				     uint64_t src_offset, uint64_t dst_offset,
+				     unsigned num_gpu_pages,
+				     struct reservation_object *resv)
 {
 	struct radeon_semaphore *sem = NULL;
+	struct radeon_fence *fence;
 	int ring_index = rdev->asic->copy.blit_ring_index;
 	struct radeon_ring *ring = &rdev->ring[ring_index];
 	u32 size_in_bytes, cur_size_in_bytes, tmp;
@@ -2817,7 +2900,7 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 	r = radeon_semaphore_create(rdev, &sem);
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
-		return r;
+		return ERR_PTR(r);
 	}
 
 	size_in_bytes = (num_gpu_pages << RADEON_GPU_PAGE_SHIFT);
@@ -2826,10 +2909,10 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 	if (r) {
 		DRM_ERROR("radeon: moving bo (%d).\n", r);
 		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
 	}
 
-	radeon_semaphore_sync_to(sem, *fence);
+	radeon_semaphore_sync_resv(rdev, sem, resv, false);
 	radeon_semaphore_sync_rings(rdev, sem, ring->idx);
 
 	radeon_ring_write(ring, PACKET3(PACKET3_SET_CONFIG_REG, 1));
@@ -2856,17 +2939,17 @@ int r600_copy_cpdma(struct radeon_device *rdev,
 	radeon_ring_write(ring, (WAIT_UNTIL - PACKET3_SET_CONFIG_REG_OFFSET) >> 2);
 	radeon_ring_write(ring, WAIT_CP_DMA_IDLE_bit);
 
-	r = radeon_fence_emit(rdev, fence, ring->idx);
+	r = radeon_fence_emit(rdev, &fence, ring->idx);
 	if (r) {
 		radeon_ring_unlock_undo(rdev, ring);
 		radeon_semaphore_free(rdev, &sem, NULL);
-		return r;
+		return ERR_PTR(r);
 	}
 
 	radeon_ring_unlock_commit(rdev, ring, false);
-	radeon_semaphore_free(rdev, &sem, *fence);
+	radeon_semaphore_free(rdev, &sem, fence);
 
-	return r;
+	return fence;
 }
 
 int r600_set_surface_reg(struct radeon_device *rdev, int reg,
@@ -2917,6 +3000,18 @@ static int r600_startup(struct radeon_device *rdev)
 		return r;
 	}
 
+	if (rdev->has_uvd) {
+		r = uvd_v1_0_resume(rdev);
+		if (!r) {
+			r = radeon_fence_driver_start_ring(rdev, R600_RING_TYPE_UVD_INDEX);
+			if (r) {
+				dev_err(rdev->dev, "failed initializing UVD fences (%d).\n", r);
+			}
+		}
+		if (r)
+			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_size = 0;
+	}
+
 	/* Enable IRQ */
 	if (!rdev->irq.installed) {
 		r = radeon_irq_kms_init(rdev);
@@ -2945,6 +3040,18 @@ static int r600_startup(struct radeon_device *rdev)
 	if (r)
 		return r;
 
+	if (rdev->has_uvd) {
+		ring = &rdev->ring[R600_RING_TYPE_UVD_INDEX];
+		if (ring->ring_size) {
+			r = radeon_ring_init(rdev, ring, ring->ring_size, 0,
+					     RADEON_CP_PACKET2);
+			if (!r)
+				r = uvd_v1_0_init(rdev);
+			if (r)
+				DRM_ERROR("radeon: failed initializing UVD (%d).\n", r);
+		}
+	}
+
 	r = radeon_ib_pool_init(rdev);
 	if (r) {
 		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
@@ -3004,6 +3111,10 @@ int r600_suspend(struct radeon_device *rdev)
 	radeon_pm_suspend(rdev);
 	r600_audio_fini(rdev);
 	r600_cp_stop(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_suspend(rdev);
+	}
 	r600_irq_suspend(rdev);
 	radeon_wb_disable(rdev);
 	r600_pcie_gart_disable(rdev);
@@ -3083,6 +3194,14 @@ int r600_init(struct radeon_device *rdev)
 	rdev->ring[RADEON_RING_TYPE_GFX_INDEX].ring_obj = NULL;
 	r600_ring_init(rdev, &rdev->ring[RADEON_RING_TYPE_GFX_INDEX], 1024 * 1024);
 
+	if (rdev->has_uvd) {
+		r = radeon_uvd_init(rdev);
+		if (!r) {
+			rdev->ring[R600_RING_TYPE_UVD_INDEX].ring_obj = NULL;
+			r600_ring_init(rdev, &rdev->ring[R600_RING_TYPE_UVD_INDEX], 4096);
+		}
+	}
+
 	rdev->ih.ring_obj = NULL;
 	r600_ih_ring_init(rdev, 64 * 1024);
 
@@ -3112,6 +3231,10 @@ void r600_fini(struct radeon_device *rdev)
 	r600_audio_fini(rdev);
 	r600_cp_fini(rdev);
 	r600_irq_fini(rdev);
+	if (rdev->has_uvd) {
+		uvd_v1_0_fini(rdev);
+		radeon_uvd_fini(rdev);
+	}
 	radeon_wb_fini(rdev);
 	radeon_ib_pool_fini(rdev);
 	radeon_irq_kms_fini(rdev);
@@ -3245,7 +3368,7 @@ int r600_ih_ring_alloc(struct radeon_device *rdev)
 		r = radeon_bo_create(rdev, rdev->ih.ring_size,
 				     PAGE_SIZE, true,
 				     RADEON_GEM_DOMAIN_GTT, 0,
-				     NULL, &rdev->ih.ring_obj);
+				     NULL, NULL, &rdev->ih.ring_obj);
 		if (r) {
 			DRM_ERROR("radeon: failed to create ih ring buffer (%d).\n", r);
 			return r;
@@ -3802,17 +3925,17 @@ static u32 r600_get_ih_wptr(struct radeon_device *rdev)
 	wptr = RREG32(IH_RB_WPTR);
 
 	if (wptr & RB_OVERFLOW) {
+		wptr &= ~RB_OVERFLOW;
 		/* When a ring buffer overflow happen start parsing interrupt
 		 * from the last not overwritten vector (wptr + 16). Hopefully
 		 * this should allow us to catchup.
 		 */
-		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, %d, %d)\n",
-			 wptr, rdev->ih.rptr, (wptr + 16) + rdev->ih.ptr_mask);
+		dev_warn(rdev->dev, "IH ring buffer overflow (0x%08X, 0x%08X, 0x%08X)\n",
+			 wptr, rdev->ih.rptr, (wptr + 16) & rdev->ih.ptr_mask);
 		rdev->ih.rptr = (wptr + 16) & rdev->ih.ptr_mask;
 		tmp = RREG32(IH_RB_CNTL);
 		tmp |= IH_WPTR_OVERFLOW_CLEAR;
 		WREG32(IH_RB_CNTL, tmp);
-		wptr &= ~RB_OVERFLOW;
 	}
 	return (wptr & rdev->ih.ptr_mask);
 }
@@ -4058,6 +4181,7 @@ restart_ih:
 		/* wptr/rptr are in bytes! */
 		rptr += 16;
 		rptr &= rdev->ih.ptr_mask;
+		WREG32(IH_RB_RPTR, rptr);
 	}
 	if (queue_hotplug)
 		schedule_work(&rdev->hotplug_work);
@@ -4066,7 +4190,6 @@ restart_ih:
 	if (queue_thermal && rdev->pm.dpm_enabled)
 		schedule_work(&rdev->pm.dpm.thermal.work);
 	rdev->ih.rptr = rptr;
-	WREG32(IH_RB_RPTR, rdev->ih.rptr);
 	atomic_set(&rdev->ih.lock, 0);
 
 	/* make sure wptr hasn't changed while processing */
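Note on the r600_copy_cpdma() hunk above: the copy callback now returns a struct radeon_fence pointer (or an ERR_PTR-encoded error) instead of an int, and synchronizes against a reservation_object rather than writing through a caller-supplied fence pointer. A minimal sketch of how a caller might consume the new signature, assuming the surrounding variables (src, dst, npages, resv) already exist in the caller; they are illustrative, not taken from the radeon sources:

	struct radeon_fence *fence;
	int r;

	/* kick off the CP DMA copy; errors come back ERR_PTR-encoded */
	fence = r600_copy_cpdma(rdev, src, dst, npages, resv);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	/* optionally block until the copy retires, then drop our reference */
	r = radeon_fence_wait(fence, false);
	radeon_fence_unref(&fence);
	return r;

Returning the fence directly lets the TTM move path own the fence lifetime instead of having it written back through a **fence out-parameter.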
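Likewise, the r600_get_ih_wptr() hunk moves "wptr &= ~RB_OVERFLOW" to the top of the overflow branch so the flag bit is stripped before wptr is used; previously the bit could leak into the logged values and into the rptr recomputed from wptr + 16. A standalone sketch of that ordering, assuming a power-of-two ring size (ptr_mask == ring_size - 1) and an overflow flag in bit 0; the helper below is illustrative only:

	#include <stdint.h>

	#define RB_OVERFLOW	(1u << 0)	/* assumed bit position, for illustration */

	/* Illustrative helper mirroring the patched r600_get_ih_wptr(): strip the
	 * overflow flag first so neither the returned wptr nor the resumed rptr
	 * carries it, then wrap both pointers into the ring via ptr_mask. */
	static uint32_t ih_get_wptr(uint32_t hw_wptr, uint32_t *rptr, uint32_t ptr_mask)
	{
		if (hw_wptr & RB_OVERFLOW) {
			hw_wptr &= ~RB_OVERFLOW;
			/* resume 16 bytes past the last vector that was overwritten */
			*rptr = (hw_wptr + 16) & ptr_mask;
		}
		return hw_wptr & ptr_mask;
	}

The related change in the interrupt handler, writing IH_RB_RPTR inside the processing loop rather than once at the end, keeps the hardware's view of the read pointer current while a long batch of vectors is drained.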