author    Dave Airlie <airlied@redhat.com>  2017-12-07 06:28:22 +1000
committer Dave Airlie <airlied@redhat.com>  2017-12-07 06:28:22 +1000
commit    9c606cd4117a3c45e04a6616b1a0dbeb18eeee62 (patch)
tree      aa6c1db29e1a3f687c81fa03aecd24992a76e993 /drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
parent    c5dd52f653fa74f8f4771425c6db33609ad21258 (diff)
parent    3997eea57caf542e9327df9b6bb2882a57c4c421 (diff)
Merge branch 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux into drm-next
First feature request for 4.16. Highlights:

- RV and Vega header cleanups
- TTM operation context support
- 48 bit GPUVM fixes for Vega/RV
- More smatch fixes
- ECC support for vega10
- Resizeable BAR support
- Multi-display sync support in DC
- SR-IOV fixes
- Various scheduler improvements
- GPU reset fixes and vram lost tracking
- Clean up DC/powerplay interfaces
- DCN display fixes
- Various DC fixes

* 'drm-next-4.16' of git://people.freedesktop.org/~agd5f/linux: (291 commits)
  drm/radeon: Use drm_fb_helper_lastclose() and _poll_changed()
  drm/amdgpu: Use drm_fb_helper_lastclose() and _poll_changed()
  drm/amd/display: Use drm_fb_helper_poll_changed()
  drm/ttm: swap consecutive allocated pooled pages v4
  drm/amdgpu: fix amdgpu_sync_resv v2
  drm/ttm: swap consecutive allocated cached pages v3
  drm/amd/amdgpu: set gtt size according to system memory size only
  drm/amdgpu: Get rid of dep_sync as a seperate object.
  drm/amdgpu: allow specifying vm_block_size for multi level PDs v2
  drm/amdgpu: move validation of the VM size into the VM code
  drm/amdgpu: allow non pot VM size values
  drm/amdgpu: choose number of VM levels based on VM size
  drm/amdgpu: unify VM size handling of Vega10 with older generation
  drm/amdgpu: fix amdgpu_vm_num_entries
  drm/amdgpu: fix VM PD addr shift
  drm/amdgpu: correct vce4.0 fw config for SRIOV (V2)
  drm/amd/display: Don't call dm_log_to_buffer directly in dc_conn_log
  drm/amd/display: Add dm_logger_append_va API
  drm/ttm: Use a static string instead of an array of char *
  drm/amd/display: remove usage of legacy_cursor_update
  ...
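The "TTM operation context support" highlighted above is the change threaded through most of this file: amdgpu_bo_move() and its helpers no longer take separate interruptible/no_wait_gpu booleans but a single struct ttm_operation_ctx pointer, which ttm_bo_mem_space() and friends now accept as well. A minimal userspace sketch of that calling convention, using a simplified stand-in for the real TTM structure (which carries more state):

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's struct ttm_operation_ctx;
 * the real structure in the TTM headers has additional fields. */
struct ttm_operation_ctx {
	bool interruptible;
	bool no_wait_gpu;
};

/* Old style: every helper took the two flags separately. */
static int move_buffer_old(bool interruptible, bool no_wait_gpu)
{
	printf("old: interruptible=%d no_wait_gpu=%d\n", interruptible, no_wait_gpu);
	return 0;
}

/* New style: one context object is threaded through the whole call chain,
 * so adding another knob later does not change every prototype again. */
static int move_buffer_new(const struct ttm_operation_ctx *ctx)
{
	printf("new: interruptible=%d no_wait_gpu=%d\n", ctx->interruptible, ctx->no_wait_gpu);
	return 0;
}

int main(void)
{
	struct ttm_operation_ctx ctx = { false, false };

	move_buffer_old(false, false);
	return move_buffer_new(&ctx);
}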
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c  174
1 file changed, 74 insertions(+), 100 deletions(-)
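One user-visible change in this file is the new GTT size default in the amdgpu_ttm_init() hunk below: with amdgpu_gtt_size left at -1, the GTT domain is now sized to at least the built-in default and otherwise to three quarters of system memory, rather than to the VRAM size. A rough userspace approximation of that heuristic, using sysinfo(2) in place of the kernel's si_meminfo() and assuming the driver's 3 GB default:

#include <stdint.h>
#include <stdio.h>
#include <sys/sysinfo.h>

/* Assumed value of AMDGPU_DEFAULT_GTT_SIZE_MB (3 GB) from amdgpu.h. */
#define DEFAULT_GTT_SIZE_MB 3072ULL

int main(void)
{
	struct sysinfo si;
	uint64_t gtt_size, three_quarters_ram;

	if (sysinfo(&si) != 0)
		return 1;

	/* Same shape as the new code in amdgpu_ttm_init(): take the larger of
	 * the compiled-in default and 3/4 of total system memory. */
	three_quarters_ram = (uint64_t)si.totalram * si.mem_unit * 3 / 4;
	gtt_size = DEFAULT_GTT_SIZE_MB << 20;
	if (three_quarters_ram > gtt_size)
		gtt_size = three_quarters_ram;

	printf("default GTT size: %llu MiB\n", (unsigned long long)(gtt_size >> 20));
	return 0;
}

On an 8 GiB machine this yields a 6 GiB GTT domain instead of one sized to match VRAM.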
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index ad5bf86ee8a3..952e0bf3bc84 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -110,7 +110,7 @@ static int amdgpu_ttm_global_init(struct amdgpu_device *adev)
ring = adev->mman.buffer_funcs_ring;
rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_KERNEL];
r = amd_sched_entity_init(&ring->sched, &adev->mman.entity,
- rq, amdgpu_sched_jobs);
+ rq, amdgpu_sched_jobs, NULL);
if (r) {
DRM_ERROR("Failed setting up TTM BO move run queue.\n");
goto error_entity;
@@ -282,8 +282,7 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
{
uint64_t addr = 0;
- if (mem->mem_type != TTM_PL_TT ||
- amdgpu_gtt_mgr_is_allocated(mem)) {
+ if (mem->mem_type != TTM_PL_TT || amdgpu_gtt_mgr_has_gart_addr(mem)) {
addr = mm_node->start << PAGE_SHIFT;
addr += bo->bdev->man[mem->mem_type].gpu_offset;
}
@@ -369,7 +368,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
* dst to window 1
*/
if (src->mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(src->mem)) {
+ !amdgpu_gtt_mgr_has_gart_addr(src->mem)) {
r = amdgpu_map_buffer(src->bo, src->mem,
PFN_UP(cur_size + src_page_offset),
src_node_start, 0, ring,
@@ -383,7 +382,7 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
}
if (dst->mem->mem_type == TTM_PL_TT &&
- !amdgpu_gtt_mgr_is_allocated(dst->mem)) {
+ !amdgpu_gtt_mgr_has_gart_addr(dst->mem)) {
r = amdgpu_map_buffer(dst->bo, dst->mem,
PFN_UP(cur_size + dst_page_offset),
dst_node_start, 1, ring,
@@ -467,9 +466,8 @@ error:
return r;
}
-static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev;
@@ -489,8 +487,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
placements.fpfn = 0;
placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
return r;
}
@@ -504,19 +501,18 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo,
if (unlikely(r)) {
goto out_cleanup;
}
- r = amdgpu_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem);
+ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, new_mem);
out_cleanup:
ttm_bo_mem_put(bo, &tmp_mem);
return r;
}
-static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
+static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev;
@@ -536,16 +532,15 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo,
placements.fpfn = 0;
placements.lpfn = 0;
placements.flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
- interruptible, no_wait_gpu);
+ r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx);
if (unlikely(r)) {
return r;
}
- r = ttm_bo_move_ttm(bo, interruptible, no_wait_gpu, &tmp_mem);
+ r = ttm_bo_move_ttm(bo, ctx->interruptible, ctx->no_wait_gpu, &tmp_mem);
if (unlikely(r)) {
goto out_cleanup;
}
- r = amdgpu_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -554,10 +549,9 @@ out_cleanup:
return r;
}
-static int amdgpu_bo_move(struct ttm_buffer_object *bo,
- bool evict, bool interruptible,
- bool no_wait_gpu,
- struct ttm_mem_reg *new_mem)
+static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
+ struct ttm_operation_ctx *ctx,
+ struct ttm_mem_reg *new_mem)
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
@@ -592,19 +586,19 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo,
if (old_mem->mem_type == TTM_PL_VRAM &&
new_mem->mem_type == TTM_PL_SYSTEM) {
- r = amdgpu_move_vram_ram(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ r = amdgpu_move_vram_ram(bo, evict, ctx, new_mem);
} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
new_mem->mem_type == TTM_PL_VRAM) {
- r = amdgpu_move_ram_vram(bo, evict, interruptible,
- no_wait_gpu, new_mem);
+ r = amdgpu_move_ram_vram(bo, evict, ctx, new_mem);
} else {
- r = amdgpu_move_blit(bo, evict, no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu,
+ new_mem, old_mem);
}
if (r) {
memcpy:
- r = ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
+ r = ttm_bo_move_memcpy(bo, ctx->interruptible,
+ ctx->no_wait_gpu, new_mem);
if (r) {
return r;
}
@@ -690,7 +684,6 @@ struct amdgpu_ttm_tt {
struct list_head guptasks;
atomic_t mmu_invalidations;
uint32_t last_set_pages;
- struct list_head list;
};
int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
@@ -861,44 +854,35 @@ static int amdgpu_ttm_backend_bind(struct ttm_tt *ttm,
bo_mem->mem_type == AMDGPU_PL_OA)
return -EINVAL;
- if (!amdgpu_gtt_mgr_is_allocated(bo_mem))
+ if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) {
+ gtt->offset = AMDGPU_BO_INVALID_OFFSET;
return 0;
+ }
- spin_lock(&gtt->adev->gtt_list_lock);
flags = amdgpu_ttm_tt_pte_flags(gtt->adev, ttm, bo_mem);
gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
r = amdgpu_gart_bind(gtt->adev, gtt->offset, ttm->num_pages,
ttm->pages, gtt->ttm.dma_address, flags);
- if (r) {
+ if (r)
DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
ttm->num_pages, gtt->offset);
- goto error_gart_bind;
- }
-
- list_add_tail(&gtt->list, &gtt->adev->gtt_list);
-error_gart_bind:
- spin_unlock(&gtt->adev->gtt_list_lock);
return r;
}
-bool amdgpu_ttm_is_bound(struct ttm_tt *ttm)
-{
- struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
- return gtt && !list_empty(&gtt->list);
-}
-
-int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
+int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- struct ttm_tt *ttm = bo->ttm;
+ struct ttm_operation_ctx ctx = { false, false };
+ struct amdgpu_ttm_tt *gtt = (void*)bo->ttm;
struct ttm_mem_reg tmp;
struct ttm_placement placement;
struct ttm_place placements;
+ uint64_t flags;
int r;
- if (!ttm || amdgpu_ttm_is_bound(ttm))
+ if (bo->mem.mem_type != TTM_PL_TT ||
+ amdgpu_gtt_mgr_has_gart_addr(&bo->mem))
return 0;
tmp = bo->mem;
@@ -912,43 +896,44 @@ int amdgpu_ttm_bind(struct ttm_buffer_object *bo, struct ttm_mem_reg *bo_mem)
placements.flags = (bo->mem.placement & ~TTM_PL_MASK_MEM) |
TTM_PL_FLAG_TT;
- r = ttm_bo_mem_space(bo, &placement, &tmp, true, false);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
if (unlikely(r))
return r;
- r = ttm_bo_move_ttm(bo, true, false, &tmp);
- if (unlikely(r))
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ gtt->offset = (u64)tmp.start << PAGE_SHIFT;
+ r = amdgpu_gart_bind(adev, gtt->offset, bo->ttm->num_pages,
+ bo->ttm->pages, gtt->ttm.dma_address, flags);
+ if (unlikely(r)) {
ttm_bo_mem_put(bo, &tmp);
- else
- bo->offset = (bo->mem.start << PAGE_SHIFT) +
- bo->bdev->man[bo->mem.mem_type].gpu_offset;
+ return r;
+ }
- return r;
+ ttm_bo_mem_put(bo, &bo->mem);
+ bo->mem = tmp;
+ bo->offset = (bo->mem.start << PAGE_SHIFT) +
+ bo->bdev->man[bo->mem.mem_type].gpu_offset;
+
+ return 0;
}
-int amdgpu_ttm_recover_gart(struct amdgpu_device *adev)
+int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
{
- struct amdgpu_ttm_tt *gtt, *tmp;
- struct ttm_mem_reg bo_mem;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+ struct amdgpu_ttm_tt *gtt = (void *)tbo->ttm;
uint64_t flags;
int r;
- bo_mem.mem_type = TTM_PL_TT;
- spin_lock(&adev->gtt_list_lock);
- list_for_each_entry_safe(gtt, tmp, &adev->gtt_list, list) {
- flags = amdgpu_ttm_tt_pte_flags(gtt->adev, &gtt->ttm.ttm, &bo_mem);
- r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
- gtt->ttm.ttm.pages, gtt->ttm.dma_address,
- flags);
- if (r) {
- spin_unlock(&adev->gtt_list_lock);
- DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
- gtt->ttm.ttm.num_pages, gtt->offset);
- return r;
- }
- }
- spin_unlock(&adev->gtt_list_lock);
- return 0;
+ if (!gtt)
+ return 0;
+
+ flags = amdgpu_ttm_tt_pte_flags(adev, &gtt->ttm.ttm, &tbo->mem);
+ r = amdgpu_gart_bind(adev, gtt->offset, gtt->ttm.ttm.num_pages,
+ gtt->ttm.ttm.pages, gtt->ttm.dma_address, flags);
+ if (r)
+ DRM_ERROR("failed to bind %lu pages at 0x%08llX\n",
+ gtt->ttm.ttm.num_pages, gtt->offset);
+ return r;
}
static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
@@ -959,20 +944,14 @@ static int amdgpu_ttm_backend_unbind(struct ttm_tt *ttm)
if (gtt->userptr)
amdgpu_ttm_tt_unpin_userptr(ttm);
- if (!amdgpu_ttm_is_bound(ttm))
+ if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
return 0;
/* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */
- spin_lock(&gtt->adev->gtt_list_lock);
r = amdgpu_gart_unbind(gtt->adev, gtt->offset, ttm->num_pages);
- if (r) {
+ if (r)
DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n",
gtt->ttm.ttm.num_pages, gtt->offset);
- goto error_unbind;
- }
- list_del_init(&gtt->list);
-error_unbind:
- spin_unlock(&gtt->adev->gtt_list_lock);
return r;
}
@@ -1009,7 +988,6 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_bo_device *bdev,
kfree(gtt);
return NULL;
}
- INIT_LIST_HEAD(&gtt->list);
return &gtt->ttm.ttm;
}
@@ -1348,10 +1326,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->mc.real_vram_size / (1024 * 1024)));
- if (amdgpu_gtt_size == -1)
- gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20),
- adev->mc.mc_vram_size);
- else
+ if (amdgpu_gtt_size == -1) {
+ struct sysinfo si;
+
+ si_meminfo(&si);
+ gtt_size = max(AMDGPU_DEFAULT_GTT_SIZE_MB << 20,
+ (uint64_t)si.totalram * si.mem_unit * 3/4);
+ } else
gtt_size = (uint64_t)amdgpu_gtt_size << 20;
r = ttm_bo_init_mm(&adev->mman.bdev, TTM_PL_TT, gtt_size >> PAGE_SHIFT);
if (r) {
@@ -1410,19 +1391,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
void amdgpu_ttm_fini(struct amdgpu_device *adev)
{
- int r;
-
if (!adev->mman.initialized)
return;
+
amdgpu_ttm_debugfs_fini(adev);
- if (adev->stolen_vga_memory) {
- r = amdgpu_bo_reserve(adev->stolen_vga_memory, true);
- if (r == 0) {
- amdgpu_bo_unpin(adev->stolen_vga_memory);
- amdgpu_bo_unreserve(adev->stolen_vga_memory);
- }
- amdgpu_bo_unref(&adev->stolen_vga_memory);
- }
+ amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+ amdgpu_fw_reserve_vram_fini(adev);
+
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_VRAM);
ttm_bo_clean_mm(&adev->mman.bdev, TTM_PL_TT);
if (adev->gds.mem.total_size)
@@ -1432,7 +1407,6 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
if (adev->gds.oa.total_size)
ttm_bo_clean_mm(&adev->mman.bdev, AMDGPU_PL_OA);
ttm_bo_device_release(&adev->mman.bdev);
- amdgpu_gart_fini(adev);
amdgpu_ttm_global_fini(adev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
@@ -1628,7 +1602,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- r = amdgpu_ttm_bind(&bo->tbo, &bo->tbo.mem);
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r)
return r;
}
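A recurring simplification in this patch is the removal of adev->gtt_list and its lock: rather than tracking bound ttm objects on a global list, amdgpu_ttm_backend_bind() now stores AMDGPU_BO_INVALID_OFFSET in gtt->offset for buffers without a GART address, and amdgpu_ttm_backend_unbind() simply tests that field. A small standalone sketch of the idea, with hypothetical names standing in for the driver's types:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins: the driver uses AMDGPU_BO_INVALID_OFFSET and
 * struct amdgpu_ttm_tt, which carry far more state than shown here. */
#define INVALID_OFFSET UINT64_MAX

struct gart_tt {
	uint64_t offset;	/* GART address, or INVALID_OFFSET while unbound */
};

/* Before this patch, "is this tt bound?" meant "is it on adev->gtt_list?",
 * which needed a global list plus a spinlock; now the answer is local state. */
static bool tt_is_bound(const struct gart_tt *tt)
{
	return tt->offset != INVALID_OFFSET;
}

int main(void)
{
	struct gart_tt tt = { .offset = INVALID_OFFSET };

	printf("bound: %d\n", tt_is_bound(&tt));	/* 0 */
	tt.offset = 0x100000;				/* bound at some GART address */
	printf("bound: %d\n", tt_is_bound(&tt));	/* 1 */
	return 0;
}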