From 313bbdee84542437672824edbf03ba43ea07de04 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 23 Sep 2020 13:04:45 +1000 Subject: drm/radeon: kill radeon_bo_wait MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit this is unused Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200923030454.362731-2-airlied@gmail.com --- drivers/gpu/drm/radeon/radeon_object.c | 15 --------------- drivers/gpu/drm/radeon/radeon_object.h | 3 --- 2 files changed, 18 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 316e35d3f8a9..76dfed30d61c 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -844,21 +844,6 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) return 0; } -int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, bool no_wait) -{ - int r; - - r = ttm_bo_reserve(&bo->tbo, true, no_wait, NULL); - if (unlikely(r != 0)) - return r; - if (mem_type) - *mem_type = bo->tbo.mem.mem_type; - - r = ttm_bo_wait(&bo->tbo, true, no_wait); - ttm_bo_unreserve(&bo->tbo); - return r; -} - /** * radeon_bo_fence - add fence to buffer object * diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 44b47241ee42..430574caaf09 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -133,9 +133,6 @@ static inline u64 radeon_bo_mmap_offset(struct radeon_bo *bo) return drm_vma_node_offset_addr(&bo->tbo.base.vma_node); } -extern int radeon_bo_wait(struct radeon_bo *bo, u32 *mem_type, - bool no_wait); - extern int radeon_bo_create(struct radeon_device *rdev, unsigned long size, int byte_align, bool kernel, u32 domain, u32 flags, -- cgit From b1ec2924aa5a284aee1b829a15864378e88f423a Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 23 Sep 2020 13:04:47 +1000 Subject: drm/radeon: cleanup ttm operation ctx usage. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Just pass it around move, and remove unused pieces Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200923030454.362731-4-airlied@gmail.com --- drivers/gpu/drm/radeon/radeon_ttm.c | 34 +++++++++++++++------------------- 1 file changed, 15 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 36150b7f31a9..f3d424b3148e 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -151,7 +151,7 @@ static int radeon_verify_access(struct ttm_buffer_object *bo, struct file *filp) } static int radeon_move_blit(struct ttm_buffer_object *bo, - bool evict, bool no_wait_gpu, + bool evict, struct ttm_resource *new_mem, struct ttm_resource *old_mem) { @@ -206,11 +206,10 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, } static int radeon_move_vram_ram(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, + bool evict, + struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem) { - struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; struct ttm_resource *old_mem = &bo->mem; struct ttm_resource tmp_mem; struct ttm_place placements; @@ -227,7 +226,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, placements.lpfn = 0; placements.mem_type = TTM_PL_TT; placements.flags = TTM_PL_MASK_CACHING; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { return r; } @@ -237,7 +236,7 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, goto out_cleanup; } - r = ttm_tt_populate(bo->bdev, bo->ttm, &ctx); + r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); if (unlikely(r)) { goto out_cleanup; } @@ -246,22 +245,21 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = radeon_move_blit(bo, true, no_wait_gpu, &tmp_mem, old_mem); + r = radeon_move_blit(bo, true, &tmp_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, &ctx, new_mem); + r = ttm_bo_move_ttm(bo, ctx, new_mem); out_cleanup: ttm_resource_free(bo, &tmp_mem); return r; } static int radeon_move_ram_vram(struct ttm_buffer_object *bo, - bool evict, bool interruptible, - bool no_wait_gpu, + bool evict, + struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem) { - struct ttm_operation_ctx ctx = { interruptible, no_wait_gpu }; struct ttm_resource *old_mem = &bo->mem; struct ttm_resource tmp_mem; struct ttm_placement placement; @@ -278,15 +276,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, placements.lpfn = 0; placements.mem_type = TTM_PL_TT; placements.flags = TTM_PL_MASK_CACHING; - r = ttm_bo_mem_space(bo, &placement, &tmp_mem, &ctx); + r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, &ctx, &tmp_mem); + r = ttm_bo_move_ttm(bo, ctx, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } - r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem); + r = radeon_move_blit(bo, true, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; } @@ -334,14 +332,12 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_mem->mem_type == TTM_PL_VRAM && new_mem->mem_type == TTM_PL_SYSTEM) { - r = radeon_move_vram_ram(bo, evict, ctx->interruptible, - 
ctx->no_wait_gpu, new_mem); + r = radeon_move_vram_ram(bo, evict, ctx, new_mem); } else if (old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) { - r = radeon_move_ram_vram(bo, evict, ctx->interruptible, - ctx->no_wait_gpu, new_mem); + r = radeon_move_ram_vram(bo, evict, ctx, new_mem); } else { - r = radeon_move_blit(bo, evict, ctx->no_wait_gpu, + r = radeon_move_blit(bo, evict, new_mem, old_mem); } -- cgit From 0ef1ed813e6b13d29331088070c7f554b350a266 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 23 Sep 2020 13:04:49 +1000 Subject: drm/ttm: add bo wait that takes a ctx wrapper. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit I'm thinking of pushing the wait into the drivers. Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200923030454.362731-6-airlied@gmail.com --- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 ++-- drivers/gpu/drm/qxl/qxl_ttm.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- drivers/gpu/drm/ttm/ttm_bo_util.c | 4 ++-- include/drm/ttm/ttm_bo_api.h | 5 +++++ 5 files changed, 11 insertions(+), 6 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 0cbf5fb764ed..a74bf1e8fdbe 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1062,7 +1062,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + ret = ttm_bo_wait_ctx(bo, ctx); if (ret) return ret; @@ -1097,7 +1097,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, } /* Fallback to software copy. 
*/ - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + ret = ttm_bo_wait_ctx(bo, ctx); if (ret == 0) ret = ttm_bo_move_memcpy(bo, ctx, new_reg); diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index fd691fff8394..320c202f163c 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -160,7 +160,7 @@ static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int ret; - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + ret = ttm_bo_wait_ctx(bo, ctx); if (ret) return ret; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index f3d424b3148e..6869770e2eef 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -302,7 +302,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int r; - r = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + r = ttm_bo_wait_ctx(bo, ctx); if (r) return r; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index fb2a25f8408f..c90133d8a612 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -59,7 +59,7 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, int ret; if (old_mem->mem_type != TTM_PL_SYSTEM) { - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + ret = ttm_bo_wait_ctx(bo, ctx); if (unlikely(ret != 0)) { if (ret != -ERESTARTSYS) @@ -231,7 +231,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, unsigned long add = 0; int dir; - ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); + ret = ttm_bo_wait_ctx(bo, ctx); if (ret) return ret; diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index 0f7cd21d6d74..6f544b6d70a3 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -261,6 +261,11 @@ ttm_bo_get_unless_zero(struct ttm_buffer_object *bo) */ int ttm_bo_wait(struct ttm_buffer_object *bo, bool interruptible, bool no_wait); +static inline int ttm_bo_wait_ctx(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) +{ + return ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); +} + /** * ttm_bo_mem_compat - Check if proposed placement is compatible with a bo * -- cgit From 0b8793f6e7fc097c112f1848aa7dab60b9ede5a7 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 21 Sep 2020 13:18:02 +0200 Subject: drm/radeon: switch over to the new pin interface MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Stop using TTM_PL_FLAG_NO_EVICT. 
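For readers skimming the hunks below: the driver-private pin_count and the TTM_PL_FLAG_NO_EVICT placement flag are replaced by TTM's own reference-counted pin (ttm_bo_pin()/ttm_bo_unpin() and bo->tbo.pin_count). The new unpin path, shown here in one piece purely for readability (it is the same code the radeon_object.c hunks below introduce), becomes:

	void radeon_bo_unpin(struct radeon_bo *bo)
	{
		/* TTM now owns the pin reference count; no driver bookkeeping,
		 * no re-validate, and the call can no longer fail.
		 */
		ttm_bo_unpin(&bo->tbo);

		/* Pin-size accounting only runs once the last pin is dropped. */
		if (!bo->tbo.pin_count) {
			if (bo->tbo.mem.mem_type == TTM_PL_VRAM)
				bo->rdev->vram_pin_size -= radeon_bo_size(bo);
			else
				bo->rdev->gart_pin_size -= radeon_bo_size(bo);
		}
	}
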
Signed-off-by: Christian König Reviewed-by: Dave Airlie Reviewed-by: Huang Rui Link: https://patchwork.freedesktop.org/patch/391610/?series=81973&rev=1 --- drivers/gpu/drm/radeon/radeon.h | 1 - drivers/gpu/drm/radeon/radeon_display.c | 9 ++------ drivers/gpu/drm/radeon/radeon_object.c | 37 ++++++++------------------------- drivers/gpu/drm/radeon/radeon_object.h | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 2 +- 5 files changed, 13 insertions(+), 38 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h index a6d8de01194a..5d54bccebd4d 100644 --- a/drivers/gpu/drm/radeon/radeon.h +++ b/drivers/gpu/drm/radeon/radeon.h @@ -497,7 +497,6 @@ struct radeon_bo { struct ttm_buffer_object tbo; struct ttm_bo_kmap_obj kmap; u32 flags; - unsigned pin_count; void *kptr; u32 tiling_flags; u32 pitch; diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c index 7b69d6dfe44a..3eacf33bbe48 100644 --- a/drivers/gpu/drm/radeon/radeon_display.c +++ b/drivers/gpu/drm/radeon/radeon_display.c @@ -273,10 +273,7 @@ static void radeon_unpin_work_func(struct work_struct *__work) /* unpin of the old buffer */ r = radeon_bo_reserve(work->old_rbo, false); if (likely(r == 0)) { - r = radeon_bo_unpin(work->old_rbo); - if (unlikely(r != 0)) { - DRM_ERROR("failed to unpin buffer after flip\n"); - } + radeon_bo_unpin(work->old_rbo); radeon_bo_unreserve(work->old_rbo); } else DRM_ERROR("failed to reserve buffer after flip\n"); @@ -607,9 +604,7 @@ pflip_cleanup: DRM_ERROR("failed to reserve new rbo in error path\n"); goto cleanup; } - if (unlikely(radeon_bo_unpin(new_rbo) != 0)) { - DRM_ERROR("failed to unpin new rbo in error path\n"); - } + radeon_bo_unpin(new_rbo); radeon_bo_unreserve(new_rbo); cleanup: diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 76dfed30d61c..689426dd8480 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -334,8 +334,8 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, if (radeon_ttm_tt_has_userptr(bo->rdev, bo->tbo.ttm)) return -EPERM; - if (bo->pin_count) { - bo->pin_count++; + if (bo->tbo.pin_count) { + ttm_bo_pin(&bo->tbo); if (gpu_addr) *gpu_addr = radeon_bo_gpu_offset(bo); @@ -367,13 +367,11 @@ int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, bo->rdev->mc.visible_vram_size >> PAGE_SHIFT; else bo->placements[i].lpfn = max_offset >> PAGE_SHIFT; - - bo->placements[i].flags |= TTM_PL_FLAG_NO_EVICT; } r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); if (likely(r == 0)) { - bo->pin_count = 1; + ttm_bo_pin(&bo->tbo); if (gpu_addr != NULL) *gpu_addr = radeon_bo_gpu_offset(bo); if (domain == RADEON_GEM_DOMAIN_VRAM) @@ -391,32 +389,15 @@ int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr) return radeon_bo_pin_restricted(bo, domain, 0, gpu_addr); } -int radeon_bo_unpin(struct radeon_bo *bo) +void radeon_bo_unpin(struct radeon_bo *bo) { - struct ttm_operation_ctx ctx = { false, false }; - int r, i; - - if (!bo->pin_count) { - dev_warn(bo->rdev->dev, "%p unpin not necessary\n", bo); - return 0; - } - bo->pin_count--; - if (bo->pin_count) - return 0; - for (i = 0; i < bo->placement.num_placement; i++) { - bo->placements[i].lpfn = 0; - bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT; - } - r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - if (likely(r == 0)) { + ttm_bo_unpin(&bo->tbo); + if (!bo->tbo.pin_count) { if 
(bo->tbo.mem.mem_type == TTM_PL_VRAM) bo->rdev->vram_pin_size -= radeon_bo_size(bo); else bo->rdev->gart_pin_size -= radeon_bo_size(bo); - } else { - dev_err(bo->rdev->dev, "%p validate failed for unpin\n", bo); } - return r; } int radeon_bo_evict_vram(struct radeon_device *rdev) @@ -549,7 +530,7 @@ int radeon_bo_list_validate(struct radeon_device *rdev, list_for_each_entry(lobj, head, tv.head) { struct radeon_bo *bo = lobj->robj; - if (!bo->pin_count) { + if (!bo->tbo.pin_count) { u32 domain = lobj->preferred_domains; u32 allowed = lobj->allowed_domains; u32 current_domain = @@ -629,7 +610,7 @@ int radeon_bo_get_surface_reg(struct radeon_bo *bo) break; old_object = reg->bo; - if (old_object->pin_count == 0) + if (old_object->tbo.pin_count == 0) steal = i; } @@ -816,7 +797,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) return 0; /* Can't move a pinned BO to visible VRAM */ - if (rbo->pin_count > 0) + if (rbo->tbo.pin_count > 0) return -EINVAL; /* hurrah the memory is not visible ! */ diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 430574caaf09..27cfb64057fe 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -146,7 +146,7 @@ extern void radeon_bo_unref(struct radeon_bo **bo); extern int radeon_bo_pin(struct radeon_bo *bo, u32 domain, u64 *gpu_addr); extern int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset, u64 *gpu_addr); -extern int radeon_bo_unpin(struct radeon_bo *bo); +extern void radeon_bo_unpin(struct radeon_bo *bo); extern int radeon_bo_evict_vram(struct radeon_device *rdev); extern void radeon_bo_force_delete(struct radeon_device *rdev); extern int radeon_bo_init(struct radeon_device *rdev); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 6869770e2eef..ea9ffa6198da 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -308,7 +308,7 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, /* Can't move a pinned BO */ rbo = container_of(bo, struct radeon_bo, tbo); - if (WARN_ON_ONCE(rbo->pin_count > 0)) + if (WARN_ON_ONCE(rbo->tbo.pin_count > 0)) return -EINVAL; rdev = radeon_get_rdev(bo->bdev); -- cgit From 51e50e542204329e36e248de14f4c7a466d2f677 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Thu, 24 Sep 2020 15:18:04 +1000 Subject: drm/radeon/ttm: handle ttm moves properly MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The core move code currently handles use_tt moves, for radeon this was being handled also in the driver, but not using the same paths. If moving between TT/SYSTEM (all the use_tt paths on radeon) use the core move function. Eventually the core will be flipped over to calling the driver. 
Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20200924051845.397177-5-airlied@gmail.com --- drivers/gpu/drm/radeon/radeon_ttm.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index ea9ffa6198da..df5cedb2b632 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -316,14 +316,16 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, ttm_bo_move_null(bo, new_mem); return 0; } - if ((old_mem->mem_type == TTM_PL_TT && - new_mem->mem_type == TTM_PL_SYSTEM) || - (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_TT)) { - /* bind is enough */ + if (old_mem->mem_type == TTM_PL_SYSTEM && + new_mem->mem_type == TTM_PL_TT) { ttm_bo_move_null(bo, new_mem); return 0; } + + if (old_mem->mem_type == TTM_PL_TT && + new_mem->mem_type == TTM_PL_SYSTEM) + return ttm_bo_move_ttm(bo, ctx, new_mem); + if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || rdev->asic->copy.copy == NULL) { /* use memcpy */ -- cgit From 552f9d60f6cc5bc53007b0d82c1d0696fcf51a33 Mon Sep 17 00:00:00 2001 From: Thomas Zimmermann Date: Wed, 23 Sep 2020 12:21:50 +0200 Subject: drm/radeon: Introduce GEM object functions MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit GEM object functions deprecate several similar callback interfaces in struct drm_driver. This patch replaces the per-driver callbacks with per-instance callbacks in radeon. v2: * move object-function instance to radeon_gem.c (Christian) * set callbacks in radeon_gem_object_create() (Christian) Signed-off-by: Thomas Zimmermann Reviewed-by: Christian König Link: https://patchwork.freedesktop.org/patch/msgid/20200923102159.24084-14-tzimmermann@suse.de --- drivers/gpu/drm/radeon/radeon_drv.c | 23 +---------------------- drivers/gpu/drm/radeon/radeon_gem.c | 31 +++++++++++++++++++++++++++---- 2 files changed, 28 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c index 4cd30613fa1d..65061c949aee 100644 --- a/drivers/gpu/drm/radeon/radeon_drv.c +++ b/drivers/gpu/drm/radeon/radeon_drv.c @@ -124,13 +124,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev); int radeon_driver_irq_postinstall_kms(struct drm_device *dev); void radeon_driver_irq_uninstall_kms(struct drm_device *dev); irqreturn_t radeon_driver_irq_handler_kms(int irq, void *arg); -void radeon_gem_object_free(struct drm_gem_object *obj); -int radeon_gem_object_open(struct drm_gem_object *obj, - struct drm_file *file_priv); -void radeon_gem_object_close(struct drm_gem_object *obj, - struct drm_file *file_priv); -struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, - int flags); extern int radeon_get_crtc_scanoutpos(struct drm_device *dev, unsigned int crtc, unsigned int flags, int *vpos, int *hpos, ktime_t *stime, ktime_t *etime, @@ -145,14 +138,9 @@ int radeon_mode_dumb_mmap(struct drm_file *filp, int radeon_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); -struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); struct drm_gem_object *radeon_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *, struct sg_table *sg); -int radeon_gem_prime_pin(struct drm_gem_object *obj); -void 
radeon_gem_prime_unpin(struct drm_gem_object *obj); -void *radeon_gem_prime_vmap(struct drm_gem_object *obj); -void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); /* atpx handler */ #if defined(CONFIG_VGA_SWITCHEROO) @@ -550,7 +538,7 @@ long radeon_drm_ioctl(struct file *filp, } ret = drm_ioctl(filp, cmd, arg); - + pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); return ret; @@ -609,22 +597,13 @@ static struct drm_driver kms_driver = { .irq_uninstall = radeon_driver_irq_uninstall_kms, .irq_handler = radeon_driver_irq_handler_kms, .ioctls = radeon_ioctls_kms, - .gem_free_object_unlocked = radeon_gem_object_free, - .gem_open_object = radeon_gem_object_open, - .gem_close_object = radeon_gem_object_close, .dumb_create = radeon_mode_dumb_create, .dumb_map_offset = radeon_mode_dumb_mmap, .fops = &radeon_driver_kms_fops, .prime_handle_to_fd = drm_gem_prime_handle_to_fd, .prime_fd_to_handle = drm_gem_prime_fd_to_handle, - .gem_prime_export = radeon_gem_prime_export, - .gem_prime_pin = radeon_gem_prime_pin, - .gem_prime_unpin = radeon_gem_prime_unpin, - .gem_prime_get_sg_table = radeon_gem_prime_get_sg_table, .gem_prime_import_sg_table = radeon_gem_prime_import_sg_table, - .gem_prime_vmap = radeon_gem_prime_vmap, - .gem_prime_vunmap = radeon_gem_prime_vunmap, .name = DRIVER_NAME, .desc = DRIVER_DESC, diff --git a/drivers/gpu/drm/radeon/radeon_gem.c b/drivers/gpu/drm/radeon/radeon_gem.c index e5c4271e64ed..0ccd7213e41f 100644 --- a/drivers/gpu/drm/radeon/radeon_gem.c +++ b/drivers/gpu/drm/radeon/radeon_gem.c @@ -35,7 +35,17 @@ #include "radeon.h" -void radeon_gem_object_free(struct drm_gem_object *gobj) +struct dma_buf *radeon_gem_prime_export(struct drm_gem_object *gobj, + int flags); +struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj); +int radeon_gem_prime_pin(struct drm_gem_object *obj); +void radeon_gem_prime_unpin(struct drm_gem_object *obj); +void *radeon_gem_prime_vmap(struct drm_gem_object *obj); +void radeon_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr); + +static const struct drm_gem_object_funcs radeon_gem_object_funcs; + +static void radeon_gem_object_free(struct drm_gem_object *gobj) { struct radeon_bo *robj = gem_to_radeon_bo(gobj); @@ -85,6 +95,7 @@ retry: return r; } *obj = &robj->tbo.base; + (*obj)->funcs = &radeon_gem_object_funcs; robj->pid = task_pid_nr(current); mutex_lock(&rdev->gem.mutex); @@ -146,7 +157,7 @@ void radeon_gem_fini(struct radeon_device *rdev) * Call from drm_gem_handle_create which appear in both new and open ioctl * case. 
*/ -int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) +static int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_priv) { struct radeon_bo *rbo = gem_to_radeon_bo(obj); struct radeon_device *rdev = rbo->rdev; @@ -176,8 +187,8 @@ int radeon_gem_object_open(struct drm_gem_object *obj, struct drm_file *file_pri return 0; } -void radeon_gem_object_close(struct drm_gem_object *obj, - struct drm_file *file_priv) +static void radeon_gem_object_close(struct drm_gem_object *obj, + struct drm_file *file_priv) { struct radeon_bo *rbo = gem_to_radeon_bo(obj); struct radeon_device *rdev = rbo->rdev; @@ -216,6 +227,18 @@ static int radeon_gem_handle_lockup(struct radeon_device *rdev, int r) return r; } +static const struct drm_gem_object_funcs radeon_gem_object_funcs = { + .free = radeon_gem_object_free, + .open = radeon_gem_object_open, + .close = radeon_gem_object_close, + .export = radeon_gem_prime_export, + .pin = radeon_gem_prime_pin, + .unpin = radeon_gem_prime_unpin, + .get_sg_table = radeon_gem_prime_get_sg_table, + .vmap = radeon_gem_prime_vmap, + .vunmap = radeon_gem_prime_vunmap, +}; + /* * GEM ioctls. */ -- cgit From 8e0310f0ff04ea311190f83cc0d18a656fe65d26 Mon Sep 17 00:00:00 2001 From: Christian König Date: Fri, 25 Sep 2020 14:17:09 +0200 Subject: drm/radeon: stop using TTMs fault callback MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We already implemented the fault handler ourself, just open code what is necessary here. Signed-off-by: Christian König Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/392322/ --- drivers/gpu/drm/radeon/radeon_object.c | 22 +++++++++++++--------- drivers/gpu/drm/radeon/radeon_object.h | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 29 ++++++++++++++++++++--------- 3 files changed, 34 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 689426dd8480..8c285eb118f9 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -775,7 +775,7 @@ void radeon_bo_move_notify(struct ttm_buffer_object *bo, radeon_update_memory_usage(rbo, new_mem->mem_type, 1); } -int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) +vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) { struct ttm_operation_ctx ctx = { false, false }; struct radeon_device *rdev; @@ -798,7 +798,7 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) /* Can't move a pinned BO to visible VRAM */ if (rbo->tbo.pin_count > 0) - return -EINVAL; + return VM_FAULT_SIGBUS; /* hurrah the memory is not visible ! 
*/ radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_VRAM); @@ -812,16 +812,20 @@ int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) r = ttm_bo_validate(bo, &rbo->placement, &ctx); if (unlikely(r == -ENOMEM)) { radeon_ttm_placement_from_domain(rbo, RADEON_GEM_DOMAIN_GTT); - return ttm_bo_validate(bo, &rbo->placement, &ctx); - } else if (unlikely(r != 0)) { - return r; + r = ttm_bo_validate(bo, &rbo->placement, &ctx); + } else if (likely(!r)) { + offset = bo->mem.start << PAGE_SHIFT; + /* this should never happen */ + if ((offset + size) > rdev->mc.visible_vram_size) + return VM_FAULT_SIGBUS; } - offset = bo->mem.start << PAGE_SHIFT; - /* this should never happen */ - if ((offset + size) > rdev->mc.visible_vram_size) - return -EINVAL; + if (unlikely(r == -EBUSY || r == -ERESTARTSYS)) + return VM_FAULT_NOPAGE; + else if (unlikely(r)) + return VM_FAULT_SIGBUS; + ttm_bo_move_to_lru_tail_unlocked(bo); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_object.h b/drivers/gpu/drm/radeon/radeon_object.h index 27cfb64057fe..d606e9a935e3 100644 --- a/drivers/gpu/drm/radeon/radeon_object.h +++ b/drivers/gpu/drm/radeon/radeon_object.h @@ -163,7 +163,7 @@ extern int radeon_bo_check_tiling(struct radeon_bo *bo, bool has_moved, extern void radeon_bo_move_notify(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *new_mem); -extern int radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); +extern vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo); extern int radeon_bo_get_surface_reg(struct radeon_bo *bo); extern void radeon_bo_fence(struct radeon_bo *bo, struct radeon_fence *fence, bool shared); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index df5cedb2b632..63e38b05a5bc 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -803,7 +803,6 @@ static struct ttm_bo_driver radeon_bo_driver = { .move = &radeon_bo_move, .verify_access = &radeon_verify_access, .move_notify = &radeon_bo_move_notify, - .fault_reserve_notify = &radeon_bo_fault_reserve_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, }; @@ -904,17 +903,29 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size) static vm_fault_t radeon_ttm_fault(struct vm_fault *vmf) { - struct ttm_buffer_object *bo; - struct radeon_device *rdev; + struct ttm_buffer_object *bo = vmf->vma->vm_private_data; + struct radeon_device *rdev = radeon_get_rdev(bo->bdev); vm_fault_t ret; - bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data; - if (bo == NULL) - return VM_FAULT_NOPAGE; - - rdev = radeon_get_rdev(bo->bdev); down_read(&rdev->pm.mclk_lock); - ret = ttm_bo_vm_fault(vmf); + + ret = ttm_bo_vm_reserve(bo, vmf); + if (ret) + goto unlock_mclk; + + ret = radeon_bo_fault_reserve_notify(bo); + if (ret) + goto unlock_resv; + + ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, + TTM_BO_VM_NUM_PREFAULT, 1); + if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT)) + goto unlock_mclk; + +unlock_resv: + dma_resv_unlock(bo->base.resv); + +unlock_mclk: up_read(&rdev->pm.mclk_lock); return ret; } -- cgit From 4ce032d64c2a30cf5b23c57b3328d5d2dab99a1f Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 1 Oct 2020 15:21:00 +0200 Subject: drm/ttm: nuke ttm_bo_evict_mm and rename mgr function v3 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Make it more clear what the resource manager function does and nuke the wrapper function. 
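The caller-side replacement is mechanical; for example radeon_bo_evict_vram() (see the radeon hunk below) goes from the removed wrapper to looking up the manager explicitly:

	struct ttm_resource_manager *man;

	/* was: return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); */
	man = ttm_manager_type(&rdev->mman.bdev, TTM_PL_VRAM);
	return ttm_resource_manager_evict_all(&rdev->mman.bdev, man);
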
v2: nuke the wrapper v3: fix typo in radeon, rebased Signed-off-by: Christian König Reviewed-by: Daniel Vetter (v2) Link: https://patchwork.freedesktop.org/patch/393914/ --- drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 2 +- drivers/gpu/drm/nouveau/nouveau_drm.c | 5 ++++- drivers/gpu/drm/nouveau/nouveau_ttm.c | 4 ++-- drivers/gpu/drm/qxl/qxl_object.c | 10 ++++++++-- drivers/gpu/drm/radeon/radeon_object.c | 6 +++++- drivers/gpu/drm/ttm/ttm_bo.c | 18 ------------------ drivers/gpu/drm/ttm/ttm_range_manager.c | 2 +- drivers/gpu/drm/ttm/ttm_resource.c | 10 +++++----- drivers/gpu/drm/vmwgfx/vmwgfx_drv.c | 12 +++++++++--- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_thp.c | 2 +- include/drm/ttm/ttm_bo_api.h | 20 -------------------- include/drm/ttm/ttm_resource.h | 4 ++-- 16 files changed, 49 insertions(+), 61 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c index abe0c2729e1c..a5e08dc54e4a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_debugfs.c @@ -1319,6 +1319,7 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data) struct drm_info_node *node = (struct drm_info_node *)m->private; struct drm_device *dev = node->minor->dev; struct amdgpu_device *adev = drm_to_adev(dev); + struct ttm_resource_manager *man; int r; r = pm_runtime_get_sync(dev->dev); @@ -1327,7 +1328,9 @@ static int amdgpu_debugfs_evict_gtt(struct seq_file *m, void *data) return r; } - seq_printf(m, "(%d)\n", ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_TT)); + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT); + r = ttm_resource_manager_evict_all(&adev->mman.bdev, man); + seq_printf(m, "(%d)\n", r); pm_runtime_mark_last_busy(dev->dev); pm_runtime_put_autosuspend(dev->dev); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index f203e4a6a3f2..1721739def84 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -136,7 +136,7 @@ void amdgpu_gtt_mgr_fini(struct amdgpu_device *adev) ttm_resource_manager_set_used(man, false); - ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man); + ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); if (ret) return; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 80bc7177cd45..8b96e7aaeff1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1029,6 +1029,8 @@ void amdgpu_bo_unpin(struct amdgpu_bo *bo) */ int amdgpu_bo_evict_vram(struct amdgpu_device *adev) { + struct ttm_resource_manager *man; + /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ #ifndef CONFIG_HIBERNATION if (adev->flags & AMD_IS_APU) { @@ -1036,7 +1038,9 @@ int amdgpu_bo_evict_vram(struct amdgpu_device *adev) return 0; } #endif - return ttm_bo_evict_mm(&adev->mman.bdev, TTM_PL_VRAM); + + man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM); + return ttm_resource_manager_evict_all(&adev->mman.bdev, man); } static const char *amdgpu_vram_names[] = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 01c1171afbe0..7747be644dd0 100644 --- 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -212,7 +212,7 @@ void amdgpu_vram_mgr_fini(struct amdgpu_device *adev) ttm_resource_manager_set_used(man, false); - ret = ttm_resource_manager_force_list_clean(&adev->mman.bdev, man); + ret = ttm_resource_manager_evict_all(&adev->mman.bdev, man); if (ret) return; diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c index 72640bca1617..d141a5f004af 100644 --- a/drivers/gpu/drm/nouveau/nouveau_drm.c +++ b/drivers/gpu/drm/nouveau/nouveau_drm.c @@ -820,6 +820,7 @@ static int nouveau_do_suspend(struct drm_device *dev, bool runtime) { struct nouveau_drm *drm = nouveau_drm(dev); + struct ttm_resource_manager *man; int ret; nouveau_svm_suspend(drm); @@ -836,7 +837,9 @@ nouveau_do_suspend(struct drm_device *dev, bool runtime) } NV_DEBUG(drm, "evicting buffers...\n"); - ttm_bo_evict_mm(&drm->ttm.bdev, TTM_PL_VRAM); + + man = ttm_manager_type(&drm->ttm.bdev, TTM_PL_VRAM); + ttm_resource_manager_evict_all(&drm->ttm.bdev, man); NV_DEBUG(drm, "waiting for kernel channels to go idle...\n"); if (drm->cechan) { diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index edf3bb89a47f..04b95277c73a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -222,7 +222,7 @@ nouveau_ttm_fini_vram(struct nouveau_drm *drm) if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA) { ttm_resource_manager_set_used(man, false); - ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + ttm_resource_manager_evict_all(&drm->ttm.bdev, man); ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_VRAM, NULL); kfree(man); @@ -267,7 +267,7 @@ nouveau_ttm_fini_gtt(struct nouveau_drm *drm) ttm_range_man_fini(&drm->ttm.bdev, TTM_PL_TT); else { ttm_resource_manager_set_used(man, false); - ttm_resource_manager_force_list_clean(&drm->ttm.bdev, man); + ttm_resource_manager_evict_all(&drm->ttm.bdev, man); ttm_resource_manager_cleanup(man); ttm_set_driver_manager(&drm->ttm.bdev, TTM_PL_TT, NULL); kfree(man); diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index c8b67e7a3f02..940e99354f49 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -351,10 +351,16 @@ int qxl_bo_check_id(struct qxl_device *qdev, struct qxl_bo *bo) int qxl_surf_evict(struct qxl_device *qdev) { - return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_PRIV); + struct ttm_resource_manager *man; + + man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_PRIV); + return ttm_resource_manager_evict_all(&qdev->mman.bdev, man); } int qxl_vram_evict(struct qxl_device *qdev) { - return ttm_bo_evict_mm(&qdev->mman.bdev, TTM_PL_VRAM); + struct ttm_resource_manager *man; + + man = ttm_manager_type(&qdev->mman.bdev, TTM_PL_VRAM); + return ttm_resource_manager_evict_all(&qdev->mman.bdev, man); } diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 8c285eb118f9..ad0e6e9ef922 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -402,6 +402,9 @@ void radeon_bo_unpin(struct radeon_bo *bo) int radeon_bo_evict_vram(struct radeon_device *rdev) { + struct ttm_bo_device *bdev = &rdev->mman.bdev; + struct ttm_resource_manager *man; + /* late 2.6.33 fix IGP hibernate - we need pm ops to do this correct */ #ifndef CONFIG_HIBERNATION if (rdev->flags & RADEON_IS_IGP) { @@ -410,7 +413,8 @@ int 
radeon_bo_evict_vram(struct radeon_device *rdev) return 0; } #endif - return ttm_bo_evict_mm(&rdev->mman.bdev, TTM_PL_VRAM); + man = ttm_manager_type(bdev, TTM_PL_VRAM); + return ttm_resource_manager_evict_all(bdev, man); } void radeon_bo_force_delete(struct radeon_device *rdev) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 88d215de9ae1..6eb151dd840d 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1249,24 +1249,6 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, } EXPORT_SYMBOL(ttm_bo_dma_acc_size); -int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type) -{ - struct ttm_resource_manager *man = ttm_manager_type(bdev, mem_type); - - if (mem_type == 0 || mem_type >= TTM_NUM_MEM_TYPES) { - pr_err("Illegal memory manager memory type %u\n", mem_type); - return -EINVAL; - } - - if (!man) { - pr_err("Memory type %u has not been initialized\n", mem_type); - return 0; - } - - return ttm_resource_manager_force_list_clean(bdev, man); -} -EXPORT_SYMBOL(ttm_bo_evict_mm); - static void ttm_bo_global_kobj_release(struct kobject *kobj) { struct ttm_bo_global *glob = diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index 1da0e277c511..ea77919569a2 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -149,7 +149,7 @@ int ttm_range_man_fini(struct ttm_bo_device *bdev, ttm_resource_manager_set_used(man, false); - ret = ttm_resource_manager_force_list_clean(bdev, man); + ret = ttm_resource_manager_evict_all(bdev, man); if (ret) return ret; diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index b325b9264203..4ebc043e2867 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -75,16 +75,16 @@ void ttm_resource_manager_init(struct ttm_resource_manager *man, EXPORT_SYMBOL(ttm_resource_manager_init); /* - * ttm_resource_manager_force_list_clean + * ttm_resource_manager_evict_all * * @bdev - device to use * @man - manager to use * - * Force all the objects out of a memory manager until clean. + * Evict all the objects out of a memory manager until it is empty. * Part of memory manager cleanup sequence. 
*/ -int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_resource_manager *man) +int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man) { struct ttm_operation_ctx ctx = { .interruptible = false, @@ -126,7 +126,7 @@ int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, return 0; } -EXPORT_SYMBOL(ttm_resource_manager_force_list_clean); +EXPORT_SYMBOL(ttm_resource_manager_evict_all); /** * ttm_resource_manager_debug diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c index bdb7a5e96560..10a054d25485 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c @@ -468,7 +468,10 @@ out_no_query_bo: if (dev_priv->cman) vmw_cmdbuf_remove_pool(dev_priv->cman); if (dev_priv->has_mob) { - (void) ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); + struct ttm_resource_manager *man; + + man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); + ttm_resource_manager_evict_all(&dev_priv->bdev, man); vmw_otables_takedown(dev_priv); } if (dev_priv->cman) @@ -501,7 +504,10 @@ static void vmw_release_device_early(struct vmw_private *dev_priv) vmw_cmdbuf_remove_pool(dev_priv->cman); if (dev_priv->has_mob) { - ttm_bo_evict_mm(&dev_priv->bdev, VMW_PL_MOB); + struct ttm_resource_manager *man; + + man = ttm_manager_type(&dev_priv->bdev, VMW_PL_MOB); + ttm_resource_manager_evict_all(&dev_priv->bdev, man); vmw_otables_takedown(dev_priv); } } @@ -1257,7 +1263,7 @@ void vmw_svga_disable(struct vmw_private *dev_priv) if (ttm_resource_manager_used(man)) { ttm_resource_manager_set_used(man, false); spin_unlock(&dev_priv->svga_lock); - if (ttm_bo_evict_mm(&dev_priv->bdev, TTM_PL_VRAM)) + if (ttm_resource_manager_evict_all(&dev_priv->bdev, man)) DRM_ERROR("Failed evicting VRAM buffers.\n"); vmw_write(dev_priv, SVGA_REG_ENABLE, SVGA_REG_ENABLE_HIDE | diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index db64c3a90285..9e8a3e337b96 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -143,7 +143,7 @@ void vmw_gmrid_man_fini(struct vmw_private *dev_priv, int type) ttm_resource_manager_set_used(man, false); - ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); + ttm_resource_manager_evict_all(&dev_priv->bdev, man); ttm_resource_manager_cleanup(man); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index 63fe7da4cbf4..328d332a55e2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -149,7 +149,7 @@ void vmw_thp_fini(struct vmw_private *dev_priv) ttm_resource_manager_set_used(man, false); - ret = ttm_resource_manager_force_list_clean(&dev_priv->bdev, man); + ret = ttm_resource_manager_evict_all(&dev_priv->bdev, man); if (ret) return; spin_lock(&rman->lock); diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h index c96a25d571c8..6f5ced7d83fe 100644 --- a/include/drm/ttm/ttm_bo_api.h +++ b/include/drm/ttm/ttm_bo_api.h @@ -448,26 +448,6 @@ int ttm_bo_init(struct ttm_bo_device *bdev, struct ttm_buffer_object *bo, struct sg_table *sg, struct dma_resv *resv, void (*destroy) (struct ttm_buffer_object *)); -/** - * ttm_bo_evict_mm - * - * @bdev: Pointer to a ttm_bo_device struct. - * @mem_type: The memory type. - * - * Evicts all buffers on the lru list of the memory type. 
- * This is normally part of a VT switch or an - * out-of-memory-space-due-to-fragmentation handler. - * The caller must make sure that there are no other processes - * currently validating buffers, and can do that by taking the - * struct ttm_bo_device::ttm_lock in write mode. - * - * Returns: - * -EINVAL: Invalid or uninitialized memory type. - * -ERESTARTSYS: The call was interrupted by a signal while waiting to - * evict a buffer. - */ -int ttm_bo_evict_mm(struct ttm_bo_device *bdev, unsigned mem_type); - /** * ttm_kmap_obj_virtual * diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 0e172d94a0c1..1b2f56163c6c 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -228,8 +228,8 @@ void ttm_resource_free(struct ttm_buffer_object *bo, struct ttm_resource *res); void ttm_resource_manager_init(struct ttm_resource_manager *man, unsigned long p_size); -int ttm_resource_manager_force_list_clean(struct ttm_bo_device *bdev, - struct ttm_resource_manager *man); +int ttm_resource_manager_evict_all(struct ttm_bo_device *bdev, + struct ttm_resource_manager *man); void ttm_resource_manager_debug(struct ttm_resource_manager *man, struct drm_printer *p); -- cgit From 1b4ea4c5980ff3a64607166298269c30a9671d33 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 30 Sep 2020 10:38:48 +0200 Subject: drm/ttm: set the tt caching state at creation time MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit All drivers can determine the tt caching state at creation time, no need to do this on the fly during every validation. Signed-off-by: Christian König Reviewed-by: Michael J. Ruhl Link: https://patchwork.freedesktop.org/patch/394253/ --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 11 ++++++++-- drivers/gpu/drm/drm_gem_vram_helper.c | 2 +- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 13 +++++++++++- drivers/gpu/drm/qxl/qxl_ttm.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 16 ++++++++++++-- drivers/gpu/drm/ttm/ttm_agp_backend.c | 2 +- drivers/gpu/drm/ttm/ttm_page_alloc.c | 26 +++++++++++------------ drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 20 +++++++++--------- drivers/gpu/drm/ttm/ttm_tt.c | 33 +++++++++++++++-------------- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 6 ++++-- include/drm/ttm/ttm_caching.h | 34 ++++++++++++++++++++++++++++++ include/drm/ttm/ttm_tt.h | 16 ++++++-------- 13 files changed, 123 insertions(+), 60 deletions(-) create mode 100644 include/drm/ttm/ttm_caching.h (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 213ef090bb0e..3c5ad69eff19 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -124,7 +124,7 @@ uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo) struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_dma_tt *ttm; - if (bo->num_pages != 1 || bo->ttm->caching_state == tt_cached) + if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached) return AMDGPU_BO_INVALID_OFFSET; ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 399961035ae6..7f41a47e7353 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1292,7 +1292,9 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev, static struct ttm_tt 
*amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_ttm_tt *gtt; + enum ttm_caching caching; gtt = kzalloc(sizeof(struct amdgpu_ttm_tt), GFP_KERNEL); if (gtt == NULL) { @@ -1300,8 +1302,13 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, } gtt->gobj = &bo->base; + if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) + caching = ttm_write_combined; + else + caching = ttm_cached; + /* allocate space for the uninitialized page entries */ - if (ttm_sg_tt_init(>t->ttm, bo, page_flags)) { + if (ttm_sg_tt_init(>t->ttm, bo, page_flags, caching)) { kfree(gtt); return NULL; } @@ -1525,7 +1532,7 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) if (mem && mem->mem_type == TTM_PL_TT) { flags |= AMDGPU_PTE_SYSTEM; - if (ttm->caching_state == tt_cached) + if (ttm->caching == ttm_cached) flags |= AMDGPU_PTE_SNOOPED; } diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 3213429f8444..ad58d0af5141 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -918,7 +918,7 @@ static struct ttm_tt *bo_driver_ttm_tt_create(struct ttm_buffer_object *bo, if (!tt) return NULL; - ret = ttm_tt_init(tt, bo, page_flags); + ret = ttm_tt_init(tt, bo, page_flags, ttm_cached); if (ret < 0) goto err_ttm_tt_init; diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index 806d9ec310f5..cd6fdebae795 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -5,6 +5,7 @@ #include "nouveau_drv.h" #include "nouveau_mem.h" #include "nouveau_ttm.h" +#include "nouveau_bo.h" struct nouveau_sgdma_be { /* this has to be the first field so populate/unpopulated in @@ -67,13 +68,23 @@ nouveau_sgdma_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) struct ttm_tt * nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) { + struct nouveau_drm *drm = nouveau_bdev(bo->bdev); + struct nouveau_bo *nvbo = nouveau_bo(bo); struct nouveau_sgdma_be *nvbe; + enum ttm_caching caching; + + if (nvbo->force_coherent) + caching = ttm_uncached; + else if (drm->agp.bridge) + caching = ttm_write_combined; + else + caching = ttm_cached; nvbe = kzalloc(sizeof(*nvbe), GFP_KERNEL); if (!nvbe) return NULL; - if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags)) { + if (ttm_dma_tt_init(&nvbe->ttm, bo, page_flags, caching)) { kfree(nvbe); return NULL; } diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 669bceb58205..f50863493f64 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -133,7 +133,7 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL); if (ttm == NULL) return NULL; - if (ttm_tt_init(ttm, bo, page_flags)) { + if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) { kfree(ttm); return NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 63e38b05a5bc..130a7cea35c3 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -546,7 +546,7 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev, WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } - if (ttm->caching_state == tt_cached) + if (ttm->caching == ttm_cached) flags |= RADEON_GART_PAGE_SNOOP; r = radeon_gart_bind(rdev, 
gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address, flags); @@ -590,6 +590,10 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, { struct radeon_device *rdev; struct radeon_ttm_tt *gtt; + enum ttm_caching caching; + struct radeon_bo *rbo; + + rbo = container_of(bo, struct radeon_bo, tbo); rdev = radeon_get_rdev(bo->bdev); #if IS_ENABLED(CONFIG_AGP) @@ -603,7 +607,15 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, if (gtt == NULL) { return NULL; } - if (ttm_dma_tt_init(>t->ttm, bo, page_flags)) { + + if (rbo->flags & RADEON_GEM_GTT_UC) + caching = ttm_uncached; + else if (rbo->flags & RADEON_GEM_GTT_WC) + caching = ttm_write_combined; + else + caching = ttm_cached; + + if (ttm_dma_tt_init(>t->ttm, bo, page_flags, caching)) { kfree(gtt); return NULL; } diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index a98fd795b752..a723062d37e7 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -136,7 +136,7 @@ struct ttm_tt *ttm_agp_tt_create(struct ttm_buffer_object *bo, agp_be->mem = NULL; agp_be->bridge = bridge; - if (ttm_tt_init(&agp_be->ttm, bo, page_flags)) { + if (ttm_tt_init(&agp_be->ttm, bo, page_flags, ttm_write_combined)) { kfree(agp_be); return NULL; } diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 111031cbb6df..c8f6790962b9 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -220,14 +220,14 @@ static struct ttm_pool_manager *_manager; /** * Select the right pool or requested caching state and ttm flags. */ static struct ttm_page_pool *ttm_get_pool(int flags, bool huge, - enum ttm_caching_state cstate) + enum ttm_caching cstate) { int pool_index; - if (cstate == tt_cached) + if (cstate == ttm_cached) return NULL; - if (cstate == tt_wc) + if (cstate == ttm_write_combined) pool_index = 0x0; else pool_index = 0x1; @@ -441,17 +441,17 @@ static void ttm_pool_mm_shrink_fini(struct ttm_pool_manager *manager) } static int ttm_set_pages_caching(struct page **pages, - enum ttm_caching_state cstate, unsigned cpages) + enum ttm_caching cstate, unsigned cpages) { int r = 0; /* Set page caching */ switch (cstate) { - case tt_uncached: + case ttm_uncached: r = ttm_set_pages_array_uc(pages, cpages); if (r) pr_err("Failed to set %d pages to uc!\n", cpages); break; - case tt_wc: + case ttm_write_combined: r = ttm_set_pages_array_wc(pages, cpages); if (r) pr_err("Failed to set %d pages to wc!\n", cpages); @@ -486,7 +486,7 @@ static void ttm_handle_caching_failure(struct page **failed_pages, * pages returned in pages array. */ static int ttm_alloc_new_pages(struct list_head *pages, gfp_t gfp_flags, - int ttm_flags, enum ttm_caching_state cstate, + int ttm_flags, enum ttm_caching cstate, unsigned count, unsigned order) { struct page **caching_array; @@ -566,7 +566,7 @@ out: * pages is small. 
*/ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, - enum ttm_caching_state cstate, + enum ttm_caching cstate, unsigned count, unsigned long *irq_flags) { struct page *p; @@ -626,7 +626,7 @@ static void ttm_page_pool_fill_locked(struct ttm_page_pool *pool, int ttm_flags, static int ttm_page_pool_get_pages(struct ttm_page_pool *pool, struct list_head *pages, int ttm_flags, - enum ttm_caching_state cstate, + enum ttm_caching cstate, unsigned count, unsigned order) { unsigned long irq_flags; @@ -703,7 +703,7 @@ out: /* Put all pages in pages list to correct pool to wait for reuse */ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, - enum ttm_caching_state cstate) + enum ttm_caching cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -821,7 +821,7 @@ static void ttm_put_pages(struct page **pages, unsigned npages, int flags, * cached pages. */ static int ttm_get_pages(struct page **pages, unsigned npages, int flags, - enum ttm_caching_state cstate) + enum ttm_caching cstate) { struct ttm_page_pool *pool = ttm_get_pool(flags, false, cstate); #ifdef CONFIG_TRANSPARENT_HUGEPAGE @@ -1040,7 +1040,7 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) put_pages: ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, - ttm->caching_state); + ttm->caching); ttm_tt_set_unpopulated(ttm); } @@ -1057,7 +1057,7 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) return -ENOMEM; ret = ttm_get_pages(ttm->pages, ttm->num_pages, ttm->page_flags, - ttm->caching_state); + ttm->caching); if (unlikely(ret != 0)) { ttm_pool_unpopulate_helper(ttm, 0); return ret; diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 1045a5c26ee3..6625b43f6256 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -325,15 +325,15 @@ static struct dma_page *__ttm_dma_alloc_page(struct dma_pool *pool) } return d_page; } -static enum pool_type ttm_to_type(int flags, enum ttm_caching_state cstate) +static enum pool_type ttm_to_type(int flags, enum ttm_caching cstate) { enum pool_type type = IS_UNDEFINED; if (flags & TTM_PAGE_FLAG_DMA32) type |= IS_DMA32; - if (cstate == tt_cached) + if (cstate == ttm_cached) type |= IS_CACHED; - else if (cstate == tt_uncached) + else if (cstate == ttm_uncached) type |= IS_UC; else type |= IS_WC; @@ -663,7 +663,7 @@ static struct dma_pool *ttm_dma_find_pool(struct device *dev, * are pages that have changed their caching state already put them to the * pool. 
*/ -static void ttm_dma_handle_caching_state_failure(struct dma_pool *pool, +static void ttm_dma_handle_caching_failure(struct dma_pool *pool, struct list_head *d_pages, struct page **failed_pages, unsigned cpages) @@ -734,7 +734,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, r = ttm_set_pages_caching(pool, caching_array, cpages); if (r) - ttm_dma_handle_caching_state_failure( + ttm_dma_handle_caching_failure( pool, d_pages, caching_array, cpages); } @@ -760,7 +760,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, r = ttm_set_pages_caching(pool, caching_array, cpages); if (r) { - ttm_dma_handle_caching_state_failure( + ttm_dma_handle_caching_failure( pool, d_pages, caching_array, cpages); goto out; @@ -773,7 +773,7 @@ static int ttm_dma_pool_alloc_new_pages(struct dma_pool *pool, if (cpages) { r = ttm_set_pages_caching(pool, caching_array, cpages); if (r) - ttm_dma_handle_caching_state_failure(pool, d_pages, + ttm_dma_handle_caching_failure(pool, d_pages, caching_array, cpages); } out: @@ -904,7 +904,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, INIT_LIST_HEAD(&ttm_dma->pages_list); i = 0; - type = ttm_to_type(ttm->page_flags, ttm->caching_state); + type = ttm_to_type(ttm->page_flags, ttm->caching); #ifdef CONFIG_TRANSPARENT_HUGEPAGE if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) @@ -1000,7 +1000,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) unsigned count, i, npages = 0; unsigned long irq_flags; - type = ttm_to_type(ttm->page_flags, ttm->caching_state); + type = ttm_to_type(ttm->page_flags, ttm->caching); #ifdef CONFIG_TRANSPARENT_HUGEPAGE pool = ttm_dma_find_pool(dev, type | IS_HUGE); @@ -1032,7 +1032,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) return; is_cached = (ttm_dma_find_pool(pool->dev, - ttm_to_type(ttm->page_flags, tt_cached)) == pool); + ttm_to_type(ttm->page_flags, ttm_cached)) == pool); /* make sure pages array match list and count number of pages */ count = 0; diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 23e9604bc924..a465f51df027 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -114,31 +114,30 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm) return 0; } -static int ttm_tt_set_caching(struct ttm_tt *ttm, - enum ttm_caching_state c_state) +static int ttm_tt_set_caching(struct ttm_tt *ttm, enum ttm_caching caching) { - if (ttm->caching_state == c_state) + if (ttm->caching == caching) return 0; /* Can't change the caching state after TT is populated */ if (WARN_ON_ONCE(ttm_tt_is_populated(ttm))) return -EINVAL; - ttm->caching_state = c_state; + ttm->caching = caching; return 0; } int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) { - enum ttm_caching_state state; + enum ttm_caching state; if (placement & TTM_PL_FLAG_WC) - state = tt_wc; + state = ttm_write_combined; else if (placement & TTM_PL_FLAG_UNCACHED) - state = tt_uncached; + state = ttm_uncached; else - state = tt_cached; + state = ttm_cached; return ttm_tt_set_caching(ttm, state); } @@ -162,20 +161,22 @@ void ttm_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) static void ttm_tt_init_fields(struct ttm_tt *ttm, struct ttm_buffer_object *bo, - uint32_t page_flags) + uint32_t page_flags, + enum ttm_caching caching) { ttm->num_pages = bo->num_pages; - ttm->caching_state = tt_cached; + ttm->caching = ttm_cached; ttm->page_flags = page_flags; ttm_tt_set_unpopulated(ttm); 
ttm->swap_storage = NULL; ttm->sg = bo->sg; + ttm->caching = caching; } int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, - uint32_t page_flags) + uint32_t page_flags, enum ttm_caching caching) { - ttm_tt_init_fields(ttm, bo, page_flags); + ttm_tt_init_fields(ttm, bo, page_flags, caching); if (ttm_tt_alloc_page_directory(ttm)) { pr_err("Failed allocating page table\n"); @@ -193,11 +194,11 @@ void ttm_tt_fini(struct ttm_tt *ttm) EXPORT_SYMBOL(ttm_tt_fini); int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, - uint32_t page_flags) + uint32_t page_flags, enum ttm_caching caching) { struct ttm_tt *ttm = &ttm_dma->ttm; - ttm_tt_init_fields(ttm, bo, page_flags); + ttm_tt_init_fields(ttm, bo, page_flags, caching); INIT_LIST_HEAD(&ttm_dma->pages_list); if (ttm_dma_tt_alloc_page_directory(ttm_dma)) { @@ -209,12 +210,12 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, EXPORT_SYMBOL(ttm_dma_tt_init); int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, - uint32_t page_flags) + uint32_t page_flags, enum ttm_caching caching) { struct ttm_tt *ttm = &ttm_dma->ttm; int ret; - ttm_tt_init_fields(ttm, bo, page_flags); + ttm_tt_init_fields(ttm, bo, page_flags, caching); INIT_LIST_HEAD(&ttm_dma->pages_list); if (page_flags & TTM_PAGE_FLAG_SG) diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 7b5fd5288870..1fa7f9438ec4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -647,9 +647,11 @@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, vmw_be->mob = NULL; if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) - ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags); + ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags, + ttm_cached); else - ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags); + ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags, + ttm_cached); if (unlikely(ret != 0)) goto out_no_init; diff --git a/include/drm/ttm/ttm_caching.h b/include/drm/ttm/ttm_caching.h new file mode 100644 index 000000000000..161624dcf6be --- /dev/null +++ b/include/drm/ttm/ttm_caching.h @@ -0,0 +1,34 @@ +/* + * Copyright 2020 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ * + * Authors: Christian König + */ + +#ifndef _TTM_CACHING_H_ +#define _TTM_CACHING_H_ + +enum ttm_caching { + ttm_uncached, + ttm_write_combined, + ttm_cached +}; + +#endif diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 5d1835d44084..c39c722d5184 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -28,6 +28,7 @@ #define _TTM_TT_H_ #include +#include struct ttm_tt; struct ttm_resource; @@ -42,12 +43,6 @@ struct ttm_operation_ctx; #define TTM_PAGE_FLAG_PRIV_POPULATED (1 << 31) -enum ttm_caching_state { - tt_uncached, - tt_wc, - tt_cached -}; - /** * struct ttm_tt * @@ -69,7 +64,7 @@ struct ttm_tt { unsigned long num_pages; struct sg_table *sg; /* for SG objects via dma-buf */ struct file *swap_storage; - enum ttm_caching_state caching_state; + enum ttm_caching caching; }; static inline bool ttm_tt_is_populated(struct ttm_tt *tt) @@ -121,6 +116,7 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc); * @ttm: The struct ttm_tt. * @bo: The buffer object we create the ttm for. * @page_flags: Page flags as identified by TTM_PAGE_FLAG_XX flags. + * @caching: the desired caching state of the pages * * Create a struct ttm_tt to back data with system memory pages. * No pages are actually allocated. @@ -128,11 +124,11 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc); * NULL: Out of memory. */ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, - uint32_t page_flags); + uint32_t page_flags, enum ttm_caching caching); int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, - uint32_t page_flags); + uint32_t page_flags, enum ttm_caching caching); int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, - uint32_t page_flags); + uint32_t page_flags, enum ttm_caching caching); /** * ttm_tt_fini -- cgit From 1cf65c45183a6c8b4703675d40e709f7ffed935c Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 30 Sep 2020 11:17:44 +0200 Subject: drm/ttm: add caching state to ttm_bus_placement MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit And implement setting it up correctly in the drivers. This allows getting rid of the placement flags for this. Signed-off-by: Christian König Reviewed-by: Michael J. 
Ruhl Link: https://patchwork.freedesktop.org/patch/394254/ --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 + drivers/gpu/drm/drm_gem_vram_helper.c | 1 + drivers/gpu/drm/nouveau/nouveau_bo.c | 11 +++++++++++ drivers/gpu/drm/qxl/qxl_ttm.c | 2 ++ drivers/gpu/drm/radeon/radeon_ttm.c | 2 ++ drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 1 + include/drm/ttm/ttm_resource.h | 8 +++++--- 7 files changed, 23 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7f41a47e7353..5b56a66063fd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -769,6 +769,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso mem->bus.offset += adev->gmc.aper_base; mem->bus.is_iomem = true; + mem->bus.caching = ttm_write_combined; break; default: return -EINVAL; diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index ad58d0af5141..b9e7ce1adf25 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -981,6 +981,7 @@ static int bo_driver_io_mem_reserve(struct ttm_bo_device *bdev, case TTM_PL_VRAM: mem->bus.offset = (mem->start << PAGE_SHIFT) + vmm->vram_base; mem->bus.is_iomem = true; + mem->bus.caching = ttm_write_combined; break; default: return -EINVAL; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 0c0ca44a6802..cb878c0e8276 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1134,6 +1134,8 @@ nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *reg) struct nouveau_drm *drm = nouveau_bdev(bdev); struct nvkm_device *device = nvxx_device(&drm->client.device); struct nouveau_mem *mem = nouveau_mem(reg); + struct nvif_mmu *mmu = &drm->client.mmu; + const u8 type = mmu->type[drm->ttm.type_vram].type; int ret; mutex_lock(&drm->ttm.io_reserve_mutex); @@ -1149,6 +1151,7 @@ retry: reg->bus.offset = (reg->start << PAGE_SHIFT) + drm->agp.base; reg->bus.is_iomem = !drm->agp.cma; + reg->bus.caching = ttm_write_combined; } #endif if (drm->client.mem->oclass < NVIF_CLASS_MEM_NV50 || @@ -1162,6 +1165,14 @@ retry: reg->bus.offset = (reg->start << PAGE_SHIFT) + device->func->resource_addr(device, 1); reg->bus.is_iomem = true; + + /* Some BARs do not support being ioremapped WC */ + if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && + type & NVIF_MEM_UNCACHED) + reg->bus.caching = ttm_uncached; + else + reg->bus.caching = ttm_write_combined; + if (drm->client.mem->oclass >= NVIF_CLASS_MEM_NV50) { union { struct nv50_mem_map_v0 nv50; diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index f50863493f64..61eb06dbbce8 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -83,11 +83,13 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, case TTM_PL_VRAM: mem->bus.is_iomem = true; mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->vram_base; + mem->bus.caching = ttm_cached; break; case TTM_PL_PRIV: mem->bus.is_iomem = true; mem->bus.offset = (mem->start << PAGE_SHIFT) + qdev->surfaceram_base; + mem->bus.caching = ttm_cached; break; default: return -EINVAL; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 130a7cea35c3..9b53a1d80632 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -372,6 +372,7 @@ 
static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso mem->bus.offset = (mem->start << PAGE_SHIFT) + rdev->mc.agp_base; mem->bus.is_iomem = !rdev->ddev->agp->cant_use_aperture; + mem->bus.caching = ttm_write_combined; } #endif break; @@ -382,6 +383,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso return -EINVAL; mem->bus.offset += rdev->mc.aper_base; mem->bus.is_iomem = true; + mem->bus.caching = ttm_write_combined; #ifdef __alpha__ /* * Alpha: use bus.addr to hold the ioremap() return, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 1fa7f9438ec4..fae88969a15a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -688,6 +688,7 @@ static int vmw_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resourc mem->bus.offset = (mem->start << PAGE_SHIFT) + dev_priv->vram_start; mem->bus.is_iomem = true; + mem->bus.caching = ttm_cached; break; default: return -EINVAL; diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 1b2f56163c6c..f48a70d39ac5 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -29,6 +29,7 @@ #include #include #include +#include #define TTM_MAX_BO_PRIORITY 4U @@ -148,9 +149,10 @@ struct ttm_resource_manager { * Structure indicating the bus placement of an object. */ struct ttm_bus_placement { - void *addr; - phys_addr_t offset; - bool is_iomem; + void *addr; + phys_addr_t offset; + bool is_iomem; + enum ttm_caching caching; }; /** -- cgit From ce65b874001d756294e0b7cf06c51137af964f38 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 30 Sep 2020 16:44:16 +0200 Subject: drm/ttm: nuke caching placement flags MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Changing the caching on the fly never really worked flawlessly. So stop this completely and just let drivers specify the desired caching in the tt or bus object. Signed-off-by: Christian König Reviewed-by: Michael J.
Ruhl Link: https://patchwork.freedesktop.org/patch/394256/ --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 20 ++++--------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 ++------ drivers/gpu/drm/drm_gem_vram_helper.c | 7 ++--- drivers/gpu/drm/nouveau/nouveau_bo.c | 36 ++++++----------------- drivers/gpu/drm/qxl/qxl_object.c | 10 +++---- drivers/gpu/drm/qxl/qxl_ttm.c | 2 +- drivers/gpu/drm/radeon/radeon_object.c | 46 ++++++------------------------ drivers/gpu/drm/radeon/radeon_ttm.c | 18 +++--------- drivers/gpu/drm/ttm/ttm_agp_backend.c | 2 +- drivers/gpu/drm/ttm/ttm_bo.c | 44 +++------------------------- drivers/gpu/drm/ttm/ttm_bo_util.c | 10 ++----- drivers/gpu/drm/ttm/ttm_tt.c | 29 ------------------- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 30 +++++++++---------- include/drm/ttm/ttm_placement.h | 14 --------- include/drm/ttm/ttm_tt.h | 15 ---------- 15 files changed, 61 insertions(+), 234 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 8b96e7aaeff1..1aa516429c80 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -137,7 +137,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) places[c].fpfn = 0; places[c].lpfn = 0; places[c].mem_type = TTM_PL_VRAM; - places[c].flags = TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; + places[c].flags = 0; if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) places[c].lpfn = visible_pfn; @@ -154,11 +154,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) places[c].lpfn = 0; places[c].mem_type = TTM_PL_TT; places[c].flags = 0; - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) - places[c].flags |= TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED; - else - places[c].flags |= TTM_PL_FLAG_CACHED; c++; } @@ -167,11 +162,6 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) places[c].lpfn = 0; places[c].mem_type = TTM_PL_SYSTEM; places[c].flags = 0; - if (flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) - places[c].flags |= TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED; - else - places[c].flags |= TTM_PL_FLAG_CACHED; c++; } @@ -179,7 +169,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) places[c].fpfn = 0; places[c].lpfn = 0; places[c].mem_type = AMDGPU_PL_GDS; - places[c].flags = TTM_PL_FLAG_UNCACHED; + places[c].flags = 0; c++; } @@ -187,7 +177,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) places[c].fpfn = 0; places[c].lpfn = 0; places[c].mem_type = AMDGPU_PL_GWS; - places[c].flags = TTM_PL_FLAG_UNCACHED; + places[c].flags = 0; c++; } @@ -195,7 +185,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) places[c].fpfn = 0; places[c].lpfn = 0; places[c].mem_type = AMDGPU_PL_OA; - places[c].flags = TTM_PL_FLAG_UNCACHED; + places[c].flags = 0; c++; } @@ -203,7 +193,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) places[c].fpfn = 0; places[c].lpfn = 0; places[c].mem_type = TTM_PL_SYSTEM; - places[c].flags = TTM_PL_MASK_CACHING; + places[c].flags = 0; c++; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5b56a66063fd..8cdec58b9106 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -92,7 +92,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_SYSTEM, - .flags = TTM_PL_MASK_CACHING + .flags = 0 }; /* 
Don't handle scatter gather BOs */ @@ -538,19 +538,13 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, placements.fpfn = 0; placements.lpfn = 0; placements.mem_type = TTM_PL_TT; - placements.flags = TTM_PL_MASK_CACHING; + placements.flags = 0; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { pr_err("Failed to find GTT space for blit from VRAM\n"); return r; } - /* set caching flags */ - r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); - if (unlikely(r)) { - goto out_cleanup; - } - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); if (unlikely(r)) goto out_cleanup; @@ -599,7 +593,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, placements.fpfn = 0; placements.lpfn = 0; placements.mem_type = TTM_PL_TT; - placements.flags = TTM_PL_MASK_CACHING; + placements.flags = 0; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { pr_err("Failed to find GTT space for blit to VRAM\n"); diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index b9e7ce1adf25..7aeb5daf2805 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -147,15 +147,12 @@ static void drm_gem_vram_placement(struct drm_gem_vram_object *gbo, if (pl_flag & DRM_GEM_VRAM_PL_FLAG_VRAM) { gbo->placements[c].mem_type = TTM_PL_VRAM; - gbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED | - invariant_flags; + gbo->placements[c++].flags = invariant_flags; } if (pl_flag & DRM_GEM_VRAM_PL_FLAG_SYSTEM || !c) { gbo->placements[c].mem_type = TTM_PL_SYSTEM; - gbo->placements[c++].flags = TTM_PL_MASK_CACHING | - invariant_flags; + gbo->placements[c++].flags = invariant_flags; } gbo->placement.num_placement = c; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index cb878c0e8276..4ccb3329014b 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -343,37 +343,23 @@ nouveau_bo_new(struct nouveau_cli *cli, u64 size, int align, } static void -set_placement_list(struct nouveau_drm *drm, struct ttm_place *pl, unsigned *n, - uint32_t domain, uint32_t flags) +set_placement_list(struct ttm_place *pl, unsigned *n, uint32_t domain) { *n = 0; if (domain & NOUVEAU_GEM_DOMAIN_VRAM) { - struct nvif_mmu *mmu = &drm->client.mmu; - const u8 type = mmu->type[drm->ttm.type_vram].type; - pl[*n].mem_type = TTM_PL_VRAM; - pl[*n].flags = flags & ~TTM_PL_FLAG_CACHED; - - /* Some BARs do not support being ioremapped WC */ - if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA && - type & NVIF_MEM_UNCACHED) - pl[*n].flags &= ~TTM_PL_FLAG_WC; - + pl[*n].flags = 0; (*n)++; } if (domain & NOUVEAU_GEM_DOMAIN_GART) { pl[*n].mem_type = TTM_PL_TT; - pl[*n].flags = flags; - - if (drm->agp.bridge) - pl[*n].flags &= ~TTM_PL_FLAG_CACHED; - + pl[*n].flags = 0; (*n)++; } if (domain & NOUVEAU_GEM_DOMAIN_CPU) { pl[*n].mem_type = TTM_PL_SYSTEM; - pl[(*n)++].flags = flags; + pl[(*n)++].flags = 0; } } @@ -415,18 +401,14 @@ void nouveau_bo_placement_set(struct nouveau_bo *nvbo, uint32_t domain, uint32_t busy) { - struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); struct ttm_placement *pl = &nvbo->placement; - uint32_t flags = nvbo->force_coherent ? 
TTM_PL_FLAG_UNCACHED : - TTM_PL_MASK_CACHING; pl->placement = nvbo->placements; - set_placement_list(drm, nvbo->placements, &pl->num_placement, - domain, flags); + set_placement_list(nvbo->placements, &pl->num_placement, domain); pl->busy_placement = nvbo->busy_placements; - set_placement_list(drm, nvbo->busy_placements, &pl->num_busy_placement, - domain | busy, flags); + set_placement_list(nvbo->busy_placements, &pl->num_busy_placement, + domain | busy); set_placement_range(nvbo, domain); } @@ -888,7 +870,7 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_TT, - .flags = TTM_PL_MASK_CACHING + .flags = 0 }; struct ttm_placement placement; struct ttm_resource tmp_reg; @@ -930,7 +912,7 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_TT, - .flags = TTM_PL_MASK_CACHING + .flags = 0 }; struct ttm_placement placement; struct ttm_resource tmp_reg; diff --git a/drivers/gpu/drm/qxl/qxl_object.c b/drivers/gpu/drm/qxl/qxl_object.c index 940e99354f49..547d46c14d56 100644 --- a/drivers/gpu/drm/qxl/qxl_object.c +++ b/drivers/gpu/drm/qxl/qxl_object.c @@ -64,21 +64,21 @@ void qxl_ttm_placement_from_domain(struct qxl_bo *qbo, u32 domain) qbo->placement.busy_placement = qbo->placements; if (domain == QXL_GEM_DOMAIN_VRAM) { qbo->placements[c].mem_type = TTM_PL_VRAM; - qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag; + qbo->placements[c++].flags = pflag; } if (domain == QXL_GEM_DOMAIN_SURFACE) { qbo->placements[c].mem_type = TTM_PL_PRIV; - qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag; + qbo->placements[c++].flags = pflag; qbo->placements[c].mem_type = TTM_PL_VRAM; - qbo->placements[c++].flags = TTM_PL_FLAG_CACHED | pflag; + qbo->placements[c++].flags = pflag; } if (domain == QXL_GEM_DOMAIN_CPU) { qbo->placements[c].mem_type = TTM_PL_SYSTEM; - qbo->placements[c++].flags = TTM_PL_MASK_CACHING | pflag; + qbo->placements[c++].flags = pflag; } if (!c) { qbo->placements[c].mem_type = TTM_PL_SYSTEM; - qbo->placements[c++].flags = TTM_PL_MASK_CACHING; + qbo->placements[c++].flags = 0; } qbo->placement.num_placement = c; qbo->placement.num_busy_placement = c; diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 61eb06dbbce8..e3ed20215f18 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -56,7 +56,7 @@ static void qxl_evict_flags(struct ttm_buffer_object *bo, .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_SYSTEM, - .flags = TTM_PL_MASK_CACHING + .flags = 0 }; if (!qxl_ttm_bo_is_qxl_bo(bo)) { diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index ad0e6e9ef922..ab81e35cb060 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -113,57 +113,29 @@ void radeon_ttm_placement_from_domain(struct radeon_bo *rbo, u32 domain) rbo->placements[c].fpfn = rbo->rdev->mc.visible_vram_size >> PAGE_SHIFT; rbo->placements[c].mem_type = TTM_PL_VRAM; - rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED; + rbo->placements[c++].flags = 0; } rbo->placements[c].fpfn = 0; rbo->placements[c].mem_type = TTM_PL_VRAM; - rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED; + rbo->placements[c++].flags = 0; } if (domain & RADEON_GEM_DOMAIN_GTT) { - if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c].fpfn = 0; - rbo->placements[c].mem_type = TTM_PL_TT; - rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED; - - } else if ((rbo->flags & 
RADEON_GEM_GTT_WC) || - (rbo->rdev->flags & RADEON_IS_AGP)) { - rbo->placements[c].fpfn = 0; - rbo->placements[c].mem_type = TTM_PL_TT; - rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED; - } else { - rbo->placements[c].fpfn = 0; - rbo->placements[c].mem_type = TTM_PL_TT; - rbo->placements[c++].flags = TTM_PL_FLAG_CACHED; - } + rbo->placements[c].fpfn = 0; + rbo->placements[c].mem_type = TTM_PL_TT; + rbo->placements[c++].flags = 0; } if (domain & RADEON_GEM_DOMAIN_CPU) { - if (rbo->flags & RADEON_GEM_GTT_UC) { - rbo->placements[c].fpfn = 0; - rbo->placements[c].mem_type = TTM_PL_SYSTEM; - rbo->placements[c++].flags = TTM_PL_FLAG_UNCACHED; - - } else if ((rbo->flags & RADEON_GEM_GTT_WC) || - rbo->rdev->flags & RADEON_IS_AGP) { - rbo->placements[c].fpfn = 0; - rbo->placements[c].mem_type = TTM_PL_SYSTEM; - rbo->placements[c++].flags = TTM_PL_FLAG_WC | - TTM_PL_FLAG_UNCACHED; - } else { - rbo->placements[c].fpfn = 0; - rbo->placements[c].mem_type = TTM_PL_SYSTEM; - rbo->placements[c++].flags = TTM_PL_FLAG_CACHED; - } + rbo->placements[c].fpfn = 0; + rbo->placements[c].mem_type = TTM_PL_SYSTEM; + rbo->placements[c++].flags = 0; } if (!c) { rbo->placements[c].fpfn = 0; rbo->placements[c].mem_type = TTM_PL_SYSTEM; - rbo->placements[c++].flags = TTM_PL_MASK_CACHING; + rbo->placements[c++].flags = 0; } rbo->placement.num_placement = c; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 9b53a1d80632..d6f42fbc81f4 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -89,7 +89,7 @@ static void radeon_evict_flags(struct ttm_buffer_object *bo, .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_SYSTEM, - .flags = TTM_PL_MASK_CACHING + .flags = 0 }; struct radeon_bo *rbo; @@ -225,17 +225,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, placements.fpfn = 0; placements.lpfn = 0; placements.mem_type = TTM_PL_TT; - placements.flags = TTM_PL_MASK_CACHING; + placements.flags = 0; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { return r; } - r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement); - if (unlikely(r)) { - goto out_cleanup; - } - r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); if (unlikely(r)) { goto out_cleanup; @@ -275,7 +270,7 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, placements.fpfn = 0; placements.lpfn = 0; placements.mem_type = TTM_PL_TT; - placements.flags = TTM_PL_MASK_CACHING; + placements.flags = 0; r = ttm_bo_mem_space(bo, &placement, &tmp_mem, ctx); if (unlikely(r)) { return r; @@ -389,12 +384,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso * Alpha: use bus.addr to hold the ioremap() return, * so we can modify bus.base below. 
*/ - if (mem->placement & TTM_PL_FLAG_WC) - mem->bus.addr = - ioremap_wc(mem->bus.offset, bus_size); - else - mem->bus.addr = - ioremap(mem->bus.offset, bus_size); + mem->bus.addr = ioremap_wc(mem->bus.offset, bus_size); if (!mem->bus.addr) return -ENOMEM; diff --git a/drivers/gpu/drm/ttm/ttm_agp_backend.c b/drivers/gpu/drm/ttm/ttm_agp_backend.c index a723062d37e7..4f76c9287159 100644 --- a/drivers/gpu/drm/ttm/ttm_agp_backend.c +++ b/drivers/gpu/drm/ttm/ttm_agp_backend.c @@ -54,7 +54,7 @@ int ttm_agp_bind(struct ttm_tt *ttm, struct ttm_resource *bo_mem) struct page *dummy_read_page = ttm_bo_glob.dummy_read_page; struct drm_mm_node *node = bo_mem->mm_node; struct agp_memory *mem; - int ret, cached = (bo_mem->placement & TTM_PL_FLAG_CACHED); + int ret, cached = ttm->caching == ttm_cached; unsigned i; if (agp_be->mem) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 3732dcb58aad..b97ed6ca8765 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -252,10 +252,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, if (ret) goto out_err; - ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement); - if (ret) - goto out_err; - if (mem->mem_type != TTM_PL_SYSTEM) { ret = ttm_tt_populate(bdev, bo->ttm, ctx); if (ret) @@ -843,29 +839,6 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo, return ttm_bo_add_move_fence(bo, man, mem, ctx->no_wait_gpu); } -static uint32_t ttm_bo_select_caching(struct ttm_resource_manager *man, - uint32_t cur_placement, - uint32_t proposed_placement) -{ - uint32_t caching = proposed_placement & TTM_PL_MASK_CACHING; - uint32_t result = proposed_placement & ~TTM_PL_MASK_CACHING; - - /** - * Keep current caching if possible. - */ - - if ((cur_placement & caching) != 0) - result |= (cur_placement & caching); - else if ((TTM_PL_FLAG_CACHED & caching) != 0) - result |= TTM_PL_FLAG_CACHED; - else if ((TTM_PL_FLAG_WC & caching) != 0) - result |= TTM_PL_FLAG_WC; - else if ((TTM_PL_FLAG_UNCACHED & caching) != 0) - result |= TTM_PL_FLAG_UNCACHED; - - return result; -} - /** * ttm_bo_mem_placement - check if placement is compatible * @bo: BO to find memory for @@ -884,18 +857,13 @@ static int ttm_bo_mem_placement(struct ttm_buffer_object *bo, { struct ttm_bo_device *bdev = bo->bdev; struct ttm_resource_manager *man; - uint32_t cur_flags = 0; man = ttm_manager_type(bdev, place->mem_type); if (!man || !ttm_resource_manager_used(man)) return -EBUSY; - cur_flags = ttm_bo_select_caching(man, bo->mem.placement, - place->flags); - cur_flags |= place->flags & ~TTM_PL_MASK_CACHING; - mem->mem_type = place->mem_type; - mem->placement = cur_flags; + mem->placement = place->flags; spin_lock(&ttm_bo_glob.lru_lock); ttm_bo_del_from_lru(bo); @@ -1028,8 +996,7 @@ static bool ttm_bo_places_compat(const struct ttm_place *places, continue; *new_flags = heap->flags; - if ((*new_flags & mem->placement & TTM_PL_MASK_CACHING) && - (mem->mem_type == heap->mem_type) && + if ((mem->mem_type == heap->mem_type) && (!(*new_flags & TTM_PL_FLAG_CONTIGUOUS) || (mem->placement & TTM_PL_FLAG_CONTIGUOUS))) return true; @@ -1083,9 +1050,6 @@ int ttm_bo_validate(struct ttm_buffer_object *bo, ret = ttm_bo_move_buffer(bo, placement, ctx); if (ret) return ret; - } else { - bo->mem.placement &= TTM_PL_MASK_CACHING; - bo->mem.placement |= new_flags & ~TTM_PL_MASK_CACHING; } /* * We might need to add a TTM. 
@@ -1153,7 +1117,7 @@ int ttm_bo_init_reserved(struct ttm_bo_device *bdev, bo->mem.bus.offset = 0; bo->mem.bus.addr = NULL; bo->moving = NULL; - bo->mem.placement = TTM_PL_FLAG_CACHED; + bo->mem.placement = 0; bo->acc_size = acc_size; bo->pin_count = 0; bo->sg = sg; @@ -1484,7 +1448,7 @@ int ttm_bo_swapout(struct ttm_operation_ctx *ctx) evict_mem = bo->mem; evict_mem.mm_node = NULL; - evict_mem.placement = TTM_PL_MASK_CACHING; + evict_mem.placement = 0; evict_mem.mem_type = TTM_PL_SYSTEM; ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, &ctx); diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 0542097dc419..ba7ab5ed85d0 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -72,10 +72,6 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, old_mem->mem_type = TTM_PL_SYSTEM; } - ret = ttm_tt_set_placement_caching(ttm, new_mem->placement); - if (unlikely(ret != 0)) - return ret; - if (new_mem->mem_type != TTM_PL_SYSTEM) { ret = ttm_tt_populate(bo->bdev, ttm, ctx); @@ -135,7 +131,7 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev, } else { size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; - if (mem->placement & TTM_PL_FLAG_WC) + if (mem->bus.caching == ttm_write_combined) addr = ioremap_wc(mem->bus.offset, bus_size); else addr = ioremap(mem->bus.offset, bus_size); @@ -427,7 +423,7 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo, map->virtual = (void *)(((u8 *)bo->mem.bus.addr) + offset); } else { map->bo_kmap_type = ttm_bo_map_iomap; - if (mem->placement & TTM_PL_FLAG_WC) + if (mem->bus.caching == ttm_write_combined) map->virtual = ioremap_wc(bo->mem.bus.offset + offset, size); else @@ -457,7 +453,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo, if (ret) return ret; - if (num_pages == 1 && (mem->placement & TTM_PL_FLAG_CACHED)) { + if (num_pages == 1 && ttm->caching == ttm_cached) { /* * We're mapping a single page, and the desired * page protection is consistent with the bo. 
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index a465f51df027..3e5dd6271d4c 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -114,35 +114,6 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm) return 0; } -static int ttm_tt_set_caching(struct ttm_tt *ttm, enum ttm_caching caching) -{ - if (ttm->caching == caching) - return 0; - - /* Can't change the caching state after TT is populated */ - if (WARN_ON_ONCE(ttm_tt_is_populated(ttm))) - return -EINVAL; - - ttm->caching = caching; - - return 0; -} - -int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement) -{ - enum ttm_caching state; - - if (placement & TTM_PL_FLAG_WC) - state = ttm_write_combined; - else if (placement & TTM_PL_FLAG_UNCACHED) - state = ttm_uncached; - else - state = ttm_cached; - - return ttm_tt_set_caching(ttm, state); -} -EXPORT_SYMBOL(ttm_tt_set_placement_caching); - void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { ttm_tt_unpopulate(bdev, ttm); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index fae88969a15a..112253246f08 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -34,28 +34,28 @@ static const struct ttm_place vram_placement_flags = { .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_VRAM, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }; static const struct ttm_place sys_placement_flags = { .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_SYSTEM, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }; static const struct ttm_place gmr_placement_flags = { .fpfn = 0, .lpfn = 0, .mem_type = VMW_PL_GMR, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }; static const struct ttm_place mob_placement_flags = { .fpfn = 0, .lpfn = 0, .mem_type = VMW_PL_MOB, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }; struct ttm_placement vmw_vram_placement = { @@ -70,12 +70,12 @@ static const struct ttm_place vram_gmr_placement_flags[] = { .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_VRAM, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }, { .fpfn = 0, .lpfn = 0, .mem_type = VMW_PL_GMR, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 } }; @@ -84,12 +84,12 @@ static const struct ttm_place gmr_vram_placement_flags[] = { .fpfn = 0, .lpfn = 0, .mem_type = VMW_PL_GMR, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }, { .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_VRAM, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 } }; @@ -119,22 +119,22 @@ static const struct ttm_place evictable_placement_flags[] = { .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_SYSTEM, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }, { .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_VRAM, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }, { .fpfn = 0, .lpfn = 0, .mem_type = VMW_PL_GMR, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }, { .fpfn = 0, .lpfn = 0, .mem_type = VMW_PL_MOB, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 } }; @@ -143,17 +143,17 @@ static const struct ttm_place nonfixed_placement_flags[] = { .fpfn = 0, .lpfn = 0, .mem_type = TTM_PL_SYSTEM, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }, { .fpfn = 0, .lpfn = 0, .mem_type = VMW_PL_GMR, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 }, { .fpfn = 0, .lpfn = 0, .mem_type = VMW_PL_MOB, - .flags = TTM_PL_FLAG_CACHED + .flags = 0 } }; diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h index 50e72df48b8d..aa6ba4d0cf78 100644 --- a/include/drm/ttm/ttm_placement.h +++ b/include/drm/ttm/ttm_placement.h @@ -43,27 +43,13 @@ #define TTM_PL_PRIV 3 /* - * 
Other flags that affects data placement. - * TTM_PL_FLAG_CACHED indicates cache-coherent mappings - * if available. - * TTM_PL_FLAG_SHARED means that another application may - * reference the buffer. - * TTM_PL_FLAG_NO_EVICT means that the buffer may never - * be evicted to make room for other buffers. * TTM_PL_FLAG_TOPDOWN requests to be placed from the * top of the memory area, instead of the bottom. */ -#define TTM_PL_FLAG_CACHED (1 << 16) -#define TTM_PL_FLAG_UNCACHED (1 << 17) -#define TTM_PL_FLAG_WC (1 << 18) #define TTM_PL_FLAG_CONTIGUOUS (1 << 19) #define TTM_PL_FLAG_TOPDOWN (1 << 22) -#define TTM_PL_MASK_CACHING (TTM_PL_FLAG_CACHED | \ - TTM_PL_FLAG_UNCACHED | \ - TTM_PL_FLAG_WC) - /** * struct ttm_place * diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index c39c722d5184..e042dec5e6c1 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -164,21 +164,6 @@ void ttm_tt_destroy_common(struct ttm_bo_device *bdev, struct ttm_tt *ttm); * Swap in a previously swap out ttm_tt. */ int ttm_tt_swapin(struct ttm_tt *ttm); - -/** - * ttm_tt_set_placement_caching: - * - * @ttm A struct ttm_tt the backing pages of which will change caching policy. - * @placement: Flag indicating the desired caching policy. - * - * This function will change caching policy of any default kernel mappings of - * the pages backing @ttm. If changing from cached to uncached or - * write-combined, - * all CPU caches will first be flushed to make sure the data of the pages - * hit RAM. This function may be very costly as it involves global TLB - * and cache flushes and potential page splitting / combining. - */ -int ttm_tt_set_placement_caching(struct ttm_tt *ttm, uint32_t placement); int ttm_tt_swapout(struct ttm_bo_device *bdev, struct ttm_tt *ttm); /** -- cgit From a07e32bda0265b62ed0f85fbdfcd316c1dfc98ca Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 19 Oct 2020 17:13:12 +1000 Subject: drm/ttm: use new move interface for known system->ttm moves MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit In all 3 drivers there is a case where the driver knows the bo is in SYSTEM so don't call the api that checks that. 
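For illustration only (not part of the patch), this is the pattern each of the three drivers switches to when the BO is already known to sit in TTM_PL_SYSTEM; the helper names are the ones used in the hunks below:

	/* BO is known to be in TTM_PL_SYSTEM, so bind it straight into the
	 * temporary TT placement instead of going through ttm_bo_move_ttm(),
	 * which would re-check and handle the system case again. */
	r = ttm_bo_move_to_new_tt_mem(bo, ctx, &tmp_mem);
	if (unlikely(r))
		goto out_cleanup;
	ttm_bo_assign_mem(bo, &tmp_mem);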
Reviewed-by: Ben Skeggs Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20201019071314.1671485-4-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 3 ++- drivers/gpu/drm/radeon/radeon_ttm.c | 3 ++- drivers/gpu/drm/ttm/ttm_bo_util.c | 1 + 4 files changed, 7 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8cdec58b9106..f8c9d66d3ef7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -601,11 +601,11 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, } /* move/bind old memory to GTT space */ - r = ttm_bo_move_ttm(bo, ctx, &tmp_mem); + r = ttm_bo_move_to_new_tt_mem(bo, ctx, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } - + ttm_bo_assign_mem(bo, &tmp_mem); /* copy to VRAM */ r = amdgpu_move_blit(bo, evict, new_mem, old_mem); if (unlikely(r)) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 4ccb3329014b..63baa9367851 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -927,10 +927,11 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, if (ret) return ret; - ret = ttm_bo_move_ttm(bo, ctx, &tmp_reg); + ret = ttm_bo_move_to_new_tt_mem(bo, ctx, &tmp_reg); if (ret) goto out; + ttm_bo_assign_mem(bo, &tmp_reg); ret = nouveau_bo_move_m2mf(bo, true, ctx, new_reg); if (ret) goto out; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index d6f42fbc81f4..9437d29248fd 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -275,10 +275,11 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_ttm(bo, ctx, &tmp_mem); + r = ttm_bo_move_to_new_tt_mem(bo, ctx, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } + ttm_bo_assign_mem(bo, &tmp_mem); r = radeon_move_blit(bo, true, new_mem, old_mem); if (unlikely(r)) { goto out_cleanup; diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 4a461226b6ba..7ea0482c1aee 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -64,6 +64,7 @@ int ttm_bo_move_to_new_tt_mem(struct ttm_buffer_object *bo, return 0; } +EXPORT_SYMBOL(ttm_bo_move_to_new_tt_mem); static int ttm_bo_move_to_system(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) -- cgit From c37d951cb42aa340513c0bc2df10b7324fa0d856 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Mon, 19 Oct 2020 17:13:13 +1000 Subject: drm/ttm: add move old to system to drivers. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Uninline ttm_bo_move_ttm. Eventually want to unhook the unbind out. 
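For illustration, a minimal sketch of the replacement sequence the drivers adopt (helper and naming as in the hunks below; the exact error handling differs slightly per driver):

	/* wait for the BO to be idle and unbind its TT backing ... */
	r = ttm_bo_move_to_system(bo, ctx);
	if (unlikely(r))
		return r;
	/* ... then just record the new (system) placement */
	ttm_bo_assign_mem(bo, new_mem);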
Reviewed-by: Ben Skeggs Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20201019071314.1671485-5-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 15 ++++++++++++--- drivers/gpu/drm/nouveau/nouveau_bo.c | 11 +++++++++-- drivers/gpu/drm/radeon/radeon_ttm.c | 15 ++++++++++++--- drivers/gpu/drm/ttm/ttm_bo_util.c | 5 +++-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 7 ++++++- include/drm/ttm/ttm_bo_driver.h | 2 ++ 6 files changed, 44 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f8c9d66d3ef7..00bec9924b5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -562,7 +562,11 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, } /* move BO (in tmp_mem) to new_mem */ - r = ttm_bo_move_ttm(bo, ctx, new_mem); + r = ttm_bo_move_to_system(bo, ctx); + if (unlikely(r)) + goto out_cleanup; + + ttm_bo_assign_mem(bo, new_mem); out_cleanup: ttm_resource_free(bo, &tmp_mem); return r; @@ -672,8 +676,13 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, } if (old_mem->mem_type == TTM_PL_TT && - new_mem->mem_type == TTM_PL_SYSTEM) - return ttm_bo_move_ttm(bo, ctx, new_mem); + new_mem->mem_type == TTM_PL_SYSTEM) { + r = ttm_bo_move_to_system(bo, ctx); + if (r) + return r; + ttm_bo_assign_mem(bo, new_mem); + return 0; + } if (old_mem->mem_type == AMDGPU_PL_GDS || old_mem->mem_type == AMDGPU_PL_GWS || diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 63baa9367851..ec79c3b251e8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -897,7 +897,11 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, if (ret) goto out; - ret = ttm_bo_move_ttm(bo, ctx, new_reg); + ret = ttm_bo_move_to_system(bo, ctx); + if (ret) + goto out; + + ttm_bo_assign_mem(bo, &tmp_reg); out: ttm_resource_free(bo, &tmp_reg); return ret; @@ -1048,7 +1052,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_reg->mem_type == TTM_PL_TT && new_reg->mem_type == TTM_PL_SYSTEM) { - ret = ttm_bo_move_ttm(bo, ctx, new_reg); + ret = ttm_bo_move_to_system(bo, ctx); + if (ret) + return ret; + ttm_bo_assign_mem(bo, new_reg); goto out; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 9437d29248fd..426643120e3a 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -244,7 +244,11 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_ttm(bo, ctx, new_mem); + r = ttm_bo_move_to_system(bo, ctx); + if (unlikely(r)) + goto out_cleanup; + + ttm_bo_assign_mem(bo, new_mem); out_cleanup: ttm_resource_free(bo, &tmp_mem); return r; @@ -319,9 +323,14 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, } if (old_mem->mem_type == TTM_PL_TT && - new_mem->mem_type == TTM_PL_SYSTEM) - return ttm_bo_move_ttm(bo, ctx, new_mem); + new_mem->mem_type == TTM_PL_SYSTEM) { + r = ttm_bo_move_to_system(bo, ctx); + if (r) + return r; + ttm_bo_assign_mem(bo, new_mem); + return 0; + } if (!rdev->ring[radeon_copy_ring_index(rdev)].ready || rdev->asic->copy.copy == NULL) { /* use memcpy */ diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 7ea0482c1aee..4a74a297e529 100644 --- 
a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -66,8 +66,8 @@ int ttm_bo_move_to_new_tt_mem(struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_bo_move_to_new_tt_mem); -static int ttm_bo_move_to_system(struct ttm_buffer_object *bo, - struct ttm_operation_ctx *ctx) +int ttm_bo_move_to_system(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx) { struct ttm_resource *old_mem = &bo->mem; int ret; @@ -87,6 +87,7 @@ static int ttm_bo_move_to_system(struct ttm_buffer_object *bo, old_mem->mem_type = TTM_PL_SYSTEM; return 0; } +EXPORT_SYMBOL(ttm_bo_move_to_system); int ttm_bo_move_ttm(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 112253246f08..c5cf81c09971 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -735,13 +735,18 @@ static int vmw_move(struct ttm_buffer_object *bo, { struct ttm_resource_manager *old_man = ttm_manager_type(bo->bdev, bo->mem.mem_type); struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); + int ret; if (old_man->use_tt && new_man->use_tt) { if (bo->mem.mem_type == TTM_PL_SYSTEM) { ttm_bo_assign_mem(bo, new_mem); return 0; } - return ttm_bo_move_ttm(bo, ctx, new_mem); + ret = ttm_bo_move_to_system(bo, ctx); + if (ret) + return ret; + ttm_bo_assign_mem(bo, new_mem); + return 0; } else { return ttm_bo_move_memcpy(bo, ctx, new_mem); } diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 1f4d2b1febd0..9e60e6814ba1 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -596,6 +596,8 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo, int ttm_bo_move_to_new_tt_mem(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem); +int ttm_bo_move_to_system(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx); /** * ttm_bo_move_memcpy -- cgit From 9764c35348b4c0da722a234fd355119abb371196 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 20 Oct 2020 11:03:13 +1000 Subject: drm/ttm: move some move binds into the drivers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This just gives the driver control over some of the bind paths. 
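A rough sketch of the bind path the drivers open-code below, instead of calling ttm_bo_move_to_new_tt_mem() (radeon naming shown; amdgpu and nouveau use their own bind helpers):

	r = ttm_tt_populate(bo->bdev, bo->ttm, ctx);	/* allocate backing pages */
	if (unlikely(r))
		goto out_cleanup;

	r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem);	/* driver GART bind */
	if (unlikely(r))
		goto out_cleanup;

	ttm_bo_assign_mem(bo, &tmp_mem);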
Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20201020010319.1692445-2-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 ++++++- drivers/gpu/drm/nouveau/nouveau_bo.c | 10 +++++++--- drivers/gpu/drm/radeon/radeon_ttm.c | 11 ++++++++--- drivers/gpu/drm/ttm/ttm_bo_util.c | 1 - 4 files changed, 21 insertions(+), 8 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 00bec9924b5b..11f464e0951d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -605,10 +605,15 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict, } /* move/bind old memory to GTT space */ - r = ttm_bo_move_to_new_tt_mem(bo, ctx, &tmp_mem); + r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + if (unlikely(r)) + return r; + + r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, &tmp_mem); if (unlikely(r)) { goto out_cleanup; } + ttm_bo_assign_mem(bo, &tmp_mem); /* copy to VRAM */ r = amdgpu_move_blit(bo, evict, new_mem, old_mem); diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index ec79c3b251e8..526bbf4c399a 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -931,9 +931,13 @@ nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, if (ret) return ret; - ret = ttm_bo_move_to_new_tt_mem(bo, ctx, &tmp_reg); - if (ret) - goto out; + ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + if (unlikely(ret != 0)) + return ret; + + ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_reg); + if (unlikely(ret != 0)) + return ret; ttm_bo_assign_mem(bo, &tmp_reg); ret = nouveau_bo_move_m2mf(bo, true, ctx, new_reg); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 426643120e3a..c1cdee33be11 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -279,10 +279,15 @@ static int radeon_move_ram_vram(struct ttm_buffer_object *bo, if (unlikely(r)) { return r; } - r = ttm_bo_move_to_new_tt_mem(bo, ctx, &tmp_mem); - if (unlikely(r)) { + + r = ttm_tt_populate(bo->bdev, bo->ttm, ctx); + if (unlikely(r)) goto out_cleanup; - } + + r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, &tmp_mem); + if (unlikely(r)) + goto out_cleanup; + ttm_bo_assign_mem(bo, &tmp_mem); r = radeon_move_blit(bo, true, new_mem, old_mem); if (unlikely(r)) { diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 558e78ad82aa..520b53e6e1d6 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -64,7 +64,6 @@ int ttm_bo_move_to_new_tt_mem(struct ttm_buffer_object *bo, return 0; } -EXPORT_SYMBOL(ttm_bo_move_to_new_tt_mem); int ttm_bo_move_to_system(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx) -- cgit From 29a1d482e4044ab76d0c0f6341212f1a51f48236 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 20 Oct 2020 11:03:15 +1000 Subject: drm/ttm: add move to system into drivers MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This moves the to system move into the drivers, and moves all the unbinds in the move path under driver control Note: radeon/nouveau already wait so don't duplicate it. 
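Sketched out, the TT -> SYSTEM sequence each driver now carries itself (radeon's branch shown; amdgpu waits with ttm_bo_wait_ctx() first, while radeon and nouveau already wait earlier in their move callbacks):

	radeon_ttm_tt_unbind(bo->bdev, bo->ttm);	/* unbind from GART */
	ttm_resource_free(bo, &bo->mem);		/* drop the old resource */
	ttm_bo_assign_mem(bo, new_mem);			/* record the system placement */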
Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20201020010319.1692445-4-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 12 +++++++++--- drivers/gpu/drm/nouveau/nouveau_bo.c | 10 ++++++---- drivers/gpu/drm/radeon/radeon_ttm.c | 12 +++++++----- drivers/gpu/drm/ttm/ttm_bo_util.c | 21 --------------------- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 5 ++++- include/drm/ttm/ttm_bo_driver.h | 2 -- 6 files changed, 26 insertions(+), 36 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 11f464e0951d..56f16bdba936 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -66,6 +66,8 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem); +static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm); static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev, unsigned int type, @@ -561,11 +563,12 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict, goto out_cleanup; } - /* move BO (in tmp_mem) to new_mem */ - r = ttm_bo_move_to_system(bo, ctx); + r = ttm_bo_wait_ctx(bo, ctx); if (unlikely(r)) goto out_cleanup; + amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_mem); out_cleanup: ttm_resource_free(bo, &tmp_mem); @@ -682,9 +685,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_mem->mem_type == TTM_PL_TT && new_mem->mem_type == TTM_PL_SYSTEM) { - r = ttm_bo_move_to_system(bo, ctx); + r = ttm_bo_wait_ctx(bo, ctx); if (r) return r; + + amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_mem); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 526bbf4c399a..1ccd9ea6da95 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -46,6 +46,7 @@ static int nouveau_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *reg); +static void nouveau_ttm_tt_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm); /* * NV10-NV40 tiling helpers @@ -897,10 +898,12 @@ nouveau_bo_move_flipd(struct ttm_buffer_object *bo, bool evict, if (ret) goto out; - ret = ttm_bo_move_to_system(bo, ctx); + ret = ttm_bo_wait_ctx(bo, ctx); if (ret) goto out; + nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, &tmp_reg); out: ttm_resource_free(bo, &tmp_reg); @@ -1056,9 +1059,8 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_reg->mem_type == TTM_PL_TT && new_reg->mem_type == TTM_PL_SYSTEM) { - ret = ttm_bo_move_to_system(bo, ctx); - if (ret) - return ret; + nouveau_ttm_tt_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_reg); goto out; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index c1cdee33be11..42c85afe7955 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -59,6 +59,8 @@ static void radeon_ttm_debugfs_fini(struct radeon_device *rdev); static int radeon_ttm_tt_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem); +static void radeon_ttm_tt_unbind(struct ttm_bo_device *bdev, + struct ttm_tt *ttm); struct radeon_device 
*radeon_get_rdev(struct ttm_bo_device *bdev) { @@ -244,10 +246,12 @@ static int radeon_move_vram_ram(struct ttm_buffer_object *bo, if (unlikely(r)) { goto out_cleanup; } - r = ttm_bo_move_to_system(bo, ctx); + r = ttm_bo_wait_ctx(bo, ctx); if (unlikely(r)) goto out_cleanup; + radeon_ttm_tt_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_mem); out_cleanup: ttm_resource_free(bo, &tmp_mem); @@ -329,10 +333,8 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, if (old_mem->mem_type == TTM_PL_TT && new_mem->mem_type == TTM_PL_SYSTEM) { - r = ttm_bo_move_to_system(bo, ctx); - if (r) - return r; - + radeon_ttm_tt_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_mem); return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index 5cfcb92a6d30..65f042d6da7c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -65,27 +65,6 @@ int ttm_bo_move_to_new_tt_mem(struct ttm_buffer_object *bo, return 0; } -int ttm_bo_move_to_system(struct ttm_buffer_object *bo, - struct ttm_operation_ctx *ctx) -{ - int ret; - - if (bo->mem.mem_type == TTM_PL_SYSTEM) - return 0; - - ret = ttm_bo_wait_ctx(bo, ctx); - if (unlikely(ret != 0)) { - if (ret != -ERESTARTSYS) - pr_err("Failed to expire sync object before unbinding TTM\n"); - return ret; - } - - ttm_bo_tt_unbind(bo); - ttm_resource_free(bo, &bo->mem); - return 0; -} -EXPORT_SYMBOL(ttm_bo_move_to_system); - int ttm_mem_io_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) { diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index c5cf81c09971..1d220a9794e6 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -742,9 +742,12 @@ static int vmw_move(struct ttm_buffer_object *bo, ttm_bo_assign_mem(bo, new_mem); return 0; } - ret = ttm_bo_move_to_system(bo, ctx); + ret = ttm_bo_wait_ctx(bo, ctx); if (ret) return ret; + + vmw_ttm_unbind(bo->bdev, bo->ttm); + ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_mem); return 0; } else { diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index a89728cb9a23..81a1618b9535 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -574,8 +574,6 @@ void ttm_mem_io_free(struct ttm_bo_device *bdev, int ttm_bo_move_to_new_tt_mem(struct ttm_buffer_object *bo, struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem); -int ttm_bo_move_to_system(struct ttm_buffer_object *bo, - struct ttm_operation_ctx *ctx); /** * ttm_bo_move_memcpy -- cgit From f227ccc9612f5d2f6315874c93acf3945fd51dfb Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 20 Oct 2020 11:03:16 +1000 Subject: drm/ttm: drop unbind callback. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The drivers now control this, so drop unbinding. 
Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20201020010319.1692445-5-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 - drivers/gpu/drm/nouveau/nouveau_bo.c | 1 - drivers/gpu/drm/qxl/qxl_ttm.c | 7 ------- drivers/gpu/drm/radeon/radeon_ttm.c | 1 - drivers/gpu/drm/ttm/ttm_bo.c | 5 ----- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 1 - include/drm/ttm/ttm_bo_driver.h | 18 ------------------ 7 files changed, 34 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 56f16bdba936..d7a2f912055d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1722,7 +1722,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .ttm_tt_populate = &amdgpu_ttm_tt_populate, .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, .ttm_tt_bind = &amdgpu_ttm_backend_bind, - .ttm_tt_unbind = &amdgpu_ttm_backend_unbind, .ttm_tt_destroy = &amdgpu_ttm_backend_destroy, .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, .evict_flags = &amdgpu_evict_flags, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 1ccd9ea6da95..43e6e089319e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1394,7 +1394,6 @@ struct ttm_bo_driver nouveau_bo_driver = { .ttm_tt_populate = &nouveau_ttm_tt_populate, .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, .ttm_tt_bind = &nouveau_ttm_tt_bind, - .ttm_tt_unbind = &nouveau_ttm_tt_unbind, .ttm_tt_destroy = &nouveau_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = nouveau_bo_evict_flags, diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index e3ed20215f18..95c4f2c7ab79 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -113,12 +113,6 @@ static int qxl_ttm_backend_bind(struct ttm_bo_device *bdev, return -1; } -static void qxl_ttm_backend_unbind(struct ttm_bo_device *bdev, - struct ttm_tt *ttm) -{ - /* Not implemented */ -} - static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { @@ -180,7 +174,6 @@ static struct ttm_bo_driver qxl_bo_driver = { .ttm_tt_create = &qxl_ttm_tt_create, .ttm_tt_bind = &qxl_ttm_backend_bind, .ttm_tt_destroy = &qxl_ttm_backend_destroy, - .ttm_tt_unbind = &qxl_ttm_backend_unbind, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &qxl_evict_flags, .move = &qxl_bo_move, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 42c85afe7955..914b2ca86936 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -817,7 +817,6 @@ static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_populate = &radeon_ttm_tt_populate, .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, .ttm_tt_bind = &radeon_ttm_tt_bind, - .ttm_tt_unbind = &radeon_ttm_tt_unbind, .ttm_tt_destroy = &radeon_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &radeon_evict_flags, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cbc74a320db2..593e55879019 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1496,8 +1496,3 @@ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem) { return bo->bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem); } - -void ttm_bo_tt_unbind(struct ttm_buffer_object *bo) -{ - 
bo->bdev->driver->ttm_tt_unbind(bo->bdev, bo->ttm); -} diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 1d220a9794e6..6e07ea982961 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -760,7 +760,6 @@ struct ttm_bo_driver vmw_bo_driver = { .ttm_tt_populate = &vmw_ttm_populate, .ttm_tt_unpopulate = &vmw_ttm_unpopulate, .ttm_tt_bind = &vmw_ttm_bind, - .ttm_tt_unbind = &vmw_ttm_unbind, .ttm_tt_destroy = &vmw_ttm_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = vmw_evict_flags, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 81a1618b9535..fbbcf10670c1 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -104,17 +104,6 @@ struct ttm_bo_driver { */ int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem); - /** - * ttm_tt_unbind - * - * @bdev: Pointer to a ttm device - * @ttm: Pointer to a struct ttm_tt. - * - * Unbind previously bound backend pages. This function should be - * able to handle differences between aperture and system page sizes. - */ - void (*ttm_tt_unbind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); - /** * ttm_tt_destroy * @@ -647,13 +636,6 @@ pgprot_t ttm_io_prot(struct ttm_buffer_object *bo, struct ttm_resource *res, */ int ttm_bo_tt_bind(struct ttm_buffer_object *bo, struct ttm_resource *mem); -/** - * ttm_bo_tt_bind - * - * Unbind the object tt from a memory resource. - */ -void ttm_bo_tt_unbind(struct ttm_buffer_object *bo); - /** * ttm_bo_tt_destroy. */ -- cgit From 6d820003295977f865257f1845bcdebc5dab4fb5 Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 20 Oct 2020 11:03:18 +1000 Subject: drm/ttm: drop move notify around move. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The drivers now do this in the move callback. move_notify is still needed in the destroy path. 
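The per-driver replacement has the same shape everywhere and is sketched below; driver_bo_move(), driver_move_notify() and do_move() are illustrative stand-ins for the real amdgpu/radeon/qxl/nouveau/vmwgfx functions touched by this patch.

	static int driver_bo_move(struct ttm_buffer_object *bo, bool evict,
				  struct ttm_operation_ctx *ctx,
				  struct ttm_resource *new_mem)
	{
		int r;

		driver_move_notify(bo, evict, new_mem);	/* notify before moving */

		r = do_move(bo, evict, ctx, new_mem);	/* blit, memcpy or assign */
		if (r) {
			/* Failed: re-notify with the old placement restored. */
			swap(*new_mem, bo->mem);
			driver_move_notify(bo, false, new_mem);
			swap(*new_mem, bo->mem);
		}
		return r;
	}
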
Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20201020010319.1692445-7-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 13 +++++++-- drivers/gpu/drm/drm_gem_vram_helper.c | 11 +++++++- drivers/gpu/drm/nouveau/nouveau_bo.c | 12 ++++++-- drivers/gpu/drm/qxl/qxl_ttm.c | 45 ++++++++++++++++++------------ drivers/gpu/drm/radeon/radeon_ttm.c | 11 ++++++-- drivers/gpu/drm/ttm/ttm_bo.c | 12 +------- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 14 ++++++++-- 7 files changed, 78 insertions(+), 40 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index d7a2f912055d..fba8ada99b0e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -666,6 +666,8 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int r; + amdgpu_bo_move_notify(bo, evict, new_mem); + /* Can't move a pinned BO */ abo = ttm_to_amdgpu_bo(bo); if (WARN_ON_ONCE(abo->tbo.pin_count > 0)) @@ -687,7 +689,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, new_mem->mem_type == TTM_PL_SYSTEM) { r = ttm_bo_wait_ctx(bo, ctx); if (r) - return r; + goto fail; amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); ttm_resource_free(bo, &bo->mem); @@ -728,12 +730,12 @@ memcpy: if (!amdgpu_mem_visible(adev, old_mem) || !amdgpu_mem_visible(adev, new_mem)) { pr_err("Move buffer fallback to memcpy unavailable\n"); - return r; + goto fail; } r = ttm_bo_move_memcpy(bo, ctx, new_mem); if (r) - return r; + goto fail; } if (bo->type == ttm_bo_type_device && @@ -748,6 +750,11 @@ memcpy: /* update statistics */ atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &adev->num_bytes_moved); return 0; +fail: + swap(*new_mem, bo->mem); + amdgpu_bo_move_notify(bo, false, new_mem); + swap(*new_mem, bo->mem); + return r; } /** diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 7aeb5daf2805..19087b22bdbb 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -590,7 +590,16 @@ static int drm_gem_vram_bo_driver_move(struct drm_gem_vram_object *gbo, struct ttm_operation_ctx *ctx, struct ttm_resource *new_mem) { - return ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem); + int ret; + + drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem); + ret = ttm_bo_move_memcpy(&gbo->bo, ctx, new_mem); + if (ret) { + swap(*new_mem, gbo->bo.mem); + drm_gem_vram_bo_driver_move_notify(gbo, false, new_mem); + swap(*new_mem, gbo->bo.mem); + } + return ret; } /* diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 43e6e089319e..ecd16847fbd8 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1032,9 +1032,10 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; + nouveau_bo_move_ntfy(bo, evict, new_reg); ret = ttm_bo_wait_ctx(bo, ctx); if (ret) - return ret; + goto out_ntfy; if (nvbo->bo.pin_count) NV_WARN(drm, "Moving pinned object %p!\n", nvbo); @@ -1042,7 +1043,7 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, if (drm->client.device.info.family < NV_DEVICE_INFO_V0_TESLA) { ret = nouveau_bo_vm_bind(bo, new_reg, &new_tile); if (ret) - return ret; + goto out_ntfy; } /* Fake bo copy. 
*/ @@ -1090,7 +1091,12 @@ out: else nouveau_bo_vm_cleanup(bo, new_tile, &nvbo->tile); } - +out_ntfy: + if (ret) { + swap(*new_reg, bo->mem); + nouveau_bo_move_ntfy(bo, false, new_reg); + swap(*new_reg, bo->mem); + } return ret; } diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 95c4f2c7ab79..a6149e3cc3d2 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -136,24 +136,6 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, return ttm; } -static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, - struct ttm_operation_ctx *ctx, - struct ttm_resource *new_mem) -{ - struct ttm_resource *old_mem = &bo->mem; - int ret; - - ret = ttm_bo_wait_ctx(bo, ctx); - if (ret) - return ret; - - if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { - ttm_bo_move_null(bo, new_mem); - return 0; - } - return ttm_bo_move_memcpy(bo, ctx, new_mem); -} - static void qxl_bo_move_notify(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *new_mem) @@ -170,6 +152,33 @@ static void qxl_bo_move_notify(struct ttm_buffer_object *bo, qxl_surface_evict(qdev, qbo, new_mem ? true : false); } +static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict, + struct ttm_operation_ctx *ctx, + struct ttm_resource *new_mem) +{ + struct ttm_resource *old_mem = &bo->mem; + int ret; + + qxl_bo_move_notify(bo, evict, new_mem); + + ret = ttm_bo_wait_ctx(bo, ctx); + if (ret) + goto out; + + if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { + ttm_bo_move_null(bo, new_mem); + return 0; + } + ret = ttm_bo_move_memcpy(bo, ctx, new_mem); +out: + if (ret) { + swap(*new_mem, bo->mem); + qxl_bo_move_notify(bo, false, new_mem); + swap(*new_mem, bo->mem); + } + return ret; +} + static struct ttm_bo_driver qxl_bo_driver = { .ttm_tt_create = &qxl_ttm_tt_create, .ttm_tt_bind = &qxl_ttm_backend_bind, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 914b2ca86936..e9c95ef0859b 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -311,9 +311,11 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int r; + radeon_bo_move_notify(bo, evict, new_mem); + r = ttm_bo_wait_ctx(bo, ctx); if (r) - return r; + goto fail; /* Can't move a pinned BO */ rbo = container_of(bo, struct radeon_bo, tbo); @@ -359,13 +361,18 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, memcpy: r = ttm_bo_move_memcpy(bo, ctx, new_mem); if (r) { - return r; + goto fail; } } /* update statistics */ atomic64_add((u64)bo->num_pages << PAGE_SHIFT, &rdev->num_bytes_moved); return 0; +fail: + swap(*new_mem, bo->mem); + radeon_bo_move_notify(bo, false, new_mem); + swap(*new_mem, bo->mem); + return r; } static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem) diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index a41be3dba23d..cd9c2bb183f4 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -263,19 +263,9 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, } } - if (bdev->driver->move_notify) - bdev->driver->move_notify(bo, evict, mem); - ret = bdev->driver->move(bo, evict, ctx, mem); - if (ret) { - if (bdev->driver->move_notify) { - swap(*mem, bo->mem); - bdev->driver->move_notify(bo, false, mem); - swap(*mem, bo->mem); - } - + if (ret) goto out_err; - } ctx->bytes_moved += bo->num_pages << PAGE_SHIFT; return 0; diff 
--git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 6e07ea982961..fd82c9ba2d77 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -737,6 +737,8 @@ static int vmw_move(struct ttm_buffer_object *bo, struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); int ret; + vmw_move_notify(bo, evict, new_mem); + if (old_man->use_tt && new_man->use_tt) { if (bo->mem.mem_type == TTM_PL_SYSTEM) { ttm_bo_assign_mem(bo, new_mem); @@ -744,15 +746,23 @@ static int vmw_move(struct ttm_buffer_object *bo, } ret = ttm_bo_wait_ctx(bo, ctx); if (ret) - return ret; + goto fail; vmw_ttm_unbind(bo->bdev, bo->ttm); ttm_resource_free(bo, &bo->mem); ttm_bo_assign_mem(bo, new_mem); return 0; } else { - return ttm_bo_move_memcpy(bo, ctx, new_mem); + ret = ttm_bo_move_memcpy(bo, ctx, new_mem); + if (ret) + goto fail; } + return 0; +fail: + swap(*new_mem, bo->mem); + vmw_move_notify(bo, false, new_mem); + swap(*new_mem, bo->mem); + return ret; } struct ttm_bo_driver vmw_bo_driver = { -- cgit From bfe5e585b44fb810e33fe54d4d822c6b2b40488c Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Tue, 20 Oct 2020 11:03:19 +1000 Subject: drm/ttm: move last binding into the drivers. MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit This moves the call to tt binding into the driver move, and drops the driver callback. Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20201020010319.1692445-8-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 ++++++- drivers/gpu/drm/nouveau/nouveau_bo.c | 7 ++++++- drivers/gpu/drm/qxl/qxl_ttm.c | 14 -------------- drivers/gpu/drm/radeon/radeon_ttm.c | 6 +++++- drivers/gpu/drm/ttm/ttm_bo.c | 4 ---- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 7 ++++++- include/drm/ttm/ttm_bo_driver.h | 14 -------------- 7 files changed, 23 insertions(+), 36 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index fba8ada99b0e..87e10a212b8a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -666,6 +666,12 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int r; + if (new_mem->mem_type == TTM_PL_TT) { + r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); + if (r) + return r; + } + amdgpu_bo_move_notify(bo, evict, new_mem); /* Can't move a pinned BO */ @@ -1728,7 +1734,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, .ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate, - .ttm_tt_bind = &amdgpu_ttm_backend_bind, .ttm_tt_destroy = &amdgpu_ttm_backend_destroy, .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, .evict_flags = &amdgpu_evict_flags, diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index ecd16847fbd8..70b6f3b1ae85 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1032,6 +1032,12 @@ nouveau_bo_move(struct ttm_buffer_object *bo, bool evict, struct nouveau_drm_tile *new_tile = NULL; int ret = 0; + if (new_reg->mem_type == TTM_PL_TT) { + ret = nouveau_ttm_tt_bind(bo->bdev, bo->ttm, new_reg); + if (ret) + return ret; + } + nouveau_bo_move_ntfy(bo, evict, new_reg); ret = ttm_bo_wait_ctx(bo, ctx); 
if (ret) @@ -1399,7 +1405,6 @@ struct ttm_bo_driver nouveau_bo_driver = { .ttm_tt_create = &nouveau_ttm_tt_create, .ttm_tt_populate = &nouveau_ttm_tt_populate, .ttm_tt_unpopulate = &nouveau_ttm_tt_unpopulate, - .ttm_tt_bind = &nouveau_ttm_tt_bind, .ttm_tt_destroy = &nouveau_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = nouveau_bo_evict_flags, diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index a6149e3cc3d2..1cc3c14bc684 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -100,19 +100,6 @@ int qxl_ttm_io_mem_reserve(struct ttm_bo_device *bdev, /* * TTM backend functions. */ - -static int qxl_ttm_backend_bind(struct ttm_bo_device *bdev, - struct ttm_tt *ttm, - struct ttm_resource *bo_mem) -{ - if (!ttm->num_pages) { - WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", - ttm->num_pages, bo_mem, ttm); - } - /* Not implemented */ - return -1; -} - static void qxl_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { @@ -181,7 +168,6 @@ out: static struct ttm_bo_driver qxl_bo_driver = { .ttm_tt_create = &qxl_ttm_tt_create, - .ttm_tt_bind = &qxl_ttm_backend_bind, .ttm_tt_destroy = &qxl_ttm_backend_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &qxl_evict_flags, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index e9c95ef0859b..cd454e5c802f 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -311,6 +311,11 @@ static int radeon_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_resource *old_mem = &bo->mem; int r; + if (new_mem->mem_type == TTM_PL_TT) { + r = radeon_ttm_tt_bind(bo->bdev, bo->ttm, new_mem); + if (r) + return r; + } radeon_bo_move_notify(bo, evict, new_mem); r = ttm_bo_wait_ctx(bo, ctx); @@ -823,7 +828,6 @@ static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, .ttm_tt_unpopulate = &radeon_ttm_tt_unpopulate, - .ttm_tt_bind = &radeon_ttm_tt_bind, .ttm_tt_destroy = &radeon_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = &radeon_evict_flags, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index cd9c2bb183f4..2b578012cdef 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -256,10 +256,6 @@ static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo, ret = ttm_tt_populate(bo->bdev, bo->ttm, ctx); if (ret) goto out_err; - - ret = bdev->driver->ttm_tt_bind(bo->bdev, bo->ttm, mem); - if (ret) - goto out_err; } } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index fd82c9ba2d77..de25cf016be2 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -737,6 +737,12 @@ static int vmw_move(struct ttm_buffer_object *bo, struct ttm_resource_manager *new_man = ttm_manager_type(bo->bdev, new_mem->mem_type); int ret; + if (new_man->use_tt && new_mem->mem_type != TTM_PL_SYSTEM) { + ret = vmw_ttm_bind(bo->bdev, bo->ttm, new_mem); + if (ret) + return ret; + } + vmw_move_notify(bo, evict, new_mem); if (old_man->use_tt && new_man->use_tt) { @@ -769,7 +775,6 @@ struct ttm_bo_driver vmw_bo_driver = { .ttm_tt_create = &vmw_ttm_tt_create, .ttm_tt_populate = &vmw_ttm_populate, .ttm_tt_unpopulate = &vmw_ttm_unpopulate, - .ttm_tt_bind = &vmw_ttm_bind, .ttm_tt_destroy = &vmw_ttm_destroy, .eviction_valuable = 
ttm_bo_eviction_valuable, .evict_flags = vmw_evict_flags, diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 0c4efc169f46..72f106b335e9 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -90,20 +90,6 @@ struct ttm_bo_driver { */ void (*ttm_tt_unpopulate)(struct ttm_bo_device *bdev, struct ttm_tt *ttm); - /** - * ttm_tt_bind - * - * @bdev: Pointer to a ttm device - * @ttm: Pointer to a struct ttm_tt. - * @bo_mem: Pointer to a struct ttm_resource describing the - * memory type and location for binding. - * - * Bind the backend pages into the aperture in the location - * indicated by @bo_mem. This function should be able to handle - * differences between aperture and system page sizes. - */ - int (*ttm_tt_bind)(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem); - /** * ttm_tt_destroy * -- cgit From 6a6e5988a2657cd0c91f6f1a3e7d194599248b6d Mon Sep 17 00:00:00 2001 From: Dave Airlie Date: Wed, 21 Oct 2020 14:40:29 +1000 Subject: drm/ttm: replace last move_notify with delete_mem_notify MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The move notify callback is only used in one place, this should be removed in the future, but for now just rename it to the use case which is to notify the driver that the GPU memory is to be deleted. Drivers can be cleaned up after this separately. Reviewed-by: Christian König Signed-off-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/msgid/20201021044031.1752624-2-airlied@gmail.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 +++++++- drivers/gpu/drm/drm_gem_vram_helper.c | 8 +++----- drivers/gpu/drm/nouveau/nouveau_bo.c | 8 +++++++- drivers/gpu/drm/qxl/qxl_ttm.c | 7 ++++++- drivers/gpu/drm/radeon/radeon_ttm.c | 8 +++++++- drivers/gpu/drm/ttm/ttm_bo.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 8 +++++++- include/drm/ttm/ttm_bo_driver.h | 10 ++-------- 8 files changed, 41 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 87e10a212b8a..62f9194b1dd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1730,6 +1730,12 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, return ret; } +static void +amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo) +{ + amdgpu_bo_move_notify(bo, false, NULL); +} + static struct ttm_bo_driver amdgpu_bo_driver = { .ttm_tt_create = &amdgpu_ttm_tt_create, .ttm_tt_populate = &amdgpu_ttm_tt_populate, @@ -1739,7 +1745,7 @@ static struct ttm_bo_driver amdgpu_bo_driver = { .evict_flags = &amdgpu_evict_flags, .move = &amdgpu_bo_move, .verify_access = &amdgpu_verify_access, - .move_notify = &amdgpu_bo_move_notify, + .delete_mem_notify = &amdgpu_bo_delete_mem_notify, .release_notify = &amdgpu_bo_release_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_pfn = amdgpu_ttm_io_mem_pfn, diff --git a/drivers/gpu/drm/drm_gem_vram_helper.c b/drivers/gpu/drm/drm_gem_vram_helper.c index 19087b22bdbb..9da823eb0edd 100644 --- a/drivers/gpu/drm/drm_gem_vram_helper.c +++ b/drivers/gpu/drm/drm_gem_vram_helper.c @@ -949,9 +949,7 @@ static void bo_driver_evict_flags(struct ttm_buffer_object *bo, drm_gem_vram_bo_driver_evict_flags(gbo, placement); } -static void bo_driver_move_notify(struct ttm_buffer_object *bo, - bool evict, - struct ttm_resource *new_mem) +static void 
bo_driver_delete_mem_notify(struct ttm_buffer_object *bo) { struct drm_gem_vram_object *gbo; @@ -961,7 +959,7 @@ static void bo_driver_move_notify(struct ttm_buffer_object *bo, gbo = drm_gem_vram_of_bo(bo); - drm_gem_vram_bo_driver_move_notify(gbo, evict, new_mem); + drm_gem_vram_bo_driver_move_notify(gbo, false, NULL); } static int bo_driver_move(struct ttm_buffer_object *bo, @@ -1002,7 +1000,7 @@ static struct ttm_bo_driver bo_driver = { .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = bo_driver_evict_flags, .move = bo_driver_move, - .move_notify = bo_driver_move_notify, + .delete_mem_notify = bo_driver_delete_mem_notify, .io_mem_reserve = bo_driver_io_mem_reserve, }; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 70b6f3b1ae85..acff82afe260 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1401,6 +1401,12 @@ nouveau_bo_fence(struct nouveau_bo *nvbo, struct nouveau_fence *fence, bool excl dma_resv_add_shared_fence(resv, &fence->base); } +static void +nouveau_bo_delete_mem_notify(struct ttm_buffer_object *bo) +{ + nouveau_bo_move_ntfy(bo, false, NULL); +} + struct ttm_bo_driver nouveau_bo_driver = { .ttm_tt_create = &nouveau_ttm_tt_create, .ttm_tt_populate = &nouveau_ttm_tt_populate, @@ -1408,7 +1414,7 @@ struct ttm_bo_driver nouveau_bo_driver = { .ttm_tt_destroy = &nouveau_ttm_tt_destroy, .eviction_valuable = ttm_bo_eviction_valuable, .evict_flags = nouveau_bo_evict_flags, - .move_notify = nouveau_bo_move_ntfy, + .delete_mem_notify = nouveau_bo_delete_mem_notify, .move = nouveau_bo_move, .verify_access = nouveau_bo_verify_access, .io_mem_reserve = &nouveau_ttm_io_mem_reserve, diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index 1cc3c14bc684..b52a4563b47b 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -166,6 +166,11 @@ out: return ret; } +static void qxl_bo_delete_mem_notify(struct ttm_buffer_object *bo) +{ + qxl_bo_move_notify(bo, false, NULL); +} + static struct ttm_bo_driver qxl_bo_driver = { .ttm_tt_create = &qxl_ttm_tt_create, .ttm_tt_destroy = &qxl_ttm_backend_destroy, @@ -173,7 +178,7 @@ static struct ttm_bo_driver qxl_bo_driver = { .evict_flags = &qxl_evict_flags, .move = &qxl_bo_move, .io_mem_reserve = &qxl_ttm_io_mem_reserve, - .move_notify = &qxl_bo_move_notify, + .delete_mem_notify = &qxl_bo_delete_mem_notify, }; static int qxl_ttm_init_mem_type(struct qxl_device *qdev, diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index cd454e5c802f..321c09d20c6c 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -824,6 +824,12 @@ bool radeon_ttm_tt_is_readonly(struct radeon_device *rdev, return !!(gtt->userflags & RADEON_GEM_USERPTR_READONLY); } +static void +radeon_bo_delete_mem_notify(struct ttm_buffer_object *bo) +{ + radeon_bo_move_notify(bo, false, NULL); +} + static struct ttm_bo_driver radeon_bo_driver = { .ttm_tt_create = &radeon_ttm_tt_create, .ttm_tt_populate = &radeon_ttm_tt_populate, @@ -833,7 +839,7 @@ static struct ttm_bo_driver radeon_bo_driver = { .evict_flags = &radeon_evict_flags, .move = &radeon_bo_move, .verify_access = &radeon_verify_access, - .move_notify = &radeon_bo_move_notify, + .delete_mem_notify = &radeon_bo_delete_mem_notify, .io_mem_reserve = &radeon_ttm_io_mem_reserve, }; diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 03b40ce7d2dc..5b411252a857 100644 --- 
a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -284,8 +284,8 @@ out_err: static void ttm_bo_cleanup_memtype_use(struct ttm_buffer_object *bo) { - if (bo->bdev->driver->move_notify) - bo->bdev->driver->move_notify(bo, false, NULL); + if (bo->bdev->driver->delete_mem_notify) + bo->bdev->driver->delete_mem_notify(bo); ttm_bo_tt_destroy(bo); ttm_resource_free(bo, &bo->mem); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index de25cf016be2..88be48ad0344 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -771,6 +771,12 @@ fail: return ret; } +static void +vmw_delete_mem_notify(struct ttm_buffer_object *bo) +{ + vmw_move_notify(bo, false, NULL); +} + struct ttm_bo_driver vmw_bo_driver = { .ttm_tt_create = &vmw_ttm_tt_create, .ttm_tt_populate = &vmw_ttm_populate, @@ -780,7 +786,7 @@ struct ttm_bo_driver vmw_bo_driver = { .evict_flags = vmw_evict_flags, .move = vmw_move, .verify_access = vmw_verify_access, - .move_notify = vmw_move_notify, + .delete_mem_notify = vmw_delete_mem_notify, .swap_notify = vmw_swap_notify, .io_mem_reserve = &vmw_ttm_io_mem_reserve, }; diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h index 72f106b335e9..29f6a1d1c853 100644 --- a/include/drm/ttm/ttm_bo_driver.h +++ b/include/drm/ttm/ttm_bo_driver.h @@ -156,15 +156,9 @@ struct ttm_bo_driver { struct file *filp); /** - * Hook to notify driver about a driver move so it - * can do tiling things and book-keeping. - * - * @evict: whether this move is evicting the buffer from the graphics - * address space + * Hook to notify driver about a resource delete. */ - void (*move_notify)(struct ttm_buffer_object *bo, - bool evict, - struct ttm_resource *new_mem); + void (*delete_mem_notify)(struct ttm_buffer_object *bo); /** * notify the driver that we're about to swap out this bo -- cgit From d1cb1f254a5b1c07788eecb84b443d59ccdfb9e0 Mon Sep 17 00:00:00 2001 From: Christian König Date: Mon, 19 Oct 2020 18:49:27 +0200 Subject: drm/ttm: nuke ttm_tt_set_(un)populated again MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Neither page allocation backend nor the driver should mess with that. 
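In effect the populate and unpopulate paths now read roughly as follows (condensed from the ttm_tt.c hunk in this patch; early-exit checks omitted): only the core flips TTM_PAGE_FLAG_PRIV_POPULATED, after the backend has done its work.

	int ttm_tt_populate(struct ttm_bo_device *bdev,
			    struct ttm_tt *ttm, struct ttm_operation_ctx *ctx)
	{
		int ret;

		if (bdev->driver->ttm_tt_populate)
			ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx);
		else
			ret = ttm_pool_populate(ttm, ctx);
		if (ret)
			return ret;

		ttm_tt_add_mapping(bdev, ttm);
		ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED;  /* core-owned */
		return 0;
	}

	void ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
	{
		if (bdev->driver->ttm_tt_unpopulate)
			bdev->driver->ttm_tt_unpopulate(bdev, ttm);
		else
			ttm_pool_unpopulate(ttm);
		ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED; /* core-owned */
	}
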
Signed-off-by: Christian König Reviewed-by: Dave Airlie Reviewed-by: Madhav Chauhan Link: https://patchwork.freedesktop.org/patch/396948/ --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 2 -- drivers/gpu/drm/nouveau/nouveau_bo.c | 1 - drivers/gpu/drm/radeon/radeon_ttm.c | 2 -- drivers/gpu/drm/ttm/ttm_page_alloc.c | 2 -- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 2 -- drivers/gpu/drm/ttm/ttm_tt.c | 11 +++++++---- include/drm/ttm/ttm_tt.h | 10 ---------- 7 files changed, 7 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 62f9194b1dd1..169468572930 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1363,7 +1363,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, return -ENOMEM; ttm->page_flags |= TTM_PAGE_FLAG_SG; - ttm_tt_set_populated(ttm); return 0; } @@ -1383,7 +1382,6 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev, drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); - ttm_tt_set_populated(ttm); return 0; } diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index acff82afe260..06a1f4c4e96e 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -1321,7 +1321,6 @@ nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, /* make userspace faulting work */ drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, ttm_dma->dma_address, ttm->num_pages); - ttm_tt_set_populated(ttm); return 0; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 321c09d20c6c..75fa2f55186b 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -670,14 +670,12 @@ static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev, return -ENOMEM; ttm->page_flags |= TTM_PAGE_FLAG_SG; - ttm_tt_set_populated(ttm); return 0; } if (slave && ttm->sg) { drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages, gtt->ttm.dma_address, ttm->num_pages); - ttm_tt_set_populated(ttm); return 0; } diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index c8f6790962b9..04099dddce16 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -1041,7 +1041,6 @@ ttm_pool_unpopulate_helper(struct ttm_tt *ttm, unsigned mem_count_update) put_pages: ttm_put_pages(ttm->pages, ttm->num_pages, ttm->page_flags, ttm->caching); - ttm_tt_set_unpopulated(ttm); } int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) @@ -1080,7 +1079,6 @@ int ttm_pool_populate(struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) } } - ttm_tt_set_populated(ttm); return 0; } EXPORT_SYMBOL(ttm_pool_populate); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index 6625b43f6256..85dad69419f6 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -983,7 +983,6 @@ skip_huge: } } - ttm_tt_set_populated(ttm); return 0; } EXPORT_SYMBOL_GPL(ttm_dma_populate); @@ -1077,7 +1076,6 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) /* shrink pool if necessary (only on !is_cached pools)*/ if (npages) ttm_dma_page_pool_free(pool, npages, false); - ttm_tt_set_unpopulated(ttm); } EXPORT_SYMBOL_GPL(ttm_dma_unpopulate); diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index 
3e5dd6271d4c..beb1e878fed9 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -138,7 +138,6 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm, ttm->num_pages = bo->num_pages; ttm->caching = ttm_cached; ttm->page_flags = page_flags; - ttm_tt_set_unpopulated(ttm); ttm->swap_storage = NULL; ttm->sg = bo->sg; ttm->caching = caching; @@ -334,9 +333,12 @@ int ttm_tt_populate(struct ttm_bo_device *bdev, ret = bdev->driver->ttm_tt_populate(bdev, ttm, ctx); else ret = ttm_pool_populate(ttm, ctx); - if (!ret) - ttm_tt_add_mapping(bdev, ttm); - return ret; + if (ret) + return ret; + + ttm_tt_add_mapping(bdev, ttm); + ttm->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED; + return 0; } EXPORT_SYMBOL(ttm_tt_populate); @@ -365,4 +367,5 @@ void ttm_tt_unpopulate(struct ttm_bo_device *bdev, bdev->driver->ttm_tt_unpopulate(bdev, ttm); else ttm_pool_unpopulate(ttm); + ttm->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED; } diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index e042dec5e6c1..e3e60c1da754 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -72,16 +72,6 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt) return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED; } -static inline void ttm_tt_set_unpopulated(struct ttm_tt *tt) -{ - tt->page_flags &= ~TTM_PAGE_FLAG_PRIV_POPULATED; -} - -static inline void ttm_tt_set_populated(struct ttm_tt *tt) -{ - tt->page_flags |= TTM_PAGE_FLAG_PRIV_POPULATED; -} - /** * struct ttm_dma_tt * -- cgit From 230c079fdcf45efacd316a76c3132b9f42cd3565 Mon Sep 17 00:00:00 2001 From: Christian König Date: Tue, 20 Oct 2020 20:10:39 +0200 Subject: drm/ttm: make num_pages uint32_t MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit We can still allocate 16TiB with that. 
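A quick check of the 16TiB figure, assuming the usual 4 KiB PAGE_SIZE: 2^32 pages * 2^12 bytes per page = 2^44 bytes = 16 TiB per ttm_tt, so the narrower counter does not limit any realistic allocation. Callers that shift num_pages by PAGE_SHIFT just need to widen first, which is what the (long) cast in the nouveau hunk below is for.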
Signed-off-by: Christian König Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/396946/ --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 10 +++++----- drivers/gpu/drm/nouveau/nouveau_ttm.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 4 ++-- include/drm/ttm/ttm_tt.h | 2 +- 4 files changed, 9 insertions(+), 9 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 169468572930..0a3270f25e40 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -973,7 +973,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) if (!gtt || !gtt->userptr) return false; - DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%lx\n", + DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n", gtt->userptr, ttm->num_pages); WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns, @@ -1124,7 +1124,7 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, gart_bind_fail: if (r) - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + DRM_ERROR("failed to bind %u pages at 0x%08llX\n", ttm->num_pages, gtt->offset); return r; @@ -1159,7 +1159,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev, } } if (!ttm->num_pages) { - WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", + WARN(1, "nothing to bind %u pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } @@ -1182,7 +1182,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev, ttm->pages, gtt->ttm.dma_address, flags); if (r) - DRM_ERROR("failed to bind %lu pages at 0x%08llX\n", + DRM_ERROR("failed to bind %u pages at 0x%08llX\n", ttm->num_pages, gtt->offset); gtt->bound = true; return r; @@ -1291,7 +1291,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev, /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); if (r) - DRM_ERROR("failed to unbind %lu pages at 0x%08llX\n", + DRM_ERROR("failed to unbind %u pages at 0x%08llX\n", gtt->ttm.ttm.num_pages, gtt->offset); gtt->bound = false; } diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 04b95277c73a..0592ed6eaad1 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -108,7 +108,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man, return ret; ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0, - reg->num_pages << PAGE_SHIFT, &mem->vma[0]); + (long)reg->num_pages << PAGE_SHIFT, &mem->vma[0]); if (ret) { nouveau_mem_del(reg); return ret; diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index 75fa2f55186b..c51dcbc818ef 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -564,7 +564,7 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev, gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) { - WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", + WARN(1, "nothing to bind %u pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } if (ttm->caching == ttm_cached) @@ -572,7 +572,7 @@ static int radeon_ttm_backend_bind(struct ttm_bo_device *bdev, r = radeon_gart_bind(rdev, gtt->offset, ttm->num_pages, ttm->pages, gtt->ttm.dma_address, flags); if (r) { - DRM_ERROR("failed to bind %lu pages at 0x%08X\n", + DRM_ERROR("failed to bind %u pages at 0x%08X\n", ttm->num_pages, (unsigned)gtt->offset); return r; } 
diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index e3e60c1da754..931a31355870 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -61,7 +61,7 @@ struct ttm_operation_ctx; struct ttm_tt { struct page **pages; uint32_t page_flags; - unsigned long num_pages; + uint32_t num_pages; struct sg_table *sg; /* for SG objects via dma-buf */ struct file *swap_storage; enum ttm_caching caching; -- cgit From e34b8feeaa4b65725b25f49c9b08a0f8707e8e86 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 21 Oct 2020 14:06:49 +0200 Subject: drm/ttm: merge ttm_dma_tt back into ttm_tt MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit It makes no difference to kmalloc if the structure is 48 or 64 bytes in size. Signed-off-by: Christian König Reviewed-by: Dave Airlie Link: https://patchwork.freedesktop.org/patch/396950/ --- drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c | 10 ++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 14 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 7 ++-- drivers/gpu/drm/nouveau/nouveau_bo.c | 12 +++---- drivers/gpu/drm/nouveau/nouveau_mem.c | 8 +++-- drivers/gpu/drm/nouveau/nouveau_mem.h | 4 +-- drivers/gpu/drm/nouveau/nouveau_sgdma.c | 6 ++-- drivers/gpu/drm/qxl/qxl_ttm.c | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 8 ++--- drivers/gpu/drm/ttm/ttm_bo.c | 2 +- drivers/gpu/drm/ttm/ttm_page_alloc.c | 30 ++++++++-------- drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 44 +++++++++++------------- drivers/gpu/drm/ttm/ttm_tt.c | 55 ++++++++++++------------------ drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c | 26 +++++++------- include/drm/ttm/ttm_page_alloc.h | 12 +++---- include/drm/ttm/ttm_tt.h | 35 ++++++------------- 16 files changed, 119 insertions(+), 156 deletions(-) (limited to 'drivers/gpu/drm/radeon') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 3c5ad69eff19..0e35023b5703 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -45,12 +45,10 @@ void amdgpu_gmc_get_pde_for_bo(struct amdgpu_bo *bo, int level, uint64_t *addr, uint64_t *flags) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - struct ttm_dma_tt *ttm; switch (bo->tbo.mem.mem_type) { case TTM_PL_TT: - ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); - *addr = ttm->dma_address[0]; + *addr = bo->tbo.ttm->dma_address[0]; break; case TTM_PL_VRAM: *addr = amdgpu_bo_gpu_offset(bo); @@ -122,16 +120,14 @@ int amdgpu_gmc_set_pte_pde(struct amdgpu_device *adev, void *cpu_pt_addr, uint64_t amdgpu_gmc_agp_addr(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); - struct ttm_dma_tt *ttm; if (bo->num_pages != 1 || bo->ttm->caching == ttm_cached) return AMDGPU_BO_INVALID_OFFSET; - ttm = container_of(bo->ttm, struct ttm_dma_tt, ttm); - if (ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) + if (bo->ttm->dma_address[0] + PAGE_SIZE >= adev->gmc.agp_size) return AMDGPU_BO_INVALID_OFFSET; - return adev->gmc.agp_start + ttm->dma_address[0]; + return adev->gmc.agp_start + bo->ttm->dma_address[0]; } /** diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 0a3270f25e40..beacd00221d8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -294,11 +294,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, cpu_addr = &job->ibs[0].ptr[num_dw]; if (mem->mem_type == TTM_PL_TT) { - struct ttm_dma_tt *dma; dma_addr_t 
*dma_address; - dma = container_of(bo->ttm, struct ttm_dma_tt, ttm); - dma_address = &dma->dma_address[offset >> PAGE_SHIFT]; + dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT]; r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags, cpu_addr); if (r) @@ -841,7 +839,7 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type) * TTM backend functions. */ struct amdgpu_ttm_tt { - struct ttm_dma_tt ttm; + struct ttm_tt ttm; struct drm_gem_object *gobj; u64 offset; uint64_t userptr; @@ -1292,7 +1290,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev, r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); if (r) DRM_ERROR("failed to unbind %u pages at 0x%08llX\n", - gtt->ttm.ttm.num_pages, gtt->offset); + gtt->ttm.num_pages, gtt->offset); gtt->bound = false; } @@ -1306,7 +1304,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev, if (gtt->usertask) put_task_struct(gtt->usertask); - ttm_dma_tt_fini(>t->ttm); + ttm_tt_fini(>t->ttm); kfree(gtt); } @@ -1340,7 +1338,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, kfree(gtt); return NULL; } - return >t->ttm.ttm; + return >t->ttm; } /** @@ -1507,7 +1505,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, /* Return false if no part of the ttm_tt object lies within * the range */ - size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE; + size = (unsigned long)gtt->ttm.num_pages * PAGE_SIZE; if (gtt->userptr > end || gtt->userptr + size <= start) return false; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 3e6243623082..f0e6fafd0938 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -1781,7 +1781,6 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, resv = vm->root.base.bo->tbo.base.resv; } else { struct drm_gem_object *obj = &bo->tbo.base; - struct ttm_dma_tt *ttm; resv = bo->tbo.base.resv; if (obj->import_attach && bo_va->is_xgmi) { @@ -1794,10 +1793,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev, struct amdgpu_bo_va *bo_va, } mem = &bo->tbo.mem; nodes = mem->mm_node; - if (mem->mem_type == TTM_PL_TT) { - ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm); - pages_addr = ttm->dma_address; - } + if (mem->mem_type == TTM_PL_TT) + pages_addr = bo->tbo.ttm->dma_address; } if (bo) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 06a1f4c4e96e..75fddbcd7832 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -547,7 +547,7 @@ void nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; + struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; int i; if (!ttm_dma) @@ -557,7 +557,7 @@ nouveau_bo_sync_for_device(struct nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->ttm.num_pages; i++) + for (i = 0; i < ttm_dma->num_pages; i++) dma_sync_single_for_device(drm->dev->dev, ttm_dma->dma_address[i], PAGE_SIZE, DMA_TO_DEVICE); @@ -567,7 +567,7 @@ void nouveau_bo_sync_for_cpu(struct nouveau_bo *nvbo) { struct nouveau_drm *drm = nouveau_bdev(nvbo->bo.bdev); - struct ttm_dma_tt *ttm_dma = (struct ttm_dma_tt *)nvbo->bo.ttm; + struct ttm_tt *ttm_dma = (struct ttm_tt *)nvbo->bo.ttm; int i; if (!ttm_dma) @@ -577,7 +577,7 @@ nouveau_bo_sync_for_cpu(struct 
nouveau_bo *nvbo) if (nvbo->force_coherent) return; - for (i = 0; i < ttm_dma->ttm.num_pages; i++) + for (i = 0; i < ttm_dma->num_pages; i++) dma_sync_single_for_cpu(drm->dev->dev, ttm_dma->dma_address[i], PAGE_SIZE, DMA_FROM_DEVICE); } @@ -1309,7 +1309,7 @@ static int nouveau_ttm_tt_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { - struct ttm_dma_tt *ttm_dma = (void *)ttm; + struct ttm_tt *ttm_dma = (void *)ttm; struct nouveau_drm *drm; struct device *dev; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); @@ -1345,7 +1345,7 @@ static void nouveau_ttm_tt_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { - struct ttm_dma_tt *ttm_dma = (void *)ttm; + struct ttm_tt *ttm_dma = (void *)ttm; struct nouveau_drm *drm; struct device *dev; bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 269d8707acc3..0a4c04aaad30 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -92,7 +92,7 @@ nouveau_mem_fini(struct nouveau_mem *mem) } int -nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt) +nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt) { struct nouveau_mem *mem = nouveau_mem(reg); struct nouveau_cli *cli = mem->cli; @@ -116,8 +116,10 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_dma_tt *tt) mem->comp = 0; } - if (tt->ttm.sg) args.sgl = tt->ttm.sg->sgl; - else args.dma = tt->dma_address; + if (tt->sg) + args.sgl = tt->sg->sgl; + else + args.dma = tt->dma_address; mutex_lock(&drm->master.lock); cli->base.super = true; diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.h b/drivers/gpu/drm/nouveau/nouveau_mem.h index 3fe1cfed57a1..7df3848e85aa 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.h +++ b/drivers/gpu/drm/nouveau/nouveau_mem.h @@ -1,7 +1,7 @@ #ifndef __NOUVEAU_MEM_H__ #define __NOUVEAU_MEM_H__ #include -struct ttm_dma_tt; +struct ttm_tt; #include #include @@ -24,7 +24,7 @@ int nouveau_mem_new(struct nouveau_cli *, u8 kind, u8 comp, struct ttm_resource *); void nouveau_mem_del(struct ttm_resource *); int nouveau_mem_vram(struct ttm_resource *, bool contig, u8 page); -int nouveau_mem_host(struct ttm_resource *, struct ttm_dma_tt *); +int nouveau_mem_host(struct ttm_resource *, struct ttm_tt *); void nouveau_mem_fini(struct nouveau_mem *); int nouveau_mem_map(struct nouveau_mem *, struct nvif_vmm *, struct nvif_vma *); #endif diff --git a/drivers/gpu/drm/nouveau/nouveau_sgdma.c b/drivers/gpu/drm/nouveau/nouveau_sgdma.c index cd6fdebae795..a2e23fd4906c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_sgdma.c +++ b/drivers/gpu/drm/nouveau/nouveau_sgdma.c @@ -11,7 +11,7 @@ struct nouveau_sgdma_be { /* this has to be the first field so populate/unpopulated in * nouve_bo.c works properly, otherwise have to move them here */ - struct ttm_dma_tt ttm; + struct ttm_tt ttm; struct nouveau_mem *mem; }; @@ -23,7 +23,7 @@ nouveau_sgdma_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) if (ttm) { nouveau_sgdma_unbind(bdev, ttm); ttm_tt_destroy_common(bdev, ttm); - ttm_dma_tt_fini(&nvbe->ttm); + ttm_tt_fini(&nvbe->ttm); kfree(nvbe); } } @@ -88,5 +88,5 @@ nouveau_sgdma_create_ttm(struct ttm_buffer_object *bo, uint32_t page_flags) kfree(nvbe); return NULL; } - return &nvbe->ttm.ttm; + return &nvbe->ttm; } diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c index b52a4563b47b..9609eeb52821 100644 --- a/drivers/gpu/drm/qxl/qxl_ttm.c +++ 
b/drivers/gpu/drm/qxl/qxl_ttm.c @@ -116,7 +116,7 @@ static struct ttm_tt *qxl_ttm_tt_create(struct ttm_buffer_object *bo, ttm = kzalloc(sizeof(struct ttm_tt), GFP_KERNEL); if (ttm == NULL) return NULL; - if (ttm_tt_init(ttm, bo, page_flags, ttm_cached)) { + if (ttm_dma_tt_init(ttm, bo, page_flags, ttm_cached)) { kfree(ttm); return NULL; } diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index c51dcbc818ef..0a6d7ea847db 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -437,7 +437,7 @@ static int radeon_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso * TTM backend functions. */ struct radeon_ttm_tt { - struct ttm_dma_tt ttm; + struct ttm_tt ttm; u64 offset; uint64_t userptr; @@ -602,7 +602,7 @@ static void radeon_ttm_backend_destroy(struct ttm_bo_device *bdev, struct ttm_tt radeon_ttm_backend_unbind(bdev, ttm); ttm_tt_destroy_common(bdev, ttm); - ttm_dma_tt_fini(>t->ttm); + ttm_tt_fini(>t->ttm); kfree(gtt); } @@ -640,7 +640,7 @@ static struct ttm_tt *radeon_ttm_tt_create(struct ttm_buffer_object *bo, kfree(gtt); return NULL; } - return >t->ttm.ttm; + return >t->ttm; } static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev, @@ -653,7 +653,7 @@ static struct radeon_ttm_tt *radeon_ttm_tt_to_gtt(struct radeon_device *rdev, if (!ttm) return NULL; - return container_of(ttm, struct radeon_ttm_tt, ttm.ttm); + return container_of(ttm, struct radeon_ttm_tt, ttm); } static int radeon_ttm_tt_populate(struct ttm_bo_device *bdev, diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 5b411252a857..40c72a0f9325 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1192,7 +1192,7 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev, size += ttm_round_pot(struct_size); size += ttm_round_pot(npages * (2*sizeof(void *) + sizeof(dma_addr_t))); - size += ttm_round_pot(sizeof(struct ttm_dma_tt)); + size += ttm_round_pot(sizeof(struct ttm_tt)); return size; } EXPORT_SYMBOL(ttm_bo_dma_acc_size); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c index 661b75d19cad..29e6c29ad60e 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -1081,28 +1081,28 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm) } EXPORT_SYMBOL(ttm_pool_unpopulate); -int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, +int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt, struct ttm_operation_ctx *ctx) { unsigned i, j; int r; - r = ttm_pool_populate(&tt->ttm, ctx); + r = ttm_pool_populate(tt, ctx); if (r) return r; - for (i = 0; i < tt->ttm.num_pages; ++i) { - struct page *p = tt->ttm.pages[i]; + for (i = 0; i < tt->num_pages; ++i) { + struct page *p = tt->pages[i]; size_t num_pages = 1; - for (j = i + 1; j < tt->ttm.num_pages; ++j) { - if (++p != tt->ttm.pages[j]) + for (j = i + 1; j < tt->num_pages; ++j) { + if (++p != tt->pages[j]) break; ++num_pages; } - tt->dma_address[i] = dma_map_page(dev, tt->ttm.pages[i], + tt->dma_address[i] = dma_map_page(dev, tt->pages[i], 0, num_pages * PAGE_SIZE, DMA_BIDIRECTIONAL); if (dma_mapping_error(dev, tt->dma_address[i])) { @@ -1111,7 +1111,7 @@ int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, PAGE_SIZE, DMA_BIDIRECTIONAL); tt->dma_address[i] = 0; } - ttm_pool_unpopulate(&tt->ttm); + ttm_pool_unpopulate(tt); return -EFAULT; } @@ -1124,21 +1124,21 @@ int ttm_populate_and_map_pages(struct device 
*dev, struct ttm_dma_tt *tt, } EXPORT_SYMBOL(ttm_populate_and_map_pages); -void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) +void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt) { unsigned i, j; - for (i = 0; i < tt->ttm.num_pages;) { - struct page *p = tt->ttm.pages[i]; + for (i = 0; i < tt->num_pages;) { + struct page *p = tt->pages[i]; size_t num_pages = 1; - if (!tt->dma_address[i] || !tt->ttm.pages[i]) { + if (!tt->dma_address[i] || !tt->pages[i]) { ++i; continue; } - for (j = i + 1; j < tt->ttm.num_pages; ++j) { - if (++p != tt->ttm.pages[j]) + for (j = i + 1; j < tt->num_pages; ++j) { + if (++p != tt->pages[j]) break; ++num_pages; @@ -1149,7 +1149,7 @@ void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt) i += num_pages; } - ttm_pool_unpopulate(&tt->ttm); + ttm_pool_unpopulate(tt); } EXPORT_SYMBOL(ttm_unmap_and_unpopulate_pages); diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c index a9aaed7e618a..c0353c25efd6 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c @@ -832,11 +832,10 @@ static int ttm_dma_page_pool_fill_locked(struct dma_pool *pool, * return dma_page pointer if success, otherwise NULL. */ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool, - struct ttm_dma_tt *ttm_dma, + struct ttm_tt *ttm, unsigned index) { struct dma_page *d_page = NULL; - struct ttm_tt *ttm = &ttm_dma->ttm; unsigned long irq_flags; int count; @@ -845,8 +844,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool, if (count) { d_page = list_first_entry(&pool->free_list, struct dma_page, page_list); ttm->pages[index] = d_page->p; - ttm_dma->dma_address[index] = d_page->dma; - list_move_tail(&d_page->page_list, &ttm_dma->pages_list); + ttm->dma_address[index] = d_page->dma; + list_move_tail(&d_page->page_list, &ttm->pages_list); pool->npages_in_use += 1; pool->npages_free -= 1; } @@ -854,9 +853,8 @@ static struct dma_page *ttm_dma_pool_get_pages(struct dma_pool *pool, return d_page; } -static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge) +static gfp_t ttm_dma_pool_gfp_flags(struct ttm_tt *ttm, bool huge) { - struct ttm_tt *ttm = &ttm_dma->ttm; gfp_t gfp_flags; if (ttm->page_flags & TTM_PAGE_FLAG_DMA32) @@ -883,11 +881,10 @@ static gfp_t ttm_dma_pool_gfp_flags(struct ttm_dma_tt *ttm_dma, bool huge) * On success pages list will hold count number of correctly * cached pages. On failure will hold the negative return value (-ENOMEM, etc). 
*/ -int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, +int ttm_dma_populate(struct ttm_tt *ttm, struct device *dev, struct ttm_operation_ctx *ctx) { struct ttm_mem_global *mem_glob = &ttm_mem_glob; - struct ttm_tt *ttm = &ttm_dma->ttm; unsigned long num_pages = ttm->num_pages; struct dma_pool *pool; struct dma_page *d_page; @@ -901,7 +898,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, if (ttm_check_under_lowerlimit(mem_glob, num_pages, ctx)) return -ENOMEM; - INIT_LIST_HEAD(&ttm_dma->pages_list); + INIT_LIST_HEAD(&ttm->pages_list); i = 0; type = ttm_to_type(ttm->page_flags, ttm->caching); @@ -912,7 +909,7 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, pool = ttm_dma_find_pool(dev, type | IS_HUGE); if (!pool) { - gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, true); + gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, true); pool = ttm_dma_pool_init(dev, gfp_flags, type | IS_HUGE); if (IS_ERR_OR_NULL(pool)) @@ -922,21 +919,21 @@ int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, while (num_pages >= HPAGE_PMD_NR) { unsigned j; - d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i); + d_page = ttm_dma_pool_get_pages(pool, ttm, i); if (!d_page) break; ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], pool->size, ctx); if (unlikely(ret != 0)) { - ttm_dma_unpopulate(ttm_dma, dev); + ttm_dma_unpopulate(ttm, dev); return -ENOMEM; } d_page->vaddr |= VADDR_FLAG_UPDATED_COUNT; for (j = i + 1; j < (i + HPAGE_PMD_NR); ++j) { ttm->pages[j] = ttm->pages[j - 1] + 1; - ttm_dma->dma_address[j] = ttm_dma->dma_address[j - 1] + + ttm->dma_address[j] = ttm->dma_address[j - 1] + PAGE_SIZE; } @@ -949,7 +946,7 @@ skip_huge: pool = ttm_dma_find_pool(dev, type); if (!pool) { - gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm_dma, false); + gfp_t gfp_flags = ttm_dma_pool_gfp_flags(ttm, false); pool = ttm_dma_pool_init(dev, gfp_flags, type); if (IS_ERR_OR_NULL(pool)) @@ -957,16 +954,16 @@ skip_huge: } while (num_pages) { - d_page = ttm_dma_pool_get_pages(pool, ttm_dma, i); + d_page = ttm_dma_pool_get_pages(pool, ttm, i); if (!d_page) { - ttm_dma_unpopulate(ttm_dma, dev); + ttm_dma_unpopulate(ttm, dev); return -ENOMEM; } ret = ttm_mem_global_alloc_page(mem_glob, ttm->pages[i], pool->size, ctx); if (unlikely(ret != 0)) { - ttm_dma_unpopulate(ttm_dma, dev); + ttm_dma_unpopulate(ttm, dev); return -ENOMEM; } @@ -980,10 +977,9 @@ skip_huge: EXPORT_SYMBOL_GPL(ttm_dma_populate); /* Put all pages in pages list to correct pool to wait for reuse */ -void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) +void ttm_dma_unpopulate(struct ttm_tt *ttm, struct device *dev) { struct ttm_mem_global *mem_glob = &ttm_mem_glob; - struct ttm_tt *ttm = &ttm_dma->ttm; struct dma_pool *pool; struct dma_page *d_page, *next; enum pool_type type; @@ -997,7 +993,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) pool = ttm_dma_find_pool(dev, type | IS_HUGE); if (pool) { count = 0; - list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, + list_for_each_entry_safe(d_page, next, &ttm->pages_list, page_list) { if (!(d_page->vaddr & VADDR_FLAG_HUGE_POOL)) continue; @@ -1027,7 +1023,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) /* make sure pages array match list and count number of pages */ count = 0; - list_for_each_entry_safe(d_page, next, &ttm_dma->pages_list, + list_for_each_entry_safe(d_page, next, &ttm->pages_list, page_list) { ttm->pages[count] = d_page->p; count++; @@ -1048,7 
+1044,7 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) pool->nfrees += count; } else { pool->npages_free += count; - list_splice(&ttm_dma->pages_list, &pool->free_list); + list_splice(&ttm->pages_list, &pool->free_list); /* * Wait to have at at least NUM_PAGES_TO_ALLOC number of pages * to free in order to minimize calls to set_memory_wb(). @@ -1059,10 +1055,10 @@ void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev) } spin_unlock_irqrestore(&pool->lock, irq_flags); - INIT_LIST_HEAD(&ttm_dma->pages_list); + INIT_LIST_HEAD(&ttm->pages_list); for (i = 0; i < ttm->num_pages; i++) { ttm->pages[i] = NULL; - ttm_dma->dma_address[i] = 0; + ttm->dma_address[i] = 0; } /* shrink pool if necessary (only on !is_cached pools)*/ diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c index dc1dad982f28..65c4254eea5c 100644 --- a/drivers/gpu/drm/ttm/ttm_tt.c +++ b/drivers/gpu/drm/ttm/ttm_tt.c @@ -92,21 +92,22 @@ static int ttm_tt_alloc_page_directory(struct ttm_tt *ttm) return 0; } -static int ttm_dma_tt_alloc_page_directory(struct ttm_dma_tt *ttm) +static int ttm_dma_tt_alloc_page_directory(struct ttm_tt *ttm) { - ttm->ttm.pages = kvmalloc_array(ttm->ttm.num_pages, - sizeof(*ttm->ttm.pages) + - sizeof(*ttm->dma_address), - GFP_KERNEL | __GFP_ZERO); - if (!ttm->ttm.pages) + ttm->pages = kvmalloc_array(ttm->num_pages, + sizeof(*ttm->pages) + + sizeof(*ttm->dma_address), + GFP_KERNEL | __GFP_ZERO); + if (!ttm->pages) return -ENOMEM; - ttm->dma_address = (void *) (ttm->ttm.pages + ttm->ttm.num_pages); + + ttm->dma_address = (void *)(ttm->pages + ttm->num_pages); return 0; } -static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm) +static int ttm_sg_tt_alloc_page_directory(struct ttm_tt *ttm) { - ttm->dma_address = kvmalloc_array(ttm->ttm.num_pages, + ttm->dma_address = kvmalloc_array(ttm->num_pages, sizeof(*ttm->dma_address), GFP_KERNEL | __GFP_ZERO); if (!ttm->dma_address) @@ -138,8 +139,10 @@ static void ttm_tt_init_fields(struct ttm_tt *ttm, ttm->num_pages = bo->num_pages; ttm->caching = ttm_cached; ttm->page_flags = page_flags; + ttm->dma_address = NULL; ttm->swap_storage = NULL; ttm->sg = bo->sg; + INIT_LIST_HEAD(&ttm->pages_list); ttm->caching = caching; } @@ -158,20 +161,21 @@ EXPORT_SYMBOL(ttm_tt_init); void ttm_tt_fini(struct ttm_tt *ttm) { - kvfree(ttm->pages); + if (ttm->pages) + kvfree(ttm->pages); + else + kvfree(ttm->dma_address); ttm->pages = NULL; + ttm->dma_address = NULL; } EXPORT_SYMBOL(ttm_tt_fini); -int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, +int ttm_dma_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, uint32_t page_flags, enum ttm_caching caching) { - struct ttm_tt *ttm = &ttm_dma->ttm; - ttm_tt_init_fields(ttm, bo, page_flags, caching); - INIT_LIST_HEAD(&ttm_dma->pages_list); - if (ttm_dma_tt_alloc_page_directory(ttm_dma)) { + if (ttm_dma_tt_alloc_page_directory(ttm)) { pr_err("Failed allocating page table\n"); return -ENOMEM; } @@ -179,19 +183,17 @@ int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_dma_tt_init); -int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, +int ttm_sg_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, uint32_t page_flags, enum ttm_caching caching) { - struct ttm_tt *ttm = &ttm_dma->ttm; int ret; ttm_tt_init_fields(ttm, bo, page_flags, caching); - INIT_LIST_HEAD(&ttm_dma->pages_list); if (page_flags & TTM_PAGE_FLAG_SG) - ret = ttm_sg_tt_alloc_page_directory(ttm_dma); 
+ ret = ttm_sg_tt_alloc_page_directory(ttm); else - ret = ttm_dma_tt_alloc_page_directory(ttm_dma); + ret = ttm_dma_tt_alloc_page_directory(ttm); if (ret) { pr_err("Failed allocating page table\n"); return -ENOMEM; @@ -200,19 +202,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, } EXPORT_SYMBOL(ttm_sg_tt_init); -void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma) -{ - struct ttm_tt *ttm = &ttm_dma->ttm; - - if (ttm->pages) - kvfree(ttm->pages); - else - kvfree(ttm_dma->dma_address); - ttm->pages = NULL; - ttm_dma->dma_address = NULL; -} -EXPORT_SYMBOL(ttm_dma_tt_fini); - int ttm_tt_swapin(struct ttm_tt *ttm) { struct address_space *swap_space; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c index 88be48ad0344..92a5d245ff4d 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ttm_buffer.c @@ -186,7 +186,7 @@ struct ttm_placement vmw_nonfixed_placement = { }; struct vmw_ttm_tt { - struct ttm_dma_tt dma_ttm; + struct ttm_tt dma_ttm; struct vmw_private *dev_priv; int gmr_id; struct vmw_mob *mob; @@ -374,8 +374,8 @@ static int vmw_ttm_map_dma(struct vmw_ttm_tt *vmw_tt) return 0; vsgt->mode = dev_priv->map_mode; - vsgt->pages = vmw_tt->dma_ttm.ttm.pages; - vsgt->num_pages = vmw_tt->dma_ttm.ttm.num_pages; + vsgt->pages = vmw_tt->dma_ttm.pages; + vsgt->num_pages = vmw_tt->dma_ttm.num_pages; vsgt->addrs = vmw_tt->dma_ttm.dma_address; vsgt->sgt = &vmw_tt->sgt; @@ -483,7 +483,7 @@ static void vmw_ttm_unmap_dma(struct vmw_ttm_tt *vmw_tt) const struct vmw_sg_table *vmw_bo_sg_table(struct ttm_buffer_object *bo) { struct vmw_ttm_tt *vmw_tt = - container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); return &vmw_tt->vsgt; } @@ -493,7 +493,7 @@ static int vmw_ttm_bind(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_resource *bo_mem) { struct vmw_ttm_tt *vmw_be = - container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(ttm, struct vmw_ttm_tt, dma_ttm); int ret = 0; if (!bo_mem) @@ -537,7 +537,7 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = - container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(ttm, struct vmw_ttm_tt, dma_ttm); if (!vmw_be->bound) return; @@ -562,13 +562,13 @@ static void vmw_ttm_unbind(struct ttm_bo_device *bdev, static void vmw_ttm_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_be = - container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(ttm, struct vmw_ttm_tt, dma_ttm); vmw_ttm_unbind(bdev, ttm); ttm_tt_destroy_common(bdev, ttm); vmw_ttm_unmap_dma(vmw_be); if (vmw_be->dev_priv->map_mode == vmw_dma_alloc_coherent) - ttm_dma_tt_fini(&vmw_be->dma_ttm); + ttm_tt_fini(&vmw_be->dma_ttm); else ttm_tt_fini(ttm); @@ -583,7 +583,7 @@ static int vmw_ttm_populate(struct ttm_bo_device *bdev, struct ttm_tt *ttm, struct ttm_operation_ctx *ctx) { struct vmw_ttm_tt *vmw_tt = - container_of(ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(ttm, struct vmw_ttm_tt, dma_ttm); struct vmw_private *dev_priv = vmw_tt->dev_priv; struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); int ret; @@ -612,7 +612,7 @@ static void vmw_ttm_unpopulate(struct ttm_bo_device *bdev, struct ttm_tt *ttm) { struct vmw_ttm_tt *vmw_tt = container_of(ttm, struct vmw_ttm_tt, - dma_ttm.ttm); + dma_ttm); struct vmw_private *dev_priv = vmw_tt->dev_priv; struct ttm_mem_global *glob = vmw_mem_glob(dev_priv); @@ -650,12 +650,12 
@@ static struct ttm_tt *vmw_ttm_tt_create(struct ttm_buffer_object *bo, ret = ttm_dma_tt_init(&vmw_be->dma_ttm, bo, page_flags, ttm_cached); else - ret = ttm_tt_init(&vmw_be->dma_ttm.ttm, bo, page_flags, + ret = ttm_tt_init(&vmw_be->dma_ttm, bo, page_flags, ttm_cached); if (unlikely(ret != 0)) goto out_no_init; - return &vmw_be->dma_ttm.ttm; + return &vmw_be->dma_ttm; out_no_init: kfree(vmw_be); return NULL; @@ -813,7 +813,7 @@ int vmw_bo_create_and_populate(struct vmw_private *dev_priv, ret = vmw_ttm_populate(bo->bdev, bo->ttm, &ctx); if (likely(ret == 0)) { struct vmw_ttm_tt *vmw_tt = - container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm.ttm); + container_of(bo->ttm, struct vmw_ttm_tt, dma_ttm); ret = vmw_ttm_map_dma(vmw_tt); } diff --git a/include/drm/ttm/ttm_page_alloc.h b/include/drm/ttm/ttm_page_alloc.h index a6b6ef5f9bf4..8fa1e7df6213 100644 --- a/include/drm/ttm/ttm_page_alloc.h +++ b/include/drm/ttm/ttm_page_alloc.h @@ -61,13 +61,13 @@ void ttm_pool_unpopulate(struct ttm_tt *ttm); /** * Populates and DMA maps pages to fullfil a ttm_dma_populate() request */ -int ttm_populate_and_map_pages(struct device *dev, struct ttm_dma_tt *tt, +int ttm_populate_and_map_pages(struct device *dev, struct ttm_tt *tt, struct ttm_operation_ctx *ctx); /** * Unpopulates and DMA unmaps pages as part of a * ttm_dma_unpopulate() request */ -void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_dma_tt *tt); +void ttm_unmap_and_unpopulate_pages(struct device *dev, struct ttm_tt *tt); /** * Output the state of pools to debugfs file @@ -90,9 +90,9 @@ void ttm_dma_page_alloc_fini(void); */ int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data); -int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, struct device *dev, +int ttm_dma_populate(struct ttm_tt *ttm_dma, struct device *dev, struct ttm_operation_ctx *ctx); -void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, struct device *dev); +void ttm_dma_unpopulate(struct ttm_tt *ttm_dma, struct device *dev); #else static inline int ttm_dma_page_alloc_init(struct ttm_mem_global *glob, @@ -107,13 +107,13 @@ static inline int ttm_dma_page_alloc_debugfs(struct seq_file *m, void *data) { return 0; } -static inline int ttm_dma_populate(struct ttm_dma_tt *ttm_dma, +static inline int ttm_dma_populate(struct ttm_tt *ttm_dma, struct device *dev, struct ttm_operation_ctx *ctx) { return -ENOMEM; } -static inline void ttm_dma_unpopulate(struct ttm_dma_tt *ttm_dma, +static inline void ttm_dma_unpopulate(struct ttm_tt *ttm_dma, struct device *dev) { } diff --git a/include/drm/ttm/ttm_tt.h b/include/drm/ttm/ttm_tt.h index 931a31355870..df9a80650feb 100644 --- a/include/drm/ttm/ttm_tt.h +++ b/include/drm/ttm/ttm_tt.h @@ -47,12 +47,13 @@ struct ttm_operation_ctx; * struct ttm_tt * * @pages: Array of pages backing the data. + * @page_flags: see TTM_PAGE_FLAG_* * @num_pages: Number of pages in the page array. - * @bdev: Pointer to the current struct ttm_bo_device. - * @be: Pointer to the ttm backend. + * @sg: for SG objects via dma-buf + * @dma_address: The DMA (bus) addresses of the pages * @swap_storage: Pointer to shmem struct file for swap storage. - * @caching_state: The current caching state of the pages. - * @state: The current binding state of the pages. + * @pages_list: used by some page allocation backend + * @caching: The current caching state of the pages. 
* * This is a structure holding the pages, caching- and aperture binding * status for a buffer object that isn't backed by fixed (VRAM / AGP) @@ -62,8 +63,10 @@ struct ttm_tt { struct page **pages; uint32_t page_flags; uint32_t num_pages; - struct sg_table *sg; /* for SG objects via dma-buf */ + struct sg_table *sg; + dma_addr_t *dma_address; struct file *swap_storage; + struct list_head pages_list; enum ttm_caching caching; }; @@ -72,23 +75,6 @@ static inline bool ttm_tt_is_populated(struct ttm_tt *tt) return tt->page_flags & TTM_PAGE_FLAG_PRIV_POPULATED; } -/** - * struct ttm_dma_tt - * - * @ttm: Base ttm_tt struct. - * @dma_address: The DMA (bus) addresses of the pages - * @pages_list: used by some page allocation backend - * - * This is a structure holding the pages, caching- and aperture binding - * status for a buffer object that isn't backed by fixed (VRAM / AGP) - * memory. - */ -struct ttm_dma_tt { - struct ttm_tt ttm; - dma_addr_t *dma_address; - struct list_head pages_list; -}; - /** * ttm_tt_create * @@ -115,9 +101,9 @@ int ttm_tt_create(struct ttm_buffer_object *bo, bool zero_alloc); */ int ttm_tt_init(struct ttm_tt *ttm, struct ttm_buffer_object *bo, uint32_t page_flags, enum ttm_caching caching); -int ttm_dma_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, +int ttm_dma_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo, uint32_t page_flags, enum ttm_caching caching); -int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, +int ttm_sg_tt_init(struct ttm_tt *ttm_dma, struct ttm_buffer_object *bo, uint32_t page_flags, enum ttm_caching caching); /** @@ -128,7 +114,6 @@ int ttm_sg_tt_init(struct ttm_dma_tt *ttm_dma, struct ttm_buffer_object *bo, * Free memory of ttm_tt structure */ void ttm_tt_fini(struct ttm_tt *ttm); -void ttm_dma_tt_fini(struct ttm_dma_tt *ttm_dma); /** * ttm_ttm_destroy: -- cgit
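
(A non-authoritative illustration, not part of the series: the sketch below shows roughly what a driver-side TTM backend looks like on top of this consolidation, assuming the post-patch tree. The my_ttm_tt wrapper and the my_tt_create()/my_tt_destroy() helpers are hypothetical names used only for illustration; the pattern mirrors the vmwgfx changes above — embed struct ttm_tt directly, reach dma_address without the old .ttm indirection, and tear down with ttm_tt_fini() now that ttm_dma_tt_fini() is gone.)

/* Hypothetical driver backend after the ttm_dma_tt removal (sketch only). */
#include <linux/slab.h>
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_tt.h>

struct my_ttm_tt {
	struct ttm_tt ttm;	/* was: struct ttm_dma_tt dma_ttm */
	/* driver-private state would follow here */
};

static struct ttm_tt *my_tt_create(struct ttm_buffer_object *bo,
				   uint32_t page_flags)
{
	struct my_ttm_tt *tt = kzalloc(sizeof(*tt), GFP_KERNEL);

	if (!tt)
		return NULL;

	/* ttm_dma_tt_init() now takes the ttm_tt itself and fills in
	 * tt->ttm.dma_address, which used to live in struct ttm_dma_tt. */
	if (ttm_dma_tt_init(&tt->ttm, bo, page_flags, ttm_cached)) {
		kfree(tt);
		return NULL;
	}
	return &tt->ttm;	/* was: return &tt->dma_ttm.ttm */
}

static void my_tt_destroy(struct ttm_bo_device *bdev, struct ttm_tt *ttm)
{
	struct my_ttm_tt *tt = container_of(ttm, struct my_ttm_tt, ttm);

	ttm_tt_destroy_common(bdev, ttm);
	/* ttm_dma_tt_fini() is gone; ttm_tt_fini() now frees either the
	 * combined pages + dma_address allocation or the dma_address-only
	 * array, depending on which init path was used. */
	ttm_tt_fini(&tt->ttm);
	kfree(tt);
}

(The same applies to the scatter-gather path: ttm_sg_tt_init() also takes a plain struct ttm_tt now, and frees through the same ttm_tt_fini().)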