Diffstat (limited to 'drivers/gpu/drm/ttm/ttm_bo_util.c')
-rw-r--r-- | drivers/gpu/drm/ttm/ttm_bo_util.c | 364 |
1 file changed, 105 insertions, 259 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c
index ee04716b2603..fb2a25f8408f 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_util.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_util.c
@@ -67,10 +67,8 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                         return ret;
         }
 
-        ttm_tt_unbind(ttm);
+        ttm_bo_tt_unbind(bo);
         ttm_bo_free_old_node(bo);
-        ttm_flag_masked(&old_mem->placement, TTM_PL_FLAG_SYSTEM,
-                        TTM_PL_MASK_MEM);
         old_mem->mem_type = TTM_PL_SYSTEM;
 }
 
@@ -79,134 +77,56 @@ int ttm_bo_move_ttm(struct ttm_buffer_object *bo,
                 return ret;
 
         if (new_mem->mem_type != TTM_PL_SYSTEM) {
-                ret = ttm_tt_bind(ttm, new_mem, ctx);
+
+                ret = ttm_tt_populate(bo->bdev, ttm, ctx);
                 if (unlikely(ret != 0))
                         return ret;
-        }
-
-        *old_mem = *new_mem;
-        new_mem->mm_node = NULL;
+                ret = ttm_bo_tt_bind(bo, new_mem);
+                if (unlikely(ret != 0))
+                        return ret;
+        }
 
+        ttm_bo_assign_mem(bo, new_mem);
         return 0;
 }
 EXPORT_SYMBOL(ttm_bo_move_ttm);
 
-int ttm_mem_io_lock(struct ttm_resource_manager *man, bool interruptible)
-{
-        if (likely(!man->use_io_reserve_lru))
-                return 0;
-
-        if (interruptible)
-                return mutex_lock_interruptible(&man->io_reserve_mutex);
-
-        mutex_lock(&man->io_reserve_mutex);
-        return 0;
-}
-
-void ttm_mem_io_unlock(struct ttm_resource_manager *man)
-{
-        if (likely(!man->use_io_reserve_lru))
-                return;
-
-        mutex_unlock(&man->io_reserve_mutex);
-}
-
-static int ttm_mem_io_evict(struct ttm_resource_manager *man)
-{
-        struct ttm_buffer_object *bo;
-
-        bo = list_first_entry_or_null(&man->io_reserve_lru,
-                                      struct ttm_buffer_object,
-                                      io_reserve_lru);
-        if (!bo)
-                return -ENOSPC;
-
-        list_del_init(&bo->io_reserve_lru);
-        ttm_bo_unmap_virtual_locked(bo);
-        return 0;
-}
-
 int ttm_mem_io_reserve(struct ttm_bo_device *bdev,
                        struct ttm_resource *mem)
 {
-        struct ttm_resource_manager *man = ttm_manager_type(bdev, mem->mem_type);
-        int ret;
-
-        if (mem->bus.io_reserved_count++)
+        if (mem->bus.offset || mem->bus.addr)
                 return 0;
 
+        mem->bus.is_iomem = false;
         if (!bdev->driver->io_mem_reserve)
                 return 0;
 
-        mem->bus.addr = NULL;
-        mem->bus.offset = 0;
-        mem->bus.base = 0;
-        mem->bus.is_iomem = false;
-retry:
-        ret = bdev->driver->io_mem_reserve(bdev, mem);
-        if (ret == -ENOSPC) {
-                ret = ttm_mem_io_evict(man);
-                if (ret == 0)
-                        goto retry;
-        }
-        return ret;
+        return bdev->driver->io_mem_reserve(bdev, mem);
 }
 
 void ttm_mem_io_free(struct ttm_bo_device *bdev,
                      struct ttm_resource *mem)
 {
-        if (--mem->bus.io_reserved_count)
+        if (!mem->bus.offset && !mem->bus.addr)
                 return;
 
-        if (!bdev->driver->io_mem_free)
-                return;
-
-        bdev->driver->io_mem_free(bdev, mem);
-}
-
-int ttm_mem_io_reserve_vm(struct ttm_buffer_object *bo)
-{
-        struct ttm_resource_manager *man = ttm_manager_type(bo->bdev, bo->mem.mem_type);
-        struct ttm_resource *mem = &bo->mem;
-        int ret;
-
-        if (mem->bus.io_reserved_vm)
-                return 0;
-
-        ret = ttm_mem_io_reserve(bo->bdev, mem);
-        if (unlikely(ret != 0))
-                return ret;
-        mem->bus.io_reserved_vm = true;
-        if (man->use_io_reserve_lru)
-                list_add_tail(&bo->io_reserve_lru,
-                              &man->io_reserve_lru);
-        return 0;
-}
+        if (bdev->driver->io_mem_free)
+                bdev->driver->io_mem_free(bdev, mem);
 
-void ttm_mem_io_free_vm(struct ttm_buffer_object *bo)
-{
-        struct ttm_resource *mem = &bo->mem;
-
-        if (!mem->bus.io_reserved_vm)
-                return;
-
-        mem->bus.io_reserved_vm = false;
-        list_del_init(&bo->io_reserve_lru);
-        ttm_mem_io_free(bo->bdev, mem);
+        mem->bus.offset = 0;
+        mem->bus.addr = NULL;
 }
 
 static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
                                struct ttm_resource *mem,
                                void **virtual)
 {
-        struct ttm_resource_manager *man =
-                ttm_manager_type(bdev, mem->mem_type);
         int ret;
         void *addr;
 
         *virtual = NULL;
-        (void) ttm_mem_io_lock(man, false);
         ret = ttm_mem_io_reserve(bdev, mem);
-        ttm_mem_io_unlock(man);
         if (ret || !mem->bus.is_iomem)
                 return ret;
 
@@ -216,15 +136,11 @@ static int ttm_resource_ioremap(struct ttm_bo_device *bdev,
                 size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
 
                 if (mem->placement & TTM_PL_FLAG_WC)
-                        addr = ioremap_wc(mem->bus.base + mem->bus.offset,
-                                          bus_size);
+                        addr = ioremap_wc(mem->bus.offset, bus_size);
                 else
-                        addr = ioremap(mem->bus.base + mem->bus.offset,
-                                       bus_size);
+                        addr = ioremap(mem->bus.offset, bus_size);
                 if (!addr) {
-                        (void) ttm_mem_io_lock(man, false);
                         ttm_mem_io_free(bdev, mem);
-                        ttm_mem_io_unlock(man);
                         return -ENOMEM;
                 }
         }
@@ -236,15 +152,9 @@ static void ttm_resource_iounmap(struct ttm_bo_device *bdev,
                                 struct ttm_resource *mem,
                                 void *virtual)
 {
-        struct ttm_resource_manager *man;
-
-        man = ttm_manager_type(bdev, mem->mem_type);
-
         if (virtual && mem->bus.addr == NULL)
                 iounmap(virtual);
-        (void) ttm_mem_io_lock(man, false);
         ttm_mem_io_free(bdev, mem);
-        ttm_mem_io_unlock(man);
 }
 
 static int ttm_copy_io_page(void *dst, void *src, unsigned long page)
@@ -342,7 +252,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
          * Don't move nonexistent data. Clear destination instead.
          */
         if (old_iomap == NULL &&
-            (ttm == NULL || (ttm->state == tt_unpopulated &&
+            (ttm == NULL || (!ttm_tt_is_populated(ttm) &&
                              !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
                 memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
                 goto out2;
@@ -352,7 +262,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
          * TTM might be null for moves within the same region.
          */
         if (ttm) {
-                ret = ttm_tt_populate(ttm, ctx);
+                ret = ttm_tt_populate(bdev, ttm, ctx);
                 if (ret)
                         goto out1;
         }
@@ -387,13 +297,11 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
         mb();
 out2:
         old_copy = *old_mem;
-        *old_mem = *new_mem;
-        new_mem->mm_node = NULL;
 
-        if (!man->use_tt) {
-                ttm_tt_destroy(ttm);
-                bo->ttm = NULL;
-        }
+        ttm_bo_assign_mem(bo, new_mem);
+
+        if (!man->use_tt)
+                ttm_bo_tt_destroy(bo);
 
 out1:
         ttm_resource_iounmap(bdev, old_mem, new_iomap);
@@ -458,7 +366,6 @@ static int ttm_buffer_object_transfer(struct ttm_buffer_object *bo,
         INIT_LIST_HEAD(&fbo->base.ddestroy);
         INIT_LIST_HEAD(&fbo->base.lru);
         INIT_LIST_HEAD(&fbo->base.swap);
-        INIT_LIST_HEAD(&fbo->base.io_reserve_lru);
         fbo->base.moving = NULL;
         drm_vma_node_reset(&fbo->base.base.vma_node);
 
@@ -516,12 +423,10 @@ static int ttm_bo_ioremap(struct ttm_buffer_object *bo,
         } else {
                 map->bo_kmap_type = ttm_bo_map_iomap;
                 if (mem->placement & TTM_PL_FLAG_WC)
-                        map->virtual = ioremap_wc(bo->mem.bus.base +
-                                                  bo->mem.bus.offset + offset,
+                        map->virtual = ioremap_wc(bo->mem.bus.offset + offset,
                                                   size);
                 else
-                        map->virtual = ioremap(bo->mem.bus.base +
-                                               bo->mem.bus.offset + offset,
+                        map->virtual = ioremap(bo->mem.bus.offset + offset,
                                                size);
         }
         return (!map->virtual) ? -ENOMEM : 0;
@@ -543,7 +448,7 @@ static int ttm_bo_kmap_ttm(struct ttm_buffer_object *bo,
 
         BUG_ON(!ttm);
 
-        ret = ttm_tt_populate(ttm, &ctx);
+        ret = ttm_tt_populate(bo->bdev, ttm, &ctx);
         if (ret)
                 return ret;
 
@@ -573,8 +478,6 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
                 unsigned long start_page, unsigned long num_pages,
                 struct ttm_bo_kmap_obj *map)
 {
-        struct ttm_resource_manager *man =
-                ttm_manager_type(bo->bdev, bo->mem.mem_type);
         unsigned long offset, size;
         int ret;
 
@@ -585,9 +488,7 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo,
         if (start_page > bo->num_pages)
                 return -EINVAL;
 
-        (void) ttm_mem_io_lock(man, false);
         ret = ttm_mem_io_reserve(bo->bdev, &bo->mem);
-        ttm_mem_io_unlock(man);
         if (ret)
                 return ret;
         if (!bo->mem.bus.is_iomem) {
@@ -602,10 +503,6 @@ EXPORT_SYMBOL(ttm_bo_kmap);
 
 void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
 {
-        struct ttm_buffer_object *bo = map->bo;
-        struct ttm_resource_manager *man =
-                ttm_manager_type(bo->bdev, bo->mem.mem_type);
-
         if (!map->virtual)
                 return;
         switch (map->bo_kmap_type) {
@@ -623,167 +520,116 @@ void ttm_bo_kunmap(struct ttm_bo_kmap_obj *map)
         default:
                 BUG();
         }
-        (void) ttm_mem_io_lock(man, false);
         ttm_mem_io_free(map->bo->bdev, &map->bo->mem);
-        ttm_mem_io_unlock(man);
         map->virtual = NULL;
         map->page = NULL;
 }
 EXPORT_SYMBOL(ttm_bo_kunmap);
 
-int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
-                              struct dma_fence *fence,
-                              bool evict,
-                              struct ttm_resource *new_mem)
+static int ttm_bo_wait_free_node(struct ttm_buffer_object *bo,
+                                 bool dst_use_tt)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
-        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
-        struct ttm_resource *old_mem = &bo->mem;
         int ret;
-        struct ttm_buffer_object *ghost_obj;
-
-        dma_resv_add_excl_fence(bo->base.resv, fence);
-        if (evict) {
-                ret = ttm_bo_wait(bo, false, false);
-                if (ret)
-                        return ret;
-
-                if (!man->use_tt) {
-                        ttm_tt_destroy(bo->ttm);
-                        bo->ttm = NULL;
-                }
-                ttm_bo_free_old_node(bo);
-        } else {
-                /**
-                 * This should help pipeline ordinary buffer moves.
-                 *
-                 * Hang old buffer memory on a new buffer object,
-                 * and leave it to be released when the GPU
-                 * operation has completed.
-                 */
-
-                dma_fence_put(bo->moving);
-                bo->moving = dma_fence_get(fence);
-
-                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
-                if (ret)
-                        return ret;
-
-                dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
-
-                /**
-                 * If we're not moving to fixed memory, the TTM object
-                 * needs to stay alive. Otherwhise hang it on the ghost
-                 * bo to be unbound and destroyed.
-                 */
-
-                if (man->use_tt)
-                        ghost_obj->ttm = NULL;
-                else
-                        bo->ttm = NULL;
-
-                dma_resv_unlock(&ghost_obj->base._resv);
-                ttm_bo_put(ghost_obj);
-        }
-
-        *old_mem = *new_mem;
-        new_mem->mm_node = NULL;
+        ret = ttm_bo_wait(bo, false, false);
+        if (ret)
+                return ret;
 
+        if (!dst_use_tt)
+                ttm_bo_tt_destroy(bo);
+        ttm_bo_free_old_node(bo);
         return 0;
 }
-EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
-int ttm_bo_pipeline_move(struct ttm_buffer_object *bo,
-                         struct dma_fence *fence, bool evict,
-                         struct ttm_resource *new_mem)
+static int ttm_bo_move_to_ghost(struct ttm_buffer_object *bo,
                                struct dma_fence *fence,
+                                bool dst_use_tt)
 {
-        struct ttm_bo_device *bdev = bo->bdev;
-        struct ttm_resource *old_mem = &bo->mem;
-
-        struct ttm_resource_manager *from = ttm_manager_type(bdev, old_mem->mem_type);
-        struct ttm_resource_manager *to = ttm_manager_type(bdev, new_mem->mem_type);
-
+        struct ttm_buffer_object *ghost_obj;
         int ret;
 
-        dma_resv_add_excl_fence(bo->base.resv, fence);
-
-        if (!evict) {
-                struct ttm_buffer_object *ghost_obj;
-
-                /**
-                 * This should help pipeline ordinary buffer moves.
-                 *
-                 * Hang old buffer memory on a new buffer object,
-                 * and leave it to be released when the GPU
-                 * operation has completed.
-                 */
+        /**
+         * This should help pipeline ordinary buffer moves.
+         *
+         * Hang old buffer memory on a new buffer object,
+         * and leave it to be released when the GPU
+         * operation has completed.
+         */
 
-                dma_fence_put(bo->moving);
-                bo->moving = dma_fence_get(fence);
+        dma_fence_put(bo->moving);
+        bo->moving = dma_fence_get(fence);
 
-                ret = ttm_buffer_object_transfer(bo, &ghost_obj);
-                if (ret)
-                        return ret;
-
-                dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
+        ret = ttm_buffer_object_transfer(bo, &ghost_obj);
+        if (ret)
+                return ret;
 
-                /**
-                 * If we're not moving to fixed memory, the TTM object
-                 * needs to stay alive. Otherwhise hang it on the ghost
-                 * bo to be unbound and destroyed.
-                 */
+        dma_resv_add_excl_fence(&ghost_obj->base._resv, fence);
 
-                if (to->use_tt)
-                        ghost_obj->ttm = NULL;
-                else
-                        bo->ttm = NULL;
+        /**
+         * If we're not moving to fixed memory, the TTM object
+         * needs to stay alive. Otherwhise hang it on the ghost
+         * bo to be unbound and destroyed.
+         */
 
-                dma_resv_unlock(&ghost_obj->base._resv);
-                ttm_bo_put(ghost_obj);
+        if (dst_use_tt)
+                ghost_obj->ttm = NULL;
+        else
+                bo->ttm = NULL;
 
-        } else if (!from->use_tt) {
+        dma_resv_unlock(&ghost_obj->base._resv);
+        ttm_bo_put(ghost_obj);
+        return 0;
+}
 
-                /**
-                 * BO doesn't have a TTM we need to bind/unbind. Just remember
-                 * this eviction and free up the allocation
-                 */
+static void ttm_bo_move_pipeline_evict(struct ttm_buffer_object *bo,
+                                       struct dma_fence *fence)
+{
+        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
 
-                spin_lock(&from->move_lock);
-                if (!from->move || dma_fence_is_later(fence, from->move)) {
-                        dma_fence_put(from->move);
-                        from->move = dma_fence_get(fence);
-                }
-                spin_unlock(&from->move_lock);
+        /**
+         * BO doesn't have a TTM we need to bind/unbind. Just remember
+         * this eviction and free up the allocation
+         */
+        spin_lock(&from->move_lock);
+        if (!from->move || dma_fence_is_later(fence, from->move)) {
+                dma_fence_put(from->move);
+                from->move = dma_fence_get(fence);
+        }
+        spin_unlock(&from->move_lock);
 
-                ttm_bo_free_old_node(bo);
+        ttm_bo_free_old_node(bo);
 
-                dma_fence_put(bo->moving);
-                bo->moving = dma_fence_get(fence);
+        dma_fence_put(bo->moving);
+        bo->moving = dma_fence_get(fence);
+}
 
-        } else {
-                /**
-                 * Last resort, wait for the move to be completed.
-                 *
-                 * Should never happen in pratice.
-                 */
+int ttm_bo_move_accel_cleanup(struct ttm_buffer_object *bo,
+                              struct dma_fence *fence,
+                              bool evict,
+                              bool pipeline,
+                              struct ttm_resource *new_mem)
+{
+        struct ttm_bo_device *bdev = bo->bdev;
+        struct ttm_resource_manager *from = ttm_manager_type(bdev, bo->mem.mem_type);
+        struct ttm_resource_manager *man = ttm_manager_type(bdev, new_mem->mem_type);
+        int ret = 0;
 
-                ret = ttm_bo_wait(bo, false, false);
-                if (ret)
-                        return ret;
+        dma_resv_add_excl_fence(bo->base.resv, fence);
+        if (!evict)
+                ret = ttm_bo_move_to_ghost(bo, fence, man->use_tt);
+        else if (!from->use_tt && pipeline)
+                ttm_bo_move_pipeline_evict(bo, fence);
+        else
+                ret = ttm_bo_wait_free_node(bo, man->use_tt);

-                if (!to->use_tt) {
-                        ttm_tt_destroy(bo->ttm);
-                        bo->ttm = NULL;
-                }
-                ttm_bo_free_old_node(bo);
-        }
+        if (ret)
+                return ret;
 
-        *old_mem = *new_mem;
-        new_mem->mm_node = NULL;
+        ttm_bo_assign_mem(bo, new_mem);
 
         return 0;
 }
-EXPORT_SYMBOL(ttm_bo_pipeline_move);
+EXPORT_SYMBOL(ttm_bo_move_accel_cleanup);
 
 int ttm_bo_pipeline_gutting(struct ttm_buffer_object *bo)
 {
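
The hunks above fold the old mem->bus.base field into mem->bus.offset (the ioremap_wc()/ioremap() calls now map mem->bus.offset directly) and make ttm_mem_io_reserve()/ttm_mem_io_free() idempotent on bus.offset/bus.addr instead of counting reservations under the io_reserve_lru lock. A driver's io_mem_reserve callback is therefore expected to store the absolute bus address in bus.offset. Below is a minimal sketch of such a callback under those assumptions; the foo_* names and the vram_base field are hypothetical and not part of this patch.

#include <linux/kernel.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>

/* Hypothetical driver-private device, only to give vram_base a home. */
struct foo_device {
        struct ttm_bo_device bdev;
        resource_size_t vram_base;      /* start of the VRAM BAR */
};

static int foo_ttm_io_mem_reserve(struct ttm_bo_device *bdev,
                                  struct ttm_resource *mem)
{
        struct foo_device *fdev = container_of(bdev, struct foo_device, bdev);

        switch (mem->mem_type) {
        case TTM_PL_SYSTEM:
        case TTM_PL_TT:
                /* system and GTT memory are not iomapped by TTM */
                return 0;
        case TTM_PL_VRAM:
                /*
                 * With bus.base gone, bus.offset carries the absolute
                 * address that ttm_resource_ioremap()/ttm_bo_ioremap()
                 * pass straight to ioremap()/ioremap_wc().
                 */
                mem->bus.offset = (mem->start << PAGE_SHIFT) + fdev->vram_base;
                mem->bus.is_iomem = true;
                return 0;
        default:
                return -EINVAL;
        }
}

On the cleanup side, the same rework merges ttm_bo_pipeline_move() into ttm_bo_move_accel_cleanup(): callers that previously used the pipelined variant now pass pipeline=true to the single remaining entry point.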