Diffstat (limited to 'drivers/gpu/drm/ttm')
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo.c     |   8
-rw-r--r--  drivers/gpu/drm/ttm/ttm_bo_vm.c  |  15
-rw-r--r--  drivers/gpu/drm/ttm/ttm_device.c |   3
-rw-r--r--  drivers/gpu/drm/ttm/ttm_pool.c   | 103
4 files changed, 78 insertions, 51 deletions
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 882c2fa346f3..bd5dae4d1624 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -84,6 +84,7 @@ EXPORT_SYMBOL(ttm_bo_move_to_lru_tail);
  * ttm_bo_set_bulk_move - update BOs bulk move object
  *
  * @bo: The buffer object.
+ * @bulk: bulk move structure
  *
  * Update the BOs bulk move object, making sure that resources are added/removed
  * as well. A bulk move allows to move many resource on the LRU at once,
@@ -294,8 +295,6 @@ static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo,
 	if (unlock_resv)
 		dma_resv_unlock(bo->base.resv);
 
-	ttm_bo_put(bo);
-
 	return 0;
 }
 
@@ -464,7 +463,8 @@ bounce:
 	if (ret == -EMULTIHOP) {
 		ret = ttm_bo_bounce_temp_buffer(bo, &evict_mem, ctx, &hop);
 		if (ret) {
-			pr_err("Buffer eviction failed\n");
+			if (ret != -ERESTARTSYS && ret != -EINTR)
+				pr_err("Buffer eviction failed\n");
 			ttm_resource_free(bo, &evict_mem);
 			goto out;
 		}
@@ -747,7 +747,7 @@ static int ttm_bo_mem_force_space(struct ttm_buffer_object *bo,
  *
  * @bo: Pointer to a struct ttm_buffer_object. the data of which
  * we want to allocate space for.
- * @proposed_placement: Proposed new placement for the buffer object.
+ * @placement: Proposed new placement for the buffer object.
  * @mem: A struct ttm_resource.
  * @ctx: if and how to sleep, lock buffers and alloc memory
  *
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index ca7744b852f5..4212b8c91dd4 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -218,14 +218,21 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		prot = ttm_io_prot(bo, bo->resource, prot);
 	if (!bo->resource->bus.is_iomem) {
 		struct ttm_operation_ctx ctx = {
-			.interruptible = false,
+			.interruptible = true,
 			.no_wait_gpu = false,
 			.force_alloc = true
 		};
 
 		ttm = bo->ttm;
-		if (ttm_tt_populate(bdev, bo->ttm, &ctx))
-			return VM_FAULT_OOM;
+		err = ttm_tt_populate(bdev, bo->ttm, &ctx);
+		if (err) {
+			if (err == -EINTR || err == -ERESTARTSYS ||
+			    err == -EAGAIN)
+				return VM_FAULT_NOPAGE;
+
+			pr_debug("TTM fault hit %pe.\n", ERR_PTR(err));
+			return VM_FAULT_SIGBUS;
+		}
 	} else {
 		/* Iomem should not be marked encrypted */
 		prot = pgprot_decrypted(prot);
@@ -254,7 +261,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf,
 		 * encryption bits. This is because the exact location of the
 		 * data may not be known at mmap() time and may also change
 		 * at arbitrary times while the data is mmap'ed.
-		 * See vmf_insert_mixed_prot() for a discussion.
+		 * See vmf_insert_pfn_prot() for a discussion.
 		 */
 		ret = vmf_insert_pfn_prot(vma, address, pfn, prot);
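
The ttm_bo_vm.c hunk above changes how ttm_tt_populate() failures surface to the fault handler: with population now interruptible, transient errors (-EINTR, -ERESTARTSYS, -EAGAIN) return VM_FAULT_NOPAGE so user space can handle the pending signal and simply retry the fault, while any other error becomes VM_FAULT_SIGBUS instead of the previous blanket VM_FAULT_OOM. A minimal standalone sketch of that mapping, with the fault codes modeled as an enum and populate_err_to_fault() as an illustrative stand-in rather than kernel API:

#include <errno.h>
#include <stdio.h>

enum fault_result { FAULT_NOPAGE, FAULT_SIGBUS };	/* models vm_fault_t */

/* Transient conditions mean "retry the fault"; everything else is fatal. */
static enum fault_result populate_err_to_fault(int err)
{
	if (err == -EINTR || err == -ERESTARTSYS || err == -EAGAIN)
		return FAULT_NOPAGE;	/* signal pending: back out and retry */
	return FAULT_SIGBUS;		/* genuine failure: kill the mapping */
}

int main(void)
{
	printf("-ERESTARTSYS -> %s\n", populate_err_to_fault(-ERESTARTSYS) ==
	       FAULT_NOPAGE ? "NOPAGE (retry)" : "SIGBUS");
	printf("-ENOMEM      -> %s\n", populate_err_to_fault(-ENOMEM) ==
	       FAULT_NOPAGE ? "NOPAGE (retry)" : "SIGBUS");
	return 0;
}
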
diff --git a/drivers/gpu/drm/ttm/ttm_device.c b/drivers/gpu/drm/ttm/ttm_device.c
index c7a1862f322a..64a59f46f6c3 100644
--- a/drivers/gpu/drm/ttm/ttm_device.c
+++ b/drivers/gpu/drm/ttm/ttm_device.c
@@ -137,7 +137,6 @@ int ttm_global_swapout(struct ttm_operation_ctx *ctx, gfp_t gfp_flags)
 	mutex_unlock(&ttm_global_mutex);
 	return ret;
 }
-EXPORT_SYMBOL(ttm_global_swapout);
 
 int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 		       gfp_t gfp_flags)
@@ -158,7 +157,7 @@ int ttm_device_swapout(struct ttm_device *bdev, struct ttm_operation_ctx *ctx,
 		struct ttm_buffer_object *bo = res->bo;
 		uint32_t num_pages;
 
-		if (!bo)
+		if (!bo || bo->resource != res)
 			continue;
 
 		num_pages = PFN_UP(bo->base.size);
diff --git a/drivers/gpu/drm/ttm/ttm_pool.c b/drivers/gpu/drm/ttm/ttm_pool.c
index aa116a7bbae3..4db3982057be 100644
--- a/drivers/gpu/drm/ttm/ttm_pool.c
+++ b/drivers/gpu/drm/ttm/ttm_pool.c
@@ -65,11 +65,11 @@ module_param(page_pool_size, ulong, 0644);
 
 static atomic_long_t allocated_pages;
 
-static struct ttm_pool_type global_write_combined[MAX_ORDER];
-static struct ttm_pool_type global_uncached[MAX_ORDER];
+static struct ttm_pool_type global_write_combined[MAX_ORDER + 1];
+static struct ttm_pool_type global_uncached[MAX_ORDER + 1];
 
-static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER];
-static struct ttm_pool_type global_dma32_uncached[MAX_ORDER];
+static struct ttm_pool_type global_dma32_write_combined[MAX_ORDER + 1];
+static struct ttm_pool_type global_dma32_uncached[MAX_ORDER + 1];
 
 static spinlock_t shrinker_lock;
 static struct list_head shrinker_list;
@@ -368,6 +368,43 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
 }
 
 /**
+ * ttm_pool_free_range() - Free a range of TTM pages
+ * @pool: The pool used for allocating.
+ * @tt: The struct ttm_tt holding the page pointers.
+ * @caching: The page caching mode used by the range.
+ * @start_page: index for first page to free.
+ * @end_page: index for last page to free + 1.
+ *
+ * During allocation the ttm_tt page-vector may be populated with ranges of
+ * pages with different attributes if allocation hit an error without being
+ * able to completely fulfill the allocation. This function can be used
+ * to free these individual ranges.
+ */
+static void ttm_pool_free_range(struct ttm_pool *pool, struct ttm_tt *tt,
+				enum ttm_caching caching,
+				pgoff_t start_page, pgoff_t end_page)
+{
+	struct page **pages = tt->pages;
+	unsigned int order;
+	pgoff_t i, nr;
+
+	for (i = start_page; i < end_page; i += nr, pages += nr) {
+		struct ttm_pool_type *pt = NULL;
+
+		order = ttm_pool_page_order(pool, *pages);
+		nr = (1UL << order);
+		if (tt->dma_address)
+			ttm_pool_unmap(pool, tt->dma_address[i], nr);
+
+		pt = ttm_pool_select_type(pool, caching, order);
+		if (pt)
+			ttm_pool_type_give(pt, *pages);
+		else
+			ttm_pool_free_page(pool, caching, order, *pages);
+	}
+}
+
+/**
  * ttm_pool_alloc - Fill a ttm_tt object
  *
  * @pool: ttm_pool to use
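
The new ttm_pool_free_range() above walks the page vector in blocks whose size is given by each block's allocation order, unmapping any DMA addresses and then either recycling the block into the matching pool type or freeing it outright. The stride logic can be sketched standalone (orders[] and total are made-up inputs; the kernel reads the order from the page itself via ttm_pool_page_order()):

#include <stdio.h>

int main(void)
{
	/* Made-up per-block orders; blocks of 4, 1 and 2 pages. */
	unsigned int orders[] = { 2, 0, 1 };
	unsigned long i, nr, block = 0, total = 7;

	/* Advance the index in (1 << order)-page strides, one block at a
	 * time, the way ttm_pool_free_range() steps i and pages together. */
	for (i = 0; i < total; i += nr, block++) {
		nr = 1UL << orders[block];
		printf("free pages [%lu, %lu) as one order-%u block\n",
		       i, i + nr, orders[block]);
	}
	return 0;
}
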
@@ -382,12 +419,14 @@ static int ttm_pool_page_allocated(struct ttm_pool *pool, unsigned int order,
 int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 		   struct ttm_operation_ctx *ctx)
 {
-	unsigned long num_pages = tt->num_pages;
+	pgoff_t num_pages = tt->num_pages;
 	dma_addr_t *dma_addr = tt->dma_address;
 	struct page **caching = tt->pages;
 	struct page **pages = tt->pages;
+	enum ttm_caching page_caching;
 	gfp_t gfp_flags = GFP_USER;
-	unsigned int i, order;
+	pgoff_t caching_divide;
+	unsigned int order;
 	struct page *p;
 	int r;
 
@@ -405,11 +444,12 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	else
 		gfp_flags |= GFP_HIGHUSER;
 
-	for (order = min_t(unsigned int, MAX_ORDER - 1, __fls(num_pages));
+	for (order = min_t(unsigned int, MAX_ORDER, __fls(num_pages));
 	     num_pages;
 	     order = min_t(unsigned int, order, __fls(num_pages))) {
 		struct ttm_pool_type *pt;
 
+		page_caching = tt->caching;
 		pt = ttm_pool_select_type(pool, tt->caching, order);
 		p = pt ? ttm_pool_type_take(pt) : NULL;
 		if (p) {
@@ -418,6 +458,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 			if (r)
 				goto error_free_page;
 
+			caching = pages;
 			do {
 				r = ttm_pool_page_allocated(pool, order, p,
 							    &dma_addr,
@@ -426,14 +467,15 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 				if (r)
 					goto error_free_page;
 
+				caching = pages;
 				if (num_pages < (1 << order))
 					break;
 
 				p = ttm_pool_type_take(pt);
 			} while (p);
-			caching = pages;
 		}
 
+		page_caching = ttm_cached;
 		while (num_pages >= (1 << order) &&
 		       (p = ttm_pool_alloc_page(pool, gfp_flags, order))) {
 
@@ -442,6 +484,7 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 							   tt->caching);
 				if (r)
 					goto error_free_page;
+				caching = pages;
 			}
 			r = ttm_pool_page_allocated(pool, order, p, &dma_addr,
 						    &num_pages, &pages);
@@ -468,15 +511,13 @@ int ttm_pool_alloc(struct ttm_pool *pool, struct ttm_tt *tt,
 	return 0;
 
 error_free_page:
-	ttm_pool_free_page(pool, tt->caching, order, p);
+	ttm_pool_free_page(pool, page_caching, order, p);
 
 error_free_all:
 	num_pages = tt->num_pages - num_pages;
-	for (i = 0; i < num_pages; ) {
-		order = ttm_pool_page_order(pool, tt->pages[i]);
-		ttm_pool_free_page(pool, tt->caching, order, tt->pages[i]);
-		i += 1 << order;
-	}
+	caching_divide = caching - tt->pages;
+	ttm_pool_free_range(pool, tt, tt->caching, 0, caching_divide);
+	ttm_pool_free_range(pool, tt, ttm_cached, caching_divide, num_pages);
 
 	return r;
 }
@@ -492,27 +533,7 @@ EXPORT_SYMBOL(ttm_pool_alloc);
  */
 void ttm_pool_free(struct ttm_pool *pool, struct ttm_tt *tt)
 {
-	unsigned int i;
-
-	for (i = 0; i < tt->num_pages; ) {
-		struct page *p = tt->pages[i];
-		unsigned int order, num_pages;
-		struct ttm_pool_type *pt;
-
-		order = ttm_pool_page_order(pool, p);
-		num_pages = 1ULL << order;
-		if (tt->dma_address)
-			ttm_pool_unmap(pool, tt->dma_address[i], num_pages);
-
-		pt = ttm_pool_select_type(pool, tt->caching, order);
-		if (pt)
-			ttm_pool_type_give(pt, tt->pages[i]);
-		else
-			ttm_pool_free_page(pool, tt->caching, order,
-					   tt->pages[i]);
-
-		i += num_pages;
-	}
+	ttm_pool_free_range(pool, tt, tt->caching, 0, tt->num_pages);
 
 	while (atomic_long_read(&allocated_pages) > page_pool_size)
 		ttm_pool_shrink();
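
The reworked error path in ttm_pool_alloc() relies on the caching cursor: every page before it has already been converted to the requested caching attributes, while pages from the cursor up to the failure point are still ttm_cached, so the two ranges must be freed with different modes. Subtracting the base pointer turns the cursor into the split index, as this small sketch shows (the array size and split point are fabricated for illustration):

#include <stdio.h>

int main(void)
{
	void *pages[8] = { 0 };
	/* Cursor advanced past the blocks whose caching was converted;
	 * pretend 6 of 8 entries made it before a failure. */
	void **caching = pages + 6;

	/* Pointer difference recovers the index, mirroring
	 * caching_divide = caching - tt->pages; */
	unsigned long caching_divide = caching - pages;

	printf("free [0, %lu) with the requested caching mode\n",
	       caching_divide);
	printf("free [%lu, 8) as ttm_cached\n", caching_divide);
	return 0;
}
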
@@ -542,7 +563,7 @@ void ttm_pool_init(struct ttm_pool *pool, struct device *dev,
 
 	if (use_dma_alloc) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j < MAX_ORDER; ++j)
+			for (j = 0; j <= MAX_ORDER; ++j)
 				ttm_pool_type_init(&pool->caching[i].orders[j],
 						   pool, i, j);
 	}
@@ -562,7 +583,7 @@ void ttm_pool_fini(struct ttm_pool *pool)
 
 	if (pool->use_dma_alloc) {
 		for (i = 0; i < TTM_NUM_CACHING_TYPES; ++i)
-			for (j = 0; j < MAX_ORDER; ++j)
+			for (j = 0; j <= MAX_ORDER; ++j)
 				ttm_pool_type_fini(&pool->caching[i].orders[j]);
 	}
 
@@ -616,7 +637,7 @@ static void ttm_pool_debugfs_header(struct seq_file *m)
 	unsigned int i;
 
 	seq_puts(m, "\t ");
-	for (i = 0; i < MAX_ORDER; ++i)
+	for (i = 0; i <= MAX_ORDER; ++i)
 		seq_printf(m, " ---%2u---", i);
 	seq_puts(m, "\n");
 }
@@ -627,7 +648,7 @@ static void ttm_pool_debugfs_orders(struct ttm_pool_type *pt,
 {
 	unsigned int i;
 
-	for (i = 0; i < MAX_ORDER; ++i)
+	for (i = 0; i <= MAX_ORDER; ++i)
 		seq_printf(m, " %8u", ttm_pool_type_count(&pt[i]));
 	seq_puts(m, "\n");
 }
@@ -736,7 +757,7 @@ int ttm_pool_mgr_init(unsigned long num_pages)
 	spin_lock_init(&shrinker_lock);
 	INIT_LIST_HEAD(&shrinker_list);
 
-	for (i = 0; i < MAX_ORDER; ++i) {
+	for (i = 0; i <= MAX_ORDER; ++i) {
 		ttm_pool_type_init(&global_write_combined[i], NULL,
 				   ttm_write_combined, i);
 		ttm_pool_type_init(&global_uncached[i], NULL, ttm_uncached, i);
@@ -769,7 +790,7 @@ void ttm_pool_mgr_fini(void)
 {
 	unsigned int i;
 
-	for (i = 0; i < MAX_ORDER; ++i) {
+	for (i = 0; i <= MAX_ORDER; ++i) {
 		ttm_pool_type_fini(&global_write_combined[i]);
 		ttm_pool_type_fini(&global_uncached[i]);
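
All of the ttm_pool.c loop and array changes follow from MAX_ORDER now naming the highest supported allocation order rather than one past it: arrays gain a "+ 1" slot, iteration becomes inclusive ("<="), and the allocation loop may start at MAX_ORDER itself. The clamp in ttm_pool_alloc() can be modeled standalone (MAX_ORDER and fls_index() are local stand-ins for the kernel's macro and __fls()):

#include <stdio.h>

#define MAX_ORDER 10	/* stand-in: highest order, now inclusive */

/* Index of the highest set bit, like the kernel's __fls(). */
static unsigned int fls_index(unsigned long v)
{
	return 8 * sizeof(v) - 1 - __builtin_clzl(v);
}

int main(void)
{
	unsigned long num_pages = 3000;
	/* order = min(MAX_ORDER, __fls(num_pages)), an inclusive clamp */
	unsigned int order = fls_index(num_pages);

	if (order > MAX_ORDER)
		order = MAX_ORDER;

	printf("first attempt: order %u (%lu pages per block)\n",
	       order, 1UL << order);
	return 0;
}
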