Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1111
1 file changed, 554 insertions(+), 557 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 3bef0432cac2..dc262d2c2925 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -32,7 +32,6 @@ #include <linux/dma-mapping.h> #include <linux/iommu.h> -#include <linux/hmm.h> #include <linux/pagemap.h> #include <linux/sched/task.h> #include <linux/sched/mm.h> @@ -42,12 +41,16 @@ #include <linux/swiotlb.h> #include <linux/dma-buf.h> #include <linux/sizes.h> +#include <linux/module.h> +#include <drm/drm_drv.h> #include <drm/ttm/ttm_bo_api.h> #include <drm/ttm/ttm_bo_driver.h> #include <drm/ttm/ttm_placement.h> +#include <drm/ttm/ttm_range_manager.h> #include <drm/amdgpu_drm.h> +#include <drm/drm_drv.h> #include "amdgpu.h" #include "amdgpu_object.h" @@ -59,6 +62,8 @@ #include "amdgpu_res_cursor.h" #include "bif/bif_4_1_d.h" +MODULE_IMPORT_NS(DMA_BUF); + #define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128 static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, @@ -112,7 +117,13 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } abo = ttm_to_amdgpu_bo(bo); - switch (bo->mem.mem_type) { + if (abo->flags & AMDGPU_GEM_CREATE_DISCARDABLE) { + placement->num_placement = 0; + placement->num_busy_placement = 0; + return; + } + + switch (bo->resource->mem_type) { case AMDGPU_PL_GDS: case AMDGPU_PL_GWS: case AMDGPU_PL_OA: @@ -134,17 +145,20 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, * BOs to be evicted from VRAM */ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM | - AMDGPU_GEM_DOMAIN_GTT); + AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; abo->placements[0].lpfn = 0; abo->placement.busy_placement = &abo->placements[1]; abo->placement.num_busy_placement = 1; } else { /* Move to GTT memory */ - amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT); + amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT | + AMDGPU_GEM_DOMAIN_CPU); } break; case TTM_PL_TT: + case AMDGPU_PL_PREEMPT: default: amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU); break; @@ -153,40 +167,14 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo, } /** - * amdgpu_verify_access - Verify access for a mmap call - * - * @bo: The buffer object to map - * @filp: The file pointer from the process performing the mmap - * - * This is called by ttm_bo_mmap() to verify whether a process - * has the right to mmap a BO to their process space. - */ -static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) -{ - struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); - - /* - * Don't verify access for KFD BOs. They don't have a GEM - * object associated with them. 
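The reworked amdgpu_ttm_map_buffer() above turns @size into an in/out parameter: the caller passes the bytes it wants copied and gets back the bytes that actually fit in one GART window once the intra-page offset of the cursor is accounted for. A minimal, runnable sketch of just that clamp; PAGE_SIZE and the window size in pages are assumptions for the demo, not values read from the headers:

#include <stdio.h>
#include <stdint.h>

#define PAGE_SIZE 4096ULL                 /* demo assumption */
#define PAGE_MASK (~(PAGE_SIZE - 1))
#define PFN_UP(x) (((x) + PAGE_SIZE - 1) / PAGE_SIZE)
#define GTT_MAX_TRANSFER_PAGES 512ULL     /* demo stand-in for AMDGPU_GTT_MAX_TRANSFER_SIZE */

int main(void)
{
	uint64_t start = 0x12345;         /* resource offset, not page aligned */
	uint64_t size = 8ULL << 20;       /* caller asks for 8 MiB */

	/* if start begins at an offset inside the page, adjust size accordingly */
	uint64_t offset = start & ~PAGE_MASK;
	uint64_t num_pages = PFN_UP(size + offset);

	if (num_pages > GTT_MAX_TRANSFER_PAGES)
		num_pages = GTT_MAX_TRANSFER_PAGES;
	if (size > num_pages * PAGE_SIZE - offset)
		size = num_pages * PAGE_SIZE - offset;

	printf("offset %llu, pages %llu, bytes mapped %llu\n",
	       (unsigned long long)offset,
	       (unsigned long long)num_pages,
	       (unsigned long long)size);
	return 0;
}

The caller (amdgpu_ttm_copy_mem_to_mem() above) then simply loops, advancing its cursors by whatever the mapper reports back, which is why the old per-iteration GTT_MAX_BYTES bookkeeping could be dropped.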
- */ - if (abo->kfd_bo) - return 0; - - if (amdgpu_ttm_tt_get_usermm(bo->ttm)) - return -EPERM; - return drm_vma_node_verify_access(&abo->tbo.base.vma_node, - filp->private_data); -} - -/** * amdgpu_ttm_map_buffer - Map memory into the GART windows * @bo: buffer object to map * @mem: memory object to map * @mm_cur: range to map - * @num_pages: number of pages to map * @window: which GART window to use * @ring: DMA ring to use for the copy * @tmz: if we should setup a TMZ enabled mapping + * @size: in number of bytes to map, out number of bytes mapped * @addr: resulting address inside the MC address space * * Setup one of the GART windows to access a specific piece of memory or return @@ -195,15 +183,14 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, struct ttm_resource *mem, struct amdgpu_res_cursor *mm_cur, - unsigned num_pages, unsigned window, - struct amdgpu_ring *ring, bool tmz, - uint64_t *addr) + unsigned window, struct amdgpu_ring *ring, + bool tmz, uint64_t *size, uint64_t *addr) { struct amdgpu_device *adev = ring->adev; - struct amdgpu_job *job; - unsigned num_dw, num_bytes; - struct dma_fence *fence; + unsigned offset, num_pages, num_dw, num_bytes; uint64_t src_addr, dst_addr; + struct dma_fence *fence; + struct amdgpu_job *job; void *cpu_addr; uint64_t flags; unsigned int i; @@ -212,6 +199,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, BUG_ON(adev->mman.buffer_funcs->copy_max_bytes < AMDGPU_GTT_MAX_TRANSFER_SIZE * 8); + if (WARN_ON(mem->mem_type == AMDGPU_PL_PREEMPT)) + return -EINVAL; + /* Map only what can't be accessed directly */ if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) { *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) + @@ -219,13 +209,25 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, return 0; } + + /* + * If start begins at an offset inside the page, then adjust the size + * and addr accordingly + */ + offset = mm_cur->start & ~PAGE_MASK; + + num_pages = PFN_UP(*size + offset); + num_pages = min_t(uint32_t, num_pages, AMDGPU_GTT_MAX_TRANSFER_SIZE); + + *size = min(*size, (uint64_t)num_pages * PAGE_SIZE - offset); + *addr = adev->gmc.gart_start; *addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE * AMDGPU_GPU_PAGE_SIZE; - *addr += mm_cur->start & ~PAGE_MASK; + *addr += offset; num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); - num_bytes = num_pages * 8; + num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE; r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, AMDGPU_IB_POOL_DELAYED, &job); @@ -253,10 +255,7 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, dma_addr_t *dma_addr; dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT]; - r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, - cpu_addr); - if (r) - goto error_free; + amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags, cpu_addr); } else { dma_addr_t dma_address; @@ -264,11 +263,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, dma_address += adev->vm_manager.vram_base_offset; for (i = 0; i < num_pages; ++i) { - r = amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, - &dma_address, flags, cpu_addr); - if (r) - goto error_free; - + amdgpu_gart_map(adev, i << PAGE_SHIFT, 1, &dma_address, + flags, cpu_addr); dma_address += PAGE_SIZE; } } @@ -288,7 +284,7 @@ error_free: } /** - * amdgpu_copy_ttm_mem_to_mem - Helper function for copy + * amdgpu_ttm_copy_mem_to_mem - Helper function for copy * @adev: amdgpu 
device * @src: buffer/address where to read from * @dst: buffer/address where to write to @@ -309,9 +305,6 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, struct dma_resv *resv, struct dma_fence **f) { - const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE * - AMDGPU_GPU_PAGE_SIZE); - struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; struct amdgpu_res_cursor src_mm, dst_mm; struct dma_fence *fence = NULL; @@ -327,29 +320,20 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev, mutex_lock(&adev->mman.gtt_window_lock); while (src_mm.remaining) { - uint32_t src_page_offset = src_mm.start & ~PAGE_MASK; - uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK; + uint64_t from, to, cur_size; struct dma_fence *next; - uint32_t cur_size; - uint64_t from, to; - /* Copy size cannot exceed GTT_MAX_BYTES. So if src or dst - * begins at an offset, then adjust the size accordingly - */ - cur_size = max(src_page_offset, dst_page_offset); - cur_size = min(min3(src_mm.size, dst_mm.size, size), - (uint64_t)(GTT_MAX_BYTES - cur_size)); + /* Never copy more than 256MiB at once to avoid a timeout */ + cur_size = min3(src_mm.size, dst_mm.size, 256ULL << 20); /* Map src to window 0 and dst to window 1. */ r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm, - PFN_UP(cur_size + src_page_offset), - 0, ring, tmz, &from); + 0, ring, tmz, &cur_size, &from); if (r) goto error; r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm, - PFN_UP(cur_size + dst_page_offset), - 1, ring, tmz, &to); + 1, ring, tmz, &cur_size, &to); if (r) goto error; @@ -408,8 +392,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, (abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE)) { struct dma_fence *wipe_fence = NULL; - r = amdgpu_fill_buffer(ttm_to_amdgpu_bo(bo), AMDGPU_POISON, - NULL, &wipe_fence); + r = amdgpu_fill_buffer(abo, AMDGPU_POISON, NULL, &wipe_fence); if (r) { goto error; } else if (wipe_fence) { @@ -441,8 +424,9 @@ error: static bool amdgpu_mem_visible(struct amdgpu_device *adev, struct ttm_resource *mem) { - uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT; + u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT; struct amdgpu_res_cursor cursor; + u64 end; if (mem->mem_type == TTM_PL_SYSTEM || mem->mem_type == TTM_PL_TT) @@ -451,12 +435,18 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, return false; amdgpu_res_first(mem, 0, mem_size, &cursor); + end = cursor.start + cursor.size; + while (cursor.remaining) { + amdgpu_res_next(&cursor, cursor.size); - /* ttm_resource_ioremap only supports contiguous memory */ - if (cursor.size != mem_size) - return false; + /* ttm_resource_ioremap only supports contiguous memory */ + if (end != cursor.start) + return false; + + end = cursor.start + cursor.size; + } - return cursor.start + cursor.size <= adev->gmc.visible_vram_size; + return end <= adev->gmc.visible_vram_size; } /* @@ -471,10 +461,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, { struct amdgpu_device *adev; struct amdgpu_bo *abo; - struct ttm_resource *old_mem = &bo->mem; + struct ttm_resource *old_mem = bo->resource; int r; - if (new_mem->mem_type == TTM_PL_TT) { + if (new_mem->mem_type == TTM_PL_TT || + new_mem->mem_type == AMDGPU_PL_PREEMPT) { r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem); if (r) return r; @@ -487,23 +478,26 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, adev = amdgpu_ttm_adev(bo->bdev); - if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) { + if (!old_mem || (old_mem->mem_type 
== TTM_PL_SYSTEM && + bo->ttm == NULL)) { ttm_bo_move_null(bo, new_mem); goto out; } if (old_mem->mem_type == TTM_PL_SYSTEM && - new_mem->mem_type == TTM_PL_TT) { + (new_mem->mem_type == TTM_PL_TT || + new_mem->mem_type == AMDGPU_PL_PREEMPT)) { ttm_bo_move_null(bo, new_mem); goto out; } - if (old_mem->mem_type == TTM_PL_TT && + if ((old_mem->mem_type == TTM_PL_TT || + old_mem->mem_type == AMDGPU_PL_PREEMPT) && new_mem->mem_type == TTM_PL_SYSTEM) { r = ttm_bo_wait_ctx(bo, ctx); if (r) return r; amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm); - ttm_resource_free(bo, &bo->mem); + ttm_resource_free(bo, &bo->resource); ttm_bo_assign_mem(bo, new_mem); goto out; } @@ -519,6 +513,15 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, goto out; } + if (bo->type == ttm_bo_type_device && + new_mem->mem_type == TTM_PL_VRAM && + old_mem->mem_type != TTM_PL_VRAM) { + /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU + * accesses the BO after it's moved. + */ + abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; + } + if (adev->mman.buffer_funcs_enabled) { if (((old_mem->mem_type == TTM_PL_SYSTEM && new_mem->mem_type == TTM_PL_VRAM) || @@ -527,7 +530,7 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, hop->fpfn = 0; hop->lpfn = 0; hop->mem_type = TTM_PL_TT; - hop->flags = 0; + hop->flags = TTM_PL_FLAG_TEMPORARY; return -EMULTIHOP; } @@ -549,15 +552,6 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict, return r; } - if (bo->type == ttm_bo_type_device && - new_mem->mem_type == TTM_PL_VRAM && - old_mem->mem_type != TTM_PL_VRAM) { - /* amdgpu_bo_fault_reserve_notify will re-set this if the CPU - * accesses the BO after it's moved. - */ - abo->flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED; - } - out: /* update statistics */ atomic64_add(bo->base.size, &adev->num_bytes_moved); @@ -570,10 +564,10 @@ out: * * Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault() */ -static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) +static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, + struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - struct drm_mm_node *mm_node = mem->mm_node; size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; switch (mem->mem_type) { @@ -581,27 +575,21 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resourc /* system memory */ return 0; case TTM_PL_TT: + case AMDGPU_PL_PREEMPT: break; case TTM_PL_VRAM: mem->bus.offset = mem->start << PAGE_SHIFT; /* check if it's visible */ if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size) return -EINVAL; - /* Only physically contiguous buffers apply. In a contiguous - * buffer, size of the first mm_node would match the number of - * pages in ttm_resource. 
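The amdgpu_mem_visible() change above drops the old single-node requirement: it now walks the resource with amdgpu_res_cursor and only demands that consecutive pieces butt against each other with no hole and that the final end still lies below visible VRAM. A stand-alone illustration of that walk, with a plain array standing in for the cursor steps:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct piece { uint64_t start, size; };  /* stand-in for one cursor step */

static bool mem_visible(const struct piece *p, int count, uint64_t visible_vram)
{
	uint64_t end = p[0].start + p[0].size;

	for (int i = 1; i < count; i++) {
		if (end != p[i].start)   /* hole: ttm_resource_ioremap needs contiguous memory */
			return false;
		end = p[i].start + p[i].size;
	}
	return end <= visible_vram;
}

int main(void)
{
	struct piece contig[] = { { 0x1000, 0x2000 }, { 0x3000, 0x1000 } };
	struct piece gap[]    = { { 0x1000, 0x2000 }, { 0x5000, 0x1000 } };

	printf("%d %d\n", mem_visible(contig, 2, 0x10000),
	       mem_visible(gap, 2, 0x10000)); /* prints: 1 0 */
	return 0;
}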
- */ + if (adev->mman.aper_base_kaddr && - (mm_node->size == mem->num_pages)) + mem->placement & TTM_PL_FLAG_CONTIGUOUS) mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr + mem->bus.offset; mem->bus.offset += adev->gmc.aper_base; mem->bus.is_iomem = true; - if (adev->gmc.xgmi.connected_to_cpu) - mem->bus.caching = ttm_cached; - else - mem->bus.caching = ttm_write_combined; break; default: return -EINVAL; @@ -615,7 +603,8 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo, struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_res_cursor cursor; - amdgpu_res_first(&bo->mem, (u64)page_offset << PAGE_SHIFT, 0, &cursor); + amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0, + &cursor); return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT; } @@ -656,6 +645,8 @@ struct amdgpu_ttm_tt { #endif }; +#define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm) + #ifdef CONFIG_DRM_AMDGPU_USERPTR /* * amdgpu_ttm_tt_get_user_pages - get device accessible pages that back user @@ -667,13 +658,11 @@ struct amdgpu_ttm_tt { int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) { struct ttm_tt *ttm = bo->tbo.ttm; - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); unsigned long start = gtt->userptr; struct vm_area_struct *vma; - struct hmm_range *range; - unsigned long timeout; struct mm_struct *mm; - unsigned long i; + bool readonly; int r = 0; mm = bo->notifier.mm; @@ -689,28 +678,9 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) if (!mmget_not_zero(mm)) /* Happens during process shutdown */ return -ESRCH; - range = kzalloc(sizeof(*range), GFP_KERNEL); - if (unlikely(!range)) { - r = -ENOMEM; - goto out; - } - range->notifier = &bo->notifier; - range->start = bo->notifier.interval_tree.start; - range->end = bo->notifier.interval_tree.last + 1; - range->default_flags = HMM_PFN_REQ_FAULT; - if (!amdgpu_ttm_tt_is_readonly(ttm)) - range->default_flags |= HMM_PFN_REQ_WRITE; - - range->hmm_pfns = kvmalloc_array(ttm->num_pages, - sizeof(*range->hmm_pfns), GFP_KERNEL); - if (unlikely(!range->hmm_pfns)) { - r = -ENOMEM; - goto out_free_ranges; - } - mmap_read_lock(mm); - vma = find_vma(mm, start); - if (unlikely(!vma || start < vma->vm_start)) { + vma = vma_lookup(mm, start); + if (unlikely(!vma)) { r = -EFAULT; goto out_unlock; } @@ -719,46 +689,18 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) r = -EPERM; goto out_unlock; } - mmap_read_unlock(mm); - timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT); - -retry: - range->notifier_seq = mmu_interval_read_begin(&bo->notifier); - mmap_read_lock(mm); - r = hmm_range_fault(range); + readonly = amdgpu_ttm_tt_is_readonly(ttm); + r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start, + ttm->num_pages, >t->range, readonly, + true, NULL); +out_unlock: mmap_read_unlock(mm); - if (unlikely(r)) { - /* - * FIXME: This timeout should encompass the retry from - * mmu_interval_read_retry() as well. - */ - if (r == -EBUSY && !time_after(jiffies, timeout)) - goto retry; - goto out_free_pfns; - } - - /* - * Due to default_flags, all pages are HMM_PFN_VALID or - * hmm_range_fault() fails. FIXME: The pages cannot be touched outside - * the notifier_lock, and mmu_interval_read_retry() must be done first. 
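The new ttm_to_amdgpu_ttm_tt() helper introduced above replaces the bare (void *) casts scattered through this file with the standard container_of() downcast: given a pointer to the embedded struct ttm_tt, recover the enclosing struct amdgpu_ttm_tt. A self-contained illustration, with container_of() reduced to its classic offsetof() form (the kernel macro additionally type-checks via typeof):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct ttm_tt { unsigned int num_pages; };

struct amdgpu_ttm_tt {
	struct ttm_tt ttm;      /* embedded, not a pointer */
	unsigned long userptr;
};

#define ttm_to_amdgpu_ttm_tt(p) container_of(p, struct amdgpu_ttm_tt, ttm)

int main(void)
{
	struct amdgpu_ttm_tt gtt = { .ttm = { .num_pages = 4 }, .userptr = 42 };
	struct ttm_tt *ttm = &gtt.ttm;  /* what TTM callbacks hand back */

	printf("%lu\n", ttm_to_amdgpu_ttm_tt(ttm)->userptr); /* prints 42 */
	return 0;
}

Unlike the cast, the macro keeps working if the member ever stops being first in the structure, and the kernel variant also fails to compile when handed a pointer of the wrong member type.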
- */ - for (i = 0; i < ttm->num_pages; i++) - pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]); + if (r) + pr_debug("failed %d to get user pages 0x%lx\n", r, start); - gtt->range = range; mmput(mm); - return 0; - -out_unlock: - mmap_read_unlock(mm); -out_free_pfns: - kvfree(range->hmm_pfns); -out_free_ranges: - kfree(range); -out: - mmput(mm); return r; } @@ -770,7 +712,7 @@ out: */ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) { - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); bool r = false; if (!gtt || !gtt->userptr) @@ -787,10 +729,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) * FIXME: Must always hold notifier_lock for this, and must * not ignore the return code. */ - r = mmu_interval_read_retry(gtt->range->notifier, - gtt->range->notifier_seq); - kvfree(gtt->range->hmm_pfns); - kfree(gtt->range); + r = amdgpu_hmm_range_get_pages_done(gtt->range); gtt->range = NULL; } @@ -822,7 +761,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; @@ -859,7 +798,7 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY); enum dma_data_direction direction = write ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE; @@ -887,14 +826,13 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, #endif } -static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, - struct ttm_buffer_object *tbo, - uint64_t flags) +static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, + struct ttm_buffer_object *tbo, + uint64_t flags) { struct amdgpu_bo *abo = ttm_to_amdgpu_bo(tbo); struct ttm_tt *ttm = tbo->ttm; - struct amdgpu_ttm_tt *gtt = (void *)ttm; - int r; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); if (amdgpu_bo_encrypted(abo)) flags |= AMDGPU_PTE_TMZ; @@ -902,10 +840,8 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { uint64_t page_idx = 1; - r = amdgpu_gart_bind(adev, gtt->offset, page_idx, - ttm->pages, gtt->ttm.dma_address, flags); - if (r) - goto gart_bind_fail; + amdgpu_gart_bind(adev, gtt->offset, page_idx, + gtt->ttm.dma_address, flags); /* The memory type of the first page defaults to UC. 
Now * modify the memory type to NC from the second page of @@ -914,22 +850,13 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev, flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); - r = amdgpu_gart_bind(adev, - gtt->offset + (page_idx << PAGE_SHIFT), - ttm->num_pages - page_idx, - &ttm->pages[page_idx], - &(gtt->ttm.dma_address[page_idx]), flags); + amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT), + ttm->num_pages - page_idx, + &(gtt->ttm.dma_address[page_idx]), flags); } else { - r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); + amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, + gtt->ttm.dma_address, flags); } - -gart_bind_fail: - if (r) - DRM_ERROR("failed to bind %u pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); - - return r; } /* @@ -943,9 +870,9 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, struct ttm_resource *bo_mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - struct amdgpu_ttm_tt *gtt = (void*)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); uint64_t flags; - int r = 0; + int r; if (!bo_mem) return -EINVAL; @@ -959,18 +886,30 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, DRM_ERROR("failed to pin userptr\n"); return r; } + } else if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) { + if (!ttm->sg) { + struct dma_buf_attachment *attach; + struct sg_table *sgt; + + attach = gtt->gobj->import_attach; + sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); + if (IS_ERR(sgt)) + return PTR_ERR(sgt); + + ttm->sg = sgt; + } + + drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, + ttm->num_pages); } + if (!ttm->num_pages) { WARN(1, "nothing to bind %u pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); } - if (bo_mem->mem_type == AMDGPU_PL_GDS || - bo_mem->mem_type == AMDGPU_PL_GWS || - bo_mem->mem_type == AMDGPU_PL_OA) - return -EINVAL; - - if (!amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { + if (bo_mem->mem_type != TTM_PL_TT || + !amdgpu_gtt_mgr_has_gart_addr(bo_mem)) { gtt->offset = AMDGPU_BO_INVALID_OFFSET; return 0; } @@ -980,14 +919,10 @@ static int amdgpu_ttm_backend_bind(struct ttm_device *bdev, /* bind pages into GART page tables */ gtt->offset = (u64)bo_mem->start << PAGE_SHIFT; - r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, - ttm->pages, gtt->ttm.dma_address, flags); - - if (r) - DRM_ERROR("failed to bind %u pages at 0x%08llX\n", - ttm->num_pages, gtt->offset); + amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, + gtt->ttm.dma_address, flags); gtt->bound = true; - return r; + return 0; } /* @@ -1002,51 +937,45 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct ttm_operation_ctx ctx = { false, false }; - struct amdgpu_ttm_tt *gtt = (void *)bo->ttm; - struct ttm_resource tmp; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(bo->ttm); struct ttm_placement placement; struct ttm_place placements; + struct ttm_resource *tmp; uint64_t addr, flags; int r; - if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET) + if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET) return 0; addr = amdgpu_gmc_agp_addr(bo); if (addr != AMDGPU_BO_INVALID_OFFSET) { - bo->mem.start = addr >> PAGE_SHIFT; - } else { + bo->resource->start = addr >> PAGE_SHIFT; + return 0; + } - /* allocate GART space */ - tmp = bo->mem; - tmp.mm_node = NULL; - placement.num_placement = 1; - placement.placement = &placements; - placement.num_busy_placement = 1; 
- placement.busy_placement = &placements; - placements.fpfn = 0; - placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; - placements.mem_type = TTM_PL_TT; - placements.flags = bo->mem.placement; - - r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); - if (unlikely(r)) - return r; + /* allocate GART space */ + placement.num_placement = 1; + placement.placement = &placements; + placement.num_busy_placement = 1; + placement.busy_placement = &placements; + placements.fpfn = 0; + placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT; + placements.mem_type = TTM_PL_TT; + placements.flags = bo->resource->placement; - /* compute PTE flags for this buffer object */ - flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp); + r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx); + if (unlikely(r)) + return r; - /* Bind pages */ - gtt->offset = (u64)tmp.start << PAGE_SHIFT; - r = amdgpu_ttm_gart_bind(adev, bo, flags); - if (unlikely(r)) { - ttm_resource_free(bo, &tmp); - return r; - } + /* compute PTE flags for this buffer object */ + flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp); - ttm_resource_free(bo, &bo->mem); - bo->mem = tmp; - } + /* Bind pages */ + gtt->offset = (u64)tmp->start << PAGE_SHIFT; + amdgpu_ttm_gart_bind(adev, bo, flags); + amdgpu_gart_invalidate_tlb(adev); + ttm_resource_free(bo, &bo->resource); + ttm_bo_assign_mem(bo, tmp); return 0; } @@ -1057,19 +986,16 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo) * Called by amdgpu_gtt_mgr_recover() from amdgpu_device_reset() to * rebind GTT pages during a GPU reset. */ -int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) +void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo) { struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev); uint64_t flags; - int r; if (!tbo->ttm) - return 0; - - flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem); - r = amdgpu_ttm_gart_bind(adev, tbo, flags); + return; - return r; + flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource); + amdgpu_ttm_gart_bind(adev, tbo, flags); } /* @@ -1082,12 +1008,18 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, struct ttm_tt *ttm) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - struct amdgpu_ttm_tt *gtt = (void *)ttm; - int r; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); /* if the pages have userptr pinning then clear that first */ - if (gtt->userptr) + if (gtt->userptr) { amdgpu_ttm_tt_unpin_userptr(bdev, ttm); + } else if (ttm->sg && gtt->gobj->import_attach) { + struct dma_buf_attachment *attach; + + attach = gtt->gobj->import_attach; + dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); + ttm->sg = NULL; + } if (!gtt->bound) return; @@ -1096,20 +1028,15 @@ static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev, return; /* unbind shouldn't be done for GDS/GWS/OA in ttm_bo_clean_mm */ - r = amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); - if (r) - DRM_ERROR("failed to unbind %u pages at 0x%08llX\n", - gtt->ttm.num_pages, gtt->offset); + amdgpu_gart_unbind(adev, gtt->offset, ttm->num_pages); gtt->bound = false; } static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev, struct ttm_tt *ttm) { - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); - amdgpu_ttm_backend_unbind(bdev, ttm); - ttm_tt_destroy_common(bdev, ttm); if (gtt->usertask) put_task_struct(gtt->usertask); @@ -1162,37 +1089,29 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev, struct ttm_operation_ctx *ctx) { struct amdgpu_device *adev = 
amdgpu_ttm_adev(bdev); - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); + pgoff_t i; + int ret; /* user pages are bound by amdgpu_ttm_tt_pin_userptr() */ - if (gtt && gtt->userptr) { + if (gtt->userptr) { ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL); if (!ttm->sg) return -ENOMEM; - - ttm->page_flags |= TTM_PAGE_FLAG_SG; return 0; } - if (ttm->page_flags & TTM_PAGE_FLAG_SG) { - if (!ttm->sg) { - struct dma_buf_attachment *attach; - struct sg_table *sgt; - - attach = gtt->gobj->import_attach; - sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL); - if (IS_ERR(sgt)) - return PTR_ERR(sgt); + if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) + return 0; - ttm->sg = sgt; - } + ret = ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); + if (ret) + return ret; - drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address, - ttm->num_pages); - return 0; - } + for (i = 0; i < ttm->num_pages; ++i) + ttm->pages[i]->mapping = bdev->dev_mapping; - return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx); + return 0; } /* @@ -1204,33 +1123,50 @@ static int amdgpu_ttm_tt_populate(struct ttm_device *bdev, static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev, struct ttm_tt *ttm) { - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); struct amdgpu_device *adev; + pgoff_t i; - if (gtt && gtt->userptr) { + amdgpu_ttm_backend_unbind(bdev, ttm); + + if (gtt->userptr) { amdgpu_ttm_tt_set_user_pages(ttm, NULL); kfree(ttm->sg); - ttm->page_flags &= ~TTM_PAGE_FLAG_SG; - return; - } - - if (ttm->sg && gtt->gobj->import_attach) { - struct dma_buf_attachment *attach; - - attach = gtt->gobj->import_attach; - dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL); ttm->sg = NULL; return; } - if (ttm->page_flags & TTM_PAGE_FLAG_SG) + if (ttm->page_flags & TTM_TT_FLAG_EXTERNAL) return; + for (i = 0; i < ttm->num_pages; ++i) + ttm->pages[i]->mapping = NULL; + adev = amdgpu_ttm_adev(bdev); return ttm_pool_free(&adev->mman.bdev.pool, ttm); } /** + * amdgpu_ttm_tt_get_userptr - Return the userptr GTT ttm_tt for the current + * task + * + * @tbo: The ttm_buffer_object that contains the userptr + * @user_addr: The returned value + */ +int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo, + uint64_t *user_addr) +{ + struct amdgpu_ttm_tt *gtt; + + if (!tbo->ttm) + return -EINVAL; + + gtt = (void *)tbo->ttm; + *user_addr = gtt->userptr; + return 0; +} + +/** * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current * task * @@ -1253,7 +1189,10 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, return -ENOMEM; } - gtt = (void *)bo->ttm; + /* Set TTM_TT_FLAG_EXTERNAL before populate but after create. 
*/ + bo->ttm->page_flags |= TTM_TT_FLAG_EXTERNAL; + + gtt = ttm_to_amdgpu_ttm_tt(bo->ttm); gtt->userptr = addr; gtt->userflags = flags; @@ -1270,7 +1209,7 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, */ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) { - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); if (gtt == NULL) return NULL; @@ -1287,9 +1226,9 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm) * */ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, - unsigned long end) + unsigned long end, unsigned long *userptr) { - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); unsigned long size; if (gtt == NULL || !gtt->userptr) @@ -1302,6 +1241,8 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, if (gtt->userptr > end || gtt->userptr + size <= start) return false; + if (userptr) + *userptr = gtt->userptr; return true; } @@ -1310,7 +1251,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start, */ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) { - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); if (gtt == NULL || !gtt->userptr) return false; @@ -1323,7 +1264,7 @@ bool amdgpu_ttm_tt_is_userptr(struct ttm_tt *ttm) */ bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm) { - struct amdgpu_ttm_tt *gtt = (void *)ttm; + struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); if (gtt == NULL) return false; @@ -1346,7 +1287,8 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem) if (mem && mem->mem_type != TTM_PL_SYSTEM) flags |= AMDGPU_PTE_VALID; - if (mem && mem->mem_type == TTM_PL_TT) { + if (mem && (mem->mem_type == TTM_PL_TT || + mem->mem_type == AMDGPU_PL_PREEMPT)) { flags |= AMDGPU_PTE_SYSTEM; if (ttm->caching == ttm_cached) @@ -1395,11 +1337,15 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm, static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, const struct ttm_place *place) { - unsigned long num_pages = bo->mem.num_pages; - struct amdgpu_res_cursor cursor; - struct dma_resv_list *flist; + struct dma_resv_iter resv_cursor; struct dma_fence *f; - int i; + + if (!amdgpu_bo_is_amdgpu_bo(bo)) + return ttm_bo_eviction_valuable(bo, place); + + /* Swapout? */ + if (bo->resource->mem_type == TTM_PL_SYSTEM) + return true; if (bo->type == ttm_bo_type_kernel && !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo))) @@ -1409,42 +1355,119 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo, * If true, then return false as any KFD process needs all its BOs to * be resident to run successfully */ - flist = dma_resv_get_list(bo->base.resv); - if (flist) { - for (i = 0; i < flist->shared_count; ++i) { - f = rcu_dereference_protected(flist->shared[i], - dma_resv_held(bo->base.resv)); - if (amdkfd_fence_check_mm(f, current->mm)) - return false; - } + dma_resv_for_each_fence(&resv_cursor, bo->base.resv, + DMA_RESV_USAGE_BOOKKEEP, f) { + if (amdkfd_fence_check_mm(f, current->mm)) + return false; } - switch (bo->mem.mem_type) { - case TTM_PL_TT: - if (amdgpu_bo_is_amdgpu_bo(bo) && - amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo))) - return false; - return true; + /* Preemptible BOs don't own system resources managed by the + * driver (pages, VRAM, GART space). They point to resources + * owned by someone else (e.g. pageable memory in user mode + * or a DMABuf). 
They are used in a preemptible context so we + * can guarantee no deadlocks and good QoS in case of MMU + * notifiers or DMABuf move notifiers from the resource owner. + */ + if (bo->resource->mem_type == AMDGPU_PL_PREEMPT) + return false; - case TTM_PL_VRAM: - /* Check each drm MM node individually */ - amdgpu_res_first(&bo->mem, 0, (u64)num_pages << PAGE_SHIFT, - &cursor); - while (cursor.remaining) { - if (place->fpfn < PFN_DOWN(cursor.start + cursor.size) - && !(place->lpfn && - place->lpfn <= PFN_DOWN(cursor.start))) - return true; - - amdgpu_res_next(&cursor, cursor.size); - } + if (bo->resource->mem_type == TTM_PL_TT && + amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo))) return false; - default: - break; + return ttm_bo_eviction_valuable(bo, place); +} + +static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos, + void *buf, size_t size, bool write) +{ + while (size) { + uint64_t aligned_pos = ALIGN_DOWN(pos, 4); + uint64_t bytes = 4 - (pos & 0x3); + uint32_t shift = (pos & 0x3) * 8; + uint32_t mask = 0xffffffff << shift; + uint32_t value = 0; + + if (size < bytes) { + mask &= 0xffffffff >> (bytes - size) * 8; + bytes = size; + } + + if (mask != 0xffffffff) { + amdgpu_device_mm_access(adev, aligned_pos, &value, 4, false); + if (write) { + value &= ~mask; + value |= (*(uint32_t *)buf << shift) & mask; + amdgpu_device_mm_access(adev, aligned_pos, &value, 4, true); + } else { + value = (value & mask) >> shift; + memcpy(buf, &value, bytes); + } + } else { + amdgpu_device_mm_access(adev, aligned_pos, buf, 4, write); + } + + pos += bytes; + buf += bytes; + size -= bytes; } +} - return ttm_bo_eviction_valuable(bo, place); +static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, + unsigned long offset, void *buf, int len, int write) +{ + struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); + struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); + struct amdgpu_res_cursor src_mm; + struct amdgpu_job *job; + struct dma_fence *fence; + uint64_t src_addr, dst_addr; + unsigned int num_dw; + int r, idx; + + if (len != PAGE_SIZE) + return -EINVAL; + + if (!adev->mman.sdma_access_ptr) + return -EACCES; + + if (!drm_dev_enter(adev_to_drm(adev), &idx)) + return -ENODEV; + + if (write) + memcpy(adev->mman.sdma_access_ptr, buf, len); + + num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job); + if (r) + goto out; + + amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm); + src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start; + dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo); + if (write) + swap(src_addr, dst_addr); + + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false); + + amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]); + WARN_ON(job->ibs[0].length_dw > num_dw); + + r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence); + if (r) { + amdgpu_job_free(job); + goto out; + } + + if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout)) + r = -ETIMEDOUT; + dma_fence_put(fence); + + if (!(r || write)) + memcpy(buf, adev->mman.sdma_access_ptr, len); +out: + drm_dev_exit(idx); + return r; } /** @@ -1466,50 +1489,32 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo, struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); struct amdgpu_res_cursor cursor; - unsigned long flags; - uint32_t value = 0; int ret = 0; - 
if (bo->mem.mem_type != TTM_PL_VRAM) + if (bo->resource->mem_type != TTM_PL_VRAM) return -EIO; - amdgpu_res_first(&bo->mem, offset, len, &cursor); - while (cursor.remaining) { - uint64_t aligned_pos = cursor.start & ~(uint64_t)3; - uint64_t bytes = 4 - (cursor.start & 3); - uint32_t shift = (cursor.start & 3) * 8; - uint32_t mask = 0xffffffff << shift; + if (amdgpu_device_has_timeouts_enabled(adev) && + !amdgpu_ttm_access_memory_sdma(bo, offset, buf, len, write)) + return len; - if (cursor.size < bytes) { - mask &= 0xffffffff >> (bytes - cursor.size) * 8; - bytes = cursor.size; - } - - if (mask != 0xffffffff) { - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31); - value = RREG32_NO_KIQ(mmMM_DATA); - if (write) { - value &= ~mask; - value |= (*(uint32_t *)buf << shift) & mask; - WREG32_NO_KIQ(mmMM_DATA, value); - } - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); - if (!write) { - value = (value & mask) >> shift; - memcpy(buf, &value, bytes); - } - } else { - bytes = cursor.size & ~0x3ULL; - amdgpu_device_vram_access(adev, cursor.start, - (uint32_t *)buf, bytes, - write); + amdgpu_res_first(bo->resource, offset, len, &cursor); + while (cursor.remaining) { + size_t count, size = cursor.size; + loff_t pos = cursor.start; + + count = amdgpu_device_aper_access(adev, pos, buf, size, write); + size -= count; + if (size) { + /* using MM to access rest vram and handle un-aligned address */ + pos += count; + buf += count; + amdgpu_ttm_vram_mm_access(adev, pos, buf, size, write); } - ret += bytes; - buf = (uint8_t *)buf + bytes; - amdgpu_res_next(&cursor, bytes); + ret += cursor.size; + buf += cursor.size; + amdgpu_res_next(&cursor, cursor.size); } return ret; @@ -1529,13 +1534,11 @@ static struct ttm_device_funcs amdgpu_bo_driver = { .eviction_valuable = amdgpu_ttm_bo_eviction_valuable, .evict_flags = &amdgpu_evict_flags, .move = &amdgpu_bo_move, - .verify_access = &amdgpu_verify_access, .delete_mem_notify = &amdgpu_bo_delete_mem_notify, .release_notify = &amdgpu_bo_release_notify, .io_mem_reserve = &amdgpu_ttm_io_mem_reserve, .io_mem_pfn = amdgpu_ttm_io_mem_pfn, .access_memory = &amdgpu_ttm_access_memory, - .del_from_lru_notify = &amdgpu_vm_del_from_lru_notify }; /* @@ -1632,11 +1635,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) bool mem_train_support = false; if (!amdgpu_sriov_vf(adev)) { - ret = amdgpu_mem_train_support(adev); - if (ret == 1) + if (amdgpu_atomfirmware_mem_training_supported(adev)) mem_train_support = true; - else if (ret == -1) - return -EINVAL; else DRM_DEBUG("memory training does not support!\n"); } @@ -1778,22 +1778,37 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) NULL); if (r) return r; + r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset, + adev->mman.stolen_reserved_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.stolen_reserved_memory, + NULL); + if (r) + return r; DRM_INFO("amdgpu: %uM of VRAM memory ready\n", (unsigned) (adev->gmc.real_vram_size / (1024 * 1024))); - /* Compute GTT size, either bsaed on 3/4th the size of RAM size + /* Compute GTT size, either based on 1/2 the size of RAM size * or whatever the user passed on module init */ if (amdgpu_gtt_size == -1) { struct sysinfo si; si_meminfo(&si); - gtt_size = min(max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), - adev->gmc.mc_vram_size), - ((uint64_t)si.totalram * si.mem_unit * 3/4)); - } - else + /* Certain GL unit tests for large textures can cause problems + 
* with the OOM killer since there is no way to link this memory + * to a process. This was originally mitigated (but not necessarily + * eliminated) by limiting the GTT size. The problem is this limit + * is often too low for many modern games so just make the limit 1/2 + * of system memory which aligns with TTM. The OOM accounting needs + * to be addressed, but we shouldn't prevent common 3D applications + * from being usable just to potentially mitigate that corner case. + */ + gtt_size = max((AMDGPU_DEFAULT_GTT_SIZE_MB << 20), + (u64)si.totalram * si.mem_unit / 2); + } else { gtt_size = (uint64_t)amdgpu_gtt_size << 20; + } /* Initialize GTT memory pool */ r = amdgpu_gtt_mgr_init(adev, gtt_size); @@ -1804,6 +1819,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) DRM_INFO("amdgpu: %uM of GTT memory ready.\n", (unsigned)(gtt_size / (1024 * 1024))); + /* Initialize preemptible memory pool */ + r = amdgpu_preempt_mgr_init(adev); + if (r) { + DRM_ERROR("Failed initializing PREEMPT heap.\n"); + return r; + } + /* Initialize various on-chip memory pools */ r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size); if (r) { @@ -1823,6 +1845,12 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + if (amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_GTT, + &adev->mman.sdma_access_bo, NULL, + &adev->mman.sdma_access_ptr)) + DRM_WARN("Debug VRAM access will use slowpath MM access\n"); + return 0; } @@ -1831,6 +1859,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) */ void amdgpu_ttm_fini(struct amdgpu_device *adev) { + int idx; if (!adev->mman.initialized) return; @@ -1840,14 +1869,25 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL); /* return the IP Discovery TMR memory back to VRAM */ amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL); + if (adev->mman.stolen_reserved_size) + amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory, + NULL, NULL); + amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, + &adev->mman.sdma_access_ptr); amdgpu_ttm_fw_reserve_vram_fini(adev); - if (adev->mman.aper_base_kaddr) - iounmap(adev->mman.aper_base_kaddr); - adev->mman.aper_base_kaddr = NULL; + if (drm_dev_enter(adev_to_drm(adev), &idx)) { + + if (adev->mman.aper_base_kaddr) + iounmap(adev->mman.aper_base_kaddr); + adev->mman.aper_base_kaddr = NULL; + + drm_dev_exit(idx); + } amdgpu_vram_mgr_fini(adev); amdgpu_gtt_mgr_fini(adev); + amdgpu_preempt_mgr_fini(adev); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS); ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA); @@ -1900,51 +1940,42 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable) size = adev->gmc.real_vram_size; else size = adev->gmc.visible_vram_size; - man->size = size >> PAGE_SHIFT; + man->size = size; adev->mman.buffer_funcs_enabled = enable; } -static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf) +static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, + bool direct_submit, + unsigned int num_dw, + struct dma_resv *resv, + bool vm_needs_flush, + struct amdgpu_job **job) { - struct ttm_buffer_object *bo = vmf->vma->vm_private_data; - vm_fault_t ret; - - ret = ttm_bo_vm_reserve(bo, vmf); - if (ret) - return ret; - - ret = amdgpu_bo_fault_reserve_notify(bo); - if (ret) - goto unlock; - - ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot, - TTM_BO_VM_NUM_PREFAULT, 1); - if (ret == VM_FAULT_RETRY && !(vmf->flags 
& FAULT_FLAG_RETRY_NOWAIT)) - return ret; - -unlock: - dma_resv_unlock(bo->base.resv); - return ret; -} - -static const struct vm_operations_struct amdgpu_ttm_vm_ops = { - .fault = amdgpu_ttm_fault, - .open = ttm_bo_vm_open, - .close = ttm_bo_vm_close, - .access = ttm_bo_vm_access -}; - -int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma) -{ - struct drm_file *file_priv = filp->private_data; - struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev); + enum amdgpu_ib_pool_type pool = direct_submit ? + AMDGPU_IB_POOL_DIRECT : + AMDGPU_IB_POOL_DELAYED; int r; - r = ttm_bo_mmap(filp, vma, &adev->mman.bdev); - if (unlikely(r != 0)) + r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job); + if (r) return r; - vma->vm_ops = &amdgpu_ttm_vm_ops; + if (vm_needs_flush) { + (*job)->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ? + adev->gmc.pdb0_bo : + adev->gart.bo); + (*job)->vm_needs_flush = true; + } + if (resv) { + r = amdgpu_sync_resv(adev, &(*job)->sync, resv, + AMDGPU_SYNC_ALWAYS, + AMDGPU_FENCE_OWNER_UNDEFINED); + if (r) { + DRM_ERROR("sync failed (%d).\n", r); + amdgpu_job_free(*job); + return r; + } + } return 0; } @@ -1954,17 +1985,14 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, struct dma_fence **fence, bool direct_submit, bool vm_needs_flush, bool tmz) { - enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT : - AMDGPU_IB_POOL_DELAYED; struct amdgpu_device *adev = ring->adev; + unsigned num_loops, num_dw; struct amdgpu_job *job; - uint32_t max_bytes; - unsigned num_loops, num_dw; unsigned i; int r; - if (direct_submit && !ring->sched.ready) { + if (!direct_submit && !ring->sched.ready) { DRM_ERROR("Trying to move memory with ring turned off.\n"); return -EINVAL; } @@ -1972,26 +2000,11 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, max_bytes = adev->mman.buffer_funcs->copy_max_bytes; num_loops = DIV_ROUND_UP(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); - - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, &job); + r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw, + resv, vm_needs_flush, &job); if (r) return r; - if (vm_needs_flush) { - job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ? 
- adev->gmc.pdb0_bo : adev->gart.bo); - job->vm_needs_flush = true; - } - if (resv) { - r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_SYNC_ALWAYS, - AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - DRM_ERROR("sync failed (%d).\n", r); - goto error_free; - } - } - for (i = 0; i < num_loops; i++) { uint32_t cur_size_in_bytes = min(byte_count, max_bytes); @@ -2021,71 +2034,35 @@ error_free: return r; } -int amdgpu_fill_buffer(struct amdgpu_bo *bo, - uint32_t src_data, - struct dma_resv *resv, - struct dma_fence **fence) +static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, + uint64_t dst_addr, uint32_t byte_count, + struct dma_resv *resv, + struct dma_fence **fence, + bool vm_needs_flush) { - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes; - struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; - - struct amdgpu_res_cursor cursor; + struct amdgpu_device *adev = ring->adev; unsigned int num_loops, num_dw; - uint64_t num_bytes; - struct amdgpu_job *job; + uint32_t max_bytes; + unsigned int i; int r; - if (!adev->mman.buffer_funcs_enabled) { - DRM_ERROR("Trying to clear memory with ring turned off.\n"); - return -EINVAL; - } - - if (bo->tbo.mem.mem_type == TTM_PL_TT) { - r = amdgpu_ttm_alloc_gart(&bo->tbo); - if (r) - return r; - } - - num_bytes = bo->tbo.mem.num_pages << PAGE_SHIFT; - num_loops = 0; - - amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor); - while (cursor.remaining) { - num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes); - amdgpu_res_next(&cursor, cursor.size); - } - num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw; - - /* for IB padding */ - num_dw += 64; - - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, - &job); + max_bytes = adev->mman.buffer_funcs->fill_max_bytes; + num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes); + num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8); + r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush, + &job); if (r) return r; - if (resv) { - r = amdgpu_sync_resv(adev, &job->sync, resv, - AMDGPU_SYNC_ALWAYS, - AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - DRM_ERROR("sync failed (%d).\n", r); - goto error_free; - } - } - - amdgpu_res_first(&bo->tbo.mem, 0, num_bytes, &cursor); - while (cursor.remaining) { - uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes); - uint64_t dst_addr = cursor.start; + for (i = 0; i < num_loops; i++) { + uint32_t cur_size = min(byte_count, max_bytes); - dst_addr += amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type); amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr, cur_size); - amdgpu_res_next(&cursor, cur_size); + dst_addr += cur_size; + byte_count -= cur_size; } amdgpu_ring_pad_ib(ring, &job->ibs[0]); @@ -2102,75 +2079,94 @@ error_free: return r; } -#if defined(CONFIG_DEBUG_FS) - -static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused) +int amdgpu_fill_buffer(struct amdgpu_bo *bo, + uint32_t src_data, + struct dma_resv *resv, + struct dma_fence **f) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - TTM_PL_VRAM); - struct drm_printer p = drm_seq_file_printer(m); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; + struct dma_fence *fence = NULL; + struct amdgpu_res_cursor dst; + int r; - man->func->debug(man, &p); - return 0; -} + if 
(!adev->mman.buffer_funcs_enabled) { + DRM_ERROR("Trying to clear memory with ring turned off.\n"); + return -EINVAL; + } -static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; + amdgpu_res_first(bo->tbo.resource, 0, amdgpu_bo_size(bo), &dst); - return ttm_pool_debugfs(&adev->mman.bdev.pool, m); -} + mutex_lock(&adev->mman.gtt_window_lock); + while (dst.remaining) { + struct dma_fence *next; + uint64_t cur_size, to; -static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - TTM_PL_TT); - struct drm_printer p = drm_seq_file_printer(m); + /* Never fill more than 256MiB at once to avoid timeouts */ + cur_size = min(dst.size, 256ULL << 20); - man->func->debug(man, &p); - return 0; -} + r = amdgpu_ttm_map_buffer(&bo->tbo, bo->tbo.resource, &dst, + 1, ring, false, &cur_size, &to); + if (r) + goto error; -static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused) -{ - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - AMDGPU_PL_GDS); - struct drm_printer p = drm_seq_file_printer(m); + r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv, + &next, true); + if (r) + goto error; - man->func->debug(man, &p); - return 0; + dma_fence_put(fence); + fence = next; + + amdgpu_res_next(&dst, cur_size); + } +error: + mutex_unlock(&adev->mman.gtt_window_lock); + if (f) + *f = dma_fence_get(fence); + dma_fence_put(fence); + return r; } -static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused) +/** + * amdgpu_ttm_evict_resources - evict memory buffers + * @adev: amdgpu device object + * @mem_type: evicted BO's memory type + * + * Evicts all @mem_type buffers on the lru list of the memory type. + * + * Returns: + * 0 for success or a negative error code on failure. 
+ */ +int amdgpu_ttm_evict_resources(struct amdgpu_device *adev, int mem_type) { - struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - AMDGPU_PL_GWS); - struct drm_printer p = drm_seq_file_printer(m); + struct ttm_resource_manager *man; - man->func->debug(man, &p); - return 0; + switch (mem_type) { + case TTM_PL_VRAM: + case TTM_PL_TT: + case AMDGPU_PL_GWS: + case AMDGPU_PL_GDS: + case AMDGPU_PL_OA: + man = ttm_manager_type(&adev->mman.bdev, mem_type); + break; + default: + DRM_ERROR("Trying to evict invalid memory type\n"); + return -EINVAL; + } + + return ttm_resource_manager_evict_all(&adev->mman.bdev, man); } -static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused) +#if defined(CONFIG_DEBUG_FS) + +static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused) { struct amdgpu_device *adev = (struct amdgpu_device *)m->private; - struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, - AMDGPU_PL_OA); - struct drm_printer p = drm_seq_file_printer(m); - man->func->debug(man, &p); - return 0; + return ttm_pool_debugfs(&adev->mman.bdev.pool, m); } -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table); -DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table); DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool); /* @@ -2227,7 +2223,6 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, return -ENXIO; while (size) { - unsigned long flags; uint32_t value; if (*pos >= adev->gmc.mc_vram_size) @@ -2237,11 +2232,7 @@ static ssize_t amdgpu_ttm_vram_write(struct file *f, const char __user *buf, if (r) return r; - spin_lock_irqsave(&adev->mmio_idx_lock, flags); - WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)*pos) | 0x80000000); - WREG32_NO_KIQ(mmMM_INDEX_HI, *pos >> 31); - WREG32_NO_KIQ(mmMM_DATA, value); - spin_unlock_irqrestore(&adev->mmio_idx_lock, flags); + amdgpu_device_mm_access(adev, *pos, &value, 4, true); result += 4; buf += 4; @@ -2385,17 +2376,23 @@ void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev) &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size); debugfs_create_file("amdgpu_iomem", 0444, root, adev, &amdgpu_ttm_iomem_fops); - debugfs_create_file("amdgpu_vram_mm", 0444, root, adev, - &amdgpu_mm_vram_table_fops); - debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev, - &amdgpu_mm_tt_table_fops); - debugfs_create_file("amdgpu_gds_mm", 0444, root, adev, - &amdgpu_mm_gds_table_fops); - debugfs_create_file("amdgpu_gws_mm", 0444, root, adev, - &amdgpu_mm_gws_table_fops); - debugfs_create_file("amdgpu_oa_mm", 0444, root, adev, - &amdgpu_mm_oa_table_fops); debugfs_create_file("ttm_page_pool", 0444, root, adev, &amdgpu_ttm_page_pool_fops); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + TTM_PL_VRAM), + root, "amdgpu_vram_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + TTM_PL_TT), + root, "amdgpu_gtt_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GDS), + root, "amdgpu_gds_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_GWS), + root, "amdgpu_gws_mm"); + ttm_resource_manager_create_debugfs(ttm_manager_type(&adev->mman.bdev, + AMDGPU_PL_OA), + root, "amdgpu_oa_mm"); + #endif } |
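One detail of the access-memory rework worth spelling out: amdgpu_ttm_vram_mm_access() above deals with unaligned positions by rounding down to a 4-byte word and doing a masked read-modify-write through amdgpu_device_mm_access(). The mask/shift arithmetic in isolation, runnable in plain C; fake_vram and mm_access() are demo stand-ins, not driver functions:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t fake_vram[2] = { 0x11223344, 0x55667788 };

static void mm_access(uint64_t pos, void *buf, int write)  /* stand-in */
{
	if (write)
		memcpy(&fake_vram[pos / 4], buf, 4);
	else
		memcpy(buf, &fake_vram[pos / 4], 4);
}

/* one write step of the loop in amdgpu_ttm_vram_mm_access() */
static void rmw_write(uint64_t pos, const void *buf, uint64_t size)
{
	uint64_t aligned_pos = pos & ~3ULL;      /* ALIGN_DOWN(pos, 4) */
	uint64_t bytes = 4 - (pos & 0x3);
	uint32_t shift = (pos & 0x3) * 8;
	uint32_t mask = 0xffffffff << shift;
	uint32_t value;

	if (size < bytes)                        /* tail shorter than the word */
		mask &= 0xffffffff >> (bytes - size) * 8;

	mm_access(aligned_pos, &value, 0);       /* read the whole word */
	value &= ~mask;                          /* clear the target bytes */
	value |= (*(const uint32_t *)buf << shift) & mask; /* merge new bytes */
	mm_access(aligned_pos, &value, 1);       /* write the word back */
}

int main(void)
{
	uint32_t b = 0xaa;

	rmw_write(1, &b, 1);             /* write a single byte at offset 1 */
	printf("%08x\n", fake_vram[0]);  /* prints 1122aa44 */
	return 0;
}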