From a2c554262d39f81be7422fd8bee2f2fe3779f7f5 Mon Sep 17 00:00:00 2001 From: "Fabio M. De Francesco" Date: Sun, 16 Oct 2022 19:41:58 +0200 Subject: drm/amd/amdgpu: Replace kmap() with kmap_local_page() MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit kmap() is being deprecated in favor of kmap_local_page(). There are two main problems with kmap(): (1) it comes with an overhead, as the mapping space is restricted and protected by a global lock for synchronization, and (2) it requires global TLB invalidation when the kmap pool wraps, and it might block until a slot becomes available when the mapping space is fully utilized. With kmap_local_page() the mappings are per thread, CPU local, can take page faults, and can be called from any context (including interrupts). It is faster than kmap() in kernels with HIGHMEM enabled. Furthermore, tasks can be preempted and, when they are scheduled to run again, the kernel virtual addresses are restored and remain valid. Since its use in amdgpu/amdgpu_ttm.c is safe, kmap_local_page() should be preferred. Therefore, replace kmap() with kmap_local_page() in amdgpu/amdgpu_ttm.c. Suggested-by: Ira Weiny Acked-by: Christian König Signed-off-by: Fabio M. De Francesco Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dc262d2c2925..8782fc6bcd2c 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -2292,9 +2292,9 @@ static ssize_t amdgpu_iomem_read(struct file *f, char __user *buf, if (p->mapping != adev->mman.bdev.dev_mapping) return -EPERM; - ptr = kmap(p); + ptr = kmap_local_page(p); r = copy_to_user(buf, ptr + off, bytes); - kunmap(p); + kunmap_local(ptr); if (r) return -EFAULT; @@ -2343,9 +2343,9 @@ static ssize_t amdgpu_iomem_write(struct file *f, const char __user *buf, if (p->mapping != adev->mman.bdev.dev_mapping) return -EPERM; - ptr = kmap(p); + ptr = kmap_local_page(p); r = copy_from_user(ptr + off, buf, bytes); - kunmap(p); + kunmap_local(ptr); if (r) return -EFAULT; -- cgit From df768a9770271b0d9faab25f42dfc7bdec87b21c Mon Sep 17 00:00:00 2001 From: Arunpravin Paneer Selvam Date: Tue, 18 Oct 2022 07:08:38 -0700 Subject: drm/amdgpu: Fix for BO move issue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A user reported a bug on a CAPE VERDE system where the uvd_v3_1 IP component failed to initialize because of an issue in the BO move code that moves buffers from one memory type to another. In amdgpu_mem_visible(), called by amdgpu_bo_move(), break out of the loop when there are no blocks left to compare or when there is only a single block.
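To make the fix concrete, here is a minimal sketch of the resulting check (simplified from amdgpu_mem_visible(); mem_size, cursor and the placement checks before the loop are assumed from the surrounding function and not shown in the diff below):

  /* Sketch only -- simplified view of amdgpu_mem_visible() after the fix */
  amdgpu_res_first(mem, 0, mem_size, &cursor);
  end = cursor.start + cursor.size;
  while (cursor.remaining) {
          amdgpu_res_next(&cursor, cursor.size);

          /* No blocks left to compare (none, or the single block was just
           * consumed): stop before the contiguity check below misfires. */
          if (!cursor.remaining)
                  break;

          /* ttm_resource_ioremap only supports contiguous memory */
          if (end != cursor.start)
                  return false;

          end = cursor.start + cursor.size;
  }
  return end <= adev->gmc.visible_vram_size;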
Fixes: 312b4dc11d4f ("drm/amdgpu: Fix VRAM BO swap issue") Signed-off-by: Arunpravin Paneer Selvam Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8782fc6bcd2c..0de9bc12793d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -439,6 +439,9 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, while (cursor.remaining) { amdgpu_res_next(&cursor, cursor.size); + if (!cursor.remaining) + break; + /* ttm_resource_ioremap only supports contiguous memory */ if (end != cursor.start) return false; -- cgit From 8273b4048664fff356fd10059033f0e2f5a422a1 Mon Sep 17 00:00:00 2001 From: Arunpravin Paneer Selvam Date: Tue, 18 Oct 2022 07:08:38 -0700 Subject: drm/amdgpu: Fix for BO move issue MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit A user reported a bug on a CAPE VERDE system where the uvd_v3_1 IP component failed to initialize because of an issue in the BO move code that moves buffers from one memory type to another. In amdgpu_mem_visible(), called by amdgpu_bo_move(), break out of the loop when there are no blocks left to compare or when there is only a single block. Fixes: 312b4dc11d4f ("drm/amdgpu: Fix VRAM BO swap issue") Signed-off-by: Arunpravin Paneer Selvam Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 3 +++ 1 file changed, 3 insertions(+) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dc262d2c2925..57277b1cf183 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -439,6 +439,9 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev, while (cursor.remaining) { amdgpu_res_next(&cursor, cursor.size); + if (!cursor.remaining) + break; + /* ttm_resource_ioremap only supports contiguous memory */ if (end != cursor.start) return false; -- cgit From e3c92eb4a84fb0f00442e6b5cabf4f11b0eaaf41 Mon Sep 17 00:00:00 2001 From: Somalapuram Amaranath Date: Thu, 27 Oct 2022 14:42:37 +0530 Subject: drm/ttm: rework on ttm_resource to use size_t type MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change the ttm_resource structure from num_pages to a size_t size in bytes.
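The conversion follows one mechanical idiom throughout the series, sketched below (illustrative only; PFN_UP() rounds a byte count up to a number of pages):

  /* Before the rework: the resource tracked a page count, and byte
   * sizes were derived by shifting. */
  u64 mem_size = (u64)res->num_pages << PAGE_SHIFT;

  /* After the rework: the resource tracks bytes directly, and page
   * counts are derived on demand. */
  u64 mem_size = (u64)res->size;
  pgoff_t num_pages = PFN_UP(res->size);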
v1 -> v2: change PFN_UP(dst_mem->size) to ttm->num_pages v1 -> v2: change bo->resource->size to bo->base.size at some places v1 -> v2: remove the local variable v1 -> v2: cleanup cmp_size_smaller_first() v2 -> v3: adding missing PFN_UP in ttm_bo_vm_fault_reserved Signed-off-by: Somalapuram Amaranath Link: https://patchwork.freedesktop.org/patch/msgid/20221027091237.983582-1-Amaranath.Somalapuram@amd.com Reviewed-by: Christian König Signed-off-by: Christian König --- drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 3 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c | 8 ++++---- drivers/gpu/drm/i915/gem/i915_gem_ttm.c | 2 +- drivers/gpu/drm/i915/i915_scatterlist.c | 4 ++-- drivers/gpu/drm/i915/i915_ttm_buddy_manager.c | 12 ++++++------ drivers/gpu/drm/i915/intel_region_ttm.c | 2 +- drivers/gpu/drm/nouveau/nouveau_bo.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_bo0039.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_bo5039.c | 2 +- drivers/gpu/drm/nouveau/nouveau_bo74c1.c | 2 +- drivers/gpu/drm/nouveau/nouveau_bo85b5.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_bo9039.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_bo90b5.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_boa0b5.c | 2 +- drivers/gpu/drm/nouveau/nouveau_gem.c | 5 ++--- drivers/gpu/drm/nouveau/nouveau_mem.c | 4 ++-- drivers/gpu/drm/nouveau/nouveau_ttm.c | 2 +- drivers/gpu/drm/radeon/radeon_cs.c | 7 +++++-- drivers/gpu/drm/radeon/radeon_object.c | 4 ++-- drivers/gpu/drm/radeon/radeon_trace.h | 2 +- drivers/gpu/drm/radeon/radeon_ttm.c | 4 ++-- drivers/gpu/drm/ttm/ttm_bo.c | 3 --- drivers/gpu/drm/ttm/ttm_bo_util.c | 6 +++--- drivers/gpu/drm/ttm/ttm_bo_vm.c | 4 ++-- drivers/gpu/drm/ttm/ttm_range_manager.c | 2 +- drivers/gpu/drm/ttm/ttm_resource.c | 14 ++++++-------- drivers/gpu/drm/vmwgfx/vmwgfx_blit.c | 4 ++-- drivers/gpu/drm/vmwgfx/vmwgfx_bo.c | 6 +++--- drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c | 2 +- drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 6 +++--- drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c | 6 +++--- include/drm/ttm/ttm_resource.h | 4 ++-- 37 files changed, 78 insertions(+), 80 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c index 1f3302aebeff..44367f03316f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gtt_mgr.c @@ -144,7 +144,7 @@ static int amdgpu_gtt_mgr_new(struct ttm_resource_manager *man, node->base.start = node->mm_nodes[0].start; } else { node->mm_nodes[0].start = 0; - node->mm_nodes[0].size = node->base.num_pages; + node->mm_nodes[0].size = PFN_UP(node->base.size); node->base.start = AMDGPU_BO_INVALID_OFFSET; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 2e8f6cd7a729..974e85d8b6cc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -542,6 +542,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev, /* GWS and OA don't need any alignment. */ page_align = bp->byte_align; size <<= PAGE_SHIFT; + } else if (bp->domain & AMDGPU_GEM_DOMAIN_GDS) { /* Both size and alignment must be a multiple of 4. 
*/ page_align = ALIGN(bp->byte_align, 4); @@ -776,7 +777,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr) return 0; } - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap); + r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h index 6546552e596c..5c4f93ee0c57 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_res_cursor.h @@ -62,7 +62,7 @@ static inline void amdgpu_res_first(struct ttm_resource *res, if (!res) goto fallback; - BUG_ON(start + size > res->num_pages << PAGE_SHIFT); + BUG_ON(start + size > res->size); cur->mem_type = res->mem_type; @@ -110,7 +110,7 @@ fallback: cur->size = size; cur->remaining = size; cur->node = NULL; - WARN_ON(res && start + size > res->num_pages << PAGE_SHIFT); + WARN_ON(res && start + size > res->size); return; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h index 5e6ddc7e101c..677ad2016976 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h @@ -127,7 +127,7 @@ TRACE_EVENT(amdgpu_bo_create, TP_fast_assign( __entry->bo = bo; - __entry->pages = bo->tbo.resource->num_pages; + __entry->pages = PFN_UP(bo->tbo.resource->size); __entry->type = bo->tbo.resource->mem_type; __entry->prefer = bo->preferred_domains; __entry->allow = bo->allowed_domains; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index dc262d2c2925..36066965346f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -381,7 +381,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, dst.offset = 0; r = amdgpu_ttm_copy_mem_to_mem(adev, &src, &dst, - new_mem->num_pages << PAGE_SHIFT, + new_mem->size, amdgpu_bo_encrypted(abo), bo->base.resv, &fence); if (r) @@ -424,7 +424,7 @@ error: static bool amdgpu_mem_visible(struct amdgpu_device *adev, struct ttm_resource *mem) { - u64 mem_size = (u64)mem->num_pages << PAGE_SHIFT; + u64 mem_size = (u64)mem->size; struct amdgpu_res_cursor cursor; u64 end; @@ -568,7 +568,7 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct amdgpu_device *adev = amdgpu_ttm_adev(bdev); - size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; + size_t bus_size = (size_t)mem->size; switch (mem->mem_type) { case TTM_PL_SYSTEM: diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c index 73a517bcf5c1..18c1a173d187 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c @@ -439,7 +439,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, /* Allocate blocks in desired range */ vres->flags |= DRM_BUDDY_RANGE_ALLOCATION; - remaining_size = (u64)vres->base.num_pages << PAGE_SHIFT; + remaining_size = (u64)vres->base.size; mutex_lock(&mgr->lock); while (remaining_size) { @@ -498,7 +498,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, LIST_HEAD(temp); trim_list = &vres->blocks; - original_size = (u64)vres->base.num_pages << PAGE_SHIFT; + original_size = (u64)vres->base.size; /* * If size value is rounded up to min_block_size, trim the last @@ -533,8 +533,8 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man, amdgpu_vram_mgr_block_size(block); start >>= PAGE_SHIFT; - if (start > 
vres->base.num_pages) - start -= vres->base.num_pages; + if (start > PFN_UP(vres->base.size)) + start -= PFN_UP(vres->base.size); else start = 0; vres->base.start = max(vres->base.start, start); diff --git a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c index 4f861782c3e8..7a1e92c11946 100644 --- a/drivers/gpu/drm/i915/gem/i915_gem_ttm.c +++ b/drivers/gpu/drm/i915/gem/i915_gem_ttm.c @@ -649,7 +649,7 @@ bool i915_ttm_resource_mappable(struct ttm_resource *res) if (!i915_ttm_cpu_maps_iomem(res)) return true; - return bman_res->used_visible_size == bman_res->base.num_pages; + return bman_res->used_visible_size == PFN_UP(bman_res->base.size); } static int i915_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) diff --git a/drivers/gpu/drm/i915/i915_scatterlist.c b/drivers/gpu/drm/i915/i915_scatterlist.c index dcc081874ec8..114e5e39aa72 100644 --- a/drivers/gpu/drm/i915/i915_scatterlist.c +++ b/drivers/gpu/drm/i915/i915_scatterlist.c @@ -158,7 +158,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, u32 page_alignment) { struct i915_ttm_buddy_resource *bman_res = to_ttm_buddy_resource(res); - const u64 size = res->num_pages << PAGE_SHIFT; + const u64 size = res->size; const u32 max_segment = round_down(UINT_MAX, page_alignment); struct drm_buddy *mm = bman_res->mm; struct list_head *blocks = &bman_res->blocks; @@ -177,7 +177,7 @@ struct i915_refct_sgt *i915_rsgt_from_buddy_resource(struct ttm_resource *res, i915_refct_sgt_init(rsgt, size); st = &rsgt->table; - if (sg_alloc_table(st, res->num_pages, GFP_KERNEL)) { + if (sg_alloc_table(st, PFN_UP(res->size), GFP_KERNEL)) { i915_refct_sgt_put(rsgt); return ERR_PTR(-ENOMEM); } diff --git a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c index e19452f0e100..7e611476c7a4 100644 --- a/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c +++ b/drivers/gpu/drm/i915/i915_ttm_buddy_manager.c @@ -62,8 +62,8 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, if (place->fpfn || lpfn != man->size) bman_res->flags |= DRM_BUDDY_RANGE_ALLOCATION; - GEM_BUG_ON(!bman_res->base.num_pages); - size = bman_res->base.num_pages << PAGE_SHIFT; + GEM_BUG_ON(!bman_res->base.size); + size = bman_res->base.size; min_page_size = bman->default_page_size; if (bo->page_alignment) @@ -72,7 +72,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, GEM_BUG_ON(min_page_size < mm->chunk_size); GEM_BUG_ON(!IS_ALIGNED(size, min_page_size)); - if (place->fpfn + bman_res->base.num_pages != place->lpfn && + if (place->fpfn + PFN_UP(bman_res->base.size) != place->lpfn && place->flags & TTM_PL_FLAG_CONTIGUOUS) { unsigned long pages; @@ -108,7 +108,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, goto err_free_blocks; if (place->flags & TTM_PL_FLAG_CONTIGUOUS) { - u64 original_size = (u64)bman_res->base.num_pages << PAGE_SHIFT; + u64 original_size = (u64)bman_res->base.size; drm_buddy_block_trim(mm, original_size, @@ -116,7 +116,7 @@ static int i915_ttm_buddy_man_alloc(struct ttm_resource_manager *man, } if (lpfn <= bman->visible_size) { - bman_res->used_visible_size = bman_res->base.num_pages; + bman_res->used_visible_size = PFN_UP(bman_res->base.size); } else { struct drm_buddy_block *block; @@ -228,7 +228,7 @@ static bool i915_ttm_buddy_man_compatible(struct ttm_resource_manager *man, if (!place->fpfn && place->lpfn == i915_ttm_buddy_man_visible_size(man)) - return bman_res->used_visible_size == 
res->num_pages; + return bman_res->used_visible_size == PFN_UP(res->size); /* Check each drm buddy block individually */ list_for_each_entry(block, &bman_res->blocks, link) { diff --git a/drivers/gpu/drm/i915/intel_region_ttm.c b/drivers/gpu/drm/i915/intel_region_ttm.c index 575d67bc6ffe..cf89d0c2a2d9 100644 --- a/drivers/gpu/drm/i915/intel_region_ttm.c +++ b/drivers/gpu/drm/i915/intel_region_ttm.c @@ -244,7 +244,7 @@ void intel_region_ttm_resource_free(struct intel_memory_region *mem, struct ttm_resource_manager *man = mem->region_private; struct ttm_buffer_object mock_bo = {}; - mock_bo.base.size = res->num_pages << PAGE_SHIFT; + mock_bo.base.size = res->size; mock_bo.bdev = &mem->i915->bdev; res->bo = &mock_bo; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c index 126b3c6e12f9..813937ad1dc2 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo.c @@ -532,7 +532,7 @@ nouveau_bo_map(struct nouveau_bo *nvbo) if (ret) return ret; - ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, &nvbo->kmap); + ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap); ttm_bo_unreserve(&nvbo->bo); return ret; @@ -1236,7 +1236,7 @@ vm_fault_t nouveau_ttm_fault_reserve_notify(struct ttm_buffer_object *bo) } else { /* make sure bo is in mappable vram */ if (drm->client.device.info.family >= NV_DEVICE_INFO_V0_TESLA || - bo->resource->start + bo->resource->num_pages < mappable) + bo->resource->start + PFN_UP(bo->resource->size) < mappable) return 0; for (i = 0; i < nvbo->placement.num_placement; ++i) { diff --git a/drivers/gpu/drm/nouveau/nouveau_bo0039.c b/drivers/gpu/drm/nouveau/nouveau_bo0039.c index 7390132129fe..e2ce44adaa5c 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo0039.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo0039.c @@ -52,7 +52,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, u32 src_offset = old_reg->start << PAGE_SHIFT; u32 dst_ctxdma = nouveau_bo_mem_ctxdma(bo, chan, new_reg); u32 dst_offset = new_reg->start << PAGE_SHIFT; - u32 page_count = new_reg->num_pages; + u32 page_count = PFN_UP(new_reg->size); int ret; ret = PUSH_WAIT(push, 3); @@ -62,7 +62,7 @@ nv04_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, PUSH_MTHD(push, NV039, SET_CONTEXT_DMA_BUFFER_IN, src_ctxdma, SET_CONTEXT_DMA_BUFFER_OUT, dst_ctxdma); - page_count = new_reg->num_pages; + page_count = PFN_UP(new_reg->size); while (page_count) { int line_count = (page_count > 2047) ? 
2047 : page_count; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo5039.c b/drivers/gpu/drm/nouveau/nouveau_bo5039.c index 4c75c7b3804c..c6cf3629a9f9 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo5039.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo5039.c @@ -41,7 +41,7 @@ nv50_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, { struct nouveau_mem *mem = nouveau_mem(old_reg); struct nvif_push *push = chan->chan.push; - u64 length = (new_reg->num_pages << PAGE_SHIFT); + u64 length = new_reg->size; u64 src_offset = mem->vma[0].addr; u64 dst_offset = mem->vma[1].addr; int src_tiled = !!mem->kind; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c index ed6c09d67840..9b7ba31fae13 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo74c1.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo74c1.c @@ -44,7 +44,7 @@ nv84_bo_move_exec(struct nouveau_channel *chan, struct ttm_buffer_object *bo, if (ret) return ret; - PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->num_pages << PAGE_SHIFT, + PUSH_NVSQ(push, NV74C1, 0x0304, new_reg->size, 0x0308, upper_32_bits(mem->vma[0].addr), 0x030c, lower_32_bits(mem->vma[0].addr), 0x0310, upper_32_bits(mem->vma[1].addr), diff --git a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c index dec29b2d8bb2..a15a38a87a95 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo85b5.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo85b5.c @@ -44,10 +44,10 @@ nva3_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct nvif_push *push = chan->chan.push; u64 src_offset = mem->vma[0].addr; u64 dst_offset = mem->vma[1].addr; - u32 page_count = new_reg->num_pages; + u32 page_count = PFN_UP(new_reg->size); int ret; - page_count = new_reg->num_pages; + page_count = PFN_UP(new_reg->size); while (page_count) { int line_count = (page_count > 8191) ? 8191 : page_count; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo9039.c b/drivers/gpu/drm/nouveau/nouveau_bo9039.c index 776b04976cdf..d2bb2687d401 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo9039.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo9039.c @@ -42,10 +42,10 @@ nvc0_bo_move_m2mf(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct nouveau_mem *mem = nouveau_mem(old_reg); u64 src_offset = mem->vma[0].addr; u64 dst_offset = mem->vma[1].addr; - u32 page_count = new_reg->num_pages; + u32 page_count = PFN_UP(new_reg->size); int ret; - page_count = new_reg->num_pages; + page_count = PFN_UP(new_reg->size); while (page_count) { int line_count = (page_count > 2047) ? 2047 : page_count; diff --git a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c index 8499f58213e3..4618f4f5ab56 100644 --- a/drivers/gpu/drm/nouveau/nouveau_bo90b5.c +++ b/drivers/gpu/drm/nouveau/nouveau_bo90b5.c @@ -37,10 +37,10 @@ nvc0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, struct nvif_push *push = chan->chan.push; u64 src_offset = mem->vma[0].addr; u64 dst_offset = mem->vma[1].addr; - u32 page_count = new_reg->num_pages; + u32 page_count = PFN_UP(new_reg->size); int ret; - page_count = new_reg->num_pages; + page_count = PFN_UP(new_reg->size); while (page_count) { int line_count = (page_count > 8191) ? 
8191 : page_count; diff --git a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c index 575212472e7a..07a5c6302c98 100644 --- a/drivers/gpu/drm/nouveau/nouveau_boa0b5.c +++ b/drivers/gpu/drm/nouveau/nouveau_boa0b5.c @@ -58,7 +58,7 @@ nve0_bo_move_copy(struct nouveau_channel *chan, struct ttm_buffer_object *bo, PITCH_IN, PAGE_SIZE, PITCH_OUT, PAGE_SIZE, LINE_LENGTH_IN, PAGE_SIZE, - LINE_COUNT, new_reg->num_pages); + LINE_COUNT, PFN_UP(new_reg->size)); PUSH_IMMD(push, NVA0B5, LAUNCH_DMA, NVDEF(NVA0B5, LAUNCH_DMA, DATA_TRANSFER_TYPE, NON_PIPELINED) | diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c index fab542a758ff..ac5793c96957 100644 --- a/drivers/gpu/drm/nouveau/nouveau_gem.c +++ b/drivers/gpu/drm/nouveau/nouveau_gem.c @@ -679,7 +679,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli, } if (!nvbo->kmap.virtual) { - ret = ttm_bo_kmap(&nvbo->bo, 0, nvbo->bo.resource->num_pages, + ret = ttm_bo_kmap(&nvbo->bo, 0, PFN_UP(nvbo->bo.base.size), &nvbo->kmap); if (ret) { NV_PRINTK(err, cli, "failed kmap for reloc\n"); @@ -868,8 +868,7 @@ revalidate: if (unlikely(cmd != req->suffix0)) { if (!nvbo->kmap.virtual) { ret = ttm_bo_kmap(&nvbo->bo, 0, - nvbo->bo.resource-> - num_pages, + PFN_UP(nvbo->bo.base.size), &nvbo->kmap); if (ret) { WIND_RING(chan); diff --git a/drivers/gpu/drm/nouveau/nouveau_mem.c b/drivers/gpu/drm/nouveau/nouveau_mem.c index 76f8edefa637..1fde3a5d7c32 100644 --- a/drivers/gpu/drm/nouveau/nouveau_mem.c +++ b/drivers/gpu/drm/nouveau/nouveau_mem.c @@ -115,7 +115,7 @@ nouveau_mem_host(struct ttm_resource *reg, struct ttm_tt *tt) mutex_lock(&drm->master.lock); ret = nvif_mem_ctor_type(mmu, "ttmHostMem", cli->mem->oclass, type, PAGE_SHIFT, - reg->num_pages << PAGE_SHIFT, + reg->size, &args, sizeof(args), &mem->mem); mutex_unlock(&drm->master.lock); return ret; @@ -128,7 +128,7 @@ nouveau_mem_vram(struct ttm_resource *reg, bool contig, u8 page) struct nouveau_cli *cli = mem->cli; struct nouveau_drm *drm = cli->drm; struct nvif_mmu *mmu = &cli->mmu; - u64 size = ALIGN(reg->num_pages << PAGE_SHIFT, 1 << page); + u64 size = ALIGN(reg->size, 1 << page); int ret; mutex_lock(&drm->master.lock); diff --git a/drivers/gpu/drm/nouveau/nouveau_ttm.c b/drivers/gpu/drm/nouveau/nouveau_ttm.c index 9602c30928f2..1469a88910e4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_ttm.c +++ b/drivers/gpu/drm/nouveau/nouveau_ttm.c @@ -139,7 +139,7 @@ nv04_gart_manager_new(struct ttm_resource_manager *man, mem = nouveau_mem(*res); ttm_resource_init(bo, place, *res); ret = nvif_vmm_get(&mem->cli->vmm.vmm, PTES, false, 12, 0, - (long)(*res)->num_pages << PAGE_SHIFT, &mem->vma[0]); + (long)(*res)->size, &mem->vma[0]); if (ret) { nouveau_mem_del(man, *res); return ret; diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c index 446f7bae54c4..46a27ebf4588 100644 --- a/drivers/gpu/drm/radeon/radeon_cs.c +++ b/drivers/gpu/drm/radeon/radeon_cs.c @@ -400,8 +400,11 @@ static int cmp_size_smaller_first(void *priv, const struct list_head *a, struct radeon_bo_list *lb = list_entry(b, struct radeon_bo_list, tv.head); /* Sort A before B if A is smaller. 
*/ - return (int)la->robj->tbo.resource->num_pages - - (int)lb->robj->tbo.resource->num_pages; + if (la->robj->tbo.base.size > lb->robj->tbo.base.size) + return 1; + if (la->robj->tbo.base.size < lb->robj->tbo.base.size) + return -1; + return 0; } /** diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c index 00c33b24d5d3..10c0fbd9d2b4 100644 --- a/drivers/gpu/drm/radeon/radeon_object.c +++ b/drivers/gpu/drm/radeon/radeon_object.c @@ -232,7 +232,7 @@ int radeon_bo_kmap(struct radeon_bo *bo, void **ptr) } return 0; } - r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap); + r = ttm_bo_kmap(&bo->tbo, 0, PFN_UP(bo->tbo.base.size), &bo->kmap); if (r) { return r; } @@ -737,7 +737,7 @@ vm_fault_t radeon_bo_fault_reserve_notify(struct ttm_buffer_object *bo) if (bo->resource->mem_type != TTM_PL_VRAM) return 0; - size = bo->resource->num_pages << PAGE_SHIFT; + size = bo->resource->size; offset = bo->resource->start << PAGE_SHIFT; if ((offset + size) <= rdev->mc.visible_vram_size) return 0; diff --git a/drivers/gpu/drm/radeon/radeon_trace.h b/drivers/gpu/drm/radeon/radeon_trace.h index c9fed5f2b870..22676617e1a5 100644 --- a/drivers/gpu/drm/radeon/radeon_trace.h +++ b/drivers/gpu/drm/radeon/radeon_trace.h @@ -22,7 +22,7 @@ TRACE_EVENT(radeon_bo_create, TP_fast_assign( __entry->bo = bo; - __entry->pages = bo->tbo.resource->num_pages; + __entry->pages = PFN_UP(bo->tbo.resource->size); ), TP_printk("bo=%p, pages=%u", __entry->bo, __entry->pages) ); diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c index d33fec488713..fff48306c05f 100644 --- a/drivers/gpu/drm/radeon/radeon_ttm.c +++ b/drivers/gpu/drm/radeon/radeon_ttm.c @@ -181,7 +181,7 @@ static int radeon_move_blit(struct ttm_buffer_object *bo, BUILD_BUG_ON((PAGE_SIZE % RADEON_GPU_PAGE_SIZE) != 0); - num_pages = new_mem->num_pages * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); + num_pages = PFN_UP(new_mem->size) * (PAGE_SIZE / RADEON_GPU_PAGE_SIZE); fence = radeon_copy(rdev, old_start, new_start, num_pages, bo->base.resv); if (IS_ERR(fence)) return PTR_ERR(fence); @@ -268,7 +268,7 @@ out: static int radeon_ttm_io_mem_reserve(struct ttm_device *bdev, struct ttm_resource *mem) { struct radeon_device *rdev = radeon_get_rdev(bdev); - size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; + size_t bus_size = (size_t)mem->size; switch (mem->mem_type) { case TTM_PL_SYSTEM: diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index 7c8e8be774f1..c3f4b33136e5 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -51,9 +51,6 @@ static void ttm_bo_mem_space_debug(struct ttm_buffer_object *bo, struct ttm_resource_manager *man; int i, mem_type; - drm_printf(&p, "No space for %p (%lu pages, %zuK, %zuM)\n", - bo, bo->resource->num_pages, bo->base.size >> 10, - bo->base.size >> 20); for (i = 0; i < placement->num_placement; i++) { mem_type = placement->placement[i].mem_type; drm_printf(&p, " placement[%d]=0x%08X (%d)\n", diff --git a/drivers/gpu/drm/ttm/ttm_bo_util.c b/drivers/gpu/drm/ttm/ttm_bo_util.c index fa04e62202c1..ba3aa0a0fc43 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_util.c +++ b/drivers/gpu/drm/ttm/ttm_bo_util.c @@ -173,7 +173,7 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo, clear = src_iter->ops->maps_tt && (!ttm || !ttm_tt_is_populated(ttm)); if (!(clear && ttm && !(ttm->page_flags & TTM_TT_FLAG_ZERO_ALLOC))) - ttm_move_memcpy(clear, dst_mem->num_pages, dst_iter, src_iter); + ttm_move_memcpy(clear, ttm->num_pages, 
dst_iter, src_iter); if (!src_iter->ops->maps_tt) ttm_kmap_iter_linear_io_fini(&_src_iter.io, bdev, src_mem); @@ -357,9 +357,9 @@ int ttm_bo_kmap(struct ttm_buffer_object *bo, map->virtual = NULL; map->bo = bo; - if (num_pages > bo->resource->num_pages) + if (num_pages > PFN_UP(bo->resource->size)) return -EINVAL; - if ((start_page + num_pages) > bo->resource->num_pages) + if ((start_page + num_pages) > PFN_UP(bo->resource->size)) return -EINVAL; ret = ttm_mem_io_reserve(bo->bdev, bo->resource); diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c index 38119311284d..5a3e4b891377 100644 --- a/drivers/gpu/drm/ttm/ttm_bo_vm.c +++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c @@ -217,7 +217,7 @@ vm_fault_t ttm_bo_vm_fault_reserved(struct vm_fault *vmf, page_last = vma_pages(vma) + vma->vm_pgoff - drm_vma_node_start(&bo->base.vma_node); - if (unlikely(page_offset >= bo->resource->num_pages)) + if (unlikely(page_offset >= PFN_UP(bo->base.size))) return VM_FAULT_SIGBUS; prot = ttm_io_prot(bo, bo->resource, prot); @@ -412,7 +412,7 @@ int ttm_bo_vm_access(struct vm_area_struct *vma, unsigned long addr, << PAGE_SHIFT); int ret; - if (len < 1 || (offset + len) >> PAGE_SHIFT > bo->resource->num_pages) + if (len < 1 || (offset + len) > bo->base.size) return -EIO; ret = ttm_bo_reserve(bo, true, false, NULL); diff --git a/drivers/gpu/drm/ttm/ttm_range_manager.c b/drivers/gpu/drm/ttm/ttm_range_manager.c index f7c16c46cfbc..0a8bc0b7f380 100644 --- a/drivers/gpu/drm/ttm/ttm_range_manager.c +++ b/drivers/gpu/drm/ttm/ttm_range_manager.c @@ -83,7 +83,7 @@ static int ttm_range_man_alloc(struct ttm_resource_manager *man, spin_lock(&rman->lock); ret = drm_mm_insert_node_in_range(mm, &node->mm_nodes[0], - node->base.num_pages, + PFN_UP(node->base.size), bo->page_alignment, 0, place->fpfn, lpfn, mode); spin_unlock(&rman->lock); diff --git a/drivers/gpu/drm/ttm/ttm_resource.c b/drivers/gpu/drm/ttm/ttm_resource.c index a729c32a1e48..328391bb1d87 100644 --- a/drivers/gpu/drm/ttm/ttm_resource.c +++ b/drivers/gpu/drm/ttm/ttm_resource.c @@ -177,7 +177,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo, struct ttm_resource_manager *man; res->start = 0; - res->num_pages = PFN_UP(bo->base.size); + res->size = bo->base.size; res->mem_type = place->mem_type; res->placement = place->flags; res->bus.addr = NULL; @@ -192,7 +192,7 @@ void ttm_resource_init(struct ttm_buffer_object *bo, list_add_tail(&res->lru, &bo->bdev->pinned); else list_add_tail(&res->lru, &man->lru[bo->priority]); - man->usage += res->num_pages << PAGE_SHIFT; + man->usage += res->size; spin_unlock(&bo->bdev->lru_lock); } EXPORT_SYMBOL(ttm_resource_init); @@ -214,7 +214,7 @@ void ttm_resource_fini(struct ttm_resource_manager *man, spin_lock(&bdev->lru_lock); list_del_init(&res->lru); - man->usage -= res->num_pages << PAGE_SHIFT; + man->usage -= res->size; spin_unlock(&bdev->lru_lock); } EXPORT_SYMBOL(ttm_resource_fini); @@ -665,17 +665,15 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, iosys_map_set_vaddr(&iter_io->dmap, mem->bus.addr); iter_io->needs_unmap = false; } else { - size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT; - iter_io->needs_unmap = true; memset(&iter_io->dmap, 0, sizeof(iter_io->dmap)); if (mem->bus.caching == ttm_write_combined) iosys_map_set_vaddr_iomem(&iter_io->dmap, ioremap_wc(mem->bus.offset, - bus_size)); + mem->size)); else if (mem->bus.caching == ttm_cached) iosys_map_set_vaddr(&iter_io->dmap, - memremap(mem->bus.offset, bus_size, + memremap(mem->bus.offset, mem->size, MEMREMAP_WB 
| MEMREMAP_WT | MEMREMAP_WC)); @@ -684,7 +682,7 @@ ttm_kmap_iter_linear_io_init(struct ttm_kmap_iter_linear_io *iter_io, if (iosys_map_is_null(&iter_io->dmap)) iosys_map_set_vaddr_iomem(&iter_io->dmap, ioremap(mem->bus.offset, - bus_size)); + mem->size)); if (iosys_map_is_null(&iter_io->dmap)) { ret = -ENOMEM; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c index 09fe20e918f9..c52c7bf1485b 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_blit.c @@ -483,8 +483,8 @@ int vmw_bo_cpu_blit(struct ttm_buffer_object *dst, d.src_addr = NULL; d.dst_pages = dst->ttm->pages; d.src_pages = src->ttm->pages; - d.dst_num_pages = dst->resource->num_pages; - d.src_num_pages = src->resource->num_pages; + d.dst_num_pages = PFN_UP(dst->resource->size); + d.src_num_pages = PFN_UP(src->resource->size); d.dst_prot = ttm_io_prot(dst, dst->resource, PAGE_KERNEL); d.src_prot = ttm_io_prot(src, src->resource, PAGE_KERNEL); d.diff = diff; diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c index d218b15953e0..321c551784a1 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_bo.c @@ -194,7 +194,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, int ret = 0; place = vmw_vram_placement.placement[0]; - place.lpfn = bo->resource->num_pages; + place.lpfn = PFN_UP(bo->resource->size); placement.num_placement = 1; placement.placement = &place; placement.num_busy_placement = 1; @@ -211,7 +211,7 @@ int vmw_bo_pin_in_start_of_vram(struct vmw_private *dev_priv, * that situation. */ if (bo->resource->mem_type == TTM_PL_VRAM && - bo->resource->start < bo->resource->num_pages && + bo->resource->start < PFN_UP(bo->resource->size) && bo->resource->start > 0 && buf->base.pin_count == 0) { ctx.interruptible = false; @@ -352,7 +352,7 @@ void *vmw_bo_map_and_cache(struct vmw_buffer_object *vbo) if (virtual) return virtual; - ret = ttm_bo_kmap(bo, 0, bo->resource->num_pages, &vbo->map); + ret = ttm_bo_kmap(bo, 0, PFN_UP(bo->base.size), &vbo->map); if (ret) DRM_ERROR("Buffer object map failed: %d.\n", ret); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c index 0422b6b89cc1..b78a10312fad 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_cotable.c @@ -443,7 +443,7 @@ static int vmw_cotable_resize(struct vmw_resource *res, size_t new_size) * Do a page by page copy of COTables. This eliminates slow vmap()s. * This should really be a TTM utility. 
*/ - for (i = 0; i < old_bo->resource->num_pages; ++i) { + for (i = 0; i < PFN_UP(old_bo->resource->size); ++i) { bool dummy; ret = ttm_bo_kmap(old_bo, i, 1, &old_map); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c index f16fc489d725..a5379f6fb5ab 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_execbuf.c @@ -1047,7 +1047,7 @@ static int vmw_query_bo_switch_prepare(struct vmw_private *dev_priv, if (unlikely(new_query_bo != sw_context->cur_query_bo)) { - if (unlikely(new_query_bo->base.resource->num_pages > 4)) { + if (unlikely(PFN_UP(new_query_bo->base.resource->size) > 4)) { VMW_DEBUG_USER("Query buffer too large.\n"); return -EINVAL; } diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 60e3cc537f36..abd5e3323ebf 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -71,7 +71,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, spin_lock(&gman->lock); if (gman->max_gmr_pages > 0) { - gman->used_gmr_pages += (*res)->num_pages; + gman->used_gmr_pages += PFN_UP((*res)->size); /* * Because the graphics memory is a soft limit we can try to * expand it instead of letting the userspace apps crash. @@ -114,7 +114,7 @@ static int vmw_gmrid_man_get_node(struct ttm_resource_manager *man, return 0; nospace: - gman->used_gmr_pages -= (*res)->num_pages; + gman->used_gmr_pages -= PFN_UP((*res)->size); spin_unlock(&gman->lock); ida_free(&gman->gmr_ida, id); ttm_resource_fini(man, *res); @@ -129,7 +129,7 @@ static void vmw_gmrid_man_put_node(struct ttm_resource_manager *man, ida_free(&gman->gmr_ida, res->start); spin_lock(&gman->lock); - gman->used_gmr_pages -= res->num_pages; + gman->used_gmr_pages -= PFN_UP(res->size); spin_unlock(&gman->lock); ttm_resource_fini(man, res); kfree(res); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c index 7bc99b1279f7..f41f041559f4 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_page_dirty.c @@ -230,7 +230,7 @@ void vmw_bo_dirty_unmap(struct vmw_buffer_object *vbo, int vmw_bo_dirty_add(struct vmw_buffer_object *vbo) { struct vmw_bo_dirty *dirty = vbo->dirty; - pgoff_t num_pages = vbo->base.resource->num_pages; + pgoff_t num_pages = PFN_UP(vbo->base.resource->size); size_t size; int ret; @@ -395,7 +395,7 @@ vm_fault_t vmw_bo_vm_mkwrite(struct vm_fault *vmf) return ret; page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); - if (unlikely(page_offset >= bo->resource->num_pages)) { + if (unlikely(page_offset >= PFN_UP(bo->resource->size))) { ret = VM_FAULT_SIGBUS; goto out_unlock; } @@ -438,7 +438,7 @@ vm_fault_t vmw_bo_vm_fault(struct vm_fault *vmf) page_offset = vmf->pgoff - drm_vma_node_start(&bo->base.vma_node); - if (page_offset >= bo->resource->num_pages || + if (page_offset >= PFN_UP(bo->resource->size) || vmw_resources_clean(vbo, page_offset, page_offset + PAGE_SIZE, &allowed_prefault)) { diff --git a/include/drm/ttm/ttm_resource.h b/include/drm/ttm/ttm_resource.h index 5afc6d664fde..78a226eba953 100644 --- a/include/drm/ttm/ttm_resource.h +++ b/include/drm/ttm/ttm_resource.h @@ -197,7 +197,7 @@ struct ttm_bus_placement { * struct ttm_resource * * @start: Start of the allocation. - * @num_pages: Actual size of resource in pages. + * @size: Actual size of resource in bytes. * @mem_type: Resource type of the allocation. 
* @placement: Placement flags. * @bus: Placement on io bus accessible to the CPU @@ -208,7 +208,7 @@ struct ttm_bus_placement { */ struct ttm_resource { unsigned long start; - unsigned long num_pages; + size_t size; uint32_t mem_type; uint32_t placement; struct ttm_bus_placement bus; -- cgit From 4f91790b42ffba72d80434d901548979ab41dc7c Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 28 Sep 2022 10:54:41 +0200 Subject: drm/amdgpu: use drm_sched_job_add_resv_dependencies for moves MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Use the new common scheduler functions to figure out what to wait for. Signed-off-by: Christian König Reviewed-by: Luben Tuikov Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-4-christian.koenig@amd.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 16 +++++----------- 1 file changed, 5 insertions(+), 11 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 36066965346f..5aae63891858 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1966,17 +1966,11 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, adev->gart.bo); (*job)->vm_needs_flush = true; } - if (resv) { - r = amdgpu_sync_resv(adev, &(*job)->sync, resv, - AMDGPU_SYNC_ALWAYS, - AMDGPU_FENCE_OWNER_UNDEFINED); - if (r) { - DRM_ERROR("sync failed (%d).\n", r); - amdgpu_job_free(*job); - return r; - } - } - return 0; + if (!resv) + return 0; + + return drm_sched_job_add_resv_dependencies(&(*job)->base, resv, + DMA_RESV_USAGE_BOOKKEEP); } int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, -- cgit From f7d66fb2ea43a3016e78a700a2ca6c77a74579f9 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 28 Sep 2022 20:31:38 +0200 Subject: drm/amdgpu: cleanup scheduler job initialization v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Init the DRM scheduler base class while allocating the job. This makes the whole handling much cleaner.
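For a typical caller the change can be sketched as follows (illustrative only; argument lists are abbreviated and error handling is omitted):

  /* Old flow: the entity and owner were passed again at submission,
   * which could still fail and required manual job cleanup. */
  r = amdgpu_job_alloc(adev, num_ibs, &job, vm);
  ...
  r = amdgpu_job_submit(job, entity, owner, &fence);

  /* New flow: entity and owner are bound when the job is allocated
   * (drm_sched_job_init() happens inside amdgpu_job_alloc()), and
   * submission simply returns the finished fence. */
  r = amdgpu_job_alloc(adev, vm, entity, owner, num_ibs, &job);
  ...
  fence = amdgpu_job_submit(job);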
v2: fix coding style Signed-off-by: Christian König Reviewed-by: Luben Tuikov Link: https://patchwork.freedesktop.org/patch/msgid/20221014084641.128280-7-christian.koenig@amd.com --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 8 +--- drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 44 +++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_job.h | 14 ++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c | 7 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 56 ++++++++++---------------- drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 9 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 13 +++--- drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 22 ++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c | 61 +++++++++++++++-------------- drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c | 12 ++---- drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 12 +++--- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 17 +++----- 14 files changed, 135 insertions(+), 150 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 03bbfaa51cbc..cf58c1125e60 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -674,7 +674,7 @@ int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, goto err; } - ret = amdgpu_job_alloc(adev, 1, &job, NULL); + ret = amdgpu_job_alloc(adev, NULL, NULL, NULL, 1, &job); if (ret) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 1bbd39b3b0fc..aa6f6c428dbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -291,12 +291,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p, return -EINVAL; for (i = 0; i < p->gang_size; ++i) { - ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm); - if (ret) - goto free_all_kdata; - - ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i], - &fpriv->vm); + ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm, + num_ibs[i], &p->jobs[i]); if (ret) goto free_all_kdata; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 384149a81978..fa60d25dfbd1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -89,8 +89,9 @@ exit: return DRM_GPU_SCHED_STAT_NOMINAL; } -int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_job **job, struct amdgpu_vm *vm) +int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct drm_sched_entity *entity, void *owner, + unsigned int num_ibs, struct amdgpu_job **job) { if (num_ibs == 0) return -EINVAL; @@ -111,23 +112,30 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, (*job)->vram_lost_counter = atomic_read(&adev->vram_lost_counter); (*job)->vm_pd_addr = AMDGPU_BO_INVALID_OFFSET; - return 0; + if (!entity) + return 0; + + return drm_sched_job_init(&(*job)->base, entity, owner); } -int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, - enum amdgpu_ib_pool_type pool_type, - struct amdgpu_job **job) +int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, + struct drm_sched_entity *entity, void *owner, + size_t size, enum amdgpu_ib_pool_type pool_type, + struct amdgpu_job **job) { int r; - r = amdgpu_job_alloc(adev, 1, job, NULL); + r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job); if (r) return r; (*job)->num_ibs 
= 1; r = amdgpu_ib_get(adev, NULL, size, pool_type, &(*job)->ibs[0]); - if (r) + if (r) { + if (entity) + drm_sched_job_cleanup(&(*job)->base); kfree(*job); + } return r; } @@ -191,6 +199,9 @@ void amdgpu_job_set_gang_leader(struct amdgpu_job *job, void amdgpu_job_free(struct amdgpu_job *job) { + if (job->base.entity) + drm_sched_job_cleanup(&job->base); + amdgpu_job_free_resources(job); amdgpu_sync_free(&job->sync); amdgpu_sync_free(&job->sched_sync); @@ -203,25 +214,16 @@ void amdgpu_job_free(struct amdgpu_job *job) dma_fence_put(&job->hw_fence); } -int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, - void *owner, struct dma_fence **f) +struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job) { - int r; - - if (!f) - return -EINVAL; - - r = drm_sched_job_init(&job->base, entity, owner); - if (r) - return r; + struct dma_fence *f; drm_sched_job_arm(&job->base); - - *f = dma_fence_get(&job->base.s_fence->finished); + f = dma_fence_get(&job->base.s_fence->finished); amdgpu_job_free_resources(job); drm_sched_entity_push_job(&job->base); - return 0; + return f; } int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index ab7b150e5d50..e2ecab977d0d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -78,18 +78,20 @@ static inline struct amdgpu_ring *amdgpu_job_ring(struct amdgpu_job *job) return to_amdgpu_ring(job->base.entity->rq->sched); } -int amdgpu_job_alloc(struct amdgpu_device *adev, unsigned num_ibs, - struct amdgpu_job **job, struct amdgpu_vm *vm); -int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, unsigned size, - enum amdgpu_ib_pool_type pool, struct amdgpu_job **job); +int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, + struct drm_sched_entity *entity, void *owner, + unsigned int num_ibs, struct amdgpu_job **job); +int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, + struct drm_sched_entity *entity, void *owner, + size_t size, enum amdgpu_ib_pool_type pool_type, + struct amdgpu_job **job); void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, struct amdgpu_bo *gws, struct amdgpu_bo *oa); void amdgpu_job_free_resources(struct amdgpu_job *job); void amdgpu_job_set_gang_leader(struct amdgpu_job *job, struct amdgpu_job *leader); void amdgpu_job_free(struct amdgpu_job *job); -int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity, - void *owner, struct dma_fence **f); +struct dma_fence *amdgpu_job_submit(struct amdgpu_job *job); int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring, struct dma_fence **fence); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index 518eb0e40d32..de182bfcf85f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -150,14 +150,15 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle, const unsigned ib_size_dw = 16; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; ib = &job->ibs[0]; - ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, PACKETJ_TYPE0); + ib->ptr[0] = PACKETJ(adev->jpeg.internal.jpeg_pitch, 0, 0, + PACKETJ_TYPE0); ib->ptr[1] = 0xDEADBEEF; for (i = 
2; i < 16; i += 2) { ib->ptr[i] = PACKETJ(0, 0, 0, PACKETJ_TYPE6); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 5aae63891858..9f5c1d86d2ab 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -189,7 +189,6 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, struct amdgpu_device *adev = ring->adev; unsigned offset, num_pages, num_dw, num_bytes; uint64_t src_addr, dst_addr; - struct dma_fence *fence; struct amdgpu_job *job; void *cpu_addr; uint64_t flags; @@ -229,7 +228,9 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + num_dw * 4 + num_bytes, AMDGPU_IB_POOL_DELAYED, &job); if (r) return r; @@ -269,18 +270,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, } } - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); - if (r) - goto error_free; - - dma_fence_put(fence); - - return r; - -error_free: - amdgpu_job_free(job); - return r; + dma_fence_put(amdgpu_job_submit(job)); + return 0; } /** @@ -1414,7 +1405,8 @@ static void amdgpu_ttm_vram_mm_access(struct amdgpu_device *adev, loff_t pos, } static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, - unsigned long offset, void *buf, int len, int write) + unsigned long offset, void *buf, + int len, int write) { struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev); @@ -1438,26 +1430,27 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, memcpy(adev->mman.sdma_access_ptr, buf, len); num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, AMDGPU_IB_POOL_DELAYED, &job); + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + num_dw * 4, AMDGPU_IB_POOL_DELAYED, + &job); if (r) goto out; amdgpu_res_first(abo->tbo.resource, offset, len, &src_mm); - src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + src_mm.start; + src_addr = amdgpu_ttm_domain_start(adev, bo->resource->mem_type) + + src_mm.start; dst_addr = amdgpu_bo_gpu_offset(adev->mman.sdma_access_bo); if (write) swap(src_addr, dst_addr); - amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, PAGE_SIZE, false); + amdgpu_emit_copy_buffer(adev, &job->ibs[0], src_addr, dst_addr, + PAGE_SIZE, false); amdgpu_ring_pad_ib(adev->mman.buffer_funcs_ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); - r = amdgpu_job_submit(job, &adev->mman.entity, AMDGPU_FENCE_OWNER_UNDEFINED, &fence); - if (r) { - amdgpu_job_free(job); - goto out; - } + fence = amdgpu_job_submit(job); if (!dma_fence_wait_timeout(fence, false, adev->sdma_timeout)) r = -ETIMEDOUT; @@ -1956,7 +1949,9 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, AMDGPU_IB_POOL_DELAYED; int r; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4, pool, job); + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + num_dw * 4, pool, job); if (r) return r; @@ -2015,8 +2010,7 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, if (direct_submit) r = amdgpu_job_submit_direct(job, ring, fence); else - r = amdgpu_job_submit(job, &adev->mman.entity, 
- AMDGPU_FENCE_OWNER_UNDEFINED, fence); + *fence = amdgpu_job_submit(job); if (r) goto error_free; @@ -2061,16 +2055,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, amdgpu_ring_pad_ib(ring, &job->ibs[0]); WARN_ON(job->ibs[0].length_dw > num_dw); - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, fence); - if (r) - goto error_free; - + *fence = amdgpu_job_submit(job); return 0; - -error_free: - amdgpu_job_free(job); - return r; } int amdgpu_fill_buffer(struct amdgpu_bo *bo, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 6eac649499d3..8baddf79635b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1132,7 +1132,9 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, unsigned offset_idx = 0; unsigned offset[3] = { UVD_BASE_SI, 0, 0 }; - r = amdgpu_job_alloc_with_ib(adev, 64, direct ? AMDGPU_IB_POOL_DIRECT : + r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + 64, direct ? AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_DELAYED, &job); if (r) return r; @@ -1181,10 +1183,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, if (r) goto err_free; - r = amdgpu_job_submit(job, &adev->uvd.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &f); - if (r) - goto err_free; + f = amdgpu_job_submit(job); } amdgpu_bo_reserve(bo, true); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index 02cb3a12dd76..b239e874f2d5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -450,8 +450,10 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, + &job); if (r) return r; @@ -538,7 +540,9 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, struct dma_fence *f = NULL; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, + r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + ib_size_dw * 4, direct ? 
AMDGPU_IB_POOL_DIRECT : AMDGPU_IB_POOL_DELAYED, &job); if (r) @@ -570,8 +574,7 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, if (direct) r = amdgpu_job_submit_direct(job, ring, &f); else - r = amdgpu_job_submit(job, &ring->adev->vce.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &f); + f = amdgpu_job_submit(job); if (r) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index 0b52af415b28..3449145ab2bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -600,15 +600,16 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, struct amdgpu_ib *ib_msg, struct dma_fence **fence) { + u64 addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); struct amdgpu_device *adev = ring->adev; struct dma_fence *f = NULL; struct amdgpu_job *job; struct amdgpu_ib *ib; - uint64_t addr = AMDGPU_GPU_PAGE_ALIGN(ib_msg->gpu_addr); int i, r; - r = amdgpu_job_alloc_with_ib(adev, 64, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, + 64, AMDGPU_IB_POOL_DIRECT, + &job); if (r) goto err; @@ -787,8 +788,9 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, if (sq) ib_size_dw += 8; - r = amdgpu_job_alloc_with_ib(adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, + &job); if (r) goto err; @@ -916,8 +918,9 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand if (sq) ib_size_dw += 8; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, + &job); if (r) return r; @@ -982,8 +985,9 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han if (sq) ib_size_dw += 8; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, + ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, + &job); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 2b0669c464f6..6768af365f3b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -47,6 +47,32 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table) return r; } +/* Allocate a new job for @count PTE updates */ +static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, + unsigned int count) +{ + enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE + : AMDGPU_IB_POOL_DELAYED; + struct drm_sched_entity *entity = p->immediate ? &p->vm->immediate + : &p->vm->delayed; + unsigned int ndw; + int r; + + /* estimate how many dw we need */ + ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; + if (p->pages_addr) + ndw += count * 2; + ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); + + r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM, + ndw * 4, pool, &p->job); + if (r) + return r; + + p->num_dw_left = ndw; + return 0; +} + /** * amdgpu_vm_sdma_prepare - prepare SDMA command submission * @@ -61,17 +87,12 @@ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, struct dma_resv *resv, enum amdgpu_sync_mode sync_mode) { - enum amdgpu_ib_pool_type pool = p->immediate ? 
AMDGPU_IB_POOL_IMMEDIATE - : AMDGPU_IB_POOL_DELAYED; - unsigned int ndw = AMDGPU_VM_SDMA_MIN_NUM_DW; int r; - r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, &p->job); + r = amdgpu_vm_sdma_alloc_job(p, 0); if (r) return r; - p->num_dw_left = ndw; - if (!resv) return 0; @@ -91,20 +112,16 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, struct dma_fence **fence) { struct amdgpu_ib *ib = p->job->ibs; - struct drm_sched_entity *entity; struct amdgpu_ring *ring; struct dma_fence *f; - int r; - entity = p->immediate ? &p->vm->immediate : &p->vm->delayed; - ring = container_of(entity->rq->sched, struct amdgpu_ring, sched); + ring = container_of(p->vm->delayed.rq->sched, struct amdgpu_ring, + sched); WARN_ON(ib->length_dw == 0); amdgpu_ring_pad_ib(ring, ib); WARN_ON(ib->length_dw > p->num_dw_left); - r = amdgpu_job_submit(p->job, entity, AMDGPU_FENCE_OWNER_VM, &f); - if (r) - goto error; + f = amdgpu_job_submit(p->job); if (p->unlocked) { struct dma_fence *tmp = dma_fence_get(f); @@ -120,10 +137,6 @@ static int amdgpu_vm_sdma_commit(struct amdgpu_vm_update_params *p, swap(*fence, f); dma_fence_put(f); return 0; - -error: - amdgpu_job_free(p->job); - return r; } /** @@ -203,8 +216,6 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, uint64_t flags) { struct amdgpu_bo *bo = &vmbo->bo; - enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE - : AMDGPU_IB_POOL_DELAYED; struct dma_resv_iter cursor; unsigned int i, ndw, nptes; struct dma_fence *fence; @@ -231,19 +242,9 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, if (r) return r; - /* estimate how many dw we need */ - ndw = 32; - if (p->pages_addr) - ndw += count * 2; - ndw = max(ndw, AMDGPU_VM_SDMA_MIN_NUM_DW); - ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); - - r = amdgpu_job_alloc_with_ib(p->adev, ndw * 4, pool, - &p->job); + r = amdgpu_vm_sdma_alloc_job(p, count); if (r) return r; - - p->num_dw_left = ndw; } if (!p->pages_addr) { diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c index f513e2c2e964..657e53708248 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v10_0.c @@ -371,7 +371,9 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, * translation. Avoid this by doing the invalidation from the SDMA * itself. 
*/ - r = amdgpu_job_alloc_with_ib(adev, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, + r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, &job); if (r) goto error_alloc; @@ -380,10 +382,7 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, job->vm_needs_flush = true; job->ibs->ptr[job->ibs->length_dw++] = ring->funcs->nop; amdgpu_ring_pad_ib(ring, &job->ibs[0]); - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); - if (r) - goto error_submit; + fence = amdgpu_job_submit(job); mutex_unlock(&adev->mman.gtt_window_lock); @@ -392,9 +391,6 @@ static void gmc_v10_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, return; -error_submit: - amdgpu_job_free(job); - error_alloc: mutex_unlock(&adev->mman.gtt_window_lock); DRM_ERROR("Error flushing GPU TLB using the SDMA (%d)!\n", r); diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 375c440957dc..5fe872f4bea7 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -216,8 +216,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -280,8 +280,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index e668b3baa8c6..e407be6cb63c 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -213,7 +213,7 @@ static int uvd_v7_0_enc_ring_test_ring(struct amdgpu_ring *ring) * * Open up a stream for HW test */ -static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, +static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle, struct amdgpu_bo *bo, struct dma_fence **fence) { @@ -224,8 +224,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; @@ -276,7 +276,7 @@ err: * * Close up a stream for HW test or if userspace failed to do so */ -static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, +static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle, struct amdgpu_bo *bo, struct dma_fence **fence) { @@ -287,8 +287,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handl uint64_t addr; int i, r; - r = amdgpu_job_alloc_with_ib(ring->adev, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, + AMDGPU_IB_POOL_DIRECT, &job); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 2797029bd500..79069c485386 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -65,8 +65,11 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8); num_bytes = npages * 8; - r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes, - AMDGPU_IB_POOL_DELAYED, &job); + r = amdgpu_job_alloc_with_ib(adev, &adev->mman.entity, + AMDGPU_FENCE_OWNER_UNDEFINED, + num_dw * 4 + num_bytes, + AMDGPU_IB_POOL_DELAYED, + &job); if (r) return r; @@ -89,18 +92,10 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, cpu_addr = &job->ibs[0].ptr[num_dw]; amdgpu_gart_map(adev, 0, npages, addr, pte_flags, cpu_addr); - r = amdgpu_job_submit(job, &adev->mman.entity, - AMDGPU_FENCE_OWNER_UNDEFINED, &fence); - if (r) - goto error_free; - + fence = amdgpu_job_submit(job); dma_fence_put(fence); return r; - -error_free: - amdgpu_job_free(job); - return r; } /** -- cgit From adf65dff5d61d3adad669a1f5c6dd009107eacd7 Mon Sep 17 00:00:00 2001 From: Rajneesh Bhardwaj Date: Wed, 20 Apr 2022 12:28:20 -0400 Subject: drm/amdgpu: Fix the kerneldoc description amdgpu_ttm_tt_set_userptr() is also called by the KFD as part of initializing the user pages for userptr BOs and also while initializing the GPUVM for a KFD process so update the function description. Reviewed-by: Felix Kuehling Signed-off-by: Rajneesh Bhardwaj Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 8c00a7a06c32..c9ceea46711e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1177,8 +1177,9 @@ int amdgpu_ttm_tt_get_userptr(const struct ttm_buffer_object *tbo, * @addr: The address in the current tasks VM space to use * @flags: Requirements of userptr object. * - * Called by amdgpu_gem_userptr_ioctl() to bind userptr pages - * to current task + * Called by amdgpu_gem_userptr_ioctl() and kfd_ioctl_alloc_memory_of_gpu() to + * bind userptr pages to current task and by kfd_ioctl_acquire_vm() to + * initialize GPU VM for a KFD process. 
*/ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo, uint64_t addr, uint32_t flags) -- cgit From 4864f2ee9ee2acf4a1009b58fbc62f17fa086d4e Mon Sep 17 00:00:00 2001 From: Tong Liu01 Date: Thu, 10 Nov 2022 17:31:36 +0800 Subject: drm/amdgpu: add vram reservation based on vram_usagebyfirmware_v2_2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move TMR region from top of FB to 2MB for FFBM, so we need to reserve TMR region firstly to make sure TMR can be allocated at 2MB Signed-off-by: Tong Liu01 Acked-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 104 ++++++++++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 51 +++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 5 ++ drivers/gpu/drm/amd/include/atomfirmware.h | 63 ++++++++++++-- 4 files changed, 191 insertions(+), 32 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index b81b77a9efa6..9b97fa39d47a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -101,39 +101,97 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev) } } +static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev, + struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes) +{ + uint32_t start_addr, fw_size, drv_size; + + start_addr = le32_to_cpu(fw_usage->start_address_in_kb); + fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb); + drv_size = le16_to_cpu(fw_usage->used_by_driver_in_kb); + + DRM_DEBUG("atom firmware v2_1 requested %08x %dkb fw %dkb drv\n", + start_addr, + fw_size, + drv_size); + + if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) == + (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION << + ATOM_VRAM_OPERATION_FLAGS_SHIFT)) { + /* Firmware request VRAM reservation for SR-IOV */ + adev->mman.fw_vram_usage_start_offset = (start_addr & + (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; + adev->mman.fw_vram_usage_size = fw_size << 10; + /* Use the default scratch size */ + *usage_bytes = 0; + } else { + *usage_bytes = drv_size << 10; + } + return 0; +} + +static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev, + struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes) +{ + uint32_t fw_start_addr, fw_size, drv_start_addr, drv_size; + + fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb); + fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb); + + drv_start_addr = le32_to_cpu(fw_usage->driver_region0_start_address_in_kb); + drv_size = le32_to_cpu(fw_usage->used_by_driver_region0_in_kb); + + DRM_DEBUG("atom requested fw start at %08x %dkb and drv start at %08x %dkb\n", + fw_start_addr, + fw_size, + drv_start_addr, + drv_size); + + if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) { + /* Firmware request VRAM reservation for SR-IOV */ + adev->mman.fw_vram_usage_start_offset = (fw_start_addr & + (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; + adev->mman.fw_vram_usage_size = fw_size << 10; + } + + if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) { + /* driver request VRAM reservation for SR-IOV */ + adev->mman.drv_vram_usage_start_offset = (drv_start_addr & + (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; + adev->mman.drv_vram_usage_size = drv_size << 10; + } + + *usage_bytes = 0; + return 0; +} + int 
amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) { struct atom_context *ctx = adev->mode_info.atom_context; int index = get_index_into_master_table(atom_master_list_of_data_tables_v2_1, vram_usagebyfirmware); - struct vram_usagebyfirmware_v2_1 *firmware_usage; - uint32_t start_addr, size; + struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1; + struct vram_usagebyfirmware_v2_2 *fw_usage_v2_2; uint16_t data_offset; + uint8_t frev, crev; int usage_bytes = 0; - if (amdgpu_atom_parse_data_header(ctx, index, NULL, NULL, NULL, &data_offset)) { - firmware_usage = (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset); - DRM_DEBUG("atom firmware requested %08x %dkb fw %dkb drv\n", - le32_to_cpu(firmware_usage->start_address_in_kb), - le16_to_cpu(firmware_usage->used_by_firmware_in_kb), - le16_to_cpu(firmware_usage->used_by_driver_in_kb)); - - start_addr = le32_to_cpu(firmware_usage->start_address_in_kb); - size = le16_to_cpu(firmware_usage->used_by_firmware_in_kb); - - if ((uint32_t)(start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) == - (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION << - ATOM_VRAM_OPERATION_FLAGS_SHIFT)) { - /* Firmware request VRAM reservation for SR-IOV */ - adev->mman.fw_vram_usage_start_offset = (start_addr & - (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; - adev->mman.fw_vram_usage_size = size << 10; - /* Use the default scratch size */ - usage_bytes = 0; - } else { - usage_bytes = le16_to_cpu(firmware_usage->used_by_driver_in_kb) << 10; + if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) { + if (frev == 2 && crev == 1) { + fw_usage_v2_1 = + (struct vram_usagebyfirmware_v2_1 *)(ctx->bios + data_offset); + amdgpu_atomfirmware_allocate_fb_v2_1(adev, + fw_usage_v2_1, + &usage_bytes); + } else if (frev >= 2 && crev >= 2) { + fw_usage_v2_2 = + (struct vram_usagebyfirmware_v2_2 *)(ctx->bios + data_offset); + amdgpu_atomfirmware_allocate_fb_v2_2(adev, + fw_usage_v2_2, + &usage_bytes); } } + ctx->scratch_size_bytes = 0; if (usage_bytes == 0) usage_bytes = 20 * 1024; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index c9ceea46711e..02a8f25ae134 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1561,6 +1561,23 @@ static void amdgpu_ttm_fw_reserve_vram_fini(struct amdgpu_device *adev) NULL, &adev->mman.fw_vram_usage_va); } +/* + * Driver Reservation functions + */ +/** + * amdgpu_ttm_drv_reserve_vram_fini - free drv reserved vram + * + * @adev: amdgpu_device pointer + * + * free drv reserved vram if it has been reserved. + */ +static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev) +{ + amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo, + NULL, + NULL); +} + /** * amdgpu_ttm_fw_reserve_vram_init - create bo vram reservation from fw * @@ -1587,6 +1604,31 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) &adev->mman.fw_vram_usage_va); } +/** + * amdgpu_ttm_drv_reserve_vram_init - create bo vram reservation from driver + * + * @adev: amdgpu_device pointer + * + * create bo vram reservation from drv. 
+ */ +static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev) +{ + uint64_t vram_size = adev->gmc.visible_vram_size; + + adev->mman.drv_vram_usage_reserved_bo = NULL; + + if (adev->mman.drv_vram_usage_size == 0 || + adev->mman.drv_vram_usage_size > vram_size) + return 0; + + return amdgpu_bo_create_kernel_at(adev, + adev->mman.drv_vram_usage_start_offset, + adev->mman.drv_vram_usage_size, + AMDGPU_GEM_DOMAIN_VRAM, + &adev->mman.drv_vram_usage_reserved_bo, + NULL); +} + /* * Memoy training reservation functions */ @@ -1754,6 +1796,14 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) return r; } + /* + *The reserved vram for driver must be pinned to the specified + *place on the VRAM, so reserve it early. + */ + r = amdgpu_ttm_drv_reserve_vram_init(adev); + if (r) + return r; + /* * only NAVI10 and onwards ASIC support for IP discovery. * If IP discovery enabled, a block of memory should be @@ -1879,6 +1929,7 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev) amdgpu_bo_free_kernel(&adev->mman.sdma_access_bo, NULL, &adev->mman.sdma_access_ptr); amdgpu_ttm_fw_reserve_vram_fini(adev); + amdgpu_ttm_drv_reserve_vram_fini(adev); if (drm_dev_enter(adev_to_drm(adev), &idx)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 6a70818039dd..7c38843f411e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -84,6 +84,11 @@ struct amdgpu_mman { struct amdgpu_bo *fw_vram_usage_reserved_bo; void *fw_vram_usage_va; + /* driver VRAM reservation */ + u64 drv_vram_usage_start_offset; + u64 drv_vram_usage_size; + struct amdgpu_bo *drv_vram_usage_reserved_bo; + /* PAGE_SIZE'd BO for process memory r/w over SDMA. */ struct amdgpu_bo *sdma_access_bo; void *sdma_access_ptr; diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h index ff855cb21d3f..bbe1337a8cee 100644 --- a/drivers/gpu/drm/amd/include/atomfirmware.h +++ b/drivers/gpu/drm/amd/include/atomfirmware.h @@ -705,20 +705,65 @@ struct atom_gpio_pin_lut_v2_1 }; -/* - *************************************************************************** - Data Table vram_usagebyfirmware structure - *************************************************************************** -*/ +/* + * VBIOS/PRE-OS always reserve a FB region at the top of frame buffer. driver should not write + * access that region. driver can allocate their own reservation region as long as it does not + * overlap firwmare's reservation region. + * if (pre-NV1X) atom data table firmwareInfoTable version < 3.3: + * in this case, atom data table vram_usagebyfirmwareTable version always <= 2.1 + * if VBIOS/UEFI GOP is posted: + * VBIOS/UEFIGOP update used_by_firmware_in_kb = total reserved size by VBIOS + * update start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb; + * ( total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10) + * driver can allocate driver reservation region under firmware reservation, + * used_by_driver_in_kb = driver reservation size + * driver reservation start address = (start_address_in_kb - used_by_driver_in_kb) + * Comment1[hchan]: There is only one reservation at the beginning of the FB reserved by + * host driver. 
Host driver would overwrite the table with the following + * used_by_firmware_in_kb = total reserved size for pf-vf info exchange and + * set SRIOV_MSG_SHARE_RESERVATION mask start_address_in_kb = 0 + * else there is no VBIOS reservation region: + * driver must allocate driver reservation region at top of FB. + * driver set used_by_driver_in_kb = driver reservation size + * driver reservation start address = (total_mem_size_in_kb - used_by_driver_in_kb) + * same as Comment1 + * else (NV1X and after): + * if VBIOS/UEFI GOP is posted: + * VBIOS/UEFIGOP update: + * used_by_firmware_in_kb = atom_firmware_Info_v3_3.fw_reserved_size_in_kb; + * start_address_in_kb = total_mem_size_in_kb - used_by_firmware_in_kb; + * (total_mem_size_in_kb = reg(CONFIG_MEMSIZE)<<10) + * if vram_usagebyfirmwareTable version <= 2.1: + * driver can allocate driver reservation region under firmware reservation, + * driver set used_by_driver_in_kb = driver reservation size + * driver reservation start address = start_address_in_kb - used_by_driver_in_kb + * same as Comment1 + * else driver can: + * allocate it reservation any place as long as it does overlap pre-OS FW reservation area + * set used_by_driver_region0_in_kb = driver reservation size + * set driver_region0_start_address_in_kb = driver reservation region start address + * Comment2[hchan]: Host driver can set used_by_firmware_in_kb and start_address_in_kb to + * zero as the reservation for VF as it doesn’t exist. And Host driver should also + * update atom_firmware_Info table to remove the same VBIOS reservation as well. + */ struct vram_usagebyfirmware_v2_1 { - struct atom_common_table_header table_header; - uint32_t start_address_in_kb; - uint16_t used_by_firmware_in_kb; - uint16_t used_by_driver_in_kb; + struct atom_common_table_header table_header; + uint32_t start_address_in_kb; + uint16_t used_by_firmware_in_kb; + uint16_t used_by_driver_in_kb; }; +struct vram_usagebyfirmware_v2_2 { + struct atom_common_table_header table_header; + uint32_t fw_region_start_address_in_kb; + uint16_t used_by_firmware_in_kb; + uint16_t reserved; + uint32_t driver_region0_start_address_in_kb; + uint32_t used_by_driver_region0_in_kb; + uint32_t reserved32[7]; +}; /* *************************************************************************** -- cgit From fec8fdb54e8f74d88951c9f998f47bf4f2031fe0 Mon Sep 17 00:00:00 2001 From: Christian König Date: Thu, 10 Nov 2022 12:31:41 +0100 Subject: drm/amdgpu: fix userptr HMM range handling v2 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The basic problem here is that it's not allowed to page fault while holding the reservation lock. So it can happen that multiple processes try to validate an userptr at the same time. Work around that by putting the HMM range object into the mutex protected bo list for now. 
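Concretely, the hmm_range is now handed back to the caller, which keeps it (for example in the mutex protected bo list entry) and finishes the HMM tracking itself. A minimal sketch of the resulting pattern, simplified from the hunks below with error paths trimmed:

	struct hmm_range *range;
	int r;

	r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range);
	if (r)
		return r;

	/* ... use the pinned pages ... */

	/* Must be called exactly once afterwards to stop HMM tracking;
	 * returns true if the pages are still valid. */
	if (!amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range))
		r = -EAGAIN;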
v2: make sure range is set to NULL in case of an error Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling CC: stable@vger.kernel.org Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 12 ++++-- drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 3 ++ drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 8 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 6 ++- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 53 +++++++----------------- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 14 +++++-- 7 files changed, 46 insertions(+), 51 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 68741b157153..e44d740022bc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -938,6 +938,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, struct amdkfd_process_info *process_info = mem->process_info; struct amdgpu_bo *bo = mem->bo; struct ttm_operation_ctx ctx = { true, false }; + struct hmm_range *range; int ret = 0; mutex_lock(&process_info->lock); @@ -967,7 +968,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, return 0; } - ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); + ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, &range); if (ret) { pr_err("%s: Failed to get user pages: %d\n", __func__, ret); goto unregister_out; @@ -985,7 +986,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, amdgpu_bo_unreserve(bo); release_out: - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); unregister_out: if (ret) amdgpu_mn_unregister(bo); @@ -2317,6 +2318,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, /* Go through userptr_inval_list and update any invalid user_pages */ list_for_each_entry(mem, &process_info->userptr_inval_list, validate_list.head) { + struct hmm_range *range; + invalid = atomic_read(&mem->invalid); if (!invalid) /* BO hasn't been invalidated since the last @@ -2327,7 +2330,8 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, bo = mem->bo; /* Get updated user pages */ - ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); + ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, + &range); if (ret) { pr_debug("Failed %d to get user pages\n", ret); @@ -2346,7 +2350,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, * FIXME: Cannot ignore the return code, must hold * notifier_lock */ - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); } /* Mark the BO as valid unless it was invalidated diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c index 2168163aad2d..252a876b0725 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c @@ -209,6 +209,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list, list_add_tail(&e->tv.head, &bucket[priority]); e->user_pages = NULL; + e->range = NULL; } /* Connect the sorted buckets in the output list. 
*/ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h index 9caea1688fc3..e4d78491bcc7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h @@ -26,6 +26,8 @@ #include #include +struct hmm_range; + struct amdgpu_device; struct amdgpu_bo; struct amdgpu_bo_va; @@ -36,6 +38,7 @@ struct amdgpu_bo_list_entry { struct amdgpu_bo_va *bo_va; uint32_t priority; struct page **user_pages; + struct hmm_range *range; bool user_invalidated; }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 255d545e5524..275da612cd87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -912,7 +912,7 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p, goto out_free_user_pages; } - r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages); + r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range); if (r) { kvfree(e->user_pages); e->user_pages = NULL; @@ -990,9 +990,10 @@ out_free_user_pages: if (!e->user_pages) continue; - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); kvfree(e->user_pages); e->user_pages = NULL; + e->range = NULL; } mutex_unlock(&p->bo_list->bo_list_mutex); return r; @@ -1267,7 +1268,8 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p, amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) { struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo); - r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range); + e->range = NULL; } if (r) { r = -EAGAIN; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 111484ceb47d..91571b1324f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -378,6 +378,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, struct amdgpu_device *adev = drm_to_adev(dev); struct drm_amdgpu_gem_userptr *args = data; struct drm_gem_object *gobj; + struct hmm_range *range; struct amdgpu_bo *bo; uint32_t handle; int r; @@ -418,7 +419,8 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, goto release_object; if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) { - r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages); + r = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, + &range); if (r) goto release_object; @@ -441,7 +443,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, user_pages_done: if (args->flags & AMDGPU_GEM_USERPTR_VALIDATE) - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm); + amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); release_object: drm_gem_object_put(gobj); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 02a8f25ae134..965808f4f682 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -643,9 +643,6 @@ struct amdgpu_ttm_tt { struct task_struct *usertask; uint32_t userflags; bool bound; -#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) - struct hmm_range *range; -#endif }; #define ttm_to_amdgpu_ttm_tt(ptr) container_of(ptr, struct amdgpu_ttm_tt, ttm) @@ -658,7 +655,8 @@ struct amdgpu_ttm_tt { * Calling function must call amdgpu_ttm_tt_userptr_range_done() once and only * once afterwards to stop HMM tracking */ -int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) +int 
amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, + struct hmm_range **range) { struct ttm_tt *ttm = bo->tbo.ttm; struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); @@ -668,16 +666,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) bool readonly; int r = 0; + /* Make sure get_user_pages_done() can cleanup gracefully */ + *range = NULL; + mm = bo->notifier.mm; if (unlikely(!mm)) { DRM_DEBUG_DRIVER("BO is not registered?\n"); return -EFAULT; } - /* Another get_user_pages is running at the same time?? */ - if (WARN_ON(gtt->range)) - return -EFAULT; - if (!mmget_not_zero(mm)) /* Happens during process shutdown */ return -ESRCH; @@ -695,7 +692,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages) readonly = amdgpu_ttm_tt_is_readonly(ttm); r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start, - ttm->num_pages, >t->range, readonly, + ttm->num_pages, range, readonly, true, NULL); out_unlock: mmap_read_unlock(mm); @@ -713,30 +710,24 @@ out_unlock: * * Returns: true if pages are still valid */ -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) +bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, + struct hmm_range *range) { struct amdgpu_ttm_tt *gtt = ttm_to_amdgpu_ttm_tt(ttm); - bool r = false; - if (!gtt || !gtt->userptr) + if (!gtt || !gtt->userptr || !range) return false; DRM_DEBUG_DRIVER("user_pages_done 0x%llx pages 0x%x\n", gtt->userptr, ttm->num_pages); - WARN_ONCE(!gtt->range || !gtt->range->hmm_pfns, - "No user pages to check\n"); + WARN_ONCE(!range->hmm_pfns, "No user pages to check\n"); - if (gtt->range) { - /* - * FIXME: Must always hold notifier_lock for this, and must - * not ignore the return code. - */ - r = amdgpu_hmm_range_get_pages_done(gtt->range); - gtt->range = NULL; - } - - return !r; + /* + * FIXME: Must always hold notifier_lock for this, and must + * not ignore the return code. 
+ */ + return !amdgpu_hmm_range_get_pages_done(range); } #endif @@ -813,20 +804,6 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, /* unmap the pages mapped to the device */ dma_unmap_sgtable(adev->dev, ttm->sg, direction, 0); sg_free_table(ttm->sg); - -#if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) - if (gtt->range) { - unsigned long i; - - for (i = 0; i < ttm->num_pages; i++) { - if (ttm->pages[i] != - hmm_pfn_to_page(gtt->range->hmm_pfns[i])) - break; - } - - WARN((i == ttm->num_pages), "Missing get_user_page_done\n"); - } -#endif } static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 7c38843f411e..b391c8d076ff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -39,6 +39,8 @@ #define AMDGPU_POISON 0xd0bed0be +struct hmm_range; + struct amdgpu_gtt_mgr { struct ttm_resource_manager manager; struct drm_mm mm; @@ -154,15 +156,19 @@ void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) -int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages); -bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm); +int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, + struct hmm_range **range); +bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, + struct hmm_range *range); #else static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, - struct page **pages) + struct page **pages, + struct hmm_range **range) { return -EPERM; } -static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm) +static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, + struct hmm_range *range) { return false; } -- cgit From d9483ecd327b7537c6a51cab515b5faad21b8200 Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 9 Nov 2022 12:28:46 +0100 Subject: drm/amdgpu: rename the files for HMM handling MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Clean that up a bit, no functional change. 
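For code still using the old names, the adaptation is mechanical; a quick reference, with the mapping taken from the diff below (no signature or behavior changes):

	#include "amdgpu_hmm.h"			/* was "amdgpu_mn.h" */

	r = amdgpu_hmm_register(bo, addr);	/* was amdgpu_mn_register() */
	amdgpu_hmm_unregister(bo);		/* was amdgpu_mn_unregister() */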
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/Makefile | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu.h | 1 - drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 7 +- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 5 +- drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 245 +++++++++++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 53 +++++ drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 244 ---------------------- drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h | 53 ----- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 1 - drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 2 +- 11 files changed, 308 insertions(+), 306 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c create mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c delete mode 100644 drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/Makefile b/drivers/gpu/drm/amd/amdgpu/Makefile index 6ad39cf71bdd..712075a491f2 100644 --- a/drivers/gpu/drm/amd/amdgpu/Makefile +++ b/drivers/gpu/drm/amd/amdgpu/Makefile @@ -250,7 +250,7 @@ endif amdgpu-$(CONFIG_COMPAT) += amdgpu_ioc32.o amdgpu-$(CONFIG_VGA_SWITCHEROO) += amdgpu_atpx_handler.o amdgpu-$(CONFIG_ACPI) += amdgpu_acpi.o -amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_mn.o +amdgpu-$(CONFIG_HMM_MIRROR) += amdgpu_hmm.o include $(FULL_AMD_PATH)/pm/Makefile diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h index 1f3a4d596d0d..6b74df446694 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h @@ -82,7 +82,6 @@ #include "amdgpu_vce.h" #include "amdgpu_vcn.h" #include "amdgpu_jpeg.h" -#include "amdgpu_mn.h" #include "amdgpu_gmc.h" #include "amdgpu_gfx.h" #include "amdgpu_sdma.h" diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index e44d740022bc..3a763916a5a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -29,6 +29,7 @@ #include "amdgpu_object.h" #include "amdgpu_gem.h" #include "amdgpu_vm.h" +#include "amdgpu_hmm.h" #include "amdgpu_amdkfd.h" #include "amdgpu_dma_buf.h" #include @@ -949,7 +950,7 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, goto out; } - ret = amdgpu_mn_register(bo, user_addr); + ret = amdgpu_hmm_register(bo, user_addr); if (ret) { pr_err("%s: Failed to register MMU notifier: %d\n", __func__, ret); @@ -989,7 +990,7 @@ release_out: amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); unregister_out: if (ret) - amdgpu_mn_unregister(bo); + amdgpu_hmm_unregister(bo); out: mutex_unlock(&process_info->lock); return ret; @@ -1773,7 +1774,7 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( mutex_unlock(&process_info->lock); /* No more MMU notifiers */ - amdgpu_mn_unregister(mem->bo); + amdgpu_hmm_unregister(mem->bo); ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); if (unlikely(ret)) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 91571b1324f2..a0780a4e3e61 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -38,6 +38,7 @@ #include "amdgpu.h" #include "amdgpu_display.h" #include "amdgpu_dma_buf.h" +#include "amdgpu_hmm.h" #include "amdgpu_xgmi.h" static const struct drm_gem_object_funcs 
amdgpu_gem_object_funcs; @@ -87,7 +88,7 @@ static void amdgpu_gem_object_free(struct drm_gem_object *gobj) struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj); if (robj) { - amdgpu_mn_unregister(robj); + amdgpu_hmm_unregister(robj); amdgpu_bo_unref(&robj); } } @@ -414,7 +415,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data, if (r) goto release_object; - r = amdgpu_mn_register(bo, args->addr); + r = amdgpu_hmm_register(bo, args->addr); if (r) goto release_object; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c new file mode 100644 index 000000000000..a68072f766c7 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -0,0 +1,245 @@ +/* + * Copyright 2014 Advanced Micro Devices, Inc. + * All Rights Reserved. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the + * "Software"), to deal in the Software without restriction, including + * without limitation the rights to use, copy, modify, merge, publish, + * distribute, sub license, and/or sell copies of the Software, and to + * permit persons to whom the Software is furnished to do so, subject to + * the following conditions: + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, + * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR + * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE + * USE OR OTHER DEALINGS IN THE SOFTWARE. + * + * The above copyright notice and this permission notice (including the + * next paragraph) shall be included in all copies or substantial portions + * of the Software. + * + */ +/* + * Authors: + * Christian König + */ + +/** + * DOC: MMU Notifier + * + * For coherent userptr handling registers an MMU notifier to inform the driver + * about updates on the page tables of a process. + * + * When somebody tries to invalidate the page tables we block the update until + * all operations on the pages in question are completed, then those pages are + * marked as accessed and also dirty if it wasn't a read only access. + * + * New command submissions using the userptrs in question are delayed until all + * page table invalidation are completed and we once more see a coherent process + * address space. + */ + +#include +#include +#include + +#include "amdgpu.h" +#include "amdgpu_amdkfd.h" +#include "amdgpu_hmm.h" + +/** + * amdgpu_hmm_invalidate_gfx - callback to notify about mm change + * + * @mni: the range (mm) is about to update + * @range: details on the invalidation + * @cur_seq: Value to pass to mmu_interval_set_seq() + * + * Block for operations on BOs to finish and mark pages as accessed and + * potentially dirty. 
+ */ +static bool amdgpu_hmm_invalidate_gfx(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + long r; + + if (!mmu_notifier_range_blockable(range)) + return false; + + mutex_lock(&adev->notifier_lock); + + mmu_interval_set_seq(mni, cur_seq); + + r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, + false, MAX_SCHEDULE_TIMEOUT); + mutex_unlock(&adev->notifier_lock); + if (r <= 0) + DRM_ERROR("(%ld) failed to wait for user bo\n", r); + return true; +} + +static const struct mmu_interval_notifier_ops amdgpu_hmm_gfx_ops = { + .invalidate = amdgpu_hmm_invalidate_gfx, +}; + +/** + * amdgpu_hmm_invalidate_hsa - callback to notify about mm change + * + * @mni: the range (mm) is about to update + * @range: details on the invalidation + * @cur_seq: Value to pass to mmu_interval_set_seq() + * + * We temporarily evict the BO attached to this range. This necessitates + * evicting all user-mode queues of the process. + */ +static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni, + const struct mmu_notifier_range *range, + unsigned long cur_seq) +{ + struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier); + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); + + if (!mmu_notifier_range_blockable(range)) + return false; + + mutex_lock(&adev->notifier_lock); + + mmu_interval_set_seq(mni, cur_seq); + + amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm); + mutex_unlock(&adev->notifier_lock); + + return true; +} + +static const struct mmu_interval_notifier_ops amdgpu_hmm_hsa_ops = { + .invalidate = amdgpu_hmm_invalidate_hsa, +}; + +/** + * amdgpu_hmm_register - register a BO for notifier updates + * + * @bo: amdgpu buffer object + * @addr: userptr addr we should monitor + * + * Registers a mmu_notifier for the given BO at the specified address. + * Returns 0 on success, -ERRNO if anything goes wrong. + */ +int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr) +{ + if (bo->kfd_bo) + return mmu_interval_notifier_insert(&bo->notifier, current->mm, + addr, amdgpu_bo_size(bo), + &amdgpu_hmm_hsa_ops); + return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, + amdgpu_bo_size(bo), + &amdgpu_hmm_gfx_ops); +} + +/** + * amdgpu_hmm_unregister - unregister a BO for notifier updates + * + * @bo: amdgpu buffer object + * + * Remove any registration of mmu notifier updates from the buffer object. 
+ */ +void amdgpu_hmm_unregister(struct amdgpu_bo *bo) +{ + if (!bo->notifier.mm) + return; + mmu_interval_notifier_remove(&bo->notifier); + bo->notifier.mm = NULL; +} + +int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, + struct mm_struct *mm, struct page **pages, + uint64_t start, uint64_t npages, + struct hmm_range **phmm_range, bool readonly, + bool mmap_locked, void *owner) +{ + struct hmm_range *hmm_range; + unsigned long timeout; + unsigned long i; + unsigned long *pfns; + int r = 0; + + hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); + if (unlikely(!hmm_range)) + return -ENOMEM; + + pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); + if (unlikely(!pfns)) { + r = -ENOMEM; + goto out_free_range; + } + + hmm_range->notifier = notifier; + hmm_range->default_flags = HMM_PFN_REQ_FAULT; + if (!readonly) + hmm_range->default_flags |= HMM_PFN_REQ_WRITE; + hmm_range->hmm_pfns = pfns; + hmm_range->start = start; + hmm_range->end = start + npages * PAGE_SIZE; + hmm_range->dev_private_owner = owner; + + /* Assuming 512MB takes maxmium 1 second to fault page address */ + timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT; + timeout = jiffies + msecs_to_jiffies(timeout); + +retry: + hmm_range->notifier_seq = mmu_interval_read_begin(notifier); + + if (likely(!mmap_locked)) + mmap_read_lock(mm); + + r = hmm_range_fault(hmm_range); + + if (likely(!mmap_locked)) + mmap_read_unlock(mm); + if (unlikely(r)) { + /* + * FIXME: This timeout should encompass the retry from + * mmu_interval_read_retry() as well. + */ + if (r == -EBUSY && !time_after(jiffies, timeout)) + goto retry; + goto out_free_pfns; + } + + /* + * Due to default_flags, all pages are HMM_PFN_VALID or + * hmm_range_fault() fails. FIXME: The pages cannot be touched outside + * the notifier_lock, and mmu_interval_read_retry() must be done first. + */ + for (i = 0; pages && i < npages; i++) + pages[i] = hmm_pfn_to_page(pfns[i]); + + *phmm_range = hmm_range; + + return 0; + +out_free_pfns: + kvfree(pfns); +out_free_range: + kfree(hmm_range); + + return r; +} + +int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range) +{ + int r; + + r = mmu_interval_read_retry(hmm_range->notifier, + hmm_range->notifier_seq); + kvfree(hmm_range->hmm_pfns); + kfree(hmm_range); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h new file mode 100644 index 000000000000..4e596a16d288 --- /dev/null +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h @@ -0,0 +1,53 @@ +/* + * Copyright 2017 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + * + * Authors: Christian König + */ +#ifndef __AMDGPU_MN_H__ +#define __AMDGPU_MN_H__ + +#include +#include +#include +#include +#include + +int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, + struct mm_struct *mm, struct page **pages, + uint64_t start, uint64_t npages, + struct hmm_range **phmm_range, bool readonly, + bool mmap_locked, void *owner); +int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); + +#if defined(CONFIG_HMM_MIRROR) +int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr); +void amdgpu_hmm_unregister(struct amdgpu_bo *bo); +#else +static inline int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr) +{ + DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, " + "add CONFIG_ZONE_DEVICE=y in config file to fix this\n"); + return -ENODEV; +} +static inline void amdgpu_hmm_unregister(struct amdgpu_bo *bo) {} +#endif + +#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c deleted file mode 100644 index b86c0b8252a5..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c +++ /dev/null @@ -1,244 +0,0 @@ -/* - * Copyright 2014 Advanced Micro Devices, Inc. - * All Rights Reserved. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the - * "Software"), to deal in the Software without restriction, including - * without limitation the rights to use, copy, modify, merge, publish, - * distribute, sub license, and/or sell copies of the Software, and to - * permit persons to whom the Software is furnished to do so, subject to - * the following conditions: - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL - * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, - * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR - * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE - * USE OR OTHER DEALINGS IN THE SOFTWARE. - * - * The above copyright notice and this permission notice (including the - * next paragraph) shall be included in all copies or substantial portions - * of the Software. - * - */ -/* - * Authors: - * Christian König - */ - -/** - * DOC: MMU Notifier - * - * For coherent userptr handling registers an MMU notifier to inform the driver - * about updates on the page tables of a process. - * - * When somebody tries to invalidate the page tables we block the update until - * all operations on the pages in question are completed, then those pages are - * marked as accessed and also dirty if it wasn't a read only access. - * - * New command submissions using the userptrs in question are delayed until all - * page table invalidation are completed and we once more see a coherent process - * address space. 
- */ - -#include -#include -#include - -#include "amdgpu.h" -#include "amdgpu_amdkfd.h" - -/** - * amdgpu_mn_invalidate_gfx - callback to notify about mm change - * - * @mni: the range (mm) is about to update - * @range: details on the invalidation - * @cur_seq: Value to pass to mmu_interval_set_seq() - * - * Block for operations on BOs to finish and mark pages as accessed and - * potentially dirty. - */ -static bool amdgpu_mn_invalidate_gfx(struct mmu_interval_notifier *mni, - const struct mmu_notifier_range *range, - unsigned long cur_seq) -{ - struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - long r; - - if (!mmu_notifier_range_blockable(range)) - return false; - - mutex_lock(&adev->notifier_lock); - - mmu_interval_set_seq(mni, cur_seq); - - r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_BOOKKEEP, - false, MAX_SCHEDULE_TIMEOUT); - mutex_unlock(&adev->notifier_lock); - if (r <= 0) - DRM_ERROR("(%ld) failed to wait for user bo\n", r); - return true; -} - -static const struct mmu_interval_notifier_ops amdgpu_mn_gfx_ops = { - .invalidate = amdgpu_mn_invalidate_gfx, -}; - -/** - * amdgpu_mn_invalidate_hsa - callback to notify about mm change - * - * @mni: the range (mm) is about to update - * @range: details on the invalidation - * @cur_seq: Value to pass to mmu_interval_set_seq() - * - * We temporarily evict the BO attached to this range. This necessitates - * evicting all user-mode queues of the process. - */ -static bool amdgpu_mn_invalidate_hsa(struct mmu_interval_notifier *mni, - const struct mmu_notifier_range *range, - unsigned long cur_seq) -{ - struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); - - if (!mmu_notifier_range_blockable(range)) - return false; - - mutex_lock(&adev->notifier_lock); - - mmu_interval_set_seq(mni, cur_seq); - - amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm); - mutex_unlock(&adev->notifier_lock); - - return true; -} - -static const struct mmu_interval_notifier_ops amdgpu_mn_hsa_ops = { - .invalidate = amdgpu_mn_invalidate_hsa, -}; - -/** - * amdgpu_mn_register - register a BO for notifier updates - * - * @bo: amdgpu buffer object - * @addr: userptr addr we should monitor - * - * Registers a mmu_notifier for the given BO at the specified address. - * Returns 0 on success, -ERRNO if anything goes wrong. - */ -int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) -{ - if (bo->kfd_bo) - return mmu_interval_notifier_insert(&bo->notifier, current->mm, - addr, amdgpu_bo_size(bo), - &amdgpu_mn_hsa_ops); - return mmu_interval_notifier_insert(&bo->notifier, current->mm, addr, - amdgpu_bo_size(bo), - &amdgpu_mn_gfx_ops); -} - -/** - * amdgpu_mn_unregister - unregister a BO for notifier updates - * - * @bo: amdgpu buffer object - * - * Remove any registration of mmu notifier updates from the buffer object. 
- */ -void amdgpu_mn_unregister(struct amdgpu_bo *bo) -{ - if (!bo->notifier.mm) - return; - mmu_interval_notifier_remove(&bo->notifier); - bo->notifier.mm = NULL; -} - -int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, - struct mm_struct *mm, struct page **pages, - uint64_t start, uint64_t npages, - struct hmm_range **phmm_range, bool readonly, - bool mmap_locked, void *owner) -{ - struct hmm_range *hmm_range; - unsigned long timeout; - unsigned long i; - unsigned long *pfns; - int r = 0; - - hmm_range = kzalloc(sizeof(*hmm_range), GFP_KERNEL); - if (unlikely(!hmm_range)) - return -ENOMEM; - - pfns = kvmalloc_array(npages, sizeof(*pfns), GFP_KERNEL); - if (unlikely(!pfns)) { - r = -ENOMEM; - goto out_free_range; - } - - hmm_range->notifier = notifier; - hmm_range->default_flags = HMM_PFN_REQ_FAULT; - if (!readonly) - hmm_range->default_flags |= HMM_PFN_REQ_WRITE; - hmm_range->hmm_pfns = pfns; - hmm_range->start = start; - hmm_range->end = start + npages * PAGE_SIZE; - hmm_range->dev_private_owner = owner; - - /* Assuming 512MB takes maxmium 1 second to fault page address */ - timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT; - timeout = jiffies + msecs_to_jiffies(timeout); - -retry: - hmm_range->notifier_seq = mmu_interval_read_begin(notifier); - - if (likely(!mmap_locked)) - mmap_read_lock(mm); - - r = hmm_range_fault(hmm_range); - - if (likely(!mmap_locked)) - mmap_read_unlock(mm); - if (unlikely(r)) { - /* - * FIXME: This timeout should encompass the retry from - * mmu_interval_read_retry() as well. - */ - if (r == -EBUSY && !time_after(jiffies, timeout)) - goto retry; - goto out_free_pfns; - } - - /* - * Due to default_flags, all pages are HMM_PFN_VALID or - * hmm_range_fault() fails. FIXME: The pages cannot be touched outside - * the notifier_lock, and mmu_interval_read_retry() must be done first. - */ - for (i = 0; pages && i < npages; i++) - pages[i] = hmm_pfn_to_page(pfns[i]); - - *phmm_range = hmm_range; - - return 0; - -out_free_pfns: - kvfree(pfns); -out_free_range: - kfree(hmm_range); - - return r; -} - -int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range) -{ - int r; - - r = mmu_interval_read_retry(hmm_range->notifier, - hmm_range->notifier_seq); - kvfree(hmm_range->hmm_pfns); - kfree(hmm_range); - - return r; -} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h deleted file mode 100644 index 14a3c1864085..000000000000 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright 2017 Advanced Micro Devices, Inc. - * - * Permission is hereby granted, free of charge, to any person obtaining a - * copy of this software and associated documentation files (the "Software"), - * to deal in the Software without restriction, including without limitation - * the rights to use, copy, modify, merge, publish, distribute, sublicense, - * and/or sell copies of the Software, and to permit persons to whom the - * Software is furnished to do so, subject to the following conditions: - * - * The above copyright notice and this permission notice shall be included in - * all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR - * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, - * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL - * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR - * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, - * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR - * OTHER DEALINGS IN THE SOFTWARE. - * - * Authors: Christian König - */ -#ifndef __AMDGPU_MN_H__ -#define __AMDGPU_MN_H__ - -#include -#include -#include -#include -#include - -int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, - struct mm_struct *mm, struct page **pages, - uint64_t start, uint64_t npages, - struct hmm_range **phmm_range, bool readonly, - bool mmap_locked, void *owner); -int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); - -#if defined(CONFIG_HMM_MIRROR) -int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr); -void amdgpu_mn_unregister(struct amdgpu_bo *bo); -#else -static inline int amdgpu_mn_register(struct amdgpu_bo *bo, unsigned long addr) -{ - DRM_WARN_ONCE("HMM_MIRROR kernel config option is not enabled, " - "add CONFIG_ZONE_DEVICE=y in config file to fix this\n"); - return -ENODEV; -} -static inline void amdgpu_mn_unregister(struct amdgpu_bo *bo) {} -#endif - -#endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 965808f4f682..ddb13b18ed7b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -58,6 +58,7 @@ #include "amdgpu_amdkfd.h" #include "amdgpu_sdma.h" #include "amdgpu_ras.h" +#include "amdgpu_hmm.h" #include "amdgpu_atomfirmware.h" #include "amdgpu_res_cursor.h" #include "bif/bif_4_1_d.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 3723e90e3a90..77227761e669 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -28,7 +28,6 @@ #include "amdgpu_sync.h" #include "amdgpu_object.h" #include "amdgpu_vm.h" -#include "amdgpu_mn.h" #include "amdgpu_res_cursor.h" #include "kfd_priv.h" #include "kfd_svm.h" diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index afe7c4998676..1cf7dcb22e3f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -26,7 +26,7 @@ #include "amdgpu_sync.h" #include "amdgpu_object.h" #include "amdgpu_vm.h" -#include "amdgpu_mn.h" +#include "amdgpu_hmm.h" #include "amdgpu.h" #include "amdgpu_xgmi.h" #include "kfd_priv.h" -- cgit From d4cbff464d2932a71d7d3b7d17ffef7700b58edd Mon Sep 17 00:00:00 2001 From: Christian König Date: Wed, 9 Nov 2022 12:41:08 +0100 Subject: drm/amdgpu: cleanup amdgpu_hmm_range_get_pages MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove unused parameters and cleanup dead code. 
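For callers the change is just a tighter signature; before and after, as seen in the amdgpu_ttm.c hunk below:

	/* old: caller passed the mm and whether mmap_lock was held */
	r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
				       ttm->num_pages, &range, readonly,
				       true, NULL);

	/* new: the mm/mmap_locked plumbing is gone, inputs first,
	 * output pointers last */
	r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages,
				       readonly, NULL, pages, &range);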
Signed-off-by: Christian König Reviewed-by: Alex Deucher Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 14 +++----------- drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 7 +++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 ++--- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 6 +++--- 4 files changed, 11 insertions(+), 21 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c index a68072f766c7..a48ea62b12b0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -158,10 +158,9 @@ void amdgpu_hmm_unregister(struct amdgpu_bo *bo) } int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, - struct mm_struct *mm, struct page **pages, - uint64_t start, uint64_t npages, - struct hmm_range **phmm_range, bool readonly, - bool mmap_locked, void *owner) + uint64_t start, uint64_t npages, bool readonly, + void *owner, struct page **pages, + struct hmm_range **phmm_range) { struct hmm_range *hmm_range; unsigned long timeout; @@ -194,14 +193,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, retry: hmm_range->notifier_seq = mmu_interval_read_begin(notifier); - - if (likely(!mmap_locked)) - mmap_read_lock(mm); - r = hmm_range_fault(hmm_range); - - if (likely(!mmap_locked)) - mmap_read_unlock(mm); if (unlikely(r)) { /* * FIXME: This timeout should encompass the retry from diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h index 4e596a16d288..13ed94d3b01b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h @@ -31,10 +31,9 @@ #include int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, - struct mm_struct *mm, struct page **pages, - uint64_t start, uint64_t npages, - struct hmm_range **phmm_range, bool readonly, - bool mmap_locked, void *owner); + uint64_t start, uint64_t npages, bool readonly, + void *owner, struct page **pages, + struct hmm_range **phmm_range); int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); #if defined(CONFIG_HMM_MIRROR) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index ddb13b18ed7b..c8169cea3032 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -692,9 +692,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, } readonly = amdgpu_ttm_tt_is_readonly(ttm); - r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start, - ttm->num_pages, range, readonly, - true, NULL); + r = amdgpu_hmm_range_get_pages(&bo->notifier, start, ttm->num_pages, + readonly, NULL, pages, range); out_unlock: mmap_read_unlock(mm); if (r) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 1cf7dcb22e3f..814f99888ab1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1596,9 +1596,9 @@ static int svm_range_validate_and_map(struct mm_struct *mm, next = min(vma->vm_end, end); npages = (next - addr) >> PAGE_SHIFT; WRITE_ONCE(p->svms.faulting_task, current); - r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL, - addr, npages, &hmm_range, - readonly, true, owner); + r = amdgpu_hmm_range_get_pages(&prange->notifier, addr, npages, + readonly, owner, NULL, + &hmm_range); WRITE_ONCE(p->svms.faulting_task, NULL); if (r) { pr_debug("failed %d to get 
svm range pages\n", r); -- cgit From 6d96ced7600e02ac1efb03a21af529fd9a95e3c6 Mon Sep 17 00:00:00 2001 From: Tong Liu01 Date: Thu, 17 Nov 2022 18:18:58 +0800 Subject: drm/amdgpu: add drv_vram_usage_va for virt data exchange For vram_usagebyfirmware_v2_2, fw_vram_reserve is not used, so fw_vram_usage_va is NULL and virt data exchange can no longer take place. Add drv_vram_usage_va to handle virt data exchange in the vram_usagebyfirmware_v2_2 case, and clean up some code style issues in the earlier vram reservation logic patch. Signed-off-by: Tong Liu01 Acked-by: Luben Tuikov Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c | 16 ++++--- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 1 + drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 54 +++++++++++++++--------- 4 files changed, 49 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c index 9b97fa39d47a..e40df72c138a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atomfirmware.c @@ -104,7 +104,7 @@ void amdgpu_atomfirmware_scratch_regs_init(struct amdgpu_device *adev) static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev, struct vram_usagebyfirmware_v2_1 *fw_usage, int *usage_bytes) { - uint32_t start_addr, fw_size, drv_size; + u32 start_addr, fw_size, drv_size; start_addr = le32_to_cpu(fw_usage->start_address_in_kb); fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb); @@ -116,7 +116,7 @@ static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev, drv_size); if ((start_addr & ATOM_VRAM_OPERATION_FLAGS_MASK) == - (uint32_t)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION << + (u32)(ATOM_VRAM_BLOCK_SRIOV_MSG_SHARE_RESERVATION << ATOM_VRAM_OPERATION_FLAGS_SHIFT)) { /* Firmware request VRAM reservation for SR-IOV */ adev->mman.fw_vram_usage_start_offset = (start_addr & @@ -133,7 +133,7 @@ static int amdgpu_atomfirmware_allocate_fb_v2_1(struct amdgpu_device *adev, static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev, struct vram_usagebyfirmware_v2_2 *fw_usage, int *usage_bytes) { - uint32_t fw_start_addr, fw_size, drv_start_addr, drv_size; + u32 fw_start_addr, fw_size, drv_start_addr, drv_size; fw_start_addr = le32_to_cpu(fw_usage->fw_region_start_address_in_kb); fw_size = le16_to_cpu(fw_usage->used_by_firmware_in_kb); @@ -147,14 +147,16 @@ static int amdgpu_atomfirmware_allocate_fb_v2_2(struct amdgpu_device *adev, drv_start_addr, drv_size); - if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) { + if ((fw_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << + ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0) { /* Firmware request VRAM reservation for SR-IOV */ adev->mman.fw_vram_usage_start_offset = (fw_start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; adev->mman.fw_vram_usage_size = fw_size << 10; } - if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << 30)) == 0) { + if ((drv_start_addr & (ATOM_VRAM_BLOCK_NEEDS_NO_RESERVATION << + ATOM_VRAM_OPERATION_FLAGS_SHIFT)) == 0) { /* driver request VRAM reservation for SR-IOV */ adev->mman.drv_vram_usage_start_offset = (drv_start_addr & (~ATOM_VRAM_OPERATION_FLAGS_MASK)) << 10; @@ -172,8 +174,8 @@ int amdgpu_atomfirmware_allocate_fb_scratch(struct amdgpu_device *adev) vram_usagebyfirmware); struct vram_usagebyfirmware_v2_1 *fw_usage_v2_1; struct
vram_usagebyfirmware_v2_2 *fw_usage_v2_2; - uint16_t data_offset; - uint8_t frev, crev; + u16 data_offset; + u8 frev, crev; int usage_bytes = 0; if (amdgpu_atom_parse_data_header(ctx, index, NULL, &frev, &crev, &data_offset)) { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 7b5074e776f4..b4236572eae1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1545,7 +1545,7 @@ static void amdgpu_ttm_drv_reserve_vram_fini(struct amdgpu_device *adev) { amdgpu_bo_free_kernel(&adev->mman.drv_vram_usage_reserved_bo, NULL, - NULL); + &adev->mman.drv_vram_usage_va); } /** @@ -1583,8 +1583,9 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) */ static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev) { - uint64_t vram_size = adev->gmc.visible_vram_size; + u64 vram_size = adev->gmc.visible_vram_size; + adev->mman.drv_vram_usage_va = NULL; adev->mman.drv_vram_usage_reserved_bo = NULL; if (adev->mman.drv_vram_usage_size == 0 || @@ -1596,7 +1597,7 @@ static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev) adev->mman.drv_vram_usage_size, AMDGPU_GEM_DOMAIN_VRAM, &adev->mman.drv_vram_usage_reserved_bo, - NULL); + &adev->mman.drv_vram_usage_va); } /* diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index b391c8d076ff..b4d8ba2789f3 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -90,6 +90,7 @@ struct amdgpu_mman { u64 drv_vram_usage_start_offset; u64 drv_vram_usage_size; struct amdgpu_bo *drv_vram_usage_reserved_bo; + void *drv_vram_usage_va; /* PAGE_SIZE'd BO for process memory r/w over SDMA. */ struct amdgpu_bo *sdma_access_bo; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index a226a6c48fb7..15544f262ec1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -428,11 +428,17 @@ static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev, struct eeprom_table_record bp; uint64_t retired_page; uint32_t bp_idx, bp_cnt; + void *vram_usage_va = NULL; + + if (adev->mman.fw_vram_usage_va) + vram_usage_va = adev->mman.fw_vram_usage_va; + else + vram_usage_va = adev->mman.drv_vram_usage_va; if (bp_block_size) { bp_cnt = bp_block_size / sizeof(uint64_t); for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) { - retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va + + retired_page = *(uint64_t *)(vram_usage_va + bp_block_offset + bp_idx * sizeof(uint64_t)); bp.retired_page = retired_page; @@ -643,7 +649,9 @@ void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev) adev->virt.fw_reserve.p_vf2pf = NULL; adev->virt.vf2pf_update_interval_ms = 0; - if (adev->mman.fw_vram_usage_va != NULL) { + if (adev->mman.fw_vram_usage_va && adev->mman.drv_vram_usage_va) { + DRM_WARN("Currently fw_vram and drv_vram should not have values at the same time!"); + } else if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) { /* go through this logic in ip_init and reset to init workqueue*/ amdgpu_virt_exchange_data(adev); @@ -666,32 +674,40 @@ void amdgpu_virt_exchange_data(struct amdgpu_device *adev) uint32_t bp_block_size = 0; struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL; - if (adev->mman.fw_vram_usage_va != NULL) { - - adev->virt.fw_reserve.p_pf2vf = - (struct amd_sriov_msg_pf2vf_info_header *) - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); - 
adev->virt.fw_reserve.p_vf2pf = - (struct amd_sriov_msg_vf2pf_info_header *) - (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + if (adev->mman.fw_vram_usage_va || adev->mman.drv_vram_usage_va) { + if (adev->mman.fw_vram_usage_va) { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + adev->virt.fw_reserve.p_vf2pf = + (struct amd_sriov_msg_vf2pf_info_header *) + (adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + } else if (adev->mman.drv_vram_usage_va) { + adev->virt.fw_reserve.p_pf2vf = + (struct amd_sriov_msg_pf2vf_info_header *) + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10)); + adev->virt.fw_reserve.p_vf2pf = + (struct amd_sriov_msg_vf2pf_info_header *) + (adev->mman.drv_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10)); + } amdgpu_virt_read_pf2vf_data(adev); amdgpu_virt_write_vf2pf_data(adev); /* bad page handling for version 2 */ if (adev->virt.fw_reserve.p_pf2vf->version == 2) { - pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf; + pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf; - bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) | - ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000); - bp_block_size = pf2vf_v2->bp_block_size; + bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) | + ((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000); + bp_block_size = pf2vf_v2->bp_block_size; - if (bp_block_size && !adev->virt.ras_init_done) - amdgpu_virt_init_ras_err_handler_data(adev); + if (bp_block_size && !adev->virt.ras_init_done) + amdgpu_virt_init_ras_err_handler_data(adev); - if (adev->virt.ras_init_done) - amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); - } + if (adev->virt.ras_init_done) + amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size); + } } } -- cgit From f95f51a4c3357eabf74fe14ab7daa5b5c0422b27 Mon Sep 17 00:00:00 2001 From: Felix Kuehling Date: Wed, 21 Apr 2021 21:09:54 -0400 Subject: drm/amdgpu: Add notifier lock for KFD userptrs Add a per-process MMU notifier lock for processing notifiers from userptrs. Use that lock to properly synchronize page table updates with MMU notifiers. 
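In outline, the notifier callback now does its bookkeeping under the new per-process lock. A condensed sketch of the change below (not the literal upstream code; the CRIU guard, error reporting and the delayed restore work are omitted):

	/* Notifier side: may run in RECLAIM_FS context, so it only records
	 * the invalidation and stops the queues; revalidation happens later
	 * in the restore worker.
	 */
	int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
					unsigned long cur_seq,
					struct kgd_mem *mem)
	{
		struct amdkfd_process_info *process_info = mem->process_info;
		int r = 0;

		mutex_lock(&process_info->notifier_lock);
		/* Publish the new sequence number under the lock so a later
		 * amdgpu_ttm_tt_get_user_pages_done() can detect the race.
		 */
		mmu_interval_set_seq(mni, cur_seq);
		mem->invalid++;	/* plain counter; the mutex replaces the atomics */
		if (++process_info->evicted_bos == 1)	/* first eviction stops the queues */
			r = kgd2kfd_quiesce_mm(mni->mm,
					       KFD_QUEUE_EVICTION_TRIGGER_USERPTR);
		mutex_unlock(&process_info->notifier_lock);
		return r;
	}

The restore worker takes the same mutex before re-reading evicted_bos and before confirming user pages, so an invalidation that fires mid-restore shows up in the counter and sequence checks instead of being lost.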
Signed-off-by: Felix Kuehling Reviewed-by: Xiaogang Chen Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 13 +- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 212 +++++++++++++++-------- drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c | 12 +- drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h | 3 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 17 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h | 6 + 6 files changed, 172 insertions(+), 91 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index f50e3ba4d7a5..589939631ed4 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -29,6 +29,7 @@ #include #include #include +#include #include #include #include "amdgpu_sync.h" @@ -65,6 +66,7 @@ struct kgd_mem { struct mutex lock; struct amdgpu_bo *bo; struct dma_buf *dmabuf; + struct hmm_range *range; struct list_head attachments; /* protected by amdkfd_process_info.lock */ struct ttm_validate_buffer validate_list; @@ -75,7 +77,7 @@ struct kgd_mem { uint32_t alloc_flags; - atomic_t invalid; + uint32_t invalid; struct amdkfd_process_info *process_info; struct amdgpu_sync sync; @@ -131,7 +133,8 @@ struct amdkfd_process_info { struct amdgpu_amdkfd_fence *eviction_fence; /* MMU-notifier related fields */ - atomic_t evicted_bos; + struct mutex notifier_lock; + uint32_t evicted_bos; struct delayed_work restore_userptr_work; struct pid *pid; bool block_mmu_notifications; @@ -180,7 +183,8 @@ int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data); bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm); struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f); int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo); -int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm); +int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, + unsigned long cur_seq, struct kgd_mem *mem); #else static inline bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm) @@ -201,7 +205,8 @@ int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo) } static inline -int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm) +int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, + unsigned long cur_seq, struct kgd_mem *mem) { return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 8782916e64a0..0a854bb8b47e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -964,7 +964,9 @@ static int init_user_pages(struct kgd_mem *mem, uint64_t user_addr, * later stage when it is scheduled by another ioctl called by * CRIU master process for the target pid for restore. 
*/ - atomic_inc(&mem->invalid); + mutex_lock(&process_info->notifier_lock); + mem->invalid++; + mutex_unlock(&process_info->notifier_lock); mutex_unlock(&process_info->lock); return 0; } @@ -1301,6 +1303,7 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, return -ENOMEM; mutex_init(&info->lock); + mutex_init(&info->notifier_lock); INIT_LIST_HEAD(&info->vm_list_head); INIT_LIST_HEAD(&info->kfd_bo_list); INIT_LIST_HEAD(&info->userptr_valid_list); @@ -1317,7 +1320,6 @@ static int init_kfd_vm(struct amdgpu_vm *vm, void **process_info, } info->pid = get_task_pid(current->group_leader, PIDTYPE_PID); - atomic_set(&info->evicted_bos, 0); INIT_DELAYED_WORK(&info->restore_userptr_work, amdgpu_amdkfd_restore_userptr_worker); @@ -1372,6 +1374,7 @@ reserve_pd_fail: put_pid(info->pid); create_evict_fence_fail: mutex_destroy(&info->lock); + mutex_destroy(&info->notifier_lock); kfree(info); } return ret; @@ -1496,6 +1499,7 @@ void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, cancel_delayed_work_sync(&process_info->restore_userptr_work); put_pid(process_info->pid); mutex_destroy(&process_info->lock); + mutex_destroy(&process_info->notifier_lock); kfree(process_info); } } @@ -1548,7 +1552,9 @@ int amdgpu_amdkfd_criu_resume(void *p) mutex_lock(&pinfo->lock); pr_debug("scheduling work\n"); - atomic_inc(&pinfo->evicted_bos); + mutex_lock(&pinfo->notifier_lock); + pinfo->evicted_bos++; + mutex_unlock(&pinfo->notifier_lock); if (!READ_ONCE(pinfo->block_mmu_notifications)) { ret = -EINVAL; goto out_unlock; @@ -1773,8 +1779,13 @@ int amdgpu_amdkfd_gpuvm_free_memory_of_gpu( list_del(&bo_list_entry->head); mutex_unlock(&process_info->lock); - /* No more MMU notifiers */ - amdgpu_hmm_unregister(mem->bo); + /* Cleanup user pages and MMU notifiers */ + if (amdgpu_ttm_tt_get_usermm(mem->bo->tbo.ttm)) { + amdgpu_hmm_unregister(mem->bo); + mutex_lock(&process_info->notifier_lock); + amdgpu_ttm_tt_discard_user_pages(mem->bo->tbo.ttm, mem->range); + mutex_unlock(&process_info->notifier_lock); + } ret = reserve_bo_and_cond_vms(mem, NULL, BO_VM_ALL, &ctx); if (unlikely(ret)) @@ -1864,6 +1875,16 @@ int amdgpu_amdkfd_gpuvm_map_memory_to_gpu( */ mutex_lock(&mem->process_info->lock); + /* Lock notifier lock. If we find an invalid userptr BO, we can be + * sure that the MMU notifier is no longer running + * concurrently and the queues are actually stopped + */ + if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) { + mutex_lock(&mem->process_info->notifier_lock); + is_invalid_userptr = !!mem->invalid; + mutex_unlock(&mem->process_info->notifier_lock); + } + mutex_lock(&mem->lock); domain = mem->domain; @@ -2241,34 +2262,38 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, * * Runs in MMU notifier, may be in RECLAIM_FS context. This means it * cannot do any memory allocations, and cannot take any locks that - * are held elsewhere while allocating memory. Therefore this is as - * simple as possible, using atomic counters. + * are held elsewhere while allocating memory. * * It doesn't do anything to the BO itself. The real work happens in * restore, where we get updated page addresses. This function only * ensures that GPU access to the BO is stopped. 
*/ -int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, - struct mm_struct *mm) +int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni, + unsigned long cur_seq, struct kgd_mem *mem) { struct amdkfd_process_info *process_info = mem->process_info; - int evicted_bos; int r = 0; - /* Do not process MMU notifications until stage-4 IOCTL is received */ + /* Do not process MMU notifications during CRIU restore until + * KFD_CRIU_OP_RESUME IOCTL is received + */ if (READ_ONCE(process_info->block_mmu_notifications)) return 0; - atomic_inc(&mem->invalid); - evicted_bos = atomic_inc_return(&process_info->evicted_bos); - if (evicted_bos == 1) { + mutex_lock(&process_info->notifier_lock); + mmu_interval_set_seq(mni, cur_seq); + + mem->invalid++; + if (++process_info->evicted_bos == 1) { /* First eviction, stop the queues */ - r = kgd2kfd_quiesce_mm(mm, KFD_QUEUE_EVICTION_TRIGGER_USERPTR); + r = kgd2kfd_quiesce_mm(mni->mm, + KFD_QUEUE_EVICTION_TRIGGER_USERPTR); if (r) pr_err("Failed to quiesce KFD\n"); schedule_delayed_work(&process_info->restore_userptr_work, msecs_to_jiffies(AMDGPU_USERPTR_RESTORE_DELAY_MS)); } + mutex_unlock(&process_info->notifier_lock); return r; } @@ -2285,54 +2310,58 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, struct kgd_mem *mem, *tmp_mem; struct amdgpu_bo *bo; struct ttm_operation_ctx ctx = { false, false }; - int invalid, ret; + uint32_t invalid; + int ret = 0; - /* Move all invalidated BOs to the userptr_inval_list and - * release their user pages by migration to the CPU domain - */ + mutex_lock(&process_info->notifier_lock); + + /* Move all invalidated BOs to the userptr_inval_list */ list_for_each_entry_safe(mem, tmp_mem, &process_info->userptr_valid_list, - validate_list.head) { - if (!atomic_read(&mem->invalid)) - continue; /* BO is still valid */ - - bo = mem->bo; - - if (amdgpu_bo_reserve(bo, true)) - return -EAGAIN; - amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); - ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); - amdgpu_bo_unreserve(bo); - if (ret) { - pr_err("%s: Failed to invalidate userptr BO\n", - __func__); - return -EAGAIN; - } - - list_move_tail(&mem->validate_list.head, - &process_info->userptr_inval_list); - } - - if (list_empty(&process_info->userptr_inval_list)) - return 0; /* All evicted userptr BOs were freed */ + validate_list.head) + if (mem->invalid) + list_move_tail(&mem->validate_list.head, + &process_info->userptr_inval_list); /* Go through userptr_inval_list and update any invalid user_pages */ list_for_each_entry(mem, &process_info->userptr_inval_list, validate_list.head) { - struct hmm_range *range; - - invalid = atomic_read(&mem->invalid); + invalid = mem->invalid; if (!invalid) /* BO hasn't been invalidated since the last - * revalidation attempt. Keep its BO list. + * revalidation attempt. Keep its page list. 
*/ continue; bo = mem->bo; + amdgpu_ttm_tt_discard_user_pages(bo->tbo.ttm, mem->range); + mem->range = NULL; + + /* BO reservations and getting user pages (hmm_range_fault) + * must happen outside the notifier lock + */ + mutex_unlock(&process_info->notifier_lock); + + /* Move the BO to system (CPU) domain if necessary to unmap + * and free the SG table + */ + if (bo->tbo.resource->mem_type != TTM_PL_SYSTEM) { + if (amdgpu_bo_reserve(bo, true)) + return -EAGAIN; + amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU); + ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx); + amdgpu_bo_unreserve(bo); + if (ret) { + pr_err("%s: Failed to invalidate userptr BO\n", + __func__); + return -EAGAIN; + } + } + /* Get updated user pages */ ret = amdgpu_ttm_tt_get_user_pages(bo, bo->tbo.ttm->pages, - &range); + &mem->range); if (ret) { pr_debug("Failed %d to get user pages\n", ret); @@ -2345,30 +2374,32 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info, */ if (ret != -EFAULT) return ret; - } else { - /* - * FIXME: Cannot ignore the return code, must hold - * notifier_lock - */ - amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, range); + ret = 0; } + mutex_lock(&process_info->notifier_lock); + /* Mark the BO as valid unless it was invalidated * again concurrently. */ - if (atomic_cmpxchg(&mem->invalid, invalid, 0) != invalid) - return -EAGAIN; + if (mem->invalid != invalid) { + ret = -EAGAIN; + goto unlock_out; + } + mem->invalid = 0; } - return 0; +unlock_out: + mutex_unlock(&process_info->notifier_lock); + + return ret; } /* Validate invalid userptr BOs * - * Validates BOs on the userptr_inval_list, and moves them back to the - * userptr_valid_list. Also updates GPUVM page tables with new page - * addresses and waits for the page table updates to complete. + * Validates BOs on the userptr_inval_list. Also updates GPUVM page tables + * with new page addresses and waits for the page table updates to complete. */ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) { @@ -2439,9 +2470,6 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) } } - list_move_tail(&mem->validate_list.head, - &process_info->userptr_valid_list); - /* Update mapping. If the BO was not validated * (because we couldn't get user pages), this will * clear the page table entries, which will result in @@ -2457,7 +2485,9 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info) if (ret) { pr_err("%s: update PTE failed\n", __func__); /* make sure this gets validated again */ - atomic_inc(&mem->invalid); + mutex_lock(&process_info->notifier_lock); + mem->invalid++; + mutex_unlock(&process_info->notifier_lock); goto unreserve_out; } } @@ -2477,6 +2507,36 @@ out_no_mem: return ret; } +/* Confirm that all user pages are valid while holding the notifier lock + * + * Moves valid BOs from the userptr_inval_list back to userptr_val_list. 
+ */ +static int confirm_valid_user_pages_locked(struct amdkfd_process_info *process_info) +{ + struct kgd_mem *mem, *tmp_mem; + int ret = 0; + + list_for_each_entry_safe(mem, tmp_mem, + &process_info->userptr_inval_list, + validate_list.head) { + bool valid = amdgpu_ttm_tt_get_user_pages_done( + mem->bo->tbo.ttm, mem->range); + + mem->range = NULL; + if (!valid) { + WARN(!mem->invalid, "Invalid BO not marked invalid"); + ret = -EAGAIN; + continue; + } + WARN(mem->invalid, "Valid BO is marked invalid"); + + list_move_tail(&mem->validate_list.head, + &process_info->userptr_valid_list); + } + + return ret; +} + /* Worker callback to restore evicted userptr BOs * * Tries to update and validate all userptr BOs. If successful and no @@ -2491,9 +2551,11 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) restore_userptr_work); struct task_struct *usertask; struct mm_struct *mm; - int evicted_bos; + uint32_t evicted_bos; - evicted_bos = atomic_read(&process_info->evicted_bos); + mutex_lock(&process_info->notifier_lock); + evicted_bos = process_info->evicted_bos; + mutex_unlock(&process_info->notifier_lock); if (!evicted_bos) return; @@ -2516,9 +2578,6 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) * and we can just restart the queues. */ if (!list_empty(&process_info->userptr_inval_list)) { - if (atomic_read(&process_info->evicted_bos) != evicted_bos) - goto unlock_out; /* Concurrent eviction, try again */ - if (validate_invalid_user_pages(process_info)) goto unlock_out; } @@ -2527,10 +2586,17 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) * be a first eviction that calls quiesce_mm. The eviction * reference counting inside KFD will handle this case. */ - if (atomic_cmpxchg(&process_info->evicted_bos, evicted_bos, 0) != - evicted_bos) - goto unlock_out; - evicted_bos = 0; + mutex_lock(&process_info->notifier_lock); + if (process_info->evicted_bos != evicted_bos) + goto unlock_notifier_out; + + if (confirm_valid_user_pages_locked(process_info)) { + WARN(1, "User pages unexpectedly invalid"); + goto unlock_notifier_out; + } + + process_info->evicted_bos = evicted_bos = 0; + if (kgd2kfd_resume_mm(mm)) { pr_err("%s: Failed to resume KFD\n", __func__); /* No recovery from this failure. 
Probably the CP is @@ -2538,6 +2604,8 @@ static void amdgpu_amdkfd_restore_userptr_worker(struct work_struct *work) */ } +unlock_notifier_out: + mutex_unlock(&process_info->notifier_lock); unlock_out: mutex_unlock(&process_info->lock); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c index 65715cb395d8..2dadcfe43d03 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.c @@ -105,17 +105,11 @@ static bool amdgpu_hmm_invalidate_hsa(struct mmu_interval_notifier *mni, unsigned long cur_seq) { struct amdgpu_bo *bo = container_of(mni, struct amdgpu_bo, notifier); - struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); if (!mmu_notifier_range_blockable(range)) return false; - mutex_lock(&adev->notifier_lock); - - mmu_interval_set_seq(mni, cur_seq); - - amdgpu_amdkfd_evict_userptr(bo->kfd_bo, bo->notifier.mm); - mutex_unlock(&adev->notifier_lock); + amdgpu_amdkfd_evict_userptr(mni, cur_seq, bo->kfd_bo); return true; } @@ -244,9 +238,9 @@ out_free_range: return r; } -int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range) +bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range) { - int r; + bool r; r = mmu_interval_read_retry(hmm_range->notifier, hmm_range->notifier_seq); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h index 13ed94d3b01b..e2edcd010ccc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_hmm.h @@ -29,12 +29,13 @@ #include #include #include +#include int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier, uint64_t start, uint64_t npages, bool readonly, void *owner, struct page **pages, struct hmm_range **phmm_range); -int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); +bool amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range); #if defined(CONFIG_HMM_MIRROR) int amdgpu_hmm_register(struct amdgpu_bo *bo, unsigned long addr); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index b4236572eae1..f0e4c7309438 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -695,8 +695,19 @@ out_unlock: return r; } +/* amdgpu_ttm_tt_discard_user_pages - Discard range and pfn array allocations + */ +void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, + struct hmm_range *range) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + + if (gtt && gtt->userptr && range) + amdgpu_hmm_range_get_pages_done(range); +} + /* - * amdgpu_ttm_tt_userptr_range_done - stop HMM track the CPU page table change + * amdgpu_ttm_tt_get_user_pages_done - stop HMM track the CPU page table change * Check if the pages backing this ttm range have been invalidated * * Returns: true if pages are still valid @@ -714,10 +725,6 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, WARN_ONCE(!range->hmm_pfns, "No user pages to check\n"); - /* - * FIXME: Must always hold notifier_lock for this, and must - * not ignore the return code. 
- */ return !amdgpu_hmm_range_get_pages_done(range); } #endif diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index b4d8ba2789f3..e2cd5894afc9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -159,6 +159,8 @@ uint64_t amdgpu_ttm_domain_start(struct amdgpu_device *adev, uint32_t type); #if IS_ENABLED(CONFIG_DRM_AMDGPU_USERPTR) int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages, struct hmm_range **range); +void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, + struct hmm_range *range); bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, struct hmm_range *range); #else @@ -168,6 +170,10 @@ static inline int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, { return -EPERM; } +static inline void amdgpu_ttm_tt_discard_user_pages(struct ttm_tt *ttm, + struct hmm_range *range) +{ +} static inline bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm, struct hmm_range *range) { -- cgit From 3273f11675ef11959d25a56df3279f712bcd41b7 Mon Sep 17 00:00:00 2001 From: Luben Tuikov Date: Wed, 14 Dec 2022 03:56:03 -0500 Subject: drm/amdgpu: Remove unnecessary domain argument MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Remove the "domain" argument to amdgpu_bo_create_kernel_at() since this function takes an "offset" argument which is the offset off of VRAM, and as such allocation always takes place in VRAM. Thus, the "domain" argument is unnecessary. Cc: Alex Deucher Cc: Christian König Cc: AMD Graphics Signed-off-by: Luben Tuikov Reviewed-by: Christian König Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 10 +++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 7 ------- drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c | 1 - 4 files changed, 6 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 26c6e9b2d460..0f6385941480 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -346,17 +346,16 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, * @adev: amdgpu device object * @offset: offset of the BO * @size: size of the BO - * @domain: where to place it * @bo_ptr: used to initialize BOs in structures * @cpu_addr: optional CPU address mapping * - * Creates a kernel BO at a specific offset in the address space of the domain. + * Creates a kernel BO at a specific offset in VRAM. * * Returns: * 0 on success, negative error code otherwise. 
*/ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, uint32_t domain, + uint64_t offset, uint64_t size, struct amdgpu_bo **bo_ptr, void **cpu_addr) { struct ttm_operation_ctx ctx = { false, false }; @@ -366,8 +365,9 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, offset &= PAGE_MASK; size = ALIGN(size, PAGE_SIZE); - r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, domain, bo_ptr, - NULL, cpu_addr); + r = amdgpu_bo_create_reserved(adev, size, PAGE_SIZE, + AMDGPU_GEM_DOMAIN_VRAM, bo_ptr, NULL, + cpu_addr); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index 147b79c10cbb..93207badf83f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -284,7 +284,7 @@ int amdgpu_bo_create_kernel(struct amdgpu_device *adev, u32 domain, struct amdgpu_bo **bo_ptr, u64 *gpu_addr, void **cpu_addr); int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev, - uint64_t offset, uint64_t size, uint32_t domain, + uint64_t offset, uint64_t size, struct amdgpu_bo **bo_ptr, void **cpu_addr); int amdgpu_bo_create_user(struct amdgpu_device *adev, struct amdgpu_bo_param *bp, diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f0e4c7309438..55e0284b2bdd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1576,7 +1576,6 @@ static int amdgpu_ttm_fw_reserve_vram_init(struct amdgpu_device *adev) return amdgpu_bo_create_kernel_at(adev, adev->mman.fw_vram_usage_start_offset, adev->mman.fw_vram_usage_size, - AMDGPU_GEM_DOMAIN_VRAM, &adev->mman.fw_vram_usage_reserved_bo, &adev->mman.fw_vram_usage_va); } @@ -1602,7 +1601,6 @@ static int amdgpu_ttm_drv_reserve_vram_init(struct amdgpu_device *adev) return amdgpu_bo_create_kernel_at(adev, adev->mman.drv_vram_usage_start_offset, adev->mman.drv_vram_usage_size, - AMDGPU_GEM_DOMAIN_VRAM, &adev->mman.drv_vram_usage_reserved_bo, &adev->mman.drv_vram_usage_va); } @@ -1683,7 +1681,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) ret = amdgpu_bo_create_kernel_at(adev, ctx->c2p_train_data_offset, ctx->train_data_size, - AMDGPU_GEM_DOMAIN_VRAM, &ctx->c2p_bo, NULL); if (ret) { @@ -1697,7 +1694,6 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev) ret = amdgpu_bo_create_kernel_at(adev, adev->gmc.real_vram_size - adev->mman.discovery_tmr_size, adev->mman.discovery_tmr_size, - AMDGPU_GEM_DOMAIN_VRAM, &adev->mman.discovery_memory, NULL); if (ret) { @@ -1798,21 +1794,18 @@ int amdgpu_ttm_init(struct amdgpu_device *adev) * avoid display artifacts while transitioning between pre-OS * and driver. 
*/ r = amdgpu_bo_create_kernel_at(adev, 0, adev->mman.stolen_vga_size, - AMDGPU_GEM_DOMAIN_VRAM, &adev->mman.stolen_vga_memory, NULL); if (r) return r; r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_vga_size, adev->mman.stolen_extended_size, - AMDGPU_GEM_DOMAIN_VRAM, &adev->mman.stolen_extended_memory, NULL); if (r) return r; r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset, adev->mman.stolen_reserved_size, - AMDGPU_GEM_DOMAIN_VRAM, &adev->mman.stolen_reserved_memory, NULL); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c index 15544f262ec1..2994b9db196f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_virt.c @@ -395,7 +395,6 @@ static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev) */ if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT, AMDGPU_GPU_PAGE_SIZE, - AMDGPU_GEM_DOMAIN_VRAM, &bo, NULL)) DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp); -- cgit
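As a usage illustration, reserving a region at a fixed VRAM offset with the new signature looks roughly like this (a minimal sketch; "offset" and "size" stand in for a caller's values, and error handling is condensed):

	struct amdgpu_bo *bo = NULL;
	void *cpu_addr = NULL;
	int r;

	/* No domain argument anymore: the offset already addresses VRAM,
	 * so the helper always places the BO there.
	 */
	r = amdgpu_bo_create_kernel_at(adev, offset, size, &bo, &cpu_addr);
	if (r)
		return r;

Dropping the parameter also removes the possibility of passing a domain that contradicts a VRAM offset, which is the point of the cleanup.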