Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 189
1 file changed, 82 insertions(+), 107 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 4ed9958af94e..b443907afcea 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -33,12 +33,16 @@
 #include <drm/amdgpu_drm.h>
 #include <drm/drm_debugfs.h>
+#include <drm/drm_gem_ttm_helper.h>

 #include "amdgpu.h"
 #include "amdgpu_display.h"
+#include "amdgpu_dma_buf.h"
 #include "amdgpu_xgmi.h"

-void amdgpu_gem_object_free(struct drm_gem_object *gobj)
+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs;
+
+static void amdgpu_gem_object_free(struct drm_gem_object *gobj)
 {
 	struct amdgpu_bo *robj = gem_to_amdgpu_bo(gobj);

@@ -66,34 +70,21 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size,
 	bp.type = type;
 	bp.resv = resv;
 	bp.preferred_domain = initial_domain;
-retry:
 	bp.flags = flags;
 	bp.domain = initial_domain;
 	r = amdgpu_bo_create(adev, &bp, &bo);
-	if (r) {
-		if (r != -ERESTARTSYS) {
-			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
-				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
-				goto retry;
-			}
-
-			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
-				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
-				goto retry;
-			}
-			DRM_DEBUG("Failed to allocate GEM object (%ld, %d, %u, %d)\n",
-				  size, initial_domain, alignment, r);
-		}
+	if (r)
 		return r;
-	}
+
 	*obj = &bo->tbo.base;
+	(*obj)->funcs = &amdgpu_gem_object_funcs;

 	return 0;
 }

 void amdgpu_gem_force_release(struct amdgpu_device *adev)
 {
-	struct drm_device *ddev = adev->ddev;
+	struct drm_device *ddev = adev_to_drm(adev);
 	struct drm_file *file;

 	mutex_lock(&ddev->filelist_mutex);
@@ -106,7 +97,7 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
 		spin_lock(&file->table_lock);
 		idr_for_each_entry(&file->object_idr, gobj, handle) {
 			WARN_ONCE(1, "And also active allocations!\n");
-			drm_gem_object_put_unlocked(gobj);
+			drm_gem_object_put(gobj);
 		}
 		idr_destroy(&file->object_idr);
 		spin_unlock(&file->table_lock);
@@ -119,8 +110,8 @@ void amdgpu_gem_force_release(struct amdgpu_device *adev)
  * Call from drm_gem_handle_create which appear in both new and open ioctl
  * case.
  */
-int amdgpu_gem_object_open(struct drm_gem_object *obj,
-			   struct drm_file *file_priv)
+static int amdgpu_gem_object_open(struct drm_gem_object *obj,
+				  struct drm_file *file_priv)
 {
 	struct amdgpu_bo *abo = gem_to_amdgpu_bo(obj);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
@@ -152,8 +143,8 @@ int amdgpu_gem_object_open(struct drm_gem_object *obj,
 	return 0;
 }

-void amdgpu_gem_object_close(struct drm_gem_object *obj,
-			     struct drm_file *file_priv)
+static void amdgpu_gem_object_close(struct drm_gem_object *obj,
+				    struct drm_file *file_priv)
 {
 	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
 	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
@@ -211,13 +202,22 @@ out_unlock:
 	ttm_eu_backoff_reservation(&ticket, &list);
 }

+static const struct drm_gem_object_funcs amdgpu_gem_object_funcs = {
+	.free = amdgpu_gem_object_free,
+	.open = amdgpu_gem_object_open,
+	.close = amdgpu_gem_object_close,
+	.export = amdgpu_gem_prime_export,
+	.vmap = drm_gem_ttm_vmap,
+	.vunmap = drm_gem_ttm_vunmap,
+};
+
 /*
  * GEM ioctls.
  */
 int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
			    struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
 	union drm_amdgpu_gem_create *args = data;
@@ -225,7 +225,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 	uint64_t size = args->in.bo_size;
 	struct dma_resv *resv = NULL;
 	struct drm_gem_object *gobj;
-	uint32_t handle;
+	uint32_t handle, initial_domain;
 	int r;

 	/* reject invalid gem flags */
@@ -269,9 +269,28 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,
 		resv = vm->root.base.bo->tbo.base.resv;
 	}

+	initial_domain = (u32)(0xffffffff & args->in.domains);
+retry:
 	r = amdgpu_gem_object_create(adev, size, args->in.alignment,
-				     (u32)(0xffffffff & args->in.domains),
+				     initial_domain,
 				     flags, ttm_bo_type_device, resv, &gobj);
+	if (r) {
+		if (r != -ERESTARTSYS) {
+			if (flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) {
+				flags &= ~AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+				goto retry;
+			}
+
+			if (initial_domain == AMDGPU_GEM_DOMAIN_VRAM) {
+				initial_domain |= AMDGPU_GEM_DOMAIN_GTT;
+				goto retry;
+			}
+			DRM_DEBUG("Failed to allocate GEM object (%llu, %d, %llu, %d)\n",
+				  size, initial_domain, args->in.alignment, r);
+		}
+		return r;
+	}
+
 	if (flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) {
 		if (!r) {
 			struct amdgpu_bo *abo = gem_to_amdgpu_bo(gobj);
@@ -285,7 +304,7 @@ int amdgpu_gem_create_ioctl(struct drm_device *dev, void *data,

 	r = drm_gem_handle_create(filp, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	if (r)
 		return r;

@@ -298,7 +317,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
			     struct drm_file *filp)
 {
 	struct ttm_operation_ctx ctx = { true, false };
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_gem_userptr *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_bo *bo;
@@ -332,7 +351,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
 	bo = gem_to_amdgpu_bo(gobj);
 	bo->preferred_domains = AMDGPU_GEM_DOMAIN_GTT;
 	bo->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
-	r = amdgpu_ttm_tt_set_userptr(bo->tbo.ttm, args->addr, args->flags);
+	r = amdgpu_ttm_tt_set_userptr(&bo->tbo, args->addr, args->flags);
 	if (r)
 		goto release_object;

@@ -369,7 +388,7 @@ user_pages_done:
 	amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm);

release_object:
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);

 	return r;
 }
@@ -388,11 +407,11 @@ int amdgpu_mode_dumb_mmap(struct drm_file *filp,
 	robj = gem_to_amdgpu_bo(gobj);
 	if (amdgpu_ttm_tt_get_usermm(robj->tbo.ttm) ||
 	    (robj->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
-		drm_gem_object_put_unlocked(gobj);
+		drm_gem_object_put(gobj);
 		return -EPERM;
 	}
 	*offset_p = amdgpu_bo_mmap_offset(robj);
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return 0;
 }

@@ -462,7 +481,7 @@ int amdgpu_gem_wait_idle_ioctl(struct drm_device *dev, void *data,
 	} else
 		r = ret;

-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return r;
 }

@@ -505,7 +524,7 @@ int amdgpu_gem_metadata_ioctl(struct drm_device *dev, void *data,
unreserve:
 	amdgpu_bo_unreserve(robj);
out:
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return r;
 }

@@ -587,7 +606,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct drm_amdgpu_gem_va *args = data;
 	struct drm_gem_object *gobj;
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *abo;
 	struct amdgpu_bo_va *bo_va;
@@ -596,10 +615,11 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	struct ww_acquire_ctx ticket;
 	struct list_head list, duplicates;
 	uint64_t va_flags;
+	uint64_t vm_size;
 	int r = 0;

 	if (args->va_address < AMDGPU_VA_RESERVED_SIZE) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%LX is in reserved area 0x%LX\n",
 			args->va_address, AMDGPU_VA_RESERVED_SIZE);
 		return -EINVAL;
@@ -607,7 +627,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,

 	if (args->va_address >= AMDGPU_GMC_HOLE_START &&
 	    args->va_address < AMDGPU_GMC_HOLE_END) {
-		dev_dbg(&dev->pdev->dev,
+		dev_dbg(dev->dev,
 			"va_address 0x%LX is in VA hole 0x%LX-0x%LX\n",
 			args->va_address, AMDGPU_GMC_HOLE_START,
 			AMDGPU_GMC_HOLE_END);
@@ -616,8 +636,17 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,

 	args->va_address &= AMDGPU_GMC_HOLE_MASK;

+	vm_size = adev->vm_manager.max_pfn * AMDGPU_GPU_PAGE_SIZE;
+	vm_size -= AMDGPU_VA_RESERVED_SIZE;
+	if (args->va_address + args->map_size > vm_size) {
+		dev_dbg(dev->dev,
+			"va_address 0x%llx is in top reserved area 0x%llx\n",
+			args->va_address + args->map_size, vm_size);
+		return -EINVAL;
+	}
+
 	if ((args->flags & ~valid_flags) && (args->flags & ~prt_flags)) {
-		dev_dbg(&dev->pdev->dev, "invalid flags combination 0x%08X\n",
+		dev_dbg(dev->dev, "invalid flags combination 0x%08X\n",
 			args->flags);
 		return -EINVAL;
 	}
@@ -629,7 +658,7 @@ int amdgpu_gem_va_ioctl(struct drm_device *dev, void *data,
 	case AMDGPU_VA_OP_REPLACE:
 		break;
 	default:
-		dev_dbg(&dev->pdev->dev, "unsupported operation %d\n",
+		dev_dbg(dev->dev, "unsupported operation %d\n",
 			args->operation);
 		return -EINVAL;
 	}
@@ -704,14 +733,14 @@ error_backoff:
 	ttm_eu_backoff_reservation(&ticket, &list);

error_unref:
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return r;
 }

 int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
			struct drm_file *filp)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_amdgpu_gem_op *args = data;
 	struct drm_gem_object *gobj;
 	struct amdgpu_vm_bo_base *base;
@@ -780,7 +809,7 @@ int amdgpu_gem_op_ioctl(struct drm_device *dev, void *data,
 	}

out:
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	return r;
 }

@@ -788,7 +817,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
			    struct drm_device *dev,
			    struct drm_mode_create_dumb *args)
 {
-	struct amdgpu_device *adev = dev->dev_private;
+	struct amdgpu_device *adev = drm_to_adev(dev);
 	struct drm_gem_object *gobj;
 	uint32_t handle;
 	u64 flags = AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED |
@@ -817,7 +846,7 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,

 	r = drm_gem_handle_create(file_priv, gobj, &handle);
 	/* drop reference from allocate - handle holds it now */
-	drm_gem_object_put_unlocked(gobj);
+	drm_gem_object_put(gobj);
 	if (r) {
 		return r;
 	}
@@ -826,67 +855,6 @@ int amdgpu_mode_dumb_create(struct drm_file *file_priv,
 }

 #if defined(CONFIG_DEBUG_FS)
-
-#define amdgpu_debugfs_gem_bo_print_flag(m, bo, flag)	\
-	if (bo->flags & (AMDGPU_GEM_CREATE_ ## flag)) {	\
-		seq_printf((m), " " #flag);		\
-	}
-
-static int amdgpu_debugfs_gem_bo_info(int id, void *ptr, void *data)
-{
-	struct drm_gem_object *gobj = ptr;
-	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
-	struct seq_file *m = data;
-
-	struct dma_buf_attachment *attachment;
-	struct dma_buf *dma_buf;
-	unsigned domain;
-	const char *placement;
-	unsigned pin_count;
-
-	domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
-	switch (domain) {
-	case AMDGPU_GEM_DOMAIN_VRAM:
-		placement = "VRAM";
-		break;
-	case AMDGPU_GEM_DOMAIN_GTT:
-		placement = " GTT";
-		break;
-	case AMDGPU_GEM_DOMAIN_CPU:
-	default:
-		placement = " CPU";
-		break;
-	}
-	seq_printf(m, "\t0x%08x: %12ld byte %s",
-		   id, amdgpu_bo_size(bo), placement);
-
-	pin_count = READ_ONCE(bo->pin_count);
-	if (pin_count)
-		seq_printf(m, " pin count %d", pin_count);
-
-	dma_buf = READ_ONCE(bo->tbo.base.dma_buf);
-	attachment = READ_ONCE(bo->tbo.base.import_attach);
-
-	if (attachment)
-		seq_printf(m, " imported from %p%s", dma_buf,
-			   attachment->peer2peer ? " P2P" : "");
-	else if (dma_buf)
-		seq_printf(m, " exported as %p", dma_buf);
-
-	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_ACCESS_REQUIRED);
-	amdgpu_debugfs_gem_bo_print_flag(m, bo, NO_CPU_ACCESS);
-	amdgpu_debugfs_gem_bo_print_flag(m, bo, CPU_GTT_USWC);
-	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CLEARED);
-	amdgpu_debugfs_gem_bo_print_flag(m, bo, SHADOW);
-	amdgpu_debugfs_gem_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
-	amdgpu_debugfs_gem_bo_print_flag(m, bo, VM_ALWAYS_VALID);
-	amdgpu_debugfs_gem_bo_print_flag(m, bo, EXPLICIT_SYNC);
-
-	seq_printf(m, "\n");
-
-	return 0;
-}
-
 static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
 {
 	struct drm_info_node *node = (struct drm_info_node *)m->private;
@@ -900,6 +868,8 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)

 	list_for_each_entry(file, &dev->filelist, lhead) {
 		struct task_struct *task;
+		struct drm_gem_object *gobj;
+		int id;

 		/*
 		 * Although we have a valid reference on file->pid, that does
@@ -914,7 +884,11 @@ static int amdgpu_debugfs_gem_info(struct seq_file *m, void *data)
 		rcu_read_unlock();

 		spin_lock(&file->table_lock);
-		idr_for_each(&file->object_idr, amdgpu_debugfs_gem_bo_info, m);
+		idr_for_each_entry(&file->object_idr, gobj, id) {
+			struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
+
+			amdgpu_bo_print_info(id, bo, m);
+		}
 		spin_unlock(&file->table_lock);
 	}

@@ -930,7 +904,8 @@ static const struct drm_info_list amdgpu_debugfs_gem_list[] = {
 int amdgpu_debugfs_gem_init(struct amdgpu_device *adev)
 {
 #if defined(CONFIG_DEBUG_FS)
-	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list, 1);
+	return amdgpu_debugfs_add_files(adev, amdgpu_debugfs_gem_list,
+					ARRAY_SIZE(amdgpu_debugfs_gem_list));
 #endif
 	return 0;
 }
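
Note: the structural change in this diff is the move from driver-wide GEM callbacks to per-object ones. amdgpu_gem_object_funcs is a drm_gem_object_funcs table, and every object allocated through amdgpu_gem_object_create() now carries a pointer to it. The following is a minimal sketch of that pattern, not amdgpu code; all my_* names are hypothetical.

/* Sketch only: per-object drm_gem_object_funcs wiring. */
#include <linux/slab.h>
#include <drm/drm_gem.h>

static void my_gem_free(struct drm_gem_object *obj)
{
	/* Release backing storage first, then the object itself. */
	kfree(obj);
}

static const struct drm_gem_object_funcs my_gem_object_funcs = {
	.free = my_gem_free,
};

static int my_gem_create(struct drm_device *dev, size_t size,
			 struct drm_gem_object **out)
{
	struct drm_gem_object *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

	if (!obj)
		return -ENOMEM;

	/*
	 * The DRM core dispatches through obj->funcs, so the table must
	 * be set before a handle to the object is published, which is
	 * exactly what the new (*obj)->funcs assignment in
	 * amdgpu_gem_object_create() does. size must be page-aligned.
	 */
	obj->funcs = &my_gem_object_funcs;
	drm_gem_private_object_init(dev, obj, size);

	*out = obj;
	return 0;
}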
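The repeated dev->dev_private to drm_to_adev(dev) and adev->ddev to adev_to_drm(adev) substitutions exist because these helpers hide how the DRM device and the driver device are linked; in the kernels this diff targets, amdgpu embeds struct drm_device in struct amdgpu_device, so the helpers reduce to container_of() and member access. A sketch of that accessor pattern with a hypothetical my_device wrapper:

#include <linux/kernel.h>	/* container_of() */
#include <drm/drm_device.h>

/* Hypothetical driver structure embedding the DRM device. */
struct my_device {
	struct drm_device ddev;	/* embedded, not a pointer */
	/* driver-private state follows */
};

/* drm_to_adev() equivalent: recover the wrapper from the member. */
static inline struct my_device *drm_to_my(struct drm_device *ddev)
{
	return container_of(ddev, struct my_device, ddev);
}

/* adev_to_drm() equivalent: just take the member's address. */
static inline struct drm_device *my_to_drm(struct my_device *mdev)
{
	return &mdev->ddev;
}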
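The debugfs hunk replaces the callback-style idr_for_each() walk with the idr_for_each_entry() iterator macro, keeping the per-object code inline instead of routing it through a void-pointer callback. A self-contained sketch of the iteration idiom (my_dump_objects is a made-up name):

#include <linux/idr.h>
#include <linux/seq_file.h>
#include <drm/drm_gem.h>

static void my_dump_objects(struct idr *objects, struct seq_file *m)
{
	struct drm_gem_object *gobj;
	int id;

	/*
	 * Visits every allocated ID in ascending order with a typed
	 * cursor. The caller must serialize against concurrent IDR
	 * updates; the diff above holds file->table_lock for this.
	 */
	idr_for_each_entry(objects, gobj, id)
		seq_printf(m, "0x%08x: %zu bytes\n", id, gobj->size);
}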