Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_object.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c  425
1 file changed, 206 insertions(+), 219 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 1345f7eba011..2e8f6cd7a729 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -33,6 +33,7 @@
#include <linux/slab.h>
#include <linux/dma-buf.h>
+#include <drm/drm_drv.h>
#include <drm/amdgpu_drm.h>
#include <drm/drm_cache.h>
#include "amdgpu.h"
@@ -52,55 +53,44 @@
*
*/
-/**
- * amdgpu_bo_subtract_pin_size - Remove BO from pin_size accounting
- *
- * @bo: &amdgpu_bo buffer object
- *
- * This function is called when a BO stops being pinned, and updates the
- * &amdgpu_device pin_size values accordingly.
- */
-static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
- atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
- &adev->visible_pin_size);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
- }
+ amdgpu_bo_kunmap(bo);
+
+ if (bo->tbo.base.import_attach)
+ drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
+ drm_gem_object_release(&bo->tbo.base);
+ amdgpu_bo_unref(&bo->parent);
+ kvfree(bo);
}
-static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
+static void amdgpu_bo_user_destroy(struct ttm_buffer_object *tbo)
{
- struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
struct amdgpu_bo_user *ubo;
- if (bo->tbo.pin_count > 0)
- amdgpu_bo_subtract_pin_size(bo);
+ ubo = to_amdgpu_bo_user(bo);
+ kfree(ubo->metadata);
+ amdgpu_bo_destroy(tbo);
+}
- amdgpu_bo_kunmap(bo);
+static void amdgpu_bo_vm_destroy(struct ttm_buffer_object *tbo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
+ struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
+ struct amdgpu_bo_vm *vmbo;
- if (bo->tbo.base.import_attach)
- drm_prime_gem_destroy(&bo->tbo.base, bo->tbo.sg);
- drm_gem_object_release(&bo->tbo.base);
+ vmbo = to_amdgpu_bo_vm(bo);
/* in case amdgpu_device_recover_vram got NULL of bo->parent */
- if (!list_empty(&bo->shadow_list)) {
+ if (!list_empty(&vmbo->shadow_list)) {
mutex_lock(&adev->shadow_list_lock);
- list_del_init(&bo->shadow_list);
+ list_del_init(&vmbo->shadow_list);
mutex_unlock(&adev->shadow_list_lock);
}
- amdgpu_bo_unref(&bo->parent);
-
- if (bo->tbo.type == ttm_bo_type_device) {
- ubo = to_amdgpu_bo_user(bo);
- kfree(ubo->metadata);
- }
- kfree(bo);
+ amdgpu_bo_destroy(tbo);
}
/**
@@ -115,8 +105,11 @@ static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
*/
bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
- if (bo->destroy == &amdgpu_bo_destroy)
+ if (bo->destroy == &amdgpu_bo_destroy ||
+ bo->destroy == &amdgpu_bo_user_destroy ||
+ bo->destroy == &amdgpu_bo_vm_destroy)
return true;
+
return false;
}
@@ -157,7 +150,9 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
places[c].fpfn = 0;
places[c].lpfn = 0;
- places[c].mem_type = TTM_PL_TT;
+ places[c].mem_type =
+ abo->flags & AMDGPU_GEM_CREATE_PREEMPTIBLE ?
+ AMDGPU_PL_PREEMPT : TTM_PL_TT;
places[c].flags = 0;
c++;
}
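[Illustration, outside the patch] With this hunk, a GTT request is routed to the new AMDGPU_PL_PREEMPT manager when the BO was created with AMDGPU_GEM_CREATE_PREEMPTIBLE. A minimal caller-side sketch, assuming a zero-initialized struct amdgpu_bo_param named bp:

	/* ask for a preemptible GTT allocation: placement_from_domain()
	 * will then pick AMDGPU_PL_PREEMPT instead of TTM_PL_TT for this BO
	 */
	bp.domain = AMDGPU_GEM_DOMAIN_GTT;
	bp.flags |= AMDGPU_GEM_CREATE_PREEMPTIBLE;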
@@ -202,7 +197,7 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
- BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+ BUG_ON(c > AMDGPU_BO_MAX_PLACEMENTS);
placement->num_placement = c;
placement->placement = places;
@@ -386,14 +381,14 @@ int amdgpu_bo_create_kernel_at(struct amdgpu_device *adev,
if (cpu_addr)
amdgpu_bo_kunmap(*bo_ptr);
- ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.mem);
+ ttm_resource_free(&(*bo_ptr)->tbo, &(*bo_ptr)->tbo.resource);
for (i = 0; i < (*bo_ptr)->placement.num_placement; ++i) {
(*bo_ptr)->placements[i].fpfn = offset >> PAGE_SHIFT;
(*bo_ptr)->placements[i].lpfn = (offset + size) >> PAGE_SHIFT;
}
r = ttm_bo_mem_space(&(*bo_ptr)->tbo, &(*bo_ptr)->placement,
- &(*bo_ptr)->tbo.mem, &ctx);
+ &(*bo_ptr)->tbo.resource, &ctx);
if (r)
goto error;
@@ -456,7 +451,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_GTT) {
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_TT);
- if (size < (man->size << PAGE_SHIFT))
+ if (size < man->size)
return true;
else
goto fail;
@@ -465,7 +460,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
if (domain & AMDGPU_GEM_DOMAIN_VRAM) {
man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- if (size < (man->size << PAGE_SHIFT))
+ if (size < man->size)
return true;
else
goto fail;
@@ -477,7 +472,7 @@ static bool amdgpu_bo_validate_size(struct amdgpu_device *adev,
fail:
DRM_DEBUG("BO size %lu > total memory in domain: %llu\n", size,
- man->size << PAGE_SHIFT);
+ man->size);
return false;
}
@@ -515,7 +510,18 @@ bool amdgpu_bo_support_uswc(u64 bo_flags)
#endif
}
-static int amdgpu_bo_do_create(struct amdgpu_device *adev,
+/**
+ * amdgpu_bo_create - create an &amdgpu_bo buffer object
+ * @adev: amdgpu device object
+ * @bp: parameters to be used for the buffer object
+ * @bo_ptr: pointer to the buffer object pointer
+ *
+ * Creates an &amdgpu_bo buffer object.
+ *
+ * Returns:
+ * 0 for success or a negative error code on failure.
+ */
+int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr)
{
@@ -552,16 +558,16 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo));
*bo_ptr = NULL;
- bo = kzalloc(bp->bo_ptr_size, GFP_KERNEL);
+ bo = kvzalloc(bp->bo_ptr_size, GFP_KERNEL);
if (bo == NULL)
return -ENOMEM;
drm_gem_private_object_init(adev_to_drm(adev), &bo->tbo.base, size);
- INIT_LIST_HEAD(&bo->shadow_list);
bo->vm_bo = NULL;
bo->preferred_domains = bp->preferred_domain ? bp->preferred_domain :
bp->domain;
bo->allowed_domains = bo->preferred_domains;
if (bp->type != ttm_bo_type_kernel &&
+ !(bp->flags & AMDGPU_GEM_CREATE_DISCARDABLE) &&
bo->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
bo->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
@@ -570,6 +576,9 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
if (!amdgpu_bo_support_uswc(bo->flags))
bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ if (adev->ras_enabled)
+ bo->flags |= AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE;
+
bo->tbo.bdev = &adev->mman.bdev;
if (bp->domain & (AMDGPU_GEM_DOMAIN_GWS | AMDGPU_GEM_DOMAIN_OA |
AMDGPU_GEM_DOMAIN_GDS))
@@ -579,31 +588,33 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
- r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
+ if (!bp->destroy)
+ bp->destroy = &amdgpu_bo_destroy;
+
+ r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, bp->type,
&bo->placement, page_align, &ctx, NULL,
- bp->resv, &amdgpu_bo_destroy);
+ bp->resv, bp->destroy);
if (unlikely(r != 0))
return r;
if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM &&
- bo->tbo.mem.start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ bo->tbo.resource->mem_type == TTM_PL_VRAM &&
+ bo->tbo.resource->start < adev->gmc.visible_vram_size >> PAGE_SHIFT)
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved,
ctx.bytes_moved);
else
amdgpu_cs_report_moved_bytes(adev, ctx.bytes_moved, 0);
if (bp->flags & AMDGPU_GEM_CREATE_VRAM_CLEARED &&
- bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ bo->tbo.resource->mem_type == TTM_PL_VRAM) {
struct dma_fence *fence;
r = amdgpu_fill_buffer(bo, 0, bo->tbo.base.resv, &fence);
if (unlikely(r))
goto fail_unreserve;
- amdgpu_bo_fence(bo, fence, false);
- dma_fence_put(bo->tbo.moving);
- bo->tbo.moving = dma_fence_get(fence);
+ dma_resv_add_fence(bo->tbo.base.resv, fence,
+ DMA_RESV_USAGE_KERNEL);
dma_fence_put(fence);
}
if (!bp->resv)
@@ -625,140 +636,85 @@ fail_unreserve:
return r;
}
-static int amdgpu_bo_create_shadow(struct amdgpu_device *adev,
- unsigned long size,
- struct amdgpu_bo *bo)
-{
- struct amdgpu_bo_param bp;
- int r;
-
- if (bo->shadow)
- return 0;
-
- memset(&bp, 0, sizeof(bp));
- bp.size = size;
- bp.domain = AMDGPU_GEM_DOMAIN_GTT;
- bp.flags = AMDGPU_GEM_CREATE_CPU_GTT_USWC |
- AMDGPU_GEM_CREATE_SHADOW;
- bp.type = ttm_bo_type_kernel;
- bp.resv = bo->tbo.base.resv;
- bp.bo_ptr_size = sizeof(struct amdgpu_bo);
-
- r = amdgpu_bo_do_create(adev, &bp, &bo->shadow);
- if (!r) {
- bo->shadow->parent = amdgpu_bo_ref(bo);
- mutex_lock(&adev->shadow_list_lock);
- list_add_tail(&bo->shadow->shadow_list, &adev->shadow_list);
- mutex_unlock(&adev->shadow_list_lock);
- }
-
- return r;
-}
-
/**
- * amdgpu_bo_create - create an &amdgpu_bo buffer object
+ * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
* @adev: amdgpu device object
* @bp: parameters to be used for the buffer object
- * @bo_ptr: pointer to the buffer object pointer
+ * @ubo_ptr: pointer to the buffer object pointer
*
- * Creates an &amdgpu_bo buffer object; and if requested, also creates a
- * shadow object.
- * Shadow object is used to backup the original buffer object, and is always
- * in GTT.
+ * Create a BO to be used by user application;
*
* Returns:
* 0 for success or a negative error code on failure.
*/
-int amdgpu_bo_create(struct amdgpu_device *adev,
- struct amdgpu_bo_param *bp,
- struct amdgpu_bo **bo_ptr)
+
+int amdgpu_bo_create_user(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_user **ubo_ptr)
{
- u64 flags = bp->flags;
+ struct amdgpu_bo *bo_ptr;
int r;
- bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
-
- r = amdgpu_bo_do_create(adev, bp, bo_ptr);
+ bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
+ bp->destroy = &amdgpu_bo_user_destroy;
+ r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
- if ((flags & AMDGPU_GEM_CREATE_SHADOW) && !(adev->flags & AMD_IS_APU)) {
- if (!bp->resv)
- WARN_ON(dma_resv_lock((*bo_ptr)->tbo.base.resv,
- NULL));
-
- r = amdgpu_bo_create_shadow(adev, bp->size, *bo_ptr);
-
- if (!bp->resv)
- dma_resv_unlock((*bo_ptr)->tbo.base.resv);
-
- if (r)
- amdgpu_bo_unref(bo_ptr);
- }
-
+ *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
return r;
}
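[Illustration, outside the patch] A minimal caller sketch for the new amdgpu_bo_create_user() entry point; the adev and size variables and the error handling are assumed, while the struct amdgpu_bo_param fields follow the in-tree definition:

	struct amdgpu_bo_param bp;
	struct amdgpu_bo_user *ubo;
	int r;

	memset(&bp, 0, sizeof(bp));
	bp.size = size;                    /* allocation size in bytes */
	bp.byte_align = PAGE_SIZE;         /* minimum placement alignment */
	bp.domain = AMDGPU_GEM_DOMAIN_GTT; /* initial placement domain */
	bp.type = ttm_bo_type_device;      /* user-visible BO */
	bp.resv = NULL;                    /* allocate a private reservation */
	/* bo_ptr_size and destroy are filled in by amdgpu_bo_create_user() */

	r = amdgpu_bo_create_user(adev, &bp, &ubo);
	if (r)
		return r;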
/**
- * amdgpu_bo_create_user - create an &amdgpu_bo_user buffer object
+ * amdgpu_bo_create_vm - create an &amdgpu_bo_vm buffer object
* @adev: amdgpu device object
* @bp: parameters to be used for the buffer object
- * @ubo_ptr: pointer to the buffer object pointer
+ * @vmbo_ptr: pointer to the buffer object pointer
*
- * Create a BO to be used by user application;
+ * Create a BO to be used by GPUVM.
*
* Returns:
* 0 for success or a negative error code on failure.
*/
-int amdgpu_bo_create_user(struct amdgpu_device *adev,
- struct amdgpu_bo_param *bp,
- struct amdgpu_bo_user **ubo_ptr)
+int amdgpu_bo_create_vm(struct amdgpu_device *adev,
+ struct amdgpu_bo_param *bp,
+ struct amdgpu_bo_vm **vmbo_ptr)
{
struct amdgpu_bo *bo_ptr;
int r;
- bp->flags = bp->flags & ~AMDGPU_GEM_CREATE_SHADOW;
- bp->bo_ptr_size = sizeof(struct amdgpu_bo_user);
- r = amdgpu_bo_do_create(adev, bp, &bo_ptr);
+ /* bo_ptr_size is determined by the caller and depends on the
+ * number of amdgpu_vm_pt entries.
+ */
+ BUG_ON(bp->bo_ptr_size < sizeof(struct amdgpu_bo_vm));
+ r = amdgpu_bo_create(adev, bp, &bo_ptr);
if (r)
return r;
- *ubo_ptr = to_amdgpu_bo_user(bo_ptr);
+ *vmbo_ptr = to_amdgpu_bo_vm(bo_ptr);
+ INIT_LIST_HEAD(&(*vmbo_ptr)->shadow_list);
+ /* Set destroy callback to amdgpu_bo_vm_destroy after vmbo->shadow_list
+ * is initialized.
+ */
+ bo_ptr->tbo.destroy = &amdgpu_bo_vm_destroy;
return r;
}
+
/**
- * amdgpu_bo_validate - validate an &amdgpu_bo buffer object
- * @bo: pointer to the buffer object
+ * amdgpu_bo_add_to_shadow_list - add a BO to the shadow list
*
- * Sets placement according to domain; and changes placement and caching
- * policy of the buffer object according to the placement.
- * This is used for validating shadow bos. It calls ttm_bo_validate() to
- * make sure the buffer is resident where it needs to be.
+ * @vmbo: BO that will be inserted into the shadow list
*
- * Returns:
- * 0 for success or a negative error code on failure.
+ * Insert a BO into the shadow list.
*/
-int amdgpu_bo_validate(struct amdgpu_bo *bo)
+void amdgpu_bo_add_to_shadow_list(struct amdgpu_bo_vm *vmbo)
{
- struct ttm_operation_ctx ctx = { false, false };
- uint32_t domain;
- int r;
+ struct amdgpu_device *adev = amdgpu_ttm_adev(vmbo->bo.tbo.bdev);
- if (bo->tbo.pin_count)
- return 0;
-
- domain = bo->preferred_domains;
-
-retry:
- amdgpu_bo_placement_from_domain(bo, domain);
- r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
- domain = bo->allowed_domains;
- goto retry;
- }
-
- return r;
+ mutex_lock(&adev->shadow_list_lock);
+ list_add_tail(&vmbo->shadow_list, &adev->shadow_list);
+ mutex_unlock(&adev->shadow_list_lock);
}
/**
@@ -808,6 +764,11 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
return -EPERM;
+ r = dma_resv_wait_timeout(bo->tbo.base.resv, DMA_RESV_USAGE_KERNEL,
+ false, MAX_SCHEDULE_TIMEOUT);
+ if (r < 0)
+ return r;
+
kptr = amdgpu_bo_kptr(bo);
if (kptr) {
if (ptr)
@@ -815,12 +776,7 @@ int amdgpu_bo_kmap(struct amdgpu_bo *bo, void **ptr)
return 0;
}
- r = dma_resv_wait_timeout_rcu(bo->tbo.base.resv, false, false,
- MAX_SCHEDULE_TIMEOUT);
- if (r < 0)
- return r;
-
- r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.mem.num_pages, &bo->kmap);
+ r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.resource->num_pages, &bo->kmap);
if (r)
return r;
@@ -929,22 +885,21 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
if (WARN_ON_ONCE(min_offset > max_offset))
return -EINVAL;
+ /* Check domain to be pinned to against preferred domains */
+ if (bo->preferred_domains & domain)
+ domain = bo->preferred_domains & domain;
+
/* A shared bo cannot be migrated to VRAM */
- if (bo->prime_shared_count || bo->tbo.base.import_attach) {
+ if (bo->tbo.base.import_attach) {
if (domain & AMDGPU_GEM_DOMAIN_GTT)
domain = AMDGPU_GEM_DOMAIN_GTT;
else
return -EINVAL;
}
- /* This assumes only APU display buffers are pinned with (VRAM|GTT).
- * See function amdgpu_display_supported_domains()
- */
- domain = amdgpu_bo_get_preferred_pin_domain(adev, domain);
-
if (bo->tbo.pin_count) {
- uint32_t mem_type = bo->tbo.mem.mem_type;
- uint32_t mem_flags = bo->tbo.mem.placement;
+ uint32_t mem_type = bo->tbo.resource->mem_type;
+ uint32_t mem_flags = bo->tbo.resource->placement;
if (!(domain & amdgpu_mem_type_to_domain(mem_type)))
return -EINVAL;
@@ -966,6 +921,11 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
return 0;
}
+ /* This assumes only APU display buffers are pinned with (VRAM|GTT).
+ * See function amdgpu_display_supported_domains()
+ */
+ domain = amdgpu_bo_get_preferred_domain(adev, domain);
+
if (bo->tbo.base.import_attach)
dma_buf_pin(bo->tbo.base.import_attach);
@@ -994,7 +954,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
ttm_bo_pin(&bo->tbo);
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
atomic64_add(amdgpu_bo_size(bo), &adev->vram_pin_size);
atomic64_add(amdgpu_vram_mgr_bo_visible_size(bo),
@@ -1037,37 +997,22 @@ int amdgpu_bo_pin(struct amdgpu_bo *bo, u32 domain)
*/
void amdgpu_bo_unpin(struct amdgpu_bo *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+
ttm_bo_unpin(&bo->tbo);
if (bo->tbo.pin_count)
return;
- amdgpu_bo_subtract_pin_size(bo);
-
if (bo->tbo.base.import_attach)
dma_buf_unpin(bo->tbo.base.import_attach);
-}
-
-/**
- * amdgpu_bo_evict_vram - evict VRAM buffers
- * @adev: amdgpu device object
- *
- * Evicts all VRAM buffers on the lru list of the memory type.
- * Mainly used for evicting vram at suspend time.
- *
- * Returns:
- * 0 for success or a negative error code on failure.
- */
-int amdgpu_bo_evict_vram(struct amdgpu_device *adev)
-{
- struct ttm_resource_manager *man;
- if (adev->in_s3 && (adev->flags & AMD_IS_APU)) {
- /* No need to evict vram on APUs for suspend to ram */
- return 0;
+ if (bo->tbo.resource->mem_type == TTM_PL_VRAM) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->vram_pin_size);
+ atomic64_sub(amdgpu_vram_mgr_bo_visible_size(bo),
+ &adev->visible_pin_size);
+ } else if (bo->tbo.resource->mem_type == TTM_PL_TT) {
+ atomic64_sub(amdgpu_bo_size(bo), &adev->gart_pin_size);
}
-
- man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);
- return ttm_resource_manager_evict_all(&adev->mman.bdev, man);
}
static const char *amdgpu_vram_names[] = {
@@ -1081,7 +1026,9 @@ static const char *amdgpu_vram_names[] = {
"DDR3",
"DDR4",
"GDDR6",
- "DDR5"
+ "DDR5",
+ "LPDDR4",
+ "LPDDR5"
};
/**
@@ -1098,9 +1045,14 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
/* On A+A platform, VRAM can be mapped as WB */
if (!adev->gmc.xgmi.connected_to_cpu) {
/* reserve PAT memory space to WC for VRAM */
- arch_io_reserve_memtype_wc(adev->gmc.aper_base,
+ int r = arch_io_reserve_memtype_wc(adev->gmc.aper_base,
adev->gmc.aper_size);
+ if (r) {
+ DRM_ERROR("Unable to set WC memtype for the aperture base\n");
+ return r;
+ }
+
/* Add an MTRR for the VRAM */
adev->gmc.vram_mtrr = arch_phys_wc_add(adev->gmc.aper_base,
adev->gmc.aper_size);
@@ -1122,10 +1074,17 @@ int amdgpu_bo_init(struct amdgpu_device *adev)
*/
void amdgpu_bo_fini(struct amdgpu_device *adev)
{
+ int idx;
+
amdgpu_ttm_fini(adev);
- if (!adev->gmc.xgmi.connected_to_cpu) {
- arch_phys_wc_del(adev->gmc.vram_mtrr);
- arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+
+ if (drm_dev_enter(adev_to_drm(adev), &idx)) {
+
+ if (!adev->gmc.xgmi.connected_to_cpu) {
+ arch_phys_wc_del(adev->gmc.vram_mtrr);
+ arch_io_free_memtype_wc(adev->gmc.aper_base, adev->gmc.aper_size);
+ }
+ drm_dev_exit(idx);
}
}
@@ -1246,6 +1205,9 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
BUG_ON(bo->tbo.type == ttm_bo_type_kernel);
ubo = to_amdgpu_bo_user(bo);
+ if (metadata_size)
+ *metadata_size = ubo->metadata_size;
+
if (buffer) {
if (buffer_size < ubo->metadata_size)
return -EINVAL;
@@ -1254,8 +1216,6 @@ int amdgpu_bo_get_metadata(struct amdgpu_bo *bo, void *buffer,
memcpy(buffer, ubo->metadata, ubo->metadata_size);
}
- if (metadata_size)
- *metadata_size = ubo->metadata_size;
if (flags)
*flags = ubo->metadata_flags;
@@ -1278,7 +1238,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
@@ -1289,7 +1249,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
amdgpu_bo_kunmap(abo);
if (abo->tbo.base.dma_buf && !abo->tbo.base.import_attach &&
- bo->mem.mem_type != TTM_PL_SYSTEM)
+ bo->resource->mem_type != TTM_PL_SYSTEM)
dma_buf_move_notify(abo->tbo.base.dma_buf);
/* remember the eviction */
@@ -1304,6 +1264,26 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
+void amdgpu_bo_get_memory(struct amdgpu_bo *bo, uint64_t *vram_mem,
+ uint64_t *gtt_mem, uint64_t *cpu_mem)
+{
+ unsigned int domain;
+
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
+ switch (domain) {
+ case AMDGPU_GEM_DOMAIN_VRAM:
+ *vram_mem += amdgpu_bo_size(bo);
+ break;
+ case AMDGPU_GEM_DOMAIN_GTT:
+ *gtt_mem += amdgpu_bo_size(bo);
+ break;
+ case AMDGPU_GEM_DOMAIN_CPU:
+ default:
+ *cpu_mem += amdgpu_bo_size(bo);
+ break;
+ }
+}
+
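[Illustration, outside the patch] amdgpu_bo_get_memory() adds the BO size to whichever counter matches its current placement; a minimal sketch of the intended accounting use, with the bo variable assumed:

	uint64_t vram_mem = 0, gtt_mem = 0, cpu_mem = 0;

	/* the BO size is accumulated into vram_mem, gtt_mem or cpu_mem
	 * depending on where the BO currently resides
	 */
	amdgpu_bo_get_memory(bo, &vram_mem, &gtt_mem, &cpu_mem);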
/**
* amdgpu_bo_release_notify - notification about a BO being released
* @bo: pointer to a buffer object
@@ -1313,6 +1293,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
*/
void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct dma_fence *fence = NULL;
struct amdgpu_bo *abo;
int r;
@@ -1323,7 +1304,7 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
abo = ttm_to_amdgpu_bo(bo);
if (abo->kfd_bo)
- amdgpu_amdkfd_unreserve_memory_limit(abo);
+ amdgpu_amdkfd_release_notify(abo);
/* We only remove the fence if the resv has individualized. */
WARN_ON_ONCE(bo->type == ttm_bo_type_kernel
@@ -1331,11 +1312,13 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo)
if (bo->base.resv == &bo->base._resv)
amdgpu_amdkfd_remove_fence_on_pt_pd_bos(abo);
- if (bo->mem.mem_type != TTM_PL_VRAM || !bo->mem.mm_node ||
- !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE))
+ if (!bo->resource || bo->resource->mem_type != TTM_PL_VRAM ||
+ !(abo->flags & AMDGPU_GEM_CREATE_VRAM_WIPE_ON_RELEASE) ||
+ adev->in_suspend || adev->shutdown)
return;
- dma_resv_lock(bo->base.resv, NULL);
+ if (WARN_ON_ONCE(!dma_resv_trylock(bo->base.resv)))
+ return;
r = amdgpu_fill_buffer(abo, AMDGPU_POISON, bo->base.resv, &fence);
if (!WARN_ON(r)) {
@@ -1362,18 +1345,17 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
- unsigned long offset, size;
+ unsigned long offset;
int r;
/* Remember that this BO was accessed by the CPU */
abo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return 0;
- size = bo->mem.num_pages << PAGE_SHIFT;
- offset = bo->mem.start << PAGE_SHIFT;
- if ((offset + size) <= adev->gmc.visible_vram_size)
+ offset = bo->resource->start << PAGE_SHIFT;
+ if ((offset + bo->base.size) <= adev->gmc.visible_vram_size)
return 0;
/* Can't move a pinned BO to visible VRAM */
@@ -1395,10 +1377,10 @@ vm_fault_t amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
else if (unlikely(r))
return VM_FAULT_SIGBUS;
- offset = bo->mem.start << PAGE_SHIFT;
+ offset = bo->resource->start << PAGE_SHIFT;
/* this should never happen */
- if (bo->mem.mem_type == TTM_PL_VRAM &&
- (offset + size) > adev->gmc.visible_vram_size)
+ if (bo->resource->mem_type == TTM_PL_VRAM &&
+ (offset + bo->base.size) > adev->gmc.visible_vram_size)
return VM_FAULT_SIGBUS;
ttm_bo_move_to_lru_tail_unlocked(bo);
@@ -1417,11 +1399,17 @@ void amdgpu_bo_fence(struct amdgpu_bo *bo, struct dma_fence *fence,
bool shared)
{
struct dma_resv *resv = bo->tbo.base.resv;
+ int r;
- if (shared)
- dma_resv_add_shared_fence(resv, fence);
- else
- dma_resv_add_excl_fence(resv, fence);
+ r = dma_resv_reserve_fences(resv, 1);
+ if (r) {
+ /* As last resort on OOM we block for the fence */
+ dma_fence_wait(fence, false);
+ return;
+ }
+
+ dma_resv_add_fence(resv, fence, shared ? DMA_RESV_USAGE_READ :
+ DMA_RESV_USAGE_WRITE);
}
/**
@@ -1482,11 +1470,11 @@ int amdgpu_bo_sync_wait(struct amdgpu_bo *bo, void *owner, bool intr)
*/
u64 amdgpu_bo_gpu_offset(struct amdgpu_bo *bo)
{
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_SYSTEM);
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_SYSTEM);
WARN_ON_ONCE(!dma_resv_is_locked(bo->tbo.base.resv) &&
!bo->tbo.pin_count && bo->tbo.type != ttm_bo_type_kernel);
- WARN_ON_ONCE(bo->tbo.mem.start == AMDGPU_BO_INVALID_OFFSET);
- WARN_ON_ONCE(bo->tbo.mem.mem_type == TTM_PL_VRAM &&
+ WARN_ON_ONCE(bo->tbo.resource->start == AMDGPU_BO_INVALID_OFFSET);
+ WARN_ON_ONCE(bo->tbo.resource->mem_type == TTM_PL_VRAM &&
!(bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS));
return amdgpu_bo_gpu_offset_no_check(bo);
@@ -1504,21 +1492,21 @@ u64 amdgpu_bo_gpu_offset_no_check(struct amdgpu_bo *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
uint64_t offset;
- offset = (bo->tbo.mem.start << PAGE_SHIFT) +
- amdgpu_ttm_domain_start(adev, bo->tbo.mem.mem_type);
+ offset = (bo->tbo.resource->start << PAGE_SHIFT) +
+ amdgpu_ttm_domain_start(adev, bo->tbo.resource->mem_type);
return amdgpu_gmc_sign_extend(offset);
}
/**
- * amdgpu_bo_get_preferred_pin_domain - get preferred domain for scanout
+ * amdgpu_bo_get_preferred_domain - get preferred domain
* @adev: amdgpu device object
* @domain: allowed :ref:`memory domains <amdgpu_memory_domains>`
*
* Returns:
- * Which of the allowed domains is preferred for pinning the BO for scanout.
+ * Which of the allowed domains is preferred for allocating the BO.
*/
-uint32_t amdgpu_bo_get_preferred_pin_domain(struct amdgpu_device *adev,
+uint32_t amdgpu_bo_get_preferred_domain(struct amdgpu_device *adev,
uint32_t domain)
{
if (domain == (AMDGPU_GEM_DOMAIN_VRAM | AMDGPU_GEM_DOMAIN_GTT)) {
@@ -1558,7 +1546,7 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
unsigned int pin_count;
u64 size;
- domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
+ domain = amdgpu_mem_type_to_domain(bo->tbo.resource->mem_type);
switch (domain) {
case AMDGPU_GEM_DOMAIN_VRAM:
placement = "VRAM";
@@ -1592,7 +1580,6 @@ u64 amdgpu_bo_print_info(int id, struct amdgpu_bo *bo, struct seq_file *m)
amdgpu_bo_print_flag(m, bo, NO_CPU_ACCESS);
amdgpu_bo_print_flag(m, bo, CPU_GTT_USWC);
amdgpu_bo_print_flag(m, bo, VRAM_CLEARED);
- amdgpu_bo_print_flag(m, bo, SHADOW);
amdgpu_bo_print_flag(m, bo, VRAM_CONTIGUOUS);
amdgpu_bo_print_flag(m, bo, VM_ALWAYS_VALID);
amdgpu_bo_print_flag(m, bo, EXPLICIT_SYNC);