Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 872
1 file changed, 338 insertions(+), 534 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 5efa331e3ee8..cdfc20b0b2eb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -32,7 +32,6 @@
#include <linux/dma-mapping.h>
#include <linux/iommu.h>
-#include <linux/hmm.h>
#include <linux/pagemap.h>
#include <linux/sched/task.h>
#include <linux/sched/mm.h>
@@ -46,8 +45,8 @@
#include <drm/ttm/ttm_bo_api.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_placement.h>
+#include <drm/ttm/ttm_range_manager.h>
-#include <drm/drm_debugfs.h>
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
@@ -57,14 +56,15 @@
#include "amdgpu_sdma.h"
#include "amdgpu_ras.h"
#include "amdgpu_atomfirmware.h"
+#include "amdgpu_res_cursor.h"
#include "bif/bif_4_1_d.h"
#define AMDGPU_TTM_VRAM_MAX_DW_READ (size_t)128
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem);
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm);
static int amdgpu_ttm_init_on_chip(struct amdgpu_device *adev,
@@ -112,7 +112,22 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
abo = ttm_to_amdgpu_bo(bo);
- switch (bo->mem.mem_type) {
+ if (abo->flags & AMDGPU_AMDKFD_CREATE_SVM_BO) {
+ struct dma_fence *fence;
+ struct dma_resv *resv = &bo->base._resv;
+
+ rcu_read_lock();
+ fence = rcu_dereference(resv->fence_excl);
+ if (fence && !fence->ops->signaled)
+ dma_fence_enable_sw_signaling(fence);
+
+ placement->num_placement = 0;
+ placement->num_busy_placement = 0;
+ rcu_read_unlock();
+ return;
+ }
+
+ switch (bo->resource->mem_type) {
case AMDGPU_PL_GDS:
case AMDGPU_PL_GWS:
case AMDGPU_PL_OA:
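
A minimal sketch of what the SVM BO branch above achieves, assuming TTM's eviction contract (an illustration, not code from the patch): with zero placements there is nowhere to move the BO, so eviction reduces to waiting for the fence whose software signaling was just enabled.

    #include <drm/ttm/ttm_placement.h>

    /* Illustrative helper: a BO with no placements cannot be relocated. */
    static void svm_bo_block_eviction(struct ttm_placement *placement)
    {
            placement->num_placement = 0;      /* nothing to try */
            placement->num_busy_placement = 0; /* no busy fallback either */
    }
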
@@ -145,6 +160,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
break;
case TTM_PL_TT:
+ case AMDGPU_PL_PREEMPT:
default:
amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
break;
@@ -153,80 +169,11 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_verify_access - Verify access for a mmap call
- *
- * @bo: The buffer object to map
- * @filp: The file pointer from the process performing the mmap
- *
- * This is called by ttm_bo_mmap() to verify whether a process
- * has the right to mmap a BO to their process space.
- */
-static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
-{
- struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
-
- /*
- * Don't verify access for KFD BOs. They don't have a GEM
- * object associated with them.
- */
- if (abo->kfd_bo)
- return 0;
-
- if (amdgpu_ttm_tt_get_usermm(bo->ttm))
- return -EPERM;
- return drm_vma_node_verify_access(&abo->tbo.base.vma_node,
- filp->private_data);
-}
-
-/**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
- *
- * @bo: The bo to assign the memory to.
- * @mm_node: Memory manager node for drm allocator.
- * @mem: The region where the bo resides.
- *
- */
-static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
- struct drm_mm_node *mm_node,
- struct ttm_resource *mem)
-{
- uint64_t addr = 0;
-
- if (mm_node->start != AMDGPU_BO_INVALID_OFFSET) {
- addr = mm_node->start << PAGE_SHIFT;
- addr += amdgpu_ttm_domain_start(amdgpu_ttm_adev(bo->bdev),
- mem->mem_type);
- }
- return addr;
-}
-
-/**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node corresponding to
- * @offset. It also modifies the offset to be within the drm_mm_node returned
- *
- * @mem: The region where the bo resides.
- * @offset: The offset that drm_mm_node is used for finding.
- *
- */
-static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
- uint64_t *offset)
-{
- struct drm_mm_node *mm_node = mem->mm_node;
-
- while (*offset >= (mm_node->size << PAGE_SHIFT)) {
- *offset -= (mm_node->size << PAGE_SHIFT);
- ++mm_node;
- }
- return mm_node;
-}
-
-/**
* amdgpu_ttm_map_buffer - Map memory into the GART windows
* @bo: buffer object to map
* @mem: memory object to map
- * @mm_node: drm_mm node object to map
+ * @mm_cur: range to map
* @num_pages: number of pages to map
- * @offset: offset into @mm_node where to start
* @window: which GART window to use
* @ring: DMA ring to use for the copy
* @tmz: if we should setup a TMZ enabled mapping
@@ -237,10 +184,10 @@ static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_resource *mem,
*/
static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
struct ttm_resource *mem,
- struct drm_mm_node *mm_node,
- unsigned num_pages, uint64_t offset,
- unsigned window, struct amdgpu_ring *ring,
- bool tmz, uint64_t *addr)
+ struct amdgpu_res_cursor *mm_cur,
+ unsigned num_pages, unsigned window,
+ struct amdgpu_ring *ring, bool tmz,
+ uint64_t *addr)
{
struct amdgpu_device *adev = ring->adev;
struct amdgpu_job *job;
@@ -254,20 +201,22 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
BUG_ON(adev->mman.buffer_funcs->copy_max_bytes <
AMDGPU_GTT_MAX_TRANSFER_SIZE * 8);
+ BUG_ON(mem->mem_type == AMDGPU_PL_PREEMPT);
/* Map only what can't be accessed directly */
if (!tmz && mem->start != AMDGPU_BO_INVALID_OFFSET) {
- *addr = amdgpu_mm_node_addr(bo, mm_node, mem) + offset;
+ *addr = amdgpu_ttm_domain_start(adev, mem->mem_type) +
+ mm_cur->start;
return 0;
}
*addr = adev->gmc.gart_start;
*addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE;
- *addr += offset & ~PAGE_MASK;
+ *addr += mm_cur->start & ~PAGE_MASK;
num_dw = ALIGN(adev->mman.buffer_funcs->copy_num_dw, 8);
- num_bytes = num_pages * 8;
+ num_bytes = num_pages * 8 * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
r = amdgpu_job_alloc_with_ib(adev, num_dw * 4 + num_bytes,
AMDGPU_IB_POOL_DELAYED, &job);
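
The window addressing above can be read as a small helper; a sketch under the patch's own names (AMDGPU_GTT_MAX_TRANSFER_SIZE, AMDGPU_GPU_PAGE_SIZE), not code from the commit:

    /* Each copy window is a fixed-size slot after gart_start; keep the
     * intra-page offset of the cursor so unaligned copies line up. */
    static u64 gart_window_addr(struct amdgpu_device *adev, unsigned int window,
                                u64 cursor_start)
    {
            u64 addr = adev->gmc.gart_start;

            addr += (u64)window * AMDGPU_GTT_MAX_TRANSFER_SIZE *
                    AMDGPU_GPU_PAGE_SIZE;
            addr += cursor_start & ~PAGE_MASK;
            return addr;
    }
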
@@ -292,17 +241,17 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo,
cpu_addr = &job->ibs[0].ptr[num_dw];
if (mem->mem_type == TTM_PL_TT) {
- dma_addr_t *dma_address;
+ dma_addr_t *dma_addr;
- dma_address = &bo->ttm->dma_address[offset >> PAGE_SHIFT];
- r = amdgpu_gart_map(adev, 0, num_pages, dma_address, flags,
+ dma_addr = &bo->ttm->dma_address[mm_cur->start >> PAGE_SHIFT];
+ r = amdgpu_gart_map(adev, 0, num_pages, dma_addr, flags,
cpu_addr);
if (r)
goto error_free;
} else {
dma_addr_t dma_address;
- dma_address = (mm_node->start << PAGE_SHIFT) + offset;
+ dma_address = mm_cur->start;
dma_address += adev->vm_manager.vram_base_offset;
for (i = 0; i < num_pages; ++i) {
@@ -330,7 +279,7 @@ error_free:
}
/**
- * amdgpu_copy_ttm_mem_to_mem - Helper function for copy
+ * amdgpu_ttm_copy_mem_to_mem - Helper function for copy
* @adev: amdgpu device
* @src: buffer/address where to read from
* @dst: buffer/address where to write to
@@ -354,9 +303,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
const uint32_t GTT_MAX_BYTES = (AMDGPU_GTT_MAX_TRANSFER_SIZE *
AMDGPU_GPU_PAGE_SIZE);
- uint64_t src_node_size, dst_node_size, src_offset, dst_offset;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct drm_mm_node *src_mm, *dst_mm;
+ struct amdgpu_res_cursor src_mm, dst_mm;
struct dma_fence *fence = NULL;
int r = 0;
@@ -365,29 +313,13 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
return -EINVAL;
}
- src_offset = src->offset;
- if (src->mem->mm_node) {
- src_mm = amdgpu_find_mm_node(src->mem, &src_offset);
- src_node_size = (src_mm->size << PAGE_SHIFT) - src_offset;
- } else {
- src_mm = NULL;
- src_node_size = ULLONG_MAX;
- }
-
- dst_offset = dst->offset;
- if (dst->mem->mm_node) {
- dst_mm = amdgpu_find_mm_node(dst->mem, &dst_offset);
- dst_node_size = (dst_mm->size << PAGE_SHIFT) - dst_offset;
- } else {
- dst_mm = NULL;
- dst_node_size = ULLONG_MAX;
- }
+ amdgpu_res_first(src->mem, src->offset, size, &src_mm);
+ amdgpu_res_first(dst->mem, dst->offset, size, &dst_mm);
mutex_lock(&adev->mman.gtt_window_lock);
-
- while (size) {
- uint32_t src_page_offset = src_offset & ~PAGE_MASK;
- uint32_t dst_page_offset = dst_offset & ~PAGE_MASK;
+ while (src_mm.remaining) {
+ uint32_t src_page_offset = src_mm.start & ~PAGE_MASK;
+ uint32_t dst_page_offset = dst_mm.start & ~PAGE_MASK;
struct dma_fence *next;
uint32_t cur_size;
uint64_t from, to;
@@ -396,19 +328,19 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
* begins at an offset, then adjust the size accordingly
*/
cur_size = max(src_page_offset, dst_page_offset);
- cur_size = min(min3(src_node_size, dst_node_size, size),
+ cur_size = min(min3(src_mm.size, dst_mm.size, size),
(uint64_t)(GTT_MAX_BYTES - cur_size));
/* Map src to window 0 and dst to window 1. */
- r = amdgpu_ttm_map_buffer(src->bo, src->mem, src_mm,
+ r = amdgpu_ttm_map_buffer(src->bo, src->mem, &src_mm,
PFN_UP(cur_size + src_page_offset),
- src_offset, 0, ring, tmz, &from);
+ 0, ring, tmz, &from);
if (r)
goto error;
- r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, dst_mm,
+ r = amdgpu_ttm_map_buffer(dst->bo, dst->mem, &dst_mm,
PFN_UP(cur_size + dst_page_offset),
- dst_offset, 1, ring, tmz, &to);
+ 1, ring, tmz, &to);
if (r)
goto error;
@@ -420,27 +352,8 @@ int amdgpu_ttm_copy_mem_to_mem(struct amdgpu_device *adev,
dma_fence_put(fence);
fence = next;
- size -= cur_size;
- if (!size)
- break;
-
- src_node_size -= cur_size;
- if (!src_node_size) {
- ++src_mm;
- src_node_size = src_mm->size << PAGE_SHIFT;
- src_offset = 0;
- } else {
- src_offset += cur_size;
- }
-
- dst_node_size -= cur_size;
- if (!dst_node_size) {
- ++dst_mm;
- dst_node_size = dst_mm->size << PAGE_SHIFT;
- dst_offset = 0;
- } else {
- dst_offset += cur_size;
- }
+ amdgpu_res_next(&src_mm, cur_size);
+ amdgpu_res_next(&dst_mm, cur_size);
}
error:
mutex_unlock(&adev->mman.gtt_window_lock);
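
The hunks above introduce the central idiom of this patch: walking a ttm_resource with an amdgpu_res_cursor instead of hand-incrementing drm_mm_node pointers. A minimal consumer sketch, assuming the cursor fields used throughout the patch (start, size, remaining):

    /* Visit each contiguous segment of a resource window in order. */
    static void walk_resource(struct ttm_resource *res, u64 offset, u64 size)
    {
            struct amdgpu_res_cursor cursor;

            amdgpu_res_first(res, offset, size, &cursor);
            while (cursor.remaining) {
                    /* cursor.start: byte offset of this segment,
                     * cursor.size:  contiguous bytes available here. */
                    amdgpu_res_next(&cursor, cursor.size); /* any step <= cursor.size */
            }
    }
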
@@ -519,7 +432,8 @@ error:
static bool amdgpu_mem_visible(struct amdgpu_device *adev,
struct ttm_resource *mem)
{
- struct drm_mm_node *nodes = mem->mm_node;
+ uint64_t mem_size = (u64)mem->num_pages << PAGE_SHIFT;
+ struct amdgpu_res_cursor cursor;
if (mem->mem_type == TTM_PL_SYSTEM ||
mem->mem_type == TTM_PL_TT)
@@ -527,12 +441,13 @@ static bool amdgpu_mem_visible(struct amdgpu_device *adev,
if (mem->mem_type != TTM_PL_VRAM)
return false;
+ amdgpu_res_first(mem, 0, mem_size, &cursor);
+
/* ttm_resource_ioremap only supports contiguous memory */
- if (nodes->size != mem->num_pages)
+ if (cursor.size != mem_size)
return false;
- return ((nodes->start + nodes->size) << PAGE_SHIFT)
- <= adev->gmc.visible_vram_size;
+ return cursor.start + cursor.size <= adev->gmc.visible_vram_size;
}
/*
@@ -547,10 +462,11 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
{
struct amdgpu_device *adev;
struct amdgpu_bo *abo;
- struct ttm_resource *old_mem = &bo->mem;
+ struct ttm_resource *old_mem = bo->resource;
int r;
- if (new_mem->mem_type == TTM_PL_TT) {
+ if (new_mem->mem_type == TTM_PL_TT ||
+ new_mem->mem_type == AMDGPU_PL_PREEMPT) {
r = amdgpu_ttm_backend_bind(bo->bdev, bo->ttm, new_mem);
if (r)
return r;
@@ -568,18 +484,20 @@ static int amdgpu_bo_move(struct ttm_buffer_object *bo, bool evict,
goto out;
}
if (old_mem->mem_type == TTM_PL_SYSTEM &&
- new_mem->mem_type == TTM_PL_TT) {
+ (new_mem->mem_type == TTM_PL_TT ||
+ new_mem->mem_type == AMDGPU_PL_PREEMPT)) {
ttm_bo_move_null(bo, new_mem);
goto out;
}
- if (old_mem->mem_type == TTM_PL_TT &&
+ if ((old_mem->mem_type == TTM_PL_TT ||
+ old_mem->mem_type == AMDGPU_PL_PREEMPT) &&
new_mem->mem_type == TTM_PL_SYSTEM) {
r = ttm_bo_wait_ctx(bo, ctx);
if (r)
return r;
amdgpu_ttm_backend_unbind(bo->bdev, bo->ttm);
- ttm_resource_free(bo, &bo->mem);
+ ttm_resource_free(bo, &bo->resource);
ttm_bo_assign_mem(bo, new_mem);
goto out;
}
@@ -646,10 +564,10 @@ out:
*
* Called by ttm_mem_io_reserve() ultimately via ttm_bo_vm_fault()
*/
-static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_resource *mem)
+static int amdgpu_ttm_io_mem_reserve(struct ttm_device *bdev,
+ struct ttm_resource *mem)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
- struct drm_mm_node *mm_node = mem->mm_node;
size_t bus_size = (size_t)mem->num_pages << PAGE_SHIFT;
switch (mem->mem_type) {
@@ -657,24 +575,25 @@ static int amdgpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_reso
/* system memory */
return 0;
case TTM_PL_TT:
+ case AMDGPU_PL_PREEMPT:
break;
case TTM_PL_VRAM:
mem->bus.offset = mem->start << PAGE_SHIFT;
/* check if it's visible */
if ((mem->bus.offset + bus_size) > adev->gmc.visible_vram_size)
return -EINVAL;
- /* Only physically contiguous buffers apply. In a contiguous
- * buffer, size of the first mm_node would match the number of
- * pages in ttm_resource.
- */
+
if (adev->mman.aper_base_kaddr &&
- (mm_node->size == mem->num_pages))
+ mem->placement & TTM_PL_FLAG_CONTIGUOUS)
mem->bus.addr = (u8 *)adev->mman.aper_base_kaddr +
mem->bus.offset;
mem->bus.offset += adev->gmc.aper_base;
mem->bus.is_iomem = true;
- mem->bus.caching = ttm_write_combined;
+ if (adev->gmc.xgmi.connected_to_cpu)
+ mem->bus.caching = ttm_cached;
+ else
+ mem->bus.caching = ttm_write_combined;
break;
default:
return -EINVAL;
@@ -686,12 +605,11 @@ static unsigned long amdgpu_ttm_io_mem_pfn(struct ttm_buffer_object *bo,
unsigned long page_offset)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
- uint64_t offset = (page_offset << PAGE_SHIFT);
- struct drm_mm_node *mm;
+ struct amdgpu_res_cursor cursor;
- mm = amdgpu_find_mm_node(&bo->mem, &offset);
- offset += adev->gmc.aper_base;
- return mm->start + (offset >> PAGE_SHIFT);
+ amdgpu_res_first(bo->resource, (u64)page_offset << PAGE_SHIFT, 0,
+ &cursor);
+ return (adev->gmc.aper_base + cursor.start) >> PAGE_SHIFT;
}
/**
@@ -745,10 +663,8 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
struct amdgpu_ttm_tt *gtt = (void *)ttm;
unsigned long start = gtt->userptr;
struct vm_area_struct *vma;
- struct hmm_range *range;
- unsigned long timeout;
struct mm_struct *mm;
- unsigned long i;
+ bool readonly;
int r = 0;
mm = bo->notifier.mm;
@@ -764,28 +680,9 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
if (!mmget_not_zero(mm)) /* Happens during process shutdown */
return -ESRCH;
- range = kzalloc(sizeof(*range), GFP_KERNEL);
- if (unlikely(!range)) {
- r = -ENOMEM;
- goto out;
- }
- range->notifier = &bo->notifier;
- range->start = bo->notifier.interval_tree.start;
- range->end = bo->notifier.interval_tree.last + 1;
- range->default_flags = HMM_PFN_REQ_FAULT;
- if (!amdgpu_ttm_tt_is_readonly(ttm))
- range->default_flags |= HMM_PFN_REQ_WRITE;
-
- range->hmm_pfns = kvmalloc_array(ttm->num_pages,
- sizeof(*range->hmm_pfns), GFP_KERNEL);
- if (unlikely(!range->hmm_pfns)) {
- r = -ENOMEM;
- goto out_free_ranges;
- }
-
mmap_read_lock(mm);
- vma = find_vma(mm, start);
- if (unlikely(!vma || start < vma->vm_start)) {
+ vma = vma_lookup(mm, start);
+ if (unlikely(!vma)) {
r = -EFAULT;
goto out_unlock;
}
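
vma_lookup() is a drop-in tightening of the removed find_vma() pattern; a sketch of the equivalence (vma_lookup() is the mm helper added around v5.13):

    /* find_vma() may return the *next* VMA above addr; vma_lookup()
     * returns a VMA only if it actually contains addr. */
    static struct vm_area_struct *vma_lookup_equiv(struct mm_struct *mm,
                                                   unsigned long addr)
    {
            struct vm_area_struct *vma = find_vma(mm, addr);

            if (vma && addr < vma->vm_start)
                    vma = NULL;
            return vma;
    }
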
@@ -794,46 +691,15 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
r = -EPERM;
goto out_unlock;
}
- mmap_read_unlock(mm);
- timeout = jiffies + msecs_to_jiffies(HMM_RANGE_DEFAULT_TIMEOUT);
-
-retry:
- range->notifier_seq = mmu_interval_read_begin(&bo->notifier);
-
- mmap_read_lock(mm);
- r = hmm_range_fault(range);
- mmap_read_unlock(mm);
- if (unlikely(r)) {
- /*
- * FIXME: This timeout should encompass the retry from
- * mmu_interval_read_retry() as well.
- */
- if (r == -EBUSY && !time_after(jiffies, timeout))
- goto retry;
- goto out_free_pfns;
- }
-
- /*
- * Due to default_flags, all pages are HMM_PFN_VALID or
- * hmm_range_fault() fails. FIXME: The pages cannot be touched outside
- * the notifier_lock, and mmu_interval_read_retry() must be done first.
- */
- for (i = 0; i < ttm->num_pages; i++)
- pages[i] = hmm_pfn_to_page(range->hmm_pfns[i]);
-
- gtt->range = range;
- mmput(mm);
-
- return 0;
+ readonly = amdgpu_ttm_tt_is_readonly(ttm);
+ r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
+ ttm->num_pages, &gtt->range, readonly,
+ true);
out_unlock:
mmap_read_unlock(mm);
-out_free_pfns:
- kvfree(range->hmm_pfns);
-out_free_ranges:
- kfree(range);
-out:
mmput(mm);
+
return r;
}
@@ -862,10 +728,7 @@ bool amdgpu_ttm_tt_get_user_pages_done(struct ttm_tt *ttm)
* FIXME: Must always hold notifier_lock for this, and must
* not ignore the return code.
*/
- r = mmu_interval_read_retry(gtt->range->notifier,
- gtt->range->notifier_seq);
- kvfree(gtt->range->hmm_pfns);
- kfree(gtt->range);
+ r = amdgpu_hmm_range_get_pages_done(gtt->range);
gtt->range = NULL;
}
@@ -893,16 +756,15 @@ void amdgpu_ttm_tt_set_user_pages(struct ttm_tt *ttm, struct page **pages)
*
* Called by amdgpu_ttm_backend_bind()
**/
-static int amdgpu_ttm_tt_pin_userptr(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_pin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
- int r;
-
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+ int r;
/* Allocate an SG array and squash pages into it */
r = sg_alloc_table_from_pages(ttm->sg, ttm->pages, ttm->num_pages, 0,
@@ -931,18 +793,17 @@ release_sg:
/*
* amdgpu_ttm_tt_unpin_userptr - Unpin and unmap userptr pages
*/
-static void amdgpu_ttm_tt_unpin_userptr(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
-
int write = !(gtt->userflags & AMDGPU_GEM_USERPTR_READONLY);
enum dma_data_direction direction = write ?
DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
/* double check that we don't free the table twice */
- if (!ttm->sg->sgl)
+ if (!ttm->sg || !ttm->sg->sgl)
return;
/* unmap the pages mapped to the device */
@@ -980,7 +841,7 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
uint64_t page_idx = 1;
r = amdgpu_gart_bind(adev, gtt->offset, page_idx,
- ttm->pages, gtt->ttm.dma_address, flags);
+ gtt->ttm.dma_address, flags);
if (r)
goto gart_bind_fail;
@@ -994,11 +855,10 @@ static int amdgpu_ttm_gart_bind(struct amdgpu_device *adev,
r = amdgpu_gart_bind(adev,
gtt->offset + (page_idx << PAGE_SHIFT),
ttm->num_pages - page_idx,
- &ttm->pages[page_idx],
&(gtt->ttm.dma_address[page_idx]), flags);
} else {
r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ gtt->ttm.dma_address, flags);
}
gart_bind_fail:
@@ -1015,7 +875,7 @@ gart_bind_fail:
* Called by ttm_tt_bind() on behalf of ttm_bo_handle_move_mem().
* This handles binding GTT memory to the device address space.
*/
-static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_backend_bind(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_resource *bo_mem)
{
@@ -1036,7 +896,23 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
DRM_ERROR("failed to pin userptr\n");
return r;
}
+ } else if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
+ if (!ttm->sg) {
+ struct dma_buf_attachment *attach;
+ struct sg_table *sgt;
+
+ attach = gtt->gobj->import_attach;
+ sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
+ if (IS_ERR(sgt))
+ return PTR_ERR(sgt);
+
+ ttm->sg = sgt;
+ }
+
+ drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
+ ttm->num_pages);
}
+
if (!ttm->num_pages) {
WARN(1, "nothing to bind %u pages for mreg %p back %p!\n",
ttm->num_pages, bo_mem, ttm);
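
This hunk moves dma-buf import mapping from populate time to bind time; the matching unmap moves into backend_unbind below. As a lifecycle sketch (an assumed simplification of the patch's flow, error handling trimmed):

    /* Pairing introduced by this patch: map once at bind, unmap at unbind. */
    static int bind_imported(struct dma_buf_attachment *attach,
                             struct ttm_tt *ttm, dma_addr_t *dma_address)
    {
            struct sg_table *sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);

            if (IS_ERR(sgt))
                    return PTR_ERR(sgt);
            ttm->sg = sgt;
            drm_prime_sg_to_dma_addr_array(sgt, dma_address, ttm->num_pages);
            return 0;
    }

    static void unbind_imported(struct dma_buf_attachment *attach, struct ttm_tt *ttm)
    {
            dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
            ttm->sg = NULL;
    }
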
@@ -1058,7 +934,7 @@ static int amdgpu_ttm_backend_bind(struct ttm_bo_device *bdev,
/* bind pages into GART page tables */
gtt->offset = (u64)bo_mem->start << PAGE_SHIFT;
r = amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages,
- ttm->pages, gtt->ttm.dma_address, flags);
+ gtt->ttm.dma_address, flags);
if (r)
DRM_ERROR("failed to bind %u pages at 0x%08llX\n",
@@ -1080,51 +956,50 @@ int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo)
struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev);
struct ttm_operation_ctx ctx = { false, false };
struct amdgpu_ttm_tt *gtt = (void *)bo->ttm;
- struct ttm_resource tmp;
struct ttm_placement placement;
struct ttm_place placements;
+ struct ttm_resource *tmp;
uint64_t addr, flags;
int r;
- if (bo->mem.start != AMDGPU_BO_INVALID_OFFSET)
+ if (bo->resource->start != AMDGPU_BO_INVALID_OFFSET)
return 0;
addr = amdgpu_gmc_agp_addr(bo);
if (addr != AMDGPU_BO_INVALID_OFFSET) {
- bo->mem.start = addr >> PAGE_SHIFT;
- } else {
+ bo->resource->start = addr >> PAGE_SHIFT;
+ return 0;
+ }
- /* allocate GART space */
- tmp = bo->mem;
- tmp.mm_node = NULL;
- placement.num_placement = 1;
- placement.placement = &placements;
- placement.num_busy_placement = 1;
- placement.busy_placement = &placements;
- placements.fpfn = 0;
- placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
- placements.mem_type = TTM_PL_TT;
- placements.flags = bo->mem.placement;
-
- r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
- if (unlikely(r))
- return r;
+ /* allocate GART space */
+ placement.num_placement = 1;
+ placement.placement = &placements;
+ placement.num_busy_placement = 1;
+ placement.busy_placement = &placements;
+ placements.fpfn = 0;
+ placements.lpfn = adev->gmc.gart_size >> PAGE_SHIFT;
+ placements.mem_type = TTM_PL_TT;
+ placements.flags = bo->resource->placement;
- /* compute PTE flags for this buffer object */
- flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, &tmp);
+ r = ttm_bo_mem_space(bo, &placement, &tmp, &ctx);
+ if (unlikely(r))
+ return r;
- /* Bind pages */
- gtt->offset = (u64)tmp.start << PAGE_SHIFT;
- r = amdgpu_ttm_gart_bind(adev, bo, flags);
- if (unlikely(r)) {
- ttm_resource_free(bo, &tmp);
- return r;
- }
+ /* compute PTE flags for this buffer object */
+ flags = amdgpu_ttm_tt_pte_flags(adev, bo->ttm, tmp);
- ttm_resource_free(bo, &bo->mem);
- bo->mem = tmp;
+ /* Bind pages */
+ gtt->offset = (u64)tmp->start << PAGE_SHIFT;
+ r = amdgpu_ttm_gart_bind(adev, bo, flags);
+ if (unlikely(r)) {
+ ttm_resource_free(bo, &tmp);
+ return r;
}
+ amdgpu_gart_invalidate_tlb(adev);
+ ttm_resource_free(bo, &bo->resource);
+ ttm_bo_assign_mem(bo, tmp);
+
return 0;
}
@@ -1143,7 +1018,7 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
if (!tbo->ttm)
return 0;
- flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, &tbo->mem);
+ flags = amdgpu_ttm_tt_pte_flags(adev, tbo->ttm, tbo->resource);
r = amdgpu_ttm_gart_bind(adev, tbo, flags);
return r;
@@ -1155,19 +1030,26 @@ int amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo)
* Called by ttm_tt_unbind() on behalf of ttm_bo_move_ttm() and
* ttm_tt_destroy().
*/
-static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_unbind(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(bdev);
struct amdgpu_ttm_tt *gtt = (void *)ttm;
int r;
- if (!gtt->bound)
- return;
-
/* if the pages have userptr pinning then clear that first */
- if (gtt->userptr)
+ if (gtt->userptr) {
amdgpu_ttm_tt_unpin_userptr(bdev, ttm);
+ } else if (ttm->sg && gtt->gobj->import_attach) {
+ struct dma_buf_attachment *attach;
+
+ attach = gtt->gobj->import_attach;
+ dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
+ ttm->sg = NULL;
+ }
+
+ if (!gtt->bound)
+ return;
if (gtt->offset == AMDGPU_BO_INVALID_OFFSET)
return;
@@ -1180,7 +1062,7 @@ static void amdgpu_ttm_backend_unbind(struct ttm_bo_device *bdev,
gtt->bound = false;
}
-static void amdgpu_ttm_backend_destroy(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1234,7 +1116,7 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo,
* Map the pages of a ttm_tt object to an address space visible
* to the underlying device.
*/
-static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
+static int amdgpu_ttm_tt_populate(struct ttm_device *bdev,
struct ttm_tt *ttm,
struct ttm_operation_ctx *ctx)
{
@@ -1246,28 +1128,11 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
ttm->sg = kzalloc(sizeof(struct sg_table), GFP_KERNEL);
if (!ttm->sg)
return -ENOMEM;
-
- ttm->page_flags |= TTM_PAGE_FLAG_SG;
return 0;
}
- if (ttm->page_flags & TTM_PAGE_FLAG_SG) {
- if (!ttm->sg) {
- struct dma_buf_attachment *attach;
- struct sg_table *sgt;
-
- attach = gtt->gobj->import_attach;
- sgt = dma_buf_map_attachment(attach, DMA_BIDIRECTIONAL);
- if (IS_ERR(sgt))
- return PTR_ERR(sgt);
-
- ttm->sg = sgt;
- }
-
- drm_prime_sg_to_dma_addr_array(ttm->sg, gtt->ttm.dma_address,
- ttm->num_pages);
+ if (ttm->page_flags & TTM_PAGE_FLAG_SG)
return 0;
- }
return ttm_pool_alloc(&adev->mman.bdev.pool, ttm, ctx);
}
@@ -1278,7 +1143,7 @@ static int amdgpu_ttm_tt_populate(struct ttm_bo_device *bdev,
* Unmaps pages of a ttm_tt object from the device address space and
* unpopulates the page array backing it.
*/
-static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
+static void amdgpu_ttm_tt_unpopulate(struct ttm_device *bdev,
struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
@@ -1287,15 +1152,6 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_bo_device *bdev,
if (gtt && gtt->userptr) {
amdgpu_ttm_tt_set_user_pages(ttm, NULL);
kfree(ttm->sg);
- ttm->page_flags &= ~TTM_PAGE_FLAG_SG;
- return;
- }
-
- if (ttm->sg && gtt->gobj->import_attach) {
- struct dma_buf_attachment *attach;
-
- attach = gtt->gobj->import_attach;
- dma_buf_unmap_attachment(attach, ttm->sg, DMA_BIDIRECTIONAL);
ttm->sg = NULL;
return;
}
@@ -1330,6 +1186,9 @@ int amdgpu_ttm_tt_set_userptr(struct ttm_buffer_object *bo,
return -ENOMEM;
}
+ /* Set TTM_PAGE_FLAG_SG before populate but after create. */
+ bo->ttm->page_flags |= TTM_PAGE_FLAG_SG;
+
gtt = (void *)bo->ttm;
gtt->userptr = addr;
gtt->userflags = flags;
@@ -1423,13 +1282,18 @@ uint64_t amdgpu_ttm_tt_pde_flags(struct ttm_tt *ttm, struct ttm_resource *mem)
if (mem && mem->mem_type != TTM_PL_SYSTEM)
flags |= AMDGPU_PTE_VALID;
- if (mem && mem->mem_type == TTM_PL_TT) {
+ if (mem && (mem->mem_type == TTM_PL_TT ||
+ mem->mem_type == AMDGPU_PL_PREEMPT)) {
flags |= AMDGPU_PTE_SYSTEM;
if (ttm->caching == ttm_cached)
flags |= AMDGPU_PTE_SNOOPED;
}
+ if (mem && mem->mem_type == TTM_PL_VRAM &&
+ mem->bus.caching == ttm_cached)
+ flags |= AMDGPU_PTE_SNOOPED;
+
return flags;
}
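
Condensing the snooping rules after this hunk (a restatement, not code from the commit): SNOOPED is set exactly when the backing memory is CPU-cacheable, which now includes CPU-coherent VRAM on XGMI systems.

    static bool pte_needs_snoop(struct ttm_tt *ttm, struct ttm_resource *mem)
    {
            if (mem->mem_type == TTM_PL_TT || mem->mem_type == AMDGPU_PL_PREEMPT)
                    return ttm->caching == ttm_cached;
            if (mem->mem_type == TTM_PL_VRAM) /* e.g. XGMI connected_to_cpu */
                    return mem->bus.caching == ttm_cached;
            return false;
    }
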
@@ -1468,12 +1332,16 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
const struct ttm_place *place)
{
- unsigned long num_pages = bo->mem.num_pages;
- struct drm_mm_node *node = bo->mem.mm_node;
+ unsigned long num_pages = bo->resource->num_pages;
+ struct amdgpu_res_cursor cursor;
struct dma_resv_list *flist;
struct dma_fence *f;
int i;
+ /* Swapout? */
+ if (bo->resource->mem_type == TTM_PL_SYSTEM)
+ return true;
+
if (bo->type == ttm_bo_type_kernel &&
!amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
return false;
@@ -1482,7 +1350,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* If true, then return false as any KFD process needs all its BOs to
* be resident to run successfully
*/
- flist = dma_resv_get_list(bo->base.resv);
+ flist = dma_resv_shared_list(bo->base.resv);
if (flist) {
for (i = 0; i < flist->shared_count; ++i) {
f = rcu_dereference_protected(flist->shared[i],
@@ -1492,7 +1360,16 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
}
- switch (bo->mem.mem_type) {
+ switch (bo->resource->mem_type) {
+ case AMDGPU_PL_PREEMPT:
+ /* Preemptible BOs don't own system resources managed by the
+ * driver (pages, VRAM, GART space). They point to resources
+ * owned by someone else (e.g. pageable memory in user mode
+ * or a DMABuf). They are used in a preemptible context so we
+ * can guarantee no deadlocks and good QoS in case of MMU
+ * notifiers or DMABuf move notifiers from the resource owner.
+ */
+ return false;
case TTM_PL_TT:
if (amdgpu_bo_is_amdgpu_bo(bo) &&
amdgpu_bo_encrypted(ttm_to_amdgpu_bo(bo)))
@@ -1501,13 +1378,15 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
/* Check each drm MM node individually */
- while (num_pages) {
- if (place->fpfn < (node->start + node->size) &&
- !(place->lpfn && place->lpfn <= node->start))
+ amdgpu_res_first(bo->resource, 0, (u64)num_pages << PAGE_SHIFT,
+ &cursor);
+ while (cursor.remaining) {
+ if (place->fpfn < PFN_DOWN(cursor.start + cursor.size)
+ && !(place->lpfn &&
+ place->lpfn <= PFN_DOWN(cursor.start)))
return true;
- num_pages -= node->size;
- ++node;
+ amdgpu_res_next(&cursor, cursor.size);
}
return false;
@@ -1531,41 +1410,36 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
* access for debugging purposes.
*/
static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
- unsigned long offset,
- void *buf, int len, int write)
+ unsigned long offset, void *buf, int len,
+ int write)
{
struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo);
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
- struct drm_mm_node *nodes;
+ struct amdgpu_res_cursor cursor;
+ unsigned long flags;
uint32_t value = 0;
int ret = 0;
- uint64_t pos;
- unsigned long flags;
- if (bo->mem.mem_type != TTM_PL_VRAM)
+ if (bo->resource->mem_type != TTM_PL_VRAM)
return -EIO;
- pos = offset;
- nodes = amdgpu_find_mm_node(&abo->tbo.mem, &pos);
- pos += (nodes->start << PAGE_SHIFT);
-
- while (len && pos < adev->gmc.mc_vram_size) {
- uint64_t aligned_pos = pos & ~(uint64_t)3;
- uint64_t bytes = 4 - (pos & 3);
- uint32_t shift = (pos & 3) * 8;
+ amdgpu_res_first(bo->resource, offset, len, &cursor);
+ while (cursor.remaining) {
+ uint64_t aligned_pos = cursor.start & ~(uint64_t)3;
+ uint64_t bytes = 4 - (cursor.start & 3);
+ uint32_t shift = (cursor.start & 3) * 8;
uint32_t mask = 0xffffffff << shift;
- if (len < bytes) {
- mask &= 0xffffffff >> (bytes - len) * 8;
- bytes = len;
+ if (cursor.size < bytes) {
+ mask &= 0xffffffff >> (bytes - cursor.size) * 8;
+ bytes = cursor.size;
}
if (mask != 0xffffffff) {
spin_lock_irqsave(&adev->mmio_idx_lock, flags);
WREG32_NO_KIQ(mmMM_INDEX, ((uint32_t)aligned_pos) | 0x80000000);
WREG32_NO_KIQ(mmMM_INDEX_HI, aligned_pos >> 31);
- if (!write || mask != 0xffffffff)
- value = RREG32_NO_KIQ(mmMM_DATA);
+ value = RREG32_NO_KIQ(mmMM_DATA);
if (write) {
value &= ~mask;
value |= (*(uint32_t *)buf << shift) & mask;
@@ -1577,21 +1451,15 @@ static int amdgpu_ttm_access_memory(struct ttm_buffer_object *bo,
memcpy(buf, &value, bytes);
}
} else {
- bytes = (nodes->start + nodes->size) << PAGE_SHIFT;
- bytes = min(bytes - pos, (uint64_t)len & ~0x3ull);
-
- amdgpu_device_vram_access(adev, pos, (uint32_t *)buf,
- bytes, write);
+ bytes = cursor.size & ~0x3ULL;
+ amdgpu_device_vram_access(adev, cursor.start,
+ (uint32_t *)buf, bytes,
+ write);
}
ret += bytes;
buf = (uint8_t *)buf + bytes;
- pos += bytes;
- len -= bytes;
- if (pos >= (nodes->start + nodes->size) << PAGE_SHIFT) {
- ++nodes;
- pos = (nodes->start << PAGE_SHIFT);
- }
+ amdgpu_res_next(&cursor, bytes);
}
return ret;
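
A worked example of the mask/shift arithmetic above, assuming a little-endian dword read through MM_DATA and a 1-byte access at VRAM offset 0x1002:

    /*
     * aligned_pos = 0x1002 & ~3        = 0x1000
     * shift       = (0x1002 & 3) * 8   = 16
     * bytes       = 4 - (0x1002 & 3)   = 2, clamped to cursor.size = 1
     * mask        = (0xffffffff << 16) & (0xffffffff >> 8) = 0x00ff0000
     *
     * value = RREG32_NO_KIQ(mmMM_DATA);    // dword at 0x1000
     * byte  = (value & 0x00ff0000) >> 16;  // the byte at 0x1002
     */
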
@@ -1603,7 +1471,7 @@ amdgpu_bo_delete_mem_notify(struct ttm_buffer_object *bo)
amdgpu_bo_move_notify(bo, false, NULL);
}
-static struct ttm_bo_driver amdgpu_bo_driver = {
+static struct ttm_device_funcs amdgpu_bo_driver = {
.ttm_tt_create = &amdgpu_ttm_tt_create,
.ttm_tt_populate = &amdgpu_ttm_tt_populate,
.ttm_tt_unpopulate = &amdgpu_ttm_tt_unpopulate,
@@ -1611,7 +1479,6 @@ static struct ttm_bo_driver amdgpu_bo_driver = {
.eviction_valuable = amdgpu_ttm_bo_eviction_valuable,
.evict_flags = &amdgpu_evict_flags,
.move = &amdgpu_bo_move,
- .verify_access = &amdgpu_verify_access,
.delete_mem_notify = &amdgpu_bo_delete_mem_notify,
.release_notify = &amdgpu_bo_release_notify,
.io_mem_reserve = &amdgpu_ttm_io_mem_reserve,
@@ -1696,7 +1563,7 @@ static void amdgpu_ttm_training_data_block_init(struct amdgpu_device *adev)
(adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
ctx->train_data_size =
GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
-
+
DRM_DEBUG("train_data_size:%llx,p2c_train_data_offset:%llx,c2p_train_data_offset:%llx.\n",
ctx->train_data_size,
ctx->p2c_train_data_offset,
@@ -1714,11 +1581,8 @@ static int amdgpu_ttm_reserve_tmr(struct amdgpu_device *adev)
bool mem_train_support = false;
if (!amdgpu_sriov_vf(adev)) {
- ret = amdgpu_mem_train_support(adev);
- if (ret == 1)
+ if (amdgpu_atomfirmware_mem_training_supported(adev))
mem_train_support = true;
- else if (ret == -1)
- return -EINVAL;
else
DRM_DEBUG("memory training does not support!\n");
}
@@ -1785,7 +1649,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
mutex_init(&adev->mman.gtt_window_lock);
/* No others user of address space so set it to 0 */
- r = ttm_bo_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
+ r = ttm_device_init(&adev->mman.bdev, &amdgpu_bo_driver, adev->dev,
adev_to_drm(adev)->anon_inode->i_mapping,
adev_to_drm(adev)->vma_offset_manager,
adev->need_swiotlb,
@@ -1812,8 +1676,15 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
/* Change the size here instead of the init above so only lpfn is affected */
amdgpu_ttm_set_buffer_funcs_status(adev, false);
#ifdef CONFIG_64BIT
- adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
- adev->gmc.visible_vram_size);
+#ifdef CONFIG_X86
+ if (adev->gmc.xgmi.connected_to_cpu)
+ adev->mman.aper_base_kaddr = ioremap_cache(adev->gmc.aper_base,
+ adev->gmc.visible_vram_size);
+
+ else
+#endif
+ adev->mman.aper_base_kaddr = ioremap_wc(adev->gmc.aper_base,
+ adev->gmc.visible_vram_size);
#endif
/*
@@ -1853,6 +1724,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
NULL);
if (r)
return r;
+ r = amdgpu_bo_create_kernel_at(adev, adev->mman.stolen_reserved_offset,
+ adev->mman.stolen_reserved_size,
+ AMDGPU_GEM_DOMAIN_VRAM,
+ &adev->mman.stolen_reserved_memory,
+ NULL);
+ if (r)
+ return r;
DRM_INFO("amdgpu: %uM of VRAM memory ready\n",
(unsigned) (adev->gmc.real_vram_size / (1024 * 1024)));
@@ -1879,6 +1757,13 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
DRM_INFO("amdgpu: %uM of GTT memory ready.\n",
(unsigned)(gtt_size / (1024 * 1024)));
+ /* Initialize preemptible memory pool */
+ r = amdgpu_preempt_mgr_init(adev);
+ if (r) {
+ DRM_ERROR("Failed initializing PREEMPT heap.\n");
+ return r;
+ }
+
/* Initialize various on-chip memory pools */
r = amdgpu_ttm_init_on_chip(adev, AMDGPU_PL_GDS, adev->gds.gds_size);
if (r) {
@@ -1915,18 +1800,18 @@ void amdgpu_ttm_fini(struct amdgpu_device *adev)
amdgpu_bo_free_kernel(&adev->mman.stolen_extended_memory, NULL, NULL);
/* return the IP Discovery TMR memory back to VRAM */
amdgpu_bo_free_kernel(&adev->mman.discovery_memory, NULL, NULL);
+ if (adev->mman.stolen_reserved_size)
+ amdgpu_bo_free_kernel(&adev->mman.stolen_reserved_memory,
+ NULL, NULL);
amdgpu_ttm_fw_reserve_vram_fini(adev);
- if (adev->mman.aper_base_kaddr)
- iounmap(adev->mman.aper_base_kaddr);
- adev->mman.aper_base_kaddr = NULL;
-
amdgpu_vram_mgr_fini(adev);
amdgpu_gtt_mgr_fini(adev);
+ amdgpu_preempt_mgr_fini(adev);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GDS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_GWS);
ttm_range_man_fini(&adev->mman.bdev, AMDGPU_PL_OA);
- ttm_bo_device_release(&adev->mman.bdev);
+ ttm_device_fini(&adev->mman.bdev);
adev->mman.initialized = false;
DRM_INFO("amdgpu: ttm finalized\n");
}
@@ -1979,50 +1864,6 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
adev->mman.buffer_funcs_enabled = enable;
}
-static vm_fault_t amdgpu_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo = vmf->vma->vm_private_data;
- vm_fault_t ret;
-
- ret = ttm_bo_vm_reserve(bo, vmf);
- if (ret)
- return ret;
-
- ret = amdgpu_bo_fault_reserve_notify(bo);
- if (ret)
- goto unlock;
-
- ret = ttm_bo_vm_fault_reserved(vmf, vmf->vma->vm_page_prot,
- TTM_BO_VM_NUM_PREFAULT, 1);
- if (ret == VM_FAULT_RETRY && !(vmf->flags & FAULT_FLAG_RETRY_NOWAIT))
- return ret;
-
-unlock:
- dma_resv_unlock(bo->base.resv);
- return ret;
-}
-
-static struct vm_operations_struct amdgpu_ttm_vm_ops = {
- .fault = amdgpu_ttm_fault,
- .open = ttm_bo_vm_open,
- .close = ttm_bo_vm_close,
- .access = ttm_bo_vm_access
-};
-
-int amdgpu_mmap(struct file *filp, struct vm_area_struct *vma)
-{
- struct drm_file *file_priv = filp->private_data;
- struct amdgpu_device *adev = drm_to_adev(file_priv->minor->dev);
- int r;
-
- r = ttm_bo_mmap(filp, vma, &adev->mman.bdev);
- if (unlikely(r != 0))
- return r;
-
- vma->vm_ops = &amdgpu_ttm_vm_ops;
- return 0;
-}
-
int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
uint64_t dst_offset, uint32_t byte_count,
struct dma_resv *resv,
@@ -2053,7 +1894,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset,
return r;
if (vm_needs_flush) {
- job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gart.bo);
+ job->vm_pd_addr = amdgpu_gmc_pd_addr(adev->gmc.pdb0_bo ?
+ adev->gmc.pdb0_bo : adev->gart.bo);
job->vm_needs_flush = true;
}
if (resv) {
@@ -2104,9 +1946,9 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
uint32_t max_bytes = adev->mman.buffer_funcs->fill_max_bytes;
struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring;
- struct drm_mm_node *mm_node;
- unsigned long num_pages;
+ struct amdgpu_res_cursor cursor;
unsigned int num_loops, num_dw;
+ uint64_t num_bytes;
struct amdgpu_job *job;
int r;
@@ -2116,21 +1958,24 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
return -EINVAL;
}
- if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ if (bo->tbo.resource->mem_type == AMDGPU_PL_PREEMPT) {
+ DRM_ERROR("Trying to clear preemptible memory.\n");
+ return -EINVAL;
+ }
+
+ if (bo->tbo.resource->mem_type == TTM_PL_TT) {
r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r)
return r;
}
- num_pages = bo->tbo.mem.num_pages;
- mm_node = bo->tbo.mem.mm_node;
+ num_bytes = bo->tbo.resource->num_pages << PAGE_SHIFT;
num_loops = 0;
- while (num_pages) {
- uint64_t byte_count = mm_node->size << PAGE_SHIFT;
- num_loops += DIV_ROUND_UP_ULL(byte_count, max_bytes);
- num_pages -= mm_node->size;
- ++mm_node;
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
+ while (cursor.remaining) {
+ num_loops += DIV_ROUND_UP_ULL(cursor.size, max_bytes);
+ amdgpu_res_next(&cursor, cursor.size);
}
num_dw = num_loops * adev->mman.buffer_funcs->fill_num_dw;
@@ -2152,27 +1997,17 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo,
}
}
- num_pages = bo->tbo.mem.num_pages;
- mm_node = bo->tbo.mem.mm_node;
-
- while (num_pages) {
- uint64_t byte_count = mm_node->size << PAGE_SHIFT;
- uint64_t dst_addr;
-
- dst_addr = amdgpu_mm_node_addr(&bo->tbo, mm_node, &bo->tbo.mem);
- while (byte_count) {
- uint32_t cur_size_in_bytes = min_t(uint64_t, byte_count,
- max_bytes);
+ amdgpu_res_first(bo->tbo.resource, 0, num_bytes, &cursor);
+ while (cursor.remaining) {
+ uint32_t cur_size = min_t(uint64_t, cursor.size, max_bytes);
+ uint64_t dst_addr = cursor.start;
- amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data,
- dst_addr, cur_size_in_bytes);
+ dst_addr += amdgpu_ttm_domain_start(adev,
+ bo->tbo.resource->mem_type);
+ amdgpu_emit_fill_buffer(adev, &job->ibs[0], src_data, dst_addr,
+ cur_size);
- dst_addr += cur_size_in_bytes;
- byte_count -= cur_size_in_bytes;
- }
-
- num_pages -= mm_node->size;
- ++mm_node;
+ amdgpu_res_next(&cursor, cur_size);
}
amdgpu_ring_pad_ib(ring, &job->ibs[0]);
@@ -2191,36 +2026,74 @@ error_free:
#if defined(CONFIG_DEBUG_FS)
-static int amdgpu_mm_dump_table(struct seq_file *m, void *data)
+static int amdgpu_mm_vram_table_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- unsigned ttm_pl = (uintptr_t)node->info_ent->data;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
- struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev, ttm_pl);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_VRAM);
struct drm_printer p = drm_seq_file_printer(m);
man->func->debug(man, &p);
return 0;
}
-static int amdgpu_ttm_pool_debugfs(struct seq_file *m, void *data)
+static int amdgpu_ttm_page_pool_show(struct seq_file *m, void *unused)
{
- struct drm_info_node *node = (struct drm_info_node *)m->private;
- struct drm_device *dev = node->minor->dev;
- struct amdgpu_device *adev = drm_to_adev(dev);
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
return ttm_pool_debugfs(&adev->mman.bdev.pool, m);
}
-static const struct drm_info_list amdgpu_ttm_debugfs_list[] = {
- {"amdgpu_vram_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_VRAM},
- {"amdgpu_gtt_mm", amdgpu_mm_dump_table, 0, (void *)TTM_PL_TT},
- {"amdgpu_gds_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GDS},
- {"amdgpu_gws_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_GWS},
- {"amdgpu_oa_mm", amdgpu_mm_dump_table, 0, (void *)AMDGPU_PL_OA},
- {"ttm_page_pool", amdgpu_ttm_pool_debugfs, 0, NULL},
-};
+static int amdgpu_mm_tt_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ TTM_PL_TT);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+static int amdgpu_mm_gds_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GDS);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+static int amdgpu_mm_gws_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_GWS);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+static int amdgpu_mm_oa_table_show(struct seq_file *m, void *unused)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)m->private;
+ struct ttm_resource_manager *man = ttm_manager_type(&adev->mman.bdev,
+ AMDGPU_PL_OA);
+ struct drm_printer p = drm_seq_file_printer(m);
+
+ man->func->debug(man, &p);
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_vram_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_tt_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gds_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_gws_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_mm_oa_table);
+DEFINE_SHOW_ATTRIBUTE(amdgpu_ttm_page_pool);
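
DEFINE_SHOW_ATTRIBUTE() comes from <linux/seq_file.h>; a minimal sketch of the pattern these debugfs hunks switch to (foo is a placeholder name):

    #include <linux/debugfs.h>
    #include <linux/seq_file.h>

    /* DEFINE_SHOW_ATTRIBUTE(foo) requires foo_show() and generates
     * foo_fops via single_open(); the data pointer handed to
     * debugfs_create_file() arrives as m->private. */
    static int foo_show(struct seq_file *m, void *unused)
    {
            seq_puts(m, "hello\n");
            return 0;
    }
    DEFINE_SHOW_ATTRIBUTE(foo);

    /* usage: debugfs_create_file("foo", 0444, root, data, &foo_fops); */
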
/*
* amdgpu_ttm_vram_read - Linear read access to VRAM
@@ -2308,58 +2181,6 @@ static const struct file_operations amdgpu_ttm_vram_fops = {
.llseek = default_llseek,
};
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
-
-/*
- * amdgpu_ttm_gtt_read - Linear read access to GTT memory
- */
-static ssize_t amdgpu_ttm_gtt_read(struct file *f, char __user *buf,
- size_t size, loff_t *pos)
-{
- struct amdgpu_device *adev = file_inode(f)->i_private;
- ssize_t result = 0;
- int r;
-
- while (size) {
- loff_t p = *pos / PAGE_SIZE;
- unsigned off = *pos & ~PAGE_MASK;
- size_t cur_size = min_t(size_t, size, PAGE_SIZE - off);
- struct page *page;
- void *ptr;
-
- if (p >= adev->gart.num_cpu_pages)
- return result;
-
- page = adev->gart.pages[p];
- if (page) {
- ptr = kmap(page);
- ptr += off;
-
- r = copy_to_user(buf, ptr, cur_size);
- kunmap(adev->gart.pages[p]);
- } else
- r = clear_user(buf, cur_size);
-
- if (r)
- return -EFAULT;
-
- result += cur_size;
- buf += cur_size;
- *pos += cur_size;
- size -= cur_size;
- }
-
- return result;
-}
-
-static const struct file_operations amdgpu_ttm_gtt_fops = {
- .owner = THIS_MODULE,
- .read = amdgpu_ttm_gtt_read,
- .llseek = default_llseek
-};
-
-#endif
-
/*
* amdgpu_iomem_read - Virtual read access to GPU mapped memory
*
@@ -2474,46 +2295,29 @@ static const struct file_operations amdgpu_ttm_iomem_fops = {
.llseek = default_llseek
};
-static const struct {
- char *name;
- const struct file_operations *fops;
- int domain;
-} ttm_debugfs_entries[] = {
- { "amdgpu_vram", &amdgpu_ttm_vram_fops, TTM_PL_VRAM },
-#ifdef CONFIG_DRM_AMDGPU_GART_DEBUGFS
- { "amdgpu_gtt", &amdgpu_ttm_gtt_fops, TTM_PL_TT },
-#endif
- { "amdgpu_iomem", &amdgpu_ttm_iomem_fops, TTM_PL_SYSTEM },
-};
-
#endif
-int amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
+void amdgpu_ttm_debugfs_init(struct amdgpu_device *adev)
{
#if defined(CONFIG_DEBUG_FS)
- unsigned count;
-
struct drm_minor *minor = adev_to_drm(adev)->primary;
- struct dentry *ent, *root = minor->debugfs_root;
-
- for (count = 0; count < ARRAY_SIZE(ttm_debugfs_entries); count++) {
- ent = debugfs_create_file(
- ttm_debugfs_entries[count].name,
- S_IFREG | S_IRUGO, root,
- adev,
- ttm_debugfs_entries[count].fops);
- if (IS_ERR(ent))
- return PTR_ERR(ent);
- if (ttm_debugfs_entries[count].domain == TTM_PL_VRAM)
- i_size_write(ent->d_inode, adev->gmc.mc_vram_size);
- else if (ttm_debugfs_entries[count].domain == TTM_PL_TT)
- i_size_write(ent->d_inode, adev->gmc.gart_size);
- adev->mman.debugfs_entries[count] = ent;
- }
-
- count = ARRAY_SIZE(amdgpu_ttm_debugfs_list);
- return amdgpu_debugfs_add_files(adev, amdgpu_ttm_debugfs_list, count);
-#else
- return 0;
+ struct dentry *root = minor->debugfs_root;
+
+ debugfs_create_file_size("amdgpu_vram", 0444, root, adev,
+ &amdgpu_ttm_vram_fops, adev->gmc.mc_vram_size);
+ debugfs_create_file("amdgpu_iomem", 0444, root, adev,
+ &amdgpu_ttm_iomem_fops);
+ debugfs_create_file("amdgpu_vram_mm", 0444, root, adev,
+ &amdgpu_mm_vram_table_fops);
+ debugfs_create_file("amdgpu_gtt_mm", 0444, root, adev,
+ &amdgpu_mm_tt_table_fops);
+ debugfs_create_file("amdgpu_gds_mm", 0444, root, adev,
+ &amdgpu_mm_gds_table_fops);
+ debugfs_create_file("amdgpu_gws_mm", 0444, root, adev,
+ &amdgpu_mm_gws_table_fops);
+ debugfs_create_file("amdgpu_oa_mm", 0444, root, adev,
+ &amdgpu_mm_oa_table_fops);
+ debugfs_create_file("ttm_page_pool", 0444, root, adev,
+ &amdgpu_ttm_page_pool_fops);
#endif
}