Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c  | 2134
1 file changed, 1534 insertions(+), 600 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 5afbc5e714d0..dad0e2342df9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -32,9 +32,12 @@
#include <drm/amdgpu_drm.h>
#include "amdgpu.h"
#include "amdgpu_trace.h"
+#include "amdgpu_amdkfd.h"
+#include "amdgpu_gmc.h"
-/*
- * GPUVM
+/**
+ * DOC: GPUVM
+ *
* GPUVM is similar to the legacy gart on older asics, however
* rather than there being a single global gart table
* for the entire GPU, there are multiple VM page tables active
@@ -62,33 +65,71 @@ INTERVAL_TREE_DEFINE(struct amdgpu_bo_va_mapping, rb, uint64_t, __subtree_last,
#undef START
#undef LAST
-/* Local structure. Encapsulate some VM table update parameters to reduce
+/**
+ * struct amdgpu_pte_update_params - Local structure
+ *
+ * Encapsulate some VM table update parameters to reduce
* the number of function parameters
+ *
*/
struct amdgpu_pte_update_params {
- /* amdgpu device we do this update for */
+
+ /**
+ * @adev: amdgpu device we do this update for
+ */
struct amdgpu_device *adev;
- /* optional amdgpu_vm we do this update for */
+
+ /**
+ * @vm: optional amdgpu_vm we do this update for
+ */
struct amdgpu_vm *vm;
- /* address where to copy page table entries from */
+
+ /**
+ * @src: address where to copy page table entries from
+ */
uint64_t src;
- /* indirect buffer to fill with commands */
+
+ /**
+ * @ib: indirect buffer to fill with commands
+ */
struct amdgpu_ib *ib;
- /* Function which actually does the update */
- void (*func)(struct amdgpu_pte_update_params *params, uint64_t pe,
+
+ /**
+ * @func: Function which actually does the update
+ */
+ void (*func)(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo, uint64_t pe,
uint64_t addr, unsigned count, uint32_t incr,
uint64_t flags);
- /* The next two are used during VM update by CPU
- * DMA addresses to use for mapping
- * Kernel pointer of PD/PT BO that needs to be updated
+ /**
+ * @pages_addr:
+ *
+ * DMA addresses to use for mapping, used during VM update by CPU
*/
dma_addr_t *pages_addr;
+
+ /**
+ * @kptr:
+ *
+ * Kernel pointer of PD/PT BO that needs to be updated,
+ * used during VM update by CPU
+ */
void *kptr;
};
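For context: which update callback ends up in @func depends on whether the VM is updated with the CPU or through SDMA, and whether the PTEs come from system memory. A condensed sketch of that selection, modelled on amdgpu_vm_bo_update_mapping() further down in this patch (simplified, not a verbatim excerpt; gart_src_addr is a placeholder):

	struct amdgpu_pte_update_params params;

	memset(&params, 0, sizeof(params));
	params.adev = adev;
	params.vm = vm;

	if (vm->use_cpu_for_update) {
		/* write PTEs through the kernel mapping of the PD/PT BO */
		params.pages_addr = pages_addr;
		params.func = amdgpu_vm_cpu_set_ptes;
	} else if (pages_addr) {
		/* system memory: stage PTEs in the IB and copy them via GART */
		params.src = gart_src_addr;		/* placeholder */
		params.func = amdgpu_vm_do_copy_ptes;
	} else {
		/* VRAM: let SDMA generate the PTEs directly */
		params.func = amdgpu_vm_do_set_ptes;
	}
	/* params.ib is filled in once an amdgpu_job/IB has been allocated */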
-/* Helper to disable partial resident texture feature from a fence callback */
+/**
+ * struct amdgpu_prt_cb - Helper to disable partial resident texture feature from a fence callback
+ */
struct amdgpu_prt_cb {
+
+ /**
+ * @adev: amdgpu device
+ */
struct amdgpu_device *adev;
+
+ /**
+ * @cb: callback
+ */
struct dma_fence_cb cb;
};
@@ -96,8 +137,10 @@ struct amdgpu_prt_cb {
* amdgpu_vm_level_shift - return the addr shift for each level
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Returns the number of bits the pfn needs to be right shifted for a level.
+ * Returns:
+ * The number of bits the pfn needs to be right shifted for a level.
*/
static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
unsigned level)
@@ -125,8 +168,10 @@ static unsigned amdgpu_vm_level_shift(struct amdgpu_device *adev,
* amdgpu_vm_num_entries - return the number of entries in a PD/PT
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Calculate the number of entries in a page directory or page table.
+ * Returns:
+ * The number of entries in a page directory or page table.
*/
static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
unsigned level)
@@ -146,11 +191,33 @@ static unsigned amdgpu_vm_num_entries(struct amdgpu_device *adev,
}
/**
+ * amdgpu_vm_entries_mask - the mask to get the entry number of a PD/PT
+ *
+ * @adev: amdgpu_device pointer
+ * @level: VMPT level
+ *
+ * Returns:
+ * The mask to extract the entry number of a PD/PT from an address.
+ */
+static uint32_t amdgpu_vm_entries_mask(struct amdgpu_device *adev,
+ unsigned int level)
+{
+ if (level <= adev->vm_manager.root_level)
+ return 0xffffffff;
+ else if (level != AMDGPU_VM_PTB)
+ return 0x1ff;
+ else
+ return AMDGPU_VM_PTE_COUNT(adev) - 1;
+}
+
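Together with amdgpu_vm_level_shift() above, this mask turns a page-frame number into an index into a PD/PT. A small sketch of the lookup step (essentially what amdgpu_vm_pt_descendant() below does):

	unsigned shift = amdgpu_vm_level_shift(adev, level);
	uint32_t mask = amdgpu_vm_entries_mask(adev, level);
	unsigned idx = (pfn >> shift) & mask;	/* entry covering 'pfn' */

	entry = &parent->entries[idx];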
+/**
* amdgpu_vm_bo_size - returns the size of the BOs in bytes
*
* @adev: amdgpu_device pointer
+ * @level: VMPT level
*
- * Calculate the size of the BO for a page directory or page table in bytes.
+ * Returns:
+ * The size of the BO for a page directory or page table in bytes.
*/
static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
{
@@ -158,6 +225,383 @@ static unsigned amdgpu_vm_bo_size(struct amdgpu_device *adev, unsigned level)
}
/**
+ * amdgpu_vm_bo_evicted - vm_bo is evicted
+ *
+ * @vm_bo: vm_bo which is evicted
+ *
+ * State for PDs/PTs and per VM BOs which are not at the location they should
+ * be.
+ */
+static void amdgpu_vm_bo_evicted(struct amdgpu_vm_bo_base *vm_bo)
+{
+ struct amdgpu_vm *vm = vm_bo->vm;
+ struct amdgpu_bo *bo = vm_bo->bo;
+
+ vm_bo->moved = true;
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ list_move(&vm_bo->vm_status, &vm->evicted);
+ else
+ list_move_tail(&vm_bo->vm_status, &vm->evicted);
+}
+
+/**
+ * amdgpu_vm_bo_relocated - vm_bo is relocated
+ *
+ * @vm_bo: vm_bo which is relocated
+ *
+ * State for PDs/PTs which needs to update their parent PD.
+ */
+static void amdgpu_vm_bo_relocated(struct amdgpu_vm_bo_base *vm_bo)
+{
+ list_move(&vm_bo->vm_status, &vm_bo->vm->relocated);
+}
+
+/**
+ * amdgpu_vm_bo_moved - vm_bo is moved
+ *
+ * @vm_bo: vm_bo which is moved
+ *
+ * State for per VM BOs which are moved, but that change is not yet reflected
+ * in the page tables.
+ */
+static void amdgpu_vm_bo_moved(struct amdgpu_vm_bo_base *vm_bo)
+{
+ list_move(&vm_bo->vm_status, &vm_bo->vm->moved);
+}
+
+/**
+ * amdgpu_vm_bo_idle - vm_bo is idle
+ *
+ * @vm_bo: vm_bo which is now idle
+ *
+ * State for PDs/PTs and per VM BOs which have gone through the state machine
+ * and are now idle.
+ */
+static void amdgpu_vm_bo_idle(struct amdgpu_vm_bo_base *vm_bo)
+{
+ list_move(&vm_bo->vm_status, &vm_bo->vm->idle);
+ vm_bo->moved = false;
+}
+
+/**
+ * amdgpu_vm_bo_invalidated - vm_bo is invalidated
+ *
+ * @vm_bo: vm_bo which is now invalidated
+ *
+ * State for normal BOs which are invalidated and that change is not yet reflected
+ * in the PTs.
+ */
+static void amdgpu_vm_bo_invalidated(struct amdgpu_vm_bo_base *vm_bo)
+{
+ spin_lock(&vm_bo->vm->invalidated_lock);
+ list_move(&vm_bo->vm_status, &vm_bo->vm->invalidated);
+ spin_unlock(&vm_bo->vm->invalidated_lock);
+}
+
+/**
+ * amdgpu_vm_bo_done - vm_bo is done
+ *
+ * @vm_bo: vm_bo which is now done
+ *
+ * State for normal BOs which are invalidated and that change has been updated
+ * in the PTs.
+ */
+static void amdgpu_vm_bo_done(struct amdgpu_vm_bo_base *vm_bo)
+{
+ spin_lock(&vm_bo->vm->invalidated_lock);
+ list_del_init(&vm_bo->vm_status);
+ spin_unlock(&vm_bo->vm->invalidated_lock);
+}
+
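Taken together, these helpers implement a small per-BO state machine. A rough summary of the transitions, inferred from the helpers above and their callers later in this patch (a sketch, not authoritative):

/*
 *   evicted --validate--> relocated (PDs/PTs) or moved (per-VM BOs)
 *   relocated --update_directories--> idle
 *   moved/invalidated --bo_update--> idle (per-VM BOs) or done (others)
 *   idle --BO moved or evicted again--> moved, invalidated or evicted
 */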
+/**
+ * amdgpu_vm_bo_base_init - Adds bo to the list of bos associated with the vm
+ *
+ * @base: base structure for tracking BO usage in a VM
+ * @vm: vm to which bo is to be added
+ * @bo: amdgpu buffer object
+ *
+ * Initialize an amdgpu_vm_bo_base structure and add it to the appropriate lists
+ *
+ */
+static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
+ struct amdgpu_vm *vm,
+ struct amdgpu_bo *bo)
+{
+ base->vm = vm;
+ base->bo = bo;
+ base->next = NULL;
+ INIT_LIST_HEAD(&base->vm_status);
+
+ if (!bo)
+ return;
+ base->next = bo->vm_bo;
+ bo->vm_bo = base;
+
+ if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
+ return;
+
+ vm->bulk_moveable = false;
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ amdgpu_vm_bo_relocated(base);
+ else
+ amdgpu_vm_bo_idle(base);
+
+ if (bo->preferred_domains &
+ amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
+ return;
+
+ /*
+ * we checked all the prerequisites, but it looks like this per vm bo
+ * is currently evicted. add the bo to the evicted list to make sure it
+ * is validated on next vm use to avoid fault.
+ */
+ amdgpu_vm_bo_evicted(base);
+}
+
+/**
+ * amdgpu_vm_pt_parent - get the parent page directory
+ *
+ * @pt: child page table
+ *
+ * Helper to get the parent entry for the child page table. NULL if we are at
+ * the root page directory.
+ */
+static struct amdgpu_vm_pt *amdgpu_vm_pt_parent(struct amdgpu_vm_pt *pt)
+{
+ struct amdgpu_bo *parent = pt->base.bo->parent;
+
+ if (!parent)
+ return NULL;
+
+ return container_of(parent->vm_bo, struct amdgpu_vm_pt, base);
+}
+
+/**
+ * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt
+ */
+struct amdgpu_vm_pt_cursor {
+ uint64_t pfn;
+ struct amdgpu_vm_pt *parent;
+ struct amdgpu_vm_pt *entry;
+ unsigned level;
+};
+
+/**
+ * amdgpu_vm_pt_start - start PD/PT walk
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm structure
+ * @start: start address of the walk
+ * @cursor: state to initialize
+ *
+ * Initialize a amdgpu_vm_pt_cursor to start a walk.
+ */
+static void amdgpu_vm_pt_start(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, uint64_t start,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ cursor->pfn = start;
+ cursor->parent = NULL;
+ cursor->entry = &vm->root;
+ cursor->level = adev->vm_manager.root_level;
+}
+
+/**
+ * amdgpu_vm_pt_descendant - go to child node
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk to the child node of the current node.
+ * Returns:
+ * True if the walk was possible, false otherwise.
+ */
+static bool amdgpu_vm_pt_descendant(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ unsigned mask, shift, idx;
+
+ if (!cursor->entry->entries)
+ return false;
+
+ BUG_ON(!cursor->entry->base.bo);
+ mask = amdgpu_vm_entries_mask(adev, cursor->level);
+ shift = amdgpu_vm_level_shift(adev, cursor->level);
+
+ ++cursor->level;
+ idx = (cursor->pfn >> shift) & mask;
+ cursor->parent = cursor->entry;
+ cursor->entry = &cursor->entry->entries[idx];
+ return true;
+}
+
+/**
+ * amdgpu_vm_pt_sibling - go to sibling node
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk to the sibling node of the current node.
+ * Returns:
+ * True if the walk was possible, false otherwise.
+ */
+static bool amdgpu_vm_pt_sibling(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ unsigned shift, num_entries;
+
+ /* Root doesn't have a sibling */
+ if (!cursor->parent)
+ return false;
+
+ /* Go to our parents and see if we got a sibling */
+ shift = amdgpu_vm_level_shift(adev, cursor->level - 1);
+ num_entries = amdgpu_vm_num_entries(adev, cursor->level - 1);
+
+ if (cursor->entry == &cursor->parent->entries[num_entries - 1])
+ return false;
+
+ cursor->pfn += 1ULL << shift;
+ cursor->pfn &= ~((1ULL << shift) - 1);
+ ++cursor->entry;
+ return true;
+}
+
+/**
+ * amdgpu_vm_pt_ancestor - go to parent node
+ *
+ * @cursor: current state
+ *
+ * Walk to the parent node of the current node.
+ * Returns:
+ * True if the walk was possible, false otherwise.
+ */
+static bool amdgpu_vm_pt_ancestor(struct amdgpu_vm_pt_cursor *cursor)
+{
+ if (!cursor->parent)
+ return false;
+
+ --cursor->level;
+ cursor->entry = cursor->parent;
+ cursor->parent = amdgpu_vm_pt_parent(cursor->parent);
+ return true;
+}
+
+/**
+ * amdgpu_vm_pt_next - get next PD/PT in hierarchy
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk the PD/PT tree to the next node.
+ */
+static void amdgpu_vm_pt_next(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ /* First try a newborn child */
+ if (amdgpu_vm_pt_descendant(adev, cursor))
+ return;
+
+ /* If that didn't work, try to find a sibling */
+ while (!amdgpu_vm_pt_sibling(adev, cursor)) {
+ /* No sibling, go to our parents and grandparents */
+ if (!amdgpu_vm_pt_ancestor(cursor)) {
+ cursor->pfn = ~0ll;
+ return;
+ }
+ }
+}
+
+/**
+ * amdgpu_vm_pt_first_leaf - get first leaf PD/PT
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: amdgpu_vm structure
+ * @start: start addr of the walk
+ * @cursor: state to initialize
+ *
+ * Start a walk and go directly to the leaf node.
+ */
+static void amdgpu_vm_pt_first_leaf(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, uint64_t start,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ amdgpu_vm_pt_start(adev, vm, start, cursor);
+ while (amdgpu_vm_pt_descendant(adev, cursor));
+}
+
+/**
+ * amdgpu_vm_pt_next_leaf - get next leaf PD/PT
+ *
+ * @adev: amdgpu_device pointer
+ * @cursor: current state
+ *
+ * Walk the PD/PT tree to the next leaf node.
+ */
+static void amdgpu_vm_pt_next_leaf(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ amdgpu_vm_pt_next(adev, cursor);
+ if (cursor->pfn != ~0ll)
+ while (amdgpu_vm_pt_descendant(adev, cursor));
+}
+
+/**
+ * for_each_amdgpu_vm_pt_leaf - walk over all leaf PDs/PTs in the hierarchy
+ */
+#define for_each_amdgpu_vm_pt_leaf(adev, vm, start, end, cursor) \
+ for (amdgpu_vm_pt_first_leaf((adev), (vm), (start), &(cursor)); \
+ (cursor).pfn <= end; amdgpu_vm_pt_next_leaf((adev), &(cursor)))
+
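A typical use of the leaf walk, in the style of amdgpu_vm_alloc_pts() later in this patch (simplified sketch; allocate_pt() is a placeholder):

	struct amdgpu_vm_pt_cursor cursor;

	for_each_amdgpu_vm_pt_leaf(adev, vm, start_pfn, end_pfn, cursor) {
		struct amdgpu_vm_pt *entry = cursor.entry;

		/* cursor.level and cursor.parent are valid here as well */
		if (!entry->base.bo)
			allocate_pt(adev, vm, &cursor);	/* placeholder */
	}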
+/**
+ * amdgpu_vm_pt_first_dfs - start a depth-first search
+ *
+ * @adev: amdgpu_device structure
+ * @vm: amdgpu_vm structure
+ * @cursor: state to initialize
+ *
+ * Starts a depth-first traversal of the PD/PT tree.
+ */
+static void amdgpu_vm_pt_first_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ amdgpu_vm_pt_start(adev, vm, 0, cursor);
+ while (amdgpu_vm_pt_descendant(adev, cursor));
+}
+
+/**
+ * amdgpu_vm_pt_next_dfs - get the next node for a depth-first search
+ *
+ * @adev: amdgpu_device structure
+ * @cursor: current state
+ *
+ * Move the cursor to the next node in a depth-first search.
+ */
+static void amdgpu_vm_pt_next_dfs(struct amdgpu_device *adev,
+ struct amdgpu_vm_pt_cursor *cursor)
+{
+ if (!cursor->entry)
+ return;
+
+ if (!cursor->parent)
+ cursor->entry = NULL;
+ else if (amdgpu_vm_pt_sibling(adev, cursor))
+ while (amdgpu_vm_pt_descendant(adev, cursor));
+ else
+ amdgpu_vm_pt_ancestor(cursor);
+}
+
+/**
+ * for_each_amdgpu_vm_pt_dfs_safe - safe depth-first search of all PDs/PTs
+ */
+#define for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) \
+ for (amdgpu_vm_pt_first_dfs((adev), (vm), &(cursor)), \
+ (entry) = (cursor).entry, amdgpu_vm_pt_next_dfs((adev), &(cursor));\
+ (entry); (entry) = (cursor).entry, \
+ amdgpu_vm_pt_next_dfs((adev), &(cursor)))
+
+/**
* amdgpu_vm_get_pd_bo - add the VM PD to a validation list
*
* @vm: vm providing the BOs
@@ -171,15 +615,55 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
struct list_head *validated,
struct amdgpu_bo_list_entry *entry)
{
- entry->robj = vm->root.base.bo;
entry->priority = 0;
- entry->tv.bo = &entry->robj->tbo;
+ entry->tv.bo = &vm->root.base.bo->tbo;
entry->tv.shared = true;
entry->user_pages = NULL;
list_add(&entry->tv.head, validated);
}
/**
+ * amdgpu_vm_move_to_lru_tail - move all BOs to the end of LRU
+ *
+ * @adev: amdgpu device pointer
+ * @vm: vm providing the BOs
+ *
+ * Move all BOs to the end of LRU and remember their positions to put them
+ * together.
+ */
+void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ struct ttm_bo_global *glob = adev->mman.bdev.glob;
+ struct amdgpu_vm_bo_base *bo_base;
+
+ if (vm->bulk_moveable) {
+ spin_lock(&glob->lru_lock);
+ ttm_bo_bulk_move_lru_tail(&vm->lru_bulk_move);
+ spin_unlock(&glob->lru_lock);
+ return;
+ }
+
+ memset(&vm->lru_bulk_move, 0, sizeof(vm->lru_bulk_move));
+
+ spin_lock(&glob->lru_lock);
+ list_for_each_entry(bo_base, &vm->idle, vm_status) {
+ struct amdgpu_bo *bo = bo_base->bo;
+
+ if (!bo->parent)
+ continue;
+
+ ttm_bo_move_to_lru_tail(&bo->tbo, &vm->lru_bulk_move);
+ if (bo->shadow)
+ ttm_bo_move_to_lru_tail(&bo->shadow->tbo,
+ &vm->lru_bulk_move);
+ }
+ spin_unlock(&glob->lru_lock);
+
+ vm->bulk_moveable = true;
+}
+
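The intended call site is right after command-submission validation: once everything is resident again, the whole per-VM block of BOs is moved to the LRU tail in one bulk operation instead of one ttm_bo_move_to_lru_tail() call per BO. A hypothetical caller (the surrounding CS code is outside this patch; validate_cb is a placeholder):

	r = amdgpu_vm_validate_pt_bos(adev, vm, validate_cb, ctx);
	if (r)
		return r;
	/* ... rest of CS validation ... */

	amdgpu_vm_move_to_lru_tail(adev, vm);	/* one bulk LRU move per VM */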
+/**
* amdgpu_vm_validate_pt_bos - validate the page table BOs
*
* @adev: amdgpu device pointer
@@ -188,54 +672,45 @@ void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm,
* @param: parameter for the validation callback
*
* Validate the page table BOs on command submission if necessary.
+ *
+ * Returns:
+ * Validation result.
*/
int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int (*validate)(void *p, struct amdgpu_bo *bo),
void *param)
{
- struct ttm_bo_global *glob = adev->mman.bdev.glob;
- int r;
-
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->evicted)) {
- struct amdgpu_vm_bo_base *bo_base;
- struct amdgpu_bo *bo;
+ struct amdgpu_vm_bo_base *bo_base, *tmp;
+ int r = 0;
- bo_base = list_first_entry(&vm->evicted,
- struct amdgpu_vm_bo_base,
- vm_status);
- spin_unlock(&vm->status_lock);
+ vm->bulk_moveable &= list_empty(&vm->evicted);
- bo = bo_base->bo;
- BUG_ON(!bo);
- if (bo->parent) {
- r = validate(param, bo);
- if (r)
- return r;
+ list_for_each_entry_safe(bo_base, tmp, &vm->evicted, vm_status) {
+ struct amdgpu_bo *bo = bo_base->bo;
- spin_lock(&glob->lru_lock);
- ttm_bo_move_to_lru_tail(&bo->tbo);
- if (bo->shadow)
- ttm_bo_move_to_lru_tail(&bo->shadow->tbo);
- spin_unlock(&glob->lru_lock);
- }
+ r = validate(param, bo);
+ if (r)
+ break;
- if (bo->tbo.type == ttm_bo_type_kernel &&
- vm->use_cpu_for_update) {
- r = amdgpu_bo_kmap(bo, NULL);
+ if (bo->tbo.type != ttm_bo_type_kernel) {
+ amdgpu_vm_bo_moved(bo_base);
+ } else {
+ if (vm->use_cpu_for_update)
+ r = amdgpu_bo_kmap(bo, NULL);
+ else
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
if (r)
- return r;
+ break;
+ if (bo->shadow) {
+ r = amdgpu_ttm_alloc_gart(&bo->shadow->tbo);
+ if (r)
+ break;
+ }
+ amdgpu_vm_bo_relocated(bo_base);
}
-
- spin_lock(&vm->status_lock);
- if (bo->tbo.type != ttm_bo_type_kernel)
- list_move(&bo_base->vm_status, &vm->moved);
- else
- list_move(&bo_base->vm_status, &vm->relocated);
}
- spin_unlock(&vm->status_lock);
- return 0;
+ return r;
}
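The @validate callback only has to make a BO resident again; the state bookkeeping (moved vs. relocated), GART binding and kmap for CPU updates are handled here. A minimal, hypothetical callback (the real callers live in the CS and KFD code, outside this patch):

static int example_vm_validate(void *param, struct amdgpu_bo *bo)
{
	struct ttm_operation_ctx ctx = { true, false };

	/* re-validate against the BO's current placement */
	return ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}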
/**
@@ -244,125 +719,150 @@ int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm,
* @vm: VM to check
*
* Check if all VM PDs/PTs are ready for updates
+ *
+ * Returns:
+ * True if eviction list is empty.
*/
bool amdgpu_vm_ready(struct amdgpu_vm *vm)
{
- bool ready;
-
- spin_lock(&vm->status_lock);
- ready = list_empty(&vm->evicted);
- spin_unlock(&vm->status_lock);
-
- return ready;
+ return list_empty(&vm->evicted);
}
/**
- * amdgpu_vm_alloc_levels - allocate the PD/PT levels
+ * amdgpu_vm_clear_bo - initially clear the PDs/PTs
*
* @adev: amdgpu_device pointer
- * @vm: requested vm
- * @saddr: start of the address range
- * @eaddr: end of the address range
+ * @vm: VM to clear BO from
+ * @bo: BO to clear
+ * @level: level this BO is at
+ * @pte_support_ats: indicate ATS support from PTE
*
- * Make sure the page directories and page tables are allocated
+ * Root PD needs to be reserved when calling this.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
-static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- uint64_t saddr, uint64_t eaddr,
- unsigned level)
+static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm, struct amdgpu_bo *bo,
+ unsigned level, bool pte_support_ats)
{
- unsigned shift = amdgpu_vm_level_shift(adev, level);
- unsigned pt_idx, from, to;
+ struct ttm_operation_ctx ctx = { true, false };
+ struct dma_fence *fence = NULL;
+ unsigned entries, ats_entries;
+ struct amdgpu_ring *ring;
+ struct amdgpu_job *job;
+ uint64_t addr;
int r;
- u64 flags;
- uint64_t init_value = 0;
- if (!parent->entries) {
- unsigned num_entries = amdgpu_vm_num_entries(adev, level);
+ entries = amdgpu_bo_size(bo) / 8;
- parent->entries = kvmalloc_array(num_entries,
- sizeof(struct amdgpu_vm_pt),
- GFP_KERNEL | __GFP_ZERO);
- if (!parent->entries)
- return -ENOMEM;
- memset(parent->entries, 0 , sizeof(struct amdgpu_vm_pt));
+ if (pte_support_ats) {
+ if (level == adev->vm_manager.root_level) {
+ ats_entries = amdgpu_vm_level_shift(adev, level);
+ ats_entries += AMDGPU_GPU_PAGE_SHIFT;
+ ats_entries = AMDGPU_GMC_HOLE_START >> ats_entries;
+ ats_entries = min(ats_entries, entries);
+ entries -= ats_entries;
+ } else {
+ ats_entries = entries;
+ entries = 0;
+ }
+ } else {
+ ats_entries = 0;
}
- from = saddr >> shift;
- to = eaddr >> shift;
- if (from >= amdgpu_vm_num_entries(adev, level) ||
- to >= amdgpu_vm_num_entries(adev, level))
- return -EINVAL;
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
- ++level;
- saddr = saddr & ((1 << shift) - 1);
- eaddr = eaddr & ((1 << shift) - 1);
+ r = reservation_object_reserve_shared(bo->tbo.resv);
+ if (r)
+ return r;
- flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED;
- if (vm->use_cpu_for_update)
- flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- else
- flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW);
+ r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
+ if (r)
+ goto error;
+
+ r = amdgpu_ttm_alloc_gart(&bo->tbo);
+ if (r)
+ return r;
+
+ r = amdgpu_job_alloc_with_ib(adev, 64, &job);
+ if (r)
+ goto error;
- if (vm->pte_support_ats) {
- init_value = AMDGPU_PTE_DEFAULT_ATC;
+ addr = amdgpu_bo_gpu_offset(bo);
+ if (ats_entries) {
+ uint64_t ats_value;
+
+ ats_value = AMDGPU_PTE_DEFAULT_ATC;
if (level != AMDGPU_VM_PTB)
- init_value |= AMDGPU_PDE_PTE;
+ ats_value |= AMDGPU_PDE_PTE;
+ amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
+ ats_entries, 0, ats_value);
+ addr += ats_entries * 8;
}
- /* walk over the address space and allocate the page tables */
- for (pt_idx = from; pt_idx <= to; ++pt_idx) {
- struct reservation_object *resv = vm->root.base.bo->tbo.resv;
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
- struct amdgpu_bo *pt;
+ if (entries)
+ amdgpu_vm_set_pte_pde(adev, &job->ibs[0], addr, 0,
+ entries, 0, 0);
- if (!entry->base.bo) {
- r = amdgpu_bo_create(adev,
- amdgpu_vm_bo_size(adev, level),
- AMDGPU_GPU_PAGE_SIZE, true,
- AMDGPU_GEM_DOMAIN_VRAM,
- flags,
- NULL, resv, init_value, &pt);
- if (r)
- return r;
-
- if (vm->use_cpu_for_update) {
- r = amdgpu_bo_kmap(pt, NULL);
- if (r) {
- amdgpu_bo_unref(&pt);
- return r;
- }
- }
+ amdgpu_ring_pad_ib(ring, &job->ibs[0]);
- /* Keep a reference to the root directory to avoid
- * freeing them up in the wrong order.
- */
- pt->parent = amdgpu_bo_ref(parent->base.bo);
-
- entry->base.vm = vm;
- entry->base.bo = pt;
- list_add_tail(&entry->base.bo_list, &pt->va);
- spin_lock(&vm->status_lock);
- list_add(&entry->base.vm_status, &vm->relocated);
- spin_unlock(&vm->status_lock);
- }
+ WARN_ON(job->ibs[0].length_dw > 64);
+ r = amdgpu_sync_resv(adev, &job->sync, bo->tbo.resv,
+ AMDGPU_FENCE_OWNER_UNDEFINED, false);
+ if (r)
+ goto error_free;
- if (level < AMDGPU_VM_PTB) {
- uint64_t sub_saddr = (pt_idx == from) ? saddr : 0;
- uint64_t sub_eaddr = (pt_idx == to) ? eaddr :
- ((1 << shift) - 1);
- r = amdgpu_vm_alloc_levels(adev, vm, entry, sub_saddr,
- sub_eaddr, level);
- if (r)
- return r;
- }
- }
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_UNDEFINED,
+ &fence);
+ if (r)
+ goto error_free;
+
+ amdgpu_bo_fence(bo, fence, true);
+ dma_fence_put(fence);
+
+ if (bo->shadow)
+ return amdgpu_vm_clear_bo(adev, vm, bo->shadow,
+ level, pte_support_ats);
return 0;
+
+error_free:
+ amdgpu_job_free(job);
+
+error:
+ return r;
+}
+
+/**
+ * amdgpu_vm_bo_param - fill in parameters for PD/PT allocation
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: requesting vm
+ * @level: the page directory or page table level
+ * @bp: resulting BO allocation parameters
+ */
+static void amdgpu_vm_bo_param(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+ int level, struct amdgpu_bo_param *bp)
+{
+ memset(bp, 0, sizeof(*bp));
+
+ bp->size = amdgpu_vm_bo_size(adev, level);
+ bp->byte_align = AMDGPU_GPU_PAGE_SIZE;
+ bp->domain = AMDGPU_GEM_DOMAIN_VRAM;
+ if (bp->size <= PAGE_SIZE && adev->asic_type >= CHIP_VEGA10 &&
+ adev->flags & AMD_IS_APU)
+ bp->domain |= AMDGPU_GEM_DOMAIN_GTT;
+ bp->domain = amdgpu_bo_get_preferred_pin_domain(adev, bp->domain);
+ bp->flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
+ AMDGPU_GEM_CREATE_CPU_GTT_USWC;
+ if (vm->use_cpu_for_update)
+ bp->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
+ else if (!vm->root.base.bo || vm->root.base.bo->shadow)
+ bp->flags |= AMDGPU_GEM_CREATE_SHADOW;
+ bp->type = ttm_bo_type_kernel;
+ if (vm->root.base.bo)
+ bp->resv = vm->root.base.bo->tbo.resv;
}
/**
@@ -373,32 +873,117 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
* @saddr: Start address which needs to be allocated
* @size: Size from start address we need.
*
- * Make sure the page tables are allocated.
+ * Make sure the page directories and page tables are allocated
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
int amdgpu_vm_alloc_pts(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
uint64_t saddr, uint64_t size)
{
- uint64_t last_pfn;
+ struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_bo *pt;
+ bool ats = false;
uint64_t eaddr;
+ int r;
/* validate the parameters */
if (saddr & AMDGPU_GPU_PAGE_MASK || size & AMDGPU_GPU_PAGE_MASK)
return -EINVAL;
eaddr = saddr + size - 1;
- last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
- if (last_pfn >= adev->vm_manager.max_pfn) {
+
+ if (vm->pte_support_ats)
+ ats = saddr < AMDGPU_GMC_HOLE_START;
+
+ saddr /= AMDGPU_GPU_PAGE_SIZE;
+ eaddr /= AMDGPU_GPU_PAGE_SIZE;
+
+ if (eaddr >= adev->vm_manager.max_pfn) {
dev_err(adev->dev, "va above limit (0x%08llX >= 0x%08llX)\n",
- last_pfn, adev->vm_manager.max_pfn);
+ eaddr, adev->vm_manager.max_pfn);
return -EINVAL;
}
- saddr /= AMDGPU_GPU_PAGE_SIZE;
- eaddr /= AMDGPU_GPU_PAGE_SIZE;
+ for_each_amdgpu_vm_pt_leaf(adev, vm, saddr, eaddr, cursor) {
+ struct amdgpu_vm_pt *entry = cursor.entry;
+ struct amdgpu_bo_param bp;
+
+ if (cursor.level < AMDGPU_VM_PTB) {
+ unsigned num_entries;
+
+ num_entries = amdgpu_vm_num_entries(adev, cursor.level);
+ entry->entries = kvmalloc_array(num_entries,
+ sizeof(*entry->entries),
+ GFP_KERNEL |
+ __GFP_ZERO);
+ if (!entry->entries)
+ return -ENOMEM;
+ }
+
+
+ if (entry->base.bo)
+ continue;
+
+ amdgpu_vm_bo_param(adev, vm, cursor.level, &bp);
- return amdgpu_vm_alloc_levels(adev, vm, &vm->root, saddr, eaddr,
- adev->vm_manager.root_level);
+ r = amdgpu_bo_create(adev, &bp, &pt);
+ if (r)
+ return r;
+
+ r = amdgpu_vm_clear_bo(adev, vm, pt, cursor.level, ats);
+ if (r)
+ goto error_free_pt;
+
+ if (vm->use_cpu_for_update) {
+ r = amdgpu_bo_kmap(pt, NULL);
+ if (r)
+ goto error_free_pt;
+ }
+
+ /* Keep a reference to the root directory to avoid
+ * freeing them up in the wrong order.
+ */
+ pt->parent = amdgpu_bo_ref(cursor.parent->base.bo);
+
+ amdgpu_vm_bo_base_init(&entry->base, vm, pt);
+ }
+
+ return 0;
+
+error_free_pt:
+ amdgpu_bo_unref(&pt->shadow);
+ amdgpu_bo_unref(&pt);
+ return r;
+}
+
+/**
+ * amdgpu_vm_free_pts - free PD/PT levels
+ *
+ * @adev: amdgpu device structure
+ * @vm: amdgpu vm structure
+ *
+ * Free the page directory or page table level and all sub levels.
+ */
+static void amdgpu_vm_free_pts(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
+{
+ struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_pt *entry;
+
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry) {
+
+ if (entry->base.bo) {
+ entry->base.bo->vm_bo = NULL;
+ list_del(&entry->base.vm_status);
+ amdgpu_bo_unref(&entry->base.bo->shadow);
+ amdgpu_bo_unref(&entry->base.bo);
+ }
+ kvfree(entry->entries);
+ }
+
+ BUG_ON(vm->root.base.bo);
}
/**
@@ -436,6 +1021,15 @@ void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev)
}
}
+/**
+ * amdgpu_vm_need_pipeline_sync - Check if pipe sync is needed for job.
+ *
+ * @ring: ring on which the job will be submitted
+ * @job: job to submit
+ *
+ * Returns:
+ * True if sync is needed.
+ */
bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
struct amdgpu_job *job)
{
@@ -463,19 +1057,17 @@ bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring,
return vm_flush_needed || gds_switch_needed;
}
-static bool amdgpu_vm_is_large_bar(struct amdgpu_device *adev)
-{
- return (adev->mc.real_vram_size == adev->mc.visible_vram_size);
-}
-
/**
* amdgpu_vm_flush - hardware flush the vm
*
* @ring: ring to use for flush
- * @vmid: vmid number to use
- * @pd_addr: address of the page directory
+ * @job: related job
+ * @need_pipe_sync: is pipe sync needed
*
* Emit a VM flush when it is necessary.
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
*/
int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync)
{
@@ -491,14 +1083,25 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
id->oa_base != job->oa_base ||
id->oa_size != job->oa_size);
bool vm_flush_needed = job->vm_needs_flush;
+ bool pasid_mapping_needed = id->pasid != job->pasid ||
+ !id->pasid_mapping ||
+ !dma_fence_is_signaled(id->pasid_mapping);
+ struct dma_fence *fence = NULL;
unsigned patch_offset = 0;
int r;
if (amdgpu_vmid_had_gpu_reset(adev, id)) {
gds_switch_needed = true;
vm_flush_needed = true;
+ pasid_mapping_needed = true;
}
+ gds_switch_needed &= !!ring->funcs->emit_gds_switch;
+ vm_flush_needed &= !!ring->funcs->emit_vm_flush &&
+ job->vm_pd_addr != AMDGPU_BO_INVALID_OFFSET;
+ pasid_mapping_needed &= adev->gmc.gmc_funcs->emit_pasid_mapping &&
+ ring->funcs->emit_wreg;
+
if (!vm_flush_needed && !gds_switch_needed && !need_pipe_sync)
return 0;
@@ -508,23 +1111,36 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
if (need_pipe_sync)
amdgpu_ring_emit_pipeline_sync(ring);
- if (ring->funcs->emit_vm_flush && vm_flush_needed) {
- struct dma_fence *fence;
-
+ if (vm_flush_needed) {
trace_amdgpu_vm_flush(ring, job->vmid, job->vm_pd_addr);
amdgpu_ring_emit_vm_flush(ring, job->vmid, job->vm_pd_addr);
+ }
+
+ if (pasid_mapping_needed)
+ amdgpu_gmc_emit_pasid_mapping(ring, job->vmid, job->pasid);
- r = amdgpu_fence_emit(ring, &fence);
+ if (vm_flush_needed || pasid_mapping_needed) {
+ r = amdgpu_fence_emit(ring, &fence, 0);
if (r)
return r;
+ }
+ if (vm_flush_needed) {
mutex_lock(&id_mgr->lock);
dma_fence_put(id->last_flush);
- id->last_flush = fence;
- id->current_gpu_reset_count = atomic_read(&adev->gpu_reset_counter);
+ id->last_flush = dma_fence_get(fence);
+ id->current_gpu_reset_count =
+ atomic_read(&adev->gpu_reset_counter);
mutex_unlock(&id_mgr->lock);
}
+ if (pasid_mapping_needed) {
+ id->pasid = job->pasid;
+ dma_fence_put(id->pasid_mapping);
+ id->pasid_mapping = dma_fence_get(fence);
+ }
+ dma_fence_put(fence);
+
if (ring->funcs->emit_gds_switch && gds_switch_needed) {
id->gds_base = job->gds_base;
id->gds_size = job->gds_size;
@@ -560,16 +1176,20 @@ int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_
* Returns the found bo_va or NULL if none is found
*
* Object has to be reserved!
+ *
+ * Returns:
+ * Found bo_va or NULL.
*/
struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
struct amdgpu_bo *bo)
{
- struct amdgpu_bo_va *bo_va;
+ struct amdgpu_vm_bo_base *base;
- list_for_each_entry(bo_va, &bo->va, base.bo_list) {
- if (bo_va->base.vm == vm) {
- return bo_va;
- }
+ for (base = bo->vm_bo; base; base = base->next) {
+ if (base->vm != vm)
+ continue;
+
+ return container_of(base, struct amdgpu_bo_va, base);
}
return NULL;
}
@@ -578,6 +1198,7 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* amdgpu_vm_do_set_ptes - helper to call the right asic function
*
* @params: see amdgpu_pte_update_params definition
+ * @bo: PD/PT to update
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -588,10 +1209,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
* to setup the page table using the DMA.
*/
static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint64_t flags)
{
+ pe += amdgpu_bo_gpu_offset(bo);
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
if (count < 3) {
@@ -608,6 +1231,7 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
* amdgpu_vm_do_copy_ptes - copy the PTEs from the GART
*
* @params: see amdgpu_pte_update_params definition
+ * @bo: PD/PT to update
* @pe: addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -617,13 +1241,14 @@ static void amdgpu_vm_do_set_ptes(struct amdgpu_pte_update_params *params,
* Traces the parameters and calls the DMA function to copy the PTEs.
*/
static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint64_t flags)
{
uint64_t src = (params->src + (addr >> 12) * 8);
-
+ pe += amdgpu_bo_gpu_offset(bo);
trace_amdgpu_vm_copy_ptes(pe, src, count);
amdgpu_vm_copy_pte(params->adev, params->ib, pe, src, count);
@@ -636,7 +1261,10 @@ static void amdgpu_vm_do_copy_ptes(struct amdgpu_pte_update_params *params,
* @addr: the unmapped addr
*
* Look up the physical address of the page that the pte resolves
- * to and return the pointer for the page table entry.
+ * to.
+ *
+ * Returns:
+ * The pointer for the page table entry.
*/
static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
{
@@ -657,6 +1285,7 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
* amdgpu_vm_cpu_set_ptes - helper to update page tables via CPU
*
* @params: see amdgpu_pte_update_params definition
+ * @bo: PD/PT to update
* @pe: kmap addr of the page entry
* @addr: dst addr to write into pe
* @count: number of page entries to update
@@ -666,6 +1295,7 @@ static uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr)
* Write count number of PT/PD entries directly.
*/
static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo,
uint64_t pe, uint64_t addr,
unsigned count, uint32_t incr,
uint64_t flags)
@@ -673,18 +1303,31 @@ static void amdgpu_vm_cpu_set_ptes(struct amdgpu_pte_update_params *params,
unsigned int i;
uint64_t value;
+ pe += (unsigned long)amdgpu_bo_kptr(bo);
+
trace_amdgpu_vm_set_ptes(pe, addr, count, incr, flags);
for (i = 0; i < count; i++) {
value = params->pages_addr ?
amdgpu_vm_map_gart(params->pages_addr, addr) :
addr;
- amdgpu_gart_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
- i, value, flags);
+ amdgpu_gmc_set_pte_pde(params->adev, (void *)(uintptr_t)pe,
+ i, value, flags);
addr += incr;
}
}
+
+/**
+ * amdgpu_vm_wait_pd - Wait for PT BOs to be free.
+ *
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
+ * @owner: fence owner
+ *
+ * Returns:
+ * 0 on success, errno otherwise.
+ */
static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
void *owner)
{
@@ -699,6 +1342,22 @@ static int amdgpu_vm_wait_pd(struct amdgpu_device *adev, struct amdgpu_vm *vm,
return r;
}
+/**
+ * amdgpu_vm_update_func - helper to call update function
+ *
+ * Calls the update function for both the given BO as well as its shadow.
+ */
+static void amdgpu_vm_update_func(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo,
+ uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr,
+ uint64_t flags)
+{
+ if (bo->shadow)
+ params->func(params, bo->shadow, pe, addr, count, incr, flags);
+ params->func(params, bo, pe, addr, count, incr, flags);
+}
+
/*
* amdgpu_vm_update_pde - update a single level in the hierarchy
*
@@ -714,8 +1373,7 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
struct amdgpu_vm_pt *parent,
struct amdgpu_vm_pt *entry)
{
- struct amdgpu_bo *bo = entry->base.bo, *shadow = NULL, *pbo;
- uint64_t pd_addr, shadow_addr = 0;
+ struct amdgpu_bo *bo = parent->base.bo, *pbo;
uint64_t pde, pt, flags;
unsigned level;
@@ -723,62 +1381,32 @@ static void amdgpu_vm_update_pde(struct amdgpu_pte_update_params *params,
if (entry->huge)
return;
- if (vm->use_cpu_for_update) {
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
- } else {
- pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
- shadow = parent->base.bo->shadow;
- if (shadow)
- shadow_addr = amdgpu_bo_gpu_offset(shadow);
- }
-
- for (level = 0, pbo = parent->base.bo->parent; pbo; ++level)
+ for (level = 0, pbo = bo->parent; pbo; ++level)
pbo = pbo->parent;
level += params->adev->vm_manager.root_level;
- pt = amdgpu_bo_gpu_offset(bo);
- flags = AMDGPU_PTE_VALID;
- amdgpu_gart_get_vm_pde(params->adev, level, &pt, &flags);
- if (shadow) {
- pde = shadow_addr + (entry - parent->entries) * 8;
- params->func(params, pde, pt, 1, 0, flags);
- }
-
- pde = pd_addr + (entry - parent->entries) * 8;
- params->func(params, pde, pt, 1, 0, flags);
+ amdgpu_gmc_get_pde_for_bo(entry->base.bo, level, &pt, &flags);
+ pde = (entry - parent->entries) * 8;
+ amdgpu_vm_update_func(params, bo, pde, pt, 1, 0, flags);
}
/*
- * amdgpu_vm_invalidate_level - mark all PD levels as invalid
+ * amdgpu_vm_invalidate_pds - mark all PDs as invalid
*
- * @parent: parent PD
+ * @adev: amdgpu_device pointer
+ * @vm: related vm
*
* Mark all PD levels as invalid after an error.
*/
-static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
- struct amdgpu_vm *vm,
- struct amdgpu_vm_pt *parent,
- unsigned level)
+static void amdgpu_vm_invalidate_pds(struct amdgpu_device *adev,
+ struct amdgpu_vm *vm)
{
- unsigned pt_idx, num_entries;
-
- /*
- * Recurse into the subdirectories. This recursion is harmless because
- * we only have a maximum of 5 layers.
- */
- num_entries = amdgpu_vm_num_entries(adev, level);
- for (pt_idx = 0; pt_idx < num_entries; ++pt_idx) {
- struct amdgpu_vm_pt *entry = &parent->entries[pt_idx];
-
- if (!entry->base.bo)
- continue;
+ struct amdgpu_vm_pt_cursor cursor;
+ struct amdgpu_vm_pt *entry;
- spin_lock(&vm->status_lock);
- if (list_empty(&entry->base.vm_status))
- list_add(&entry->base.vm_status, &vm->relocated);
- spin_unlock(&vm->status_lock);
- amdgpu_vm_invalidate_level(adev, vm, entry, level + 1);
- }
+ for_each_amdgpu_vm_pt_dfs_safe(adev, vm, cursor, entry)
+ if (entry->base.bo && !entry->base.moved)
+ amdgpu_vm_bo_relocated(&entry->base);
}
/*
@@ -788,7 +1416,9 @@ static void amdgpu_vm_invalidate_level(struct amdgpu_device *adev,
* @vm: requested vm
*
* Makes sure all directories are up to date.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_update_directories(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
@@ -821,42 +1451,28 @@ restart:
params.func = amdgpu_vm_do_set_ptes;
}
- spin_lock(&vm->status_lock);
while (!list_empty(&vm->relocated)) {
- struct amdgpu_vm_bo_base *bo_base, *parent;
struct amdgpu_vm_pt *pt, *entry;
- struct amdgpu_bo *bo;
- bo_base = list_first_entry(&vm->relocated,
- struct amdgpu_vm_bo_base,
- vm_status);
- list_del_init(&bo_base->vm_status);
- spin_unlock(&vm->status_lock);
+ entry = list_first_entry(&vm->relocated, struct amdgpu_vm_pt,
+ base.vm_status);
+ amdgpu_vm_bo_idle(&entry->base);
- bo = bo_base->bo->parent;
- if (!bo) {
- spin_lock(&vm->status_lock);
+ pt = amdgpu_vm_pt_parent(entry);
+ if (!pt)
continue;
- }
-
- parent = list_first_entry(&bo->va, struct amdgpu_vm_bo_base,
- bo_list);
- pt = container_of(parent, struct amdgpu_vm_pt, base);
- entry = container_of(bo_base, struct amdgpu_vm_pt, base);
amdgpu_vm_update_pde(&params, vm, pt, entry);
- spin_lock(&vm->status_lock);
if (!vm->use_cpu_for_update &&
(ndw - params.ib->length_dw) < 32)
break;
}
- spin_unlock(&vm->status_lock);
if (vm->use_cpu_for_update) {
/* Flush HDP */
mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ amdgpu_asic_flush_hdp(adev, NULL);
} else if (params.ib->length_dw == 0) {
amdgpu_job_free(job);
} else {
@@ -864,20 +1480,15 @@ restart:
struct amdgpu_ring *ring;
struct dma_fence *fence;
- ring = container_of(vm->entity.sched, struct amdgpu_ring,
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
sched);
amdgpu_ring_pad_ib(ring, params.ib);
amdgpu_sync_resv(adev, &job->sync, root->tbo.resv,
AMDGPU_FENCE_OWNER_VM, false);
- if (root->shadow)
- amdgpu_sync_resv(adev, &job->sync,
- root->shadow->tbo.resv,
- AMDGPU_FENCE_OWNER_VM, false);
-
WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &fence);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM,
+ &fence);
if (r)
goto error;
@@ -892,226 +1503,208 @@ restart:
return 0;
error:
- amdgpu_vm_invalidate_level(adev, vm, &vm->root,
- adev->vm_manager.root_level);
+ amdgpu_vm_invalidate_pds(adev, vm);
amdgpu_job_free(job);
return r;
}
/**
- * amdgpu_vm_find_entry - find the entry for an address
- *
- * @p: see amdgpu_pte_update_params definition
- * @addr: virtual address in question
- * @entry: resulting entry or NULL
- * @parent: parent entry
+ * amdgpu_vm_update_huge - figure out parameters for PTE updates
*
- * Find the vm_pt entry and it's parent for the given address.
+ * Make sure to set the right flags for the PTEs at the desired level.
*/
-void amdgpu_vm_get_entry(struct amdgpu_pte_update_params *p, uint64_t addr,
- struct amdgpu_vm_pt **entry,
- struct amdgpu_vm_pt **parent)
-{
- unsigned level = p->adev->vm_manager.root_level;
-
- *parent = NULL;
- *entry = &p->vm->root;
- while ((*entry)->entries) {
- unsigned shift = amdgpu_vm_level_shift(p->adev, level++);
+static void amdgpu_vm_update_huge(struct amdgpu_pte_update_params *params,
+ struct amdgpu_bo *bo, unsigned level,
+ uint64_t pe, uint64_t addr,
+ unsigned count, uint32_t incr,
+ uint64_t flags)
- *parent = *entry;
- *entry = &(*entry)->entries[addr >> shift];
- addr &= (1ULL << shift) - 1;
+{
+ if (level != AMDGPU_VM_PTB) {
+ flags |= AMDGPU_PDE_PTE;
+ amdgpu_gmc_get_vm_pde(params->adev, level, &addr, &flags);
}
- if (level != AMDGPU_VM_PTB)
- *entry = NULL;
+ amdgpu_vm_update_func(params, bo, pe, addr, count, incr, flags);
}
/**
- * amdgpu_vm_handle_huge_pages - handle updating the PD with huge pages
+ * amdgpu_vm_fragment - get fragment for PTEs
*
- * @p: see amdgpu_pte_update_params definition
- * @entry: vm_pt entry to check
- * @parent: parent entry
- * @nptes: number of PTEs updated with this operation
- * @dst: destination address where the PTEs should point to
- * @flags: access flags fro the PTEs
+ * @params: see amdgpu_pte_update_params definition
+ * @start: first PTE to handle
+ * @end: last PTE to handle
+ * @flags: hw mapping flags
+ * @frag: resulting fragment size
+ * @frag_end: end of this fragment
*
- * Check if we can update the PD with a huge page.
+ * Returns the first possible fragment for the start and end address.
*/
-static void amdgpu_vm_handle_huge_pages(struct amdgpu_pte_update_params *p,
- struct amdgpu_vm_pt *entry,
- struct amdgpu_vm_pt *parent,
- unsigned nptes, uint64_t dst,
- uint64_t flags)
+static void amdgpu_vm_fragment(struct amdgpu_pte_update_params *params,
+ uint64_t start, uint64_t end, uint64_t flags,
+ unsigned int *frag, uint64_t *frag_end)
{
- uint64_t pd_addr, pde;
+ /**
+ * The MC L1 TLB supports variable sized pages, based on a fragment
+ * field in the PTE. When this field is set to a non-zero value, page
+ * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
+ * flags are considered valid for all PTEs within the fragment range
+ * and corresponding mappings are assumed to be physically contiguous.
+ *
+ * The L1 TLB can store a single PTE for the whole fragment,
+ * significantly increasing the space available for translation
+ * caching. This leads to large improvements in throughput when the
+ * TLB is under pressure.
+ *
+ * The L2 TLB distributes small and large fragments into two
+ * asymmetric partitions. The large fragment cache is significantly
+ * larger. Thus, we try to use large fragments wherever possible.
+ * Userspace can support this by aligning virtual base address and
+ * allocation size to the fragment size.
+ *
+ * Starting with Vega10 the fragment size only controls the L1. The L2
+ * is now directly fed with small/huge/giant pages from the walker.
+ */
+ unsigned max_frag;
- /* In the case of a mixed PT the PDE must point to it*/
- if (p->adev->asic_type >= CHIP_VEGA10 && !p->src &&
- nptes == AMDGPU_VM_PTE_COUNT(p->adev)) {
- /* Set the huge page flag to stop scanning at this PDE */
- flags |= AMDGPU_PDE_PTE;
- }
+ if (params->adev->asic_type < CHIP_VEGA10)
+ max_frag = params->adev->vm_manager.fragment_size;
+ else
+ max_frag = 31;
- if (!(flags & AMDGPU_PDE_PTE)) {
- if (entry->huge) {
- /* Add the entry to the relocated list to update it. */
- entry->huge = false;
- spin_lock(&p->vm->status_lock);
- list_move(&entry->base.vm_status, &p->vm->relocated);
- spin_unlock(&p->vm->status_lock);
- }
+ /* system pages are not physically contiguous */
+ if (params->src) {
+ *frag = 0;
+ *frag_end = end;
return;
}
- entry->huge = true;
- amdgpu_gart_get_vm_pde(p->adev, AMDGPU_VM_PDB0,
- &dst, &flags);
-
- if (p->func == amdgpu_vm_cpu_set_ptes) {
- pd_addr = (unsigned long)amdgpu_bo_kptr(parent->base.bo);
+ /* This intentionally wraps around if no bit is set */
+ *frag = min((unsigned)ffs(start) - 1, (unsigned)fls64(end - start) - 1);
+ if (*frag >= max_frag) {
+ *frag = max_frag;
+ *frag_end = end & ~((1ULL << max_frag) - 1);
} else {
- if (parent->base.bo->shadow) {
- pd_addr = amdgpu_bo_gpu_offset(parent->base.bo->shadow);
- pde = pd_addr + (entry - parent->entries) * 8;
- p->func(p, pde, dst, 1, 0, flags);
- }
- pd_addr = amdgpu_bo_gpu_offset(parent->base.bo);
+ *frag_end = start + (1 << *frag);
}
- pde = pd_addr + (entry - parent->entries) * 8;
- p->func(p, pde, dst, 1, 0, flags);
}
/**
* amdgpu_vm_update_ptes - make sure that page tables are valid
*
* @params: see amdgpu_pte_update_params definition
- * @vm: requested vm
* @start: start of GPU address range
* @end: end of GPU address range
* @dst: destination address to map to, the next dst inside the function
* @flags: mapping flags
*
* Update the page tables in the range @start - @end.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_update_ptes(struct amdgpu_pte_update_params *params,
- uint64_t start, uint64_t end,
- uint64_t dst, uint64_t flags)
+ uint64_t start, uint64_t end,
+ uint64_t dst, uint64_t flags)
{
struct amdgpu_device *adev = params->adev;
- const uint64_t mask = AMDGPU_VM_PTE_COUNT(adev) - 1;
+ struct amdgpu_vm_pt_cursor cursor;
+ uint64_t frag_start = start, frag_end;
+ unsigned int frag;
- uint64_t addr, pe_start;
- struct amdgpu_bo *pt;
- unsigned nptes;
- bool use_cpu_update = (params->func == amdgpu_vm_cpu_set_ptes);
+ /* figure out the initial fragment */
+ amdgpu_vm_fragment(params, frag_start, end, flags, &frag, &frag_end);
- /* walk over the address space and update the page tables */
- for (addr = start; addr < end; addr += nptes,
- dst += nptes * AMDGPU_GPU_PAGE_SIZE) {
- struct amdgpu_vm_pt *entry, *parent;
+ /* walk over the address space and update the PTs */
+ amdgpu_vm_pt_start(adev, params->vm, start, &cursor);
+ while (cursor.pfn < end) {
+ struct amdgpu_bo *pt = cursor.entry->base.bo;
+ unsigned shift, parent_shift, mask;
+ uint64_t incr, entry_end, pe_start;
- amdgpu_vm_get_entry(params, addr, &entry, &parent);
- if (!entry)
+ if (!pt)
return -ENOENT;
- if ((addr & ~mask) == (end & ~mask))
- nptes = end - addr;
- else
- nptes = AMDGPU_VM_PTE_COUNT(adev) - (addr & mask);
-
- amdgpu_vm_handle_huge_pages(params, entry, parent,
- nptes, dst, flags);
- /* We don't need to update PTEs for huge pages */
- if (entry->huge)
+ /* The root level can't be a huge page */
+ if (cursor.level == adev->vm_manager.root_level) {
+ if (!amdgpu_vm_pt_descendant(adev, &cursor))
+ return -ENOENT;
continue;
+ }
- pt = entry->base.bo;
- if (use_cpu_update) {
- pe_start = (unsigned long)amdgpu_bo_kptr(pt);
- } else {
- if (pt->shadow) {
- pe_start = amdgpu_bo_gpu_offset(pt->shadow);
- pe_start += (addr & mask) * 8;
- params->func(params, pe_start, dst, nptes,
- AMDGPU_GPU_PAGE_SIZE, flags);
- }
- pe_start = amdgpu_bo_gpu_offset(pt);
+ /* If it isn't already handled it can't be a huge page */
+ if (cursor.entry->huge) {
+ /* Add the entry to the relocated list to update it. */
+ cursor.entry->huge = false;
+ amdgpu_vm_bo_relocated(&cursor.entry->base);
}
- pe_start += (addr & mask) * 8;
- params->func(params, pe_start, dst, nptes,
- AMDGPU_GPU_PAGE_SIZE, flags);
- }
+ shift = amdgpu_vm_level_shift(adev, cursor.level);
+ parent_shift = amdgpu_vm_level_shift(adev, cursor.level - 1);
+ if (adev->asic_type < CHIP_VEGA10) {
+ /* No huge page support before GMC v9 */
+ if (cursor.level != AMDGPU_VM_PTB) {
+ if (!amdgpu_vm_pt_descendant(adev, &cursor))
+ return -ENOENT;
+ continue;
+ }
+ } else if (frag < shift) {
+ /* We can't use this level when the fragment size is
+ * smaller than the address shift. Go to the next
+ * child entry and try again.
+ */
+ if (!amdgpu_vm_pt_descendant(adev, &cursor))
+ return -ENOENT;
+ continue;
+ } else if (frag >= parent_shift) {
+ /* If the fragment size is even larger than the parent
+ * shift we should go up one level and check it again.
+ */
+ if (!amdgpu_vm_pt_ancestor(&cursor))
+ return -ENOENT;
+ continue;
+ }
- return 0;
-}
+ /* Looks good so far, calculate parameters for the update */
+ incr = AMDGPU_GPU_PAGE_SIZE << shift;
+ mask = amdgpu_vm_entries_mask(adev, cursor.level);
+ pe_start = ((cursor.pfn >> shift) & mask) * 8;
+ entry_end = (mask + 1) << shift;
+ entry_end += cursor.pfn & ~(entry_end - 1);
+ entry_end = min(entry_end, end);
+
+ do {
+ uint64_t upd_end = min(entry_end, frag_end);
+ unsigned nptes = (upd_end - frag_start) >> shift;
+
+ amdgpu_vm_update_huge(params, pt, cursor.level,
+ pe_start, dst, nptes, incr,
+ flags | AMDGPU_PTE_FRAG(frag));
+
+ pe_start += nptes * 8;
+ dst += nptes * AMDGPU_GPU_PAGE_SIZE << shift;
+
+ frag_start = upd_end;
+ if (frag_start >= frag_end) {
+ /* figure out the next fragment */
+ amdgpu_vm_fragment(params, frag_start, end,
+ flags, &frag, &frag_end);
+ if (frag < shift)
+ break;
+ }
+ } while (frag_start < entry_end);
-/*
- * amdgpu_vm_frag_ptes - add fragment information to PTEs
- *
- * @params: see amdgpu_pte_update_params definition
- * @vm: requested vm
- * @start: first PTE to handle
- * @end: last PTE to handle
- * @dst: addr those PTEs should point to
- * @flags: hw mapping flags
- * Returns 0 for success, -EINVAL for failure.
- */
-static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
- uint64_t start, uint64_t end,
- uint64_t dst, uint64_t flags)
-{
- /**
- * The MC L1 TLB supports variable sized pages, based on a fragment
- * field in the PTE. When this field is set to a non-zero value, page
- * granularity is increased from 4KB to (1 << (12 + frag)). The PTE
- * flags are considered valid for all PTEs within the fragment range
- * and corresponding mappings are assumed to be physically contiguous.
- *
- * The L1 TLB can store a single PTE for the whole fragment,
- * significantly increasing the space available for translation
- * caching. This leads to large improvements in throughput when the
- * TLB is under pressure.
- *
- * The L2 TLB distributes small and large fragments into two
- * asymmetric partitions. The large fragment cache is significantly
- * larger. Thus, we try to use large fragments wherever possible.
- * Userspace can support this by aligning virtual base address and
- * allocation size to the fragment size.
- */
- unsigned max_frag = params->adev->vm_manager.fragment_size;
- int r;
+ if (amdgpu_vm_pt_descendant(adev, &cursor)) {
+ /* Mark all child entries as huge */
+ while (cursor.pfn < frag_start) {
+ cursor.entry->huge = true;
+ amdgpu_vm_pt_next(adev, &cursor);
+ }
- /* system pages are non continuously */
- if (params->src || !(flags & AMDGPU_PTE_VALID))
- return amdgpu_vm_update_ptes(params, start, end, dst, flags);
-
- while (start != end) {
- uint64_t frag_flags, frag_end;
- unsigned frag;
-
- /* This intentionally wraps around if no bit is set */
- frag = min((unsigned)ffs(start) - 1,
- (unsigned)fls64(end - start) - 1);
- if (frag >= max_frag) {
- frag_flags = AMDGPU_PTE_FRAG(max_frag);
- frag_end = end & ~((1ULL << max_frag) - 1);
- } else {
- frag_flags = AMDGPU_PTE_FRAG(frag);
- frag_end = start + (1 << frag);
+ } else if (frag >= shift) {
+ /* or just move on to the next on the same level. */
+ amdgpu_vm_pt_next(adev, &cursor);
}
-
- r = amdgpu_vm_update_ptes(params, start, frag_end, dst,
- flags | frag_flags);
- if (r)
- return r;
-
- dst += (frag_end - start) * AMDGPU_GPU_PAGE_SIZE;
- start = frag_end;
}
return 0;
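For the 8 MiB fragment from the example above, the level-selection rules in this walker resolve roughly as follows (assuming the common 9-bit block size, i.e. shifts of 18/9/0 for PDB1/PDB0/PTB; a sketch, not part of the patch):

	/*   root (PDB2): always descended, the root can't be a huge page
	 *   PDB1: frag(11) < shift(18)                  -> descend
	 *   PDB0: shift(9) <= frag(11) < parent_shift(18)
	 *         -> update here: incr = 4 KiB << 9 = 2 MiB per entry,
	 *            amdgpu_vm_update_huge() adds AMDGPU_PDE_PTE so the
	 *            PDEs act as huge-page PTEs
	 */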
@@ -1131,7 +1724,9 @@ static int amdgpu_vm_frag_ptes(struct amdgpu_pte_update_params *params,
* @fence: optional resulting fence
*
* Fill in the page table entries between @start and @last.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
@@ -1171,11 +1766,11 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
params.func = amdgpu_vm_cpu_set_ptes;
params.pages_addr = pages_addr;
- return amdgpu_vm_frag_ptes(&params, start, last + 1,
- addr, flags);
+ return amdgpu_vm_update_ptes(&params, start, last + 1,
+ addr, flags);
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
nptes = last - start + 1;
@@ -1204,11 +1799,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
} else {
/* set page commands needed */
- ndw += ncmds * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw;
+ ndw += ncmds * 10;
/* extra commands for begin/end fragments */
- ndw += 2 * adev->vm_manager.vm_pte_funcs->set_pte_pde_num_dw
- * adev->vm_manager.fragment_size;
+ if (vm->root.base.bo->shadow)
+ ndw += 2 * 10 * adev->vm_manager.fragment_size * 2;
+ else
+ ndw += 2 * 10 * adev->vm_manager.fragment_size;
params.func = amdgpu_vm_do_set_ptes;
}
@@ -1249,14 +1846,13 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
if (r)
goto error_free;
- r = amdgpu_vm_frag_ptes(&params, start, last + 1, addr, flags);
+ r = amdgpu_vm_update_ptes(&params, start, last + 1, addr, flags);
if (r)
goto error_free;
amdgpu_ring_pad_ib(ring, params.ib);
WARN_ON(params.ib->length_dw > ndw);
- r = amdgpu_job_submit(job, ring, &vm->entity,
- AMDGPU_FENCE_OWNER_VM, &f);
+ r = amdgpu_job_submit(job, &vm->entity, AMDGPU_FENCE_OWNER_VM, &f);
if (r)
goto error_free;
@@ -1284,7 +1880,9 @@ error_free:
*
* Split the mapping into smaller chunks so that each update fits
* into a SDMA IB.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
struct dma_fence *exclusive,
@@ -1337,7 +1935,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (nodes) {
addr = nodes->start << PAGE_SHIFT;
max_entries = (nodes->size - pfn) *
- (PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE);
+ AMDGPU_GPU_PAGES_IN_CPU_PAGE;
} else {
addr = 0;
max_entries = S64_MAX;
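AMDGPU_GPU_PAGES_IN_CPU_PAGE replaces the open-coded PAGE_SIZE / AMDGPU_GPU_PAGE_SIZE from the removed line above, so on the usual 4 KiB-page kernels it is 1 and nothing changes. The conversions only seem to matter when CPU pages are larger than the 4 KiB GPU page, for example (a sketch):

	/* 64 KiB CPU pages, 4 KiB GPU pages:
	 *
	 *   AMDGPU_GPU_PAGES_IN_CPU_PAGE = 65536 / 4096 = 16
	 *
	 * nodes->size and the pfn index into pages_addr[] are in CPU
	 * pages, while start and last are in GPU pages, hence the
	 * multiplications and divisions by this factor in this function.
	 */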
@@ -1347,7 +1945,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
uint64_t count;
max_entries = min(max_entries, 16ull * 1024ull);
- for (count = 1; count < max_entries; ++count) {
+ for (count = 1;
+ count < max_entries / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
+ ++count) {
uint64_t idx = pfn + count;
if (pages_addr[idx] !=
@@ -1360,7 +1960,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
dma_addr = pages_addr;
} else {
addr = pages_addr[pfn];
- max_entries = count;
+ max_entries = count * AMDGPU_GPU_PAGES_IN_CPU_PAGE;
}
} else if (flags & AMDGPU_PTE_VALID) {
@@ -1375,7 +1975,7 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
if (r)
return r;
- pfn += last - start + 1;
+ pfn += (last - start + 1) / AMDGPU_GPU_PAGES_IN_CPU_PAGE;
if (nodes && nodes->size == pfn) {
pfn = 0;
++nodes;
@@ -1395,7 +1995,9 @@ static int amdgpu_vm_bo_split_mapping(struct amdgpu_device *adev,
* @clear: if true clear the entries
*
* Fill in the page table entries for @bo_va.
- * Returns 0 for success, -EINVAL for failure.
+ *
+ * Returns:
+ * 0 for success, -EINVAL for failure.
*/
int amdgpu_vm_bo_update(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va,
@@ -1411,18 +2013,17 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
uint64_t flags;
int r;
- if (clear || !bo_va->base.bo) {
+ if (clear || !bo) {
mem = NULL;
nodes = NULL;
exclusive = NULL;
} else {
struct ttm_dma_tt *ttm;
- mem = &bo_va->base.bo->tbo.mem;
+ mem = &bo->tbo.mem;
nodes = mem->mm_node;
if (mem->mem_type == TTM_PL_TT) {
- ttm = container_of(bo_va->base.bo->tbo.ttm,
- struct ttm_dma_tt, ttm);
+ ttm = container_of(bo->tbo.ttm, struct ttm_dma_tt, ttm);
pages_addr = ttm->dma_address;
}
exclusive = reservation_object_get_excl(bo->tbo.resv);
@@ -1457,12 +2058,23 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
if (vm->use_cpu_for_update) {
/* Flush HDP */
mb();
- amdgpu_gart_flush_gpu_tlb(adev, 0);
+ amdgpu_asic_flush_hdp(adev, NULL);
}
- spin_lock(&vm->status_lock);
- list_del_init(&bo_va->base.vm_status);
- spin_unlock(&vm->status_lock);
+ /* If the BO is not in its preferred location add it back to
+ * the evicted list so that it gets validated again on the
+ * next command submission.
+ */
+ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
+ uint32_t mem_type = bo->tbo.mem.mem_type;
+
+ if (!(bo->preferred_domains & amdgpu_mem_type_to_domain(mem_type)))
+ amdgpu_vm_bo_evicted(&bo_va->base);
+ else
+ amdgpu_vm_bo_idle(&bo_va->base);
+ } else {
+ amdgpu_vm_bo_done(&bo_va->base);
+ }
list_splice_init(&bo_va->invalids, &bo_va->valids);
bo_va->cleared = clear;
@@ -1477,6 +2089,8 @@ int amdgpu_vm_bo_update(struct amdgpu_device *adev,
/**
* amdgpu_vm_update_prt_state - update the global PRT state
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
{
@@ -1485,16 +2099,18 @@ static void amdgpu_vm_update_prt_state(struct amdgpu_device *adev)
spin_lock_irqsave(&adev->vm_manager.prt_lock, flags);
enable = !!atomic_read(&adev->vm_manager.num_prt_users);
- adev->gart.gart_funcs->set_prt(adev, enable);
+ adev->gmc.gmc_funcs->set_prt(adev, enable);
spin_unlock_irqrestore(&adev->vm_manager.prt_lock, flags);
}
/**
* amdgpu_vm_prt_get - add a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
{
- if (!adev->gart.gart_funcs->set_prt)
+ if (!adev->gmc.gmc_funcs->set_prt)
return;
if (atomic_inc_return(&adev->vm_manager.num_prt_users) == 1)
@@ -1503,6 +2119,8 @@ static void amdgpu_vm_prt_get(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_put - drop a PRT user
+ *
+ * @adev: amdgpu_device pointer
*/
static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
{
@@ -1512,6 +2130,9 @@ static void amdgpu_vm_prt_put(struct amdgpu_device *adev)
/**
* amdgpu_vm_prt_cb - callback for updating the PRT status
+ *
+ * @fence: fence for the callback
+ * @_cb: the callback structure
*/
static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
{
@@ -1523,13 +2144,16 @@ static void amdgpu_vm_prt_cb(struct dma_fence *fence, struct dma_fence_cb *_cb)
/**
* amdgpu_vm_add_prt_cb - add callback for updating the PRT status
+ *
+ * @adev: amdgpu_device pointer
+ * @fence: fence for the callback
*/
static void amdgpu_vm_add_prt_cb(struct amdgpu_device *adev,
struct dma_fence *fence)
{
struct amdgpu_prt_cb *cb;
- if (!adev->gart.gart_funcs->set_prt)
+ if (!adev->gmc.gmc_funcs->set_prt)
return;
cb = kmalloc(sizeof(struct amdgpu_prt_cb), GFP_KERNEL);
@@ -1614,25 +2238,28 @@ static void amdgpu_vm_prt_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* or if an error occurred)
*
* Make sure all freed BOs are cleared in the PT.
- * Returns 0 for success.
- *
* PTs have to be reserved and mutex must be locked!
+ *
+ * Returns:
+ * 0 for success.
+ *
*/
int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
struct dma_fence **fence)
{
struct amdgpu_bo_va_mapping *mapping;
+ uint64_t init_pte_value = 0;
struct dma_fence *f = NULL;
int r;
- uint64_t init_pte_value = 0;
while (!list_empty(&vm->freed)) {
mapping = list_first_entry(&vm->freed,
struct amdgpu_bo_va_mapping, list);
list_del(&mapping->list);
- if (vm->pte_support_ats)
+ if (vm->pte_support_ats &&
+ mapping->start < AMDGPU_GMC_HOLE_START)
init_pte_value = AMDGPU_PTE_DEFAULT_ATC;
r = amdgpu_vm_bo_update_mapping(adev, NULL, NULL, vm,
@@ -1661,35 +2288,38 @@ int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
*
* @adev: amdgpu_device pointer
* @vm: requested vm
- * @sync: sync object to add fences to
*
* Make sure all BOs which are moved are updated in the PTs.
- * Returns 0 for success.
+ *
+ * Returns:
+ * 0 for success.
*
* PTs have to be reserved!
*/
int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
struct amdgpu_vm *vm)
{
+ struct amdgpu_bo_va *bo_va, *tmp;
+ struct reservation_object *resv;
bool clear;
- int r = 0;
-
- spin_lock(&vm->status_lock);
- while (!list_empty(&vm->moved)) {
- struct amdgpu_bo_va *bo_va;
- struct reservation_object *resv;
+ int r;
- bo_va = list_first_entry(&vm->moved,
- struct amdgpu_bo_va, base.vm_status);
- spin_unlock(&vm->status_lock);
+ list_for_each_entry_safe(bo_va, tmp, &vm->moved, base.vm_status) {
+ /* Per VM BOs never need to be cleared in the page tables */
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ return r;
+ }
+ spin_lock(&vm->invalidated_lock);
+ while (!list_empty(&vm->invalidated)) {
+ bo_va = list_first_entry(&vm->invalidated, struct amdgpu_bo_va,
+ base.vm_status);
resv = bo_va->base.bo->tbo.resv;
+ spin_unlock(&vm->invalidated_lock);
- /* Per VM BOs never need to bo cleared in the page tables */
- if (resv == vm->root.base.bo->tbo.resv)
- clear = false;
/* Try to reserve the BO to avoid clearing its ptes */
- else if (!amdgpu_vm_debug && reservation_object_trylock(resv))
+ if (!amdgpu_vm_debug && reservation_object_trylock(resv))
clear = false;
/* Somebody else is using the BO right now */
else
@@ -1699,14 +2329,13 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
if (r)
return r;
- if (!clear && resv != vm->root.base.bo->tbo.resv)
+ if (!clear)
reservation_object_unlock(resv);
-
- spin_lock(&vm->status_lock);
+ spin_lock(&vm->invalidated_lock);
}
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
- return r;
+ return 0;
}
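
As a rough usage sketch (illustrative, with error handling and locking trimmed; the caller shown is not the actual CS ioctl): a submission path is expected to reserve the page tables, clear freed mappings, and then let amdgpu_vm_handle_moved() bring the moved and invalidated BOs up to date.

/* Hypothetical caller; only the call order is taken from the code above. */
static int example_update_vm_before_submit(struct amdgpu_device *adev,
					   struct amdgpu_vm *vm)
{
	int r;

	/* drop PTEs of freed mappings first */
	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	/* then update per-VM BOs (moved list) and independently
	 * reserved BOs (invalidated list)
	 */
	return amdgpu_vm_handle_moved(adev, vm);
}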
/**
@@ -1718,7 +2347,9 @@ int amdgpu_vm_handle_moved(struct amdgpu_device *adev,
*
* Add @bo into the requested vm.
* Add @bo to the list of bos associated with the vm
- * Returns newly added bo_va or NULL for failure
+ *
+ * Returns:
+ * Newly added bo_va or NULL for failure
*
* Object has to be reserved!
*/
@@ -1732,36 +2363,12 @@ struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
if (bo_va == NULL) {
return NULL;
}
- bo_va->base.vm = vm;
- bo_va->base.bo = bo;
- INIT_LIST_HEAD(&bo_va->base.bo_list);
- INIT_LIST_HEAD(&bo_va->base.vm_status);
+ amdgpu_vm_bo_base_init(&bo_va->base, vm, bo);
bo_va->ref_count = 1;
INIT_LIST_HEAD(&bo_va->valids);
INIT_LIST_HEAD(&bo_va->invalids);
- if (!bo)
- return bo_va;
-
- list_add_tail(&bo_va->base.bo_list, &bo->va);
-
- if (bo->tbo.resv != vm->root.base.bo->tbo.resv)
- return bo_va;
-
- if (bo->preferred_domains &
- amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type))
- return bo_va;
-
- /*
- * We checked all the prerequisites, but it looks like this per VM BO
- * is currently evicted. add the BO to the evicted list to make sure it
- * is validated on next VM use to avoid fault.
- * */
- spin_lock(&vm->status_lock);
- list_move_tail(&bo_va->base.vm_status, &vm->evicted);
- spin_unlock(&vm->status_lock);
-
return bo_va;
}
@@ -1789,11 +2396,9 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
if (mapping->flags & AMDGPU_PTE_PRT)
amdgpu_vm_prt_get(adev);
- if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
- spin_lock(&vm->status_lock);
- if (list_empty(&bo_va->base.vm_status))
- list_add(&bo_va->base.vm_status, &vm->moved);
- spin_unlock(&vm->status_lock);
+ if (bo && bo->tbo.resv == vm->root.base.bo->tbo.resv &&
+ !bo_va->base.moved) {
+ list_move(&bo_va->base.vm_status, &vm->moved);
}
trace_amdgpu_vm_bo_map(bo_va, mapping);
}
@@ -1805,10 +2410,13 @@ static void amdgpu_vm_bo_insert_map(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
* Add a mapping of the BO at the specified addr into the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -1866,11 +2474,14 @@ int amdgpu_vm_bo_map(struct amdgpu_device *adev,
* @bo_va: bo_va to store the address
* @saddr: where to map the BO
* @offset: requested offset in the BO
+ * @size: BO size in bytes
* @flags: attributes of pages (read/write/valid/etc.)
*
* Add a mapping of the BO at the specified addr into the VM. Replace existing
* mappings as we do so.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -1927,7 +2538,9 @@ int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev,
* @saddr: where the BO is mapped
*
* Remove a mapping of the BO at the specified addr from the VM.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*
* Object has to be reserved and unreserved outside!
*/
@@ -1981,7 +2594,9 @@ int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
* @size: size of the range
*
* Remove all mappings in a range, split them as appropriate.
- * Returns 0 for success, error for failure.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
struct amdgpu_vm *vm,
@@ -2017,7 +2632,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
before->last = saddr - 1;
before->offset = tmp->offset;
before->flags = tmp->flags;
- list_add(&before->list, &tmp->list);
+ before->bo_va = tmp->bo_va;
+ list_add(&before->list, &tmp->bo_va->invalids);
}
/* Remember mapping split at the end */
@@ -2027,7 +2643,8 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
after->offset = tmp->offset;
after->offset += after->start - tmp->start;
after->flags = tmp->flags;
- list_add(&after->list, &tmp->list);
+ after->bo_va = tmp->bo_va;
+ list_add(&after->list, &tmp->bo_va->invalids);
}
list_del(&tmp->list);
@@ -2076,8 +2693,13 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
* amdgpu_vm_bo_lookup_mapping - find mapping by address
*
* @vm: the requested VM
+ * @addr: the address
*
* Find a mapping by its address.
+ *
+ * Returns:
+ * The amdgpu_bo_va_mapping matching addr, or NULL
+ *
*/
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr)
@@ -2086,6 +2708,35 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
}
/**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+
+ if (!trace_amdgpu_vm_bo_cs_enabled())
+ return;
+
+ for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+ mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+ if (mapping->bo_va && mapping->bo_va->base.bo) {
+ struct amdgpu_bo *bo;
+
+ bo = mapping->bo_va->base.bo;
+ if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+ continue;
+ }
+
+ trace_amdgpu_vm_bo_cs(mapping);
+ }
+}
+
+/**
* amdgpu_vm_bo_rmv - remove a bo from a specific vm
*
* @adev: amdgpu_device pointer
@@ -2099,13 +2750,27 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va)
{
struct amdgpu_bo_va_mapping *mapping, *next;
+ struct amdgpu_bo *bo = bo_va->base.bo;
struct amdgpu_vm *vm = bo_va->base.vm;
+ struct amdgpu_vm_bo_base **base;
- list_del(&bo_va->base.bo_list);
+ if (bo) {
+ if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+ vm->bulk_moveable = false;
- spin_lock(&vm->status_lock);
+ for (base = &bo_va->base.bo->vm_bo; *base;
+ base = &(*base)->next) {
+ if (*base != &bo_va->base)
+ continue;
+
+ *base = bo_va->base.next;
+ break;
+ }
+ }
+
+ spin_lock(&vm->invalidated_lock);
list_del(&bo_va->base.vm_status);
- spin_unlock(&vm->status_lock);
+ spin_unlock(&vm->invalidated_lock);
list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
list_del(&mapping->list);
@@ -2129,8 +2794,8 @@ void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
* amdgpu_vm_bo_invalidate - mark the bo as invalid
*
* @adev: amdgpu_device pointer
- * @vm: requested vm
* @bo: amdgpu buffer object
+ * @evicted: is the BO evicted
*
* Mark @bo as invalid.
*/
@@ -2139,36 +2804,39 @@ void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
{
struct amdgpu_vm_bo_base *bo_base;
- list_for_each_entry(bo_base, &bo->va, bo_list) {
+ /* shadow bo doesn't have bo base, its validation needs its parent */
+ if (bo->parent && bo->parent->shadow == bo)
+ bo = bo->parent;
+
+ for (bo_base = bo->vm_bo; bo_base; bo_base = bo_base->next) {
struct amdgpu_vm *vm = bo_base->vm;
- bo_base->moved = true;
if (evicted && bo->tbo.resv == vm->root.base.bo->tbo.resv) {
- spin_lock(&bo_base->vm->status_lock);
- if (bo->tbo.type == ttm_bo_type_kernel)
- list_move(&bo_base->vm_status, &vm->evicted);
- else
- list_move_tail(&bo_base->vm_status,
- &vm->evicted);
- spin_unlock(&bo_base->vm->status_lock);
+ amdgpu_vm_bo_evicted(bo_base);
continue;
}
- if (bo->tbo.type == ttm_bo_type_kernel) {
- spin_lock(&bo_base->vm->status_lock);
- if (list_empty(&bo_base->vm_status))
- list_add(&bo_base->vm_status, &vm->relocated);
- spin_unlock(&bo_base->vm->status_lock);
+ if (bo_base->moved)
continue;
- }
+ bo_base->moved = true;
- spin_lock(&bo_base->vm->status_lock);
- if (list_empty(&bo_base->vm_status))
- list_add(&bo_base->vm_status, &vm->moved);
- spin_unlock(&bo_base->vm->status_lock);
+ if (bo->tbo.type == ttm_bo_type_kernel)
+ amdgpu_vm_bo_relocated(bo_base);
+ else if (bo->tbo.resv == vm->root.base.bo->tbo.resv)
+ amdgpu_vm_bo_moved(bo_base);
+ else
+ amdgpu_vm_bo_invalidated(bo_base);
}
}
+/**
+ * amdgpu_vm_get_block_size - calculate VM page table size as power of two
+ *
+ * @vm_size: VM size
+ *
+ * Returns:
+ * VM page table size as power of two
+ */
static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
{
/* Total bits covered by PD + PTs */
@@ -2186,24 +2854,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
* amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
*
* @adev: amdgpu_device pointer
- * @vm_size: the default vm size if it's set auto
+ * @min_vm_size: the minimum vm size in GB if it's set auto
+ * @fragment_size_default: Default PTE fragment size
+ * @max_level: max VMPT level
+ * @max_bits: max address space size in bits
+ *
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
uint32_t fragment_size_default, unsigned max_level,
unsigned max_bits)
{
+ unsigned int max_size = 1 << (max_bits - 30);
+ unsigned int vm_size;
uint64_t tmp;
/* adjust vm size first */
if (amdgpu_vm_size != -1) {
- unsigned max_size = 1 << (max_bits - 30);
-
vm_size = amdgpu_vm_size;
if (vm_size > max_size) {
dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
amdgpu_vm_size, max_size);
vm_size = max_size;
}
+ } else {
+ struct sysinfo si;
+ unsigned int phys_ram_gb;
+
+ /* Optimal VM size depends on the amount of physical
+ * RAM available. Underlying requirements and
+ * assumptions:
+ *
+ * - Need to map system memory and VRAM from all GPUs
+ * - VRAM from other GPUs not known here
+ * - Assume VRAM <= system memory
+ * - On GFX8 and older, VM space can be segmented for
+ * different MTYPEs
+ * - Need to allow room for fragmentation, guard pages etc.
+ *
+ * This adds up to a rough guess of system memory x3.
+ * Round up to power of two to maximize the available
+ * VM size with the given page table size.
+ */
+ si_meminfo(&si);
+ phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
+ (1 << 30) - 1) >> 30;
+ vm_size = roundup_pow_of_two(
+ min(max(phys_ram_gb * 3, min_vm_size), max_size));
}
adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
@@ -2248,44 +2944,56 @@ void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
adev->vm_manager.fragment_size);
}
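
A worked example of the sizing heuristic above (illustrative numbers, helper outside the driver): with 16 GiB of system RAM, min_vm_size = 32 and a 48-bit address space, 16 * 3 = 48 GiB is taken, clamped to the 256 TiB maximum, and rounded up to a 64 GiB VM size.

/* Sketch of the heuristic, assuming the usual min/max/roundup helpers. */
static unsigned int example_vm_size_gb(unsigned int phys_ram_gb,
				       unsigned int min_vm_size,
				       unsigned int max_bits)
{
	unsigned int max_size = 1 << (max_bits - 30);

	/* e.g. example_vm_size_gb(16, 32, 48) == 64 */
	return roundup_pow_of_two(min(max(phys_ram_gb * 3, min_vm_size),
				      max_size));
}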
+static struct amdgpu_retryfault_hashtable *init_fault_hash(void)
+{
+ struct amdgpu_retryfault_hashtable *fault_hash;
+
+ fault_hash = kmalloc(sizeof(*fault_hash), GFP_KERNEL);
+ if (!fault_hash)
+ return fault_hash;
+
+ INIT_CHASH_TABLE(fault_hash->hash,
+ AMDGPU_PAGEFAULT_HASH_BITS, 8, 0);
+ spin_lock_init(&fault_hash->lock);
+ fault_hash->count = 0;
+
+ return fault_hash;
+}
+
/**
* amdgpu_vm_init - initialize a vm instance
*
* @adev: amdgpu_device pointer
* @vm: requested vm
* @vm_context: Indicates if it is a GFX or Compute context
+ * @pasid: Process address space identifier
*
* Init @vm fields.
+ *
+ * Returns:
+ * 0 for success, error for failure.
*/
int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
int vm_context, unsigned int pasid)
{
- const unsigned align = min(AMDGPU_VM_PTB_ALIGN_SIZE,
- AMDGPU_VM_PTE_COUNT(adev) * 8);
- uint64_t init_pde_value = 0, flags;
- unsigned ring_instance;
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
- unsigned long size;
+ struct amdgpu_bo_param bp;
+ struct amdgpu_bo *root;
int r, i;
vm->va = RB_ROOT_CACHED;
for (i = 0; i < AMDGPU_MAX_VMHUBS; i++)
vm->reserved_vmid[i] = NULL;
- spin_lock_init(&vm->status_lock);
INIT_LIST_HEAD(&vm->evicted);
INIT_LIST_HEAD(&vm->relocated);
INIT_LIST_HEAD(&vm->moved);
+ INIT_LIST_HEAD(&vm->idle);
+ INIT_LIST_HEAD(&vm->invalidated);
+ spin_lock_init(&vm->invalidated_lock);
INIT_LIST_HEAD(&vm->freed);
/* create scheduler entity for page table updates */
-
- ring_instance = atomic_inc_return(&adev->vm_manager.vm_pte_next_ring);
- ring_instance %= adev->vm_manager.vm_pte_num_rings;
- ring = adev->vm_manager.vm_pte_rings[ring_instance];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
- r = drm_sched_entity_init(&ring->sched, &vm->entity,
- rq, amdgpu_sched_jobs, NULL);
+ r = drm_sched_entity_init(&vm->entity, adev->vm_manager.vm_pte_rqs,
+ adev->vm_manager.vm_pte_num_rqs, NULL);
if (r)
return r;
@@ -2295,43 +3003,36 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_COMPUTE);
- if (adev->asic_type == CHIP_RAVEN) {
+ if (adev->asic_type == CHIP_RAVEN)
vm->pte_support_ats = true;
- init_pde_value = AMDGPU_PTE_DEFAULT_ATC
- | AMDGPU_PDE_PTE;
-
- }
- } else
+ } else {
vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
AMDGPU_VM_USE_CPU_FOR_GFX);
+ }
DRM_DEBUG_DRIVER("VM update mode is %s\n",
vm->use_cpu_for_update ? "CPU" : "SDMA");
- WARN_ONCE((vm->use_cpu_for_update & !amdgpu_vm_is_large_bar(adev)),
+ WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
"CPU update of VM recommended only for large BAR system\n");
vm->last_update = NULL;
- flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS |
- AMDGPU_GEM_CREATE_VRAM_CLEARED;
- if (vm->use_cpu_for_update)
- flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- else
- flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW);
-
- size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
- r = amdgpu_bo_create(adev, size, align, true, AMDGPU_GEM_DOMAIN_VRAM,
- flags, NULL, NULL, init_pde_value,
- &vm->root.base.bo);
+ amdgpu_vm_bo_param(adev, vm, adev->vm_manager.root_level, &bp);
+ if (vm_context == AMDGPU_VM_CONTEXT_COMPUTE)
+ bp.flags &= ~AMDGPU_GEM_CREATE_SHADOW;
+ r = amdgpu_bo_create(adev, &bp, &root);
if (r)
goto error_free_sched_entity;
- r = amdgpu_bo_reserve(vm->root.base.bo, true);
+ r = amdgpu_bo_reserve(root, true);
if (r)
goto error_free_root;
- vm->root.base.vm = vm;
- list_add_tail(&vm->root.base.bo_list, &vm->root.base.bo->va);
- list_add_tail(&vm->root.base.vm_status, &vm->evicted);
+ r = amdgpu_vm_clear_bo(adev, vm, root,
+ adev->vm_manager.root_level,
+ vm->pte_support_ats);
+ if (r)
+ goto error_unreserve;
+
+ amdgpu_vm_bo_base_init(&vm->root.base, vm, root);
amdgpu_bo_unreserve(vm->root.base.bo);
if (pasid) {
@@ -2347,50 +3048,152 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
vm->pasid = pasid;
}
+ vm->fault_hash = init_fault_hash();
+ if (!vm->fault_hash) {
+ r = -ENOMEM;
+ goto error_free_root;
+ }
+
INIT_KFIFO(vm->faults);
vm->fault_credit = 16;
return 0;
+error_unreserve:
+ amdgpu_bo_unreserve(vm->root.base.bo);
+
error_free_root:
amdgpu_bo_unref(&vm->root.base.bo->shadow);
amdgpu_bo_unref(&vm->root.base.bo);
vm->root.base.bo = NULL;
error_free_sched_entity:
- drm_sched_entity_fini(&ring->sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
return r;
}
/**
- * amdgpu_vm_free_levels - free PD/PT levels
+ * amdgpu_vm_make_compute - Turn a GFX VM into a compute VM
*
- * @adev: amdgpu device structure
- * @parent: PD/PT starting level to free
- * @level: level of parent structure
+ * @adev: amdgpu_device pointer
+ * @vm: requested vm
*
- * Free the page directory or page table level and all sub levels.
+ * This only works on GFX VMs that don't have any BOs added and no
+ * page tables allocated yet.
+ *
+ * Changes the following VM parameters:
+ * - use_cpu_for_update
+ * - pte_supports_ats
+ * - pasid (old PASID is released, because compute manages its own PASIDs)
+ *
+ * Reinitializes the page directory to reflect the changed ATS
+ * setting.
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
*/
-static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
- struct amdgpu_vm_pt *parent,
- unsigned level)
+int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm, unsigned int pasid)
{
- unsigned i, num_entries = amdgpu_vm_num_entries(adev, level);
+ bool pte_support_ats = (adev->asic_type == CHIP_RAVEN);
+ int r;
+
+ r = amdgpu_bo_reserve(vm->root.base.bo, true);
+ if (r)
+ return r;
+
+ /* Sanity checks */
+ if (!RB_EMPTY_ROOT(&vm->va.rb_root) || vm->root.entries) {
+ r = -EINVAL;
+ goto unreserve_bo;
+ }
+
+ if (pasid) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ r = idr_alloc(&adev->vm_manager.pasid_idr, vm, pasid, pasid + 1,
+ GFP_ATOMIC);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+
+ if (r == -ENOSPC)
+ goto unreserve_bo;
+ r = 0;
+ }
+
+ /* Check if PD needs to be reinitialized and do it before
+ * changing any other state, in case it fails.
+ */
+ if (pte_support_ats != vm->pte_support_ats) {
+ r = amdgpu_vm_clear_bo(adev, vm, vm->root.base.bo,
+ adev->vm_manager.root_level,
+ pte_support_ats);
+ if (r)
+ goto free_idr;
+ }
+
+ /* Update VM state */
+ vm->use_cpu_for_update = !!(adev->vm_manager.vm_update_mode &
+ AMDGPU_VM_USE_CPU_FOR_COMPUTE);
+ vm->pte_support_ats = pte_support_ats;
+ DRM_DEBUG_DRIVER("VM update mode is %s\n",
+ vm->use_cpu_for_update ? "CPU" : "SDMA");
+ WARN_ONCE((vm->use_cpu_for_update & !amdgpu_gmc_vram_full_visible(&adev->gmc)),
+ "CPU update of VM recommended only for large BAR system\n");
+
+ if (vm->pasid) {
+ unsigned long flags;
+
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+
+ /* Free the original amdgpu allocated pasid
+ * Will be replaced with kfd allocated pasid
+ */
+ amdgpu_pasid_free(vm->pasid);
+ vm->pasid = 0;
+ }
+
+ /* Free the shadow bo for compute VM */
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
+ if (pasid)
+ vm->pasid = pasid;
+
+ goto unreserve_bo;
+
+free_idr:
+ if (pasid) {
+ unsigned long flags;
- if (parent->base.bo) {
- list_del(&parent->base.bo_list);
- list_del(&parent->base.vm_status);
- amdgpu_bo_unref(&parent->base.bo->shadow);
- amdgpu_bo_unref(&parent->base.bo);
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ idr_remove(&adev->vm_manager.pasid_idr, pasid);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
+unreserve_bo:
+ amdgpu_bo_unreserve(vm->root.base.bo);
+ return r;
+}
- if (parent->entries)
- for (i = 0; i < num_entries; i++)
- amdgpu_vm_free_levels(adev, &parent->entries[i],
- level + 1);
+/**
+ * amdgpu_vm_release_compute - release a compute vm
+ * @adev: amdgpu_device pointer
+ * @vm: a vm turned into compute vm by calling amdgpu_vm_make_compute
+ *
+ * This is the counterpart of amdgpu_vm_make_compute. It decouples the
+ * compute pasid from the vm. Compute should stop using the vm after this call.
+ */
+void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
+{
+ if (vm->pasid) {
+ unsigned long flags;
- kvfree(parent->entries);
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
+ idr_remove(&adev->vm_manager.pasid_idr, vm->pasid);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
+ }
+ vm->pasid = 0;
}
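
A hedged sketch of the expected call sequence from the KFD side (the wrapper helpers and the KFD-allocated PASID are hypothetical; only amdgpu_vm_make_compute() and amdgpu_vm_release_compute() come from the code above):

/* The GFX VM must still be empty: no BOs mapped, no page tables allocated. */
static int example_acquire_vm_for_compute(struct amdgpu_device *adev,
					  struct amdgpu_vm *vm,
					  unsigned int kfd_pasid)
{
	/* converts the VM and replaces its PASID with the KFD one */
	return amdgpu_vm_make_compute(adev, vm, kfd_pasid);
}

static void example_release_vm_for_compute(struct amdgpu_device *adev,
					   struct amdgpu_vm *vm)
{
	/* detaches the compute PASID again; the VM itself is freed elsewhere */
	amdgpu_vm_release_compute(adev, vm);
}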
/**
@@ -2405,14 +3208,16 @@ static void amdgpu_vm_free_levels(struct amdgpu_device *adev,
void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
{
struct amdgpu_bo_va_mapping *mapping, *tmp;
- bool prt_fini_needed = !!adev->gart.gart_funcs->set_prt;
+ bool prt_fini_needed = !!adev->gmc.gmc_funcs->set_prt;
struct amdgpu_bo *root;
u64 fault;
int i, r;
+ amdgpu_amdkfd_gpuvm_destroy_cb(adev, vm);
+
/* Clear pending page faults from IH when the VM is destroyed */
while (kfifo_get(&vm->faults, &fault))
- amdgpu_ih_clear_fault(adev, fault);
+ amdgpu_vm_clear_fault(vm->fault_hash, fault);
if (vm->pasid) {
unsigned long flags;
@@ -2422,15 +3227,20 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- drm_sched_entity_fini(vm->entity.sched, &vm->entity);
+ kfree(vm->fault_hash);
+ vm->fault_hash = NULL;
+
+ drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
}
rbtree_postorder_for_each_entry_safe(mapping, tmp,
&vm->va.rb_root, rb) {
+ /* Don't remove the mapping here, we don't want to trigger a
+ * rebalance and the tree is about to be destroyed anyway.
+ */
list_del(&mapping->list);
- amdgpu_vm_it_remove(mapping, &vm->va);
kfree(mapping);
}
list_for_each_entry_safe(mapping, tmp, &vm->freed, list) {
@@ -2448,8 +3258,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
if (r) {
dev_err(adev->dev, "Leaking page tables because BO reservation failed\n");
} else {
- amdgpu_vm_free_levels(adev, &vm->root,
- adev->vm_manager.root_level);
+ amdgpu_vm_free_pts(adev, vm);
amdgpu_bo_unreserve(root);
}
amdgpu_bo_unref(&root);
@@ -2464,8 +3273,10 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
* @adev: amdgpu_device pointer
* @pasid: PASID do identify the VM
*
- * This function is expected to be called in interrupt context. Returns
- * true if there was fault credit, false otherwise
+ * This function is expected to be called in interrupt context.
+ *
+ * Returns:
+ * True if there was fault credit, false otherwise
*/
bool amdgpu_vm_pasid_fault_credit(struct amdgpu_device *adev,
unsigned int pasid)
@@ -2510,7 +3321,6 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
for (i = 0; i < AMDGPU_MAX_RINGS; ++i)
adev->vm_manager.seqno[i] = 0;
- atomic_set(&adev->vm_manager.vm_pte_next_ring, 0);
spin_lock_init(&adev->vm_manager.prt_lock);
atomic_set(&adev->vm_manager.num_prt_users, 0);
@@ -2519,7 +3329,7 @@ void amdgpu_vm_manager_init(struct amdgpu_device *adev)
*/
#ifdef CONFIG_X86_64
if (amdgpu_vm_update_mode == -1) {
- if (amdgpu_vm_is_large_bar(adev))
+ if (amdgpu_gmc_vram_full_visible(&adev->gmc))
adev->vm_manager.vm_update_mode =
AMDGPU_VM_USE_CPU_FOR_COMPUTE;
else
@@ -2549,6 +3359,16 @@ void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
amdgpu_vmid_mgr_fini(adev);
}
+/**
+ * amdgpu_vm_ioctl - Manages VMID reservation for vm hubs.
+ *
+ * @dev: drm device pointer
+ * @data: drm_amdgpu_vm
+ * @filp: drm file pointer
+ *
+ * Returns:
+ * 0 for success, -errno for errors.
+ */
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
union drm_amdgpu_vm *args = data;
@@ -2572,3 +3392,117 @@ int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
return 0;
}
+
+/**
+ * amdgpu_vm_get_task_info - Extracts task info for a PASID.
+ *
+ * @adev: amdgpu_device pointer
+ * @pasid: PASID identifier for VM
+ * @task_info: task_info to fill.
+ */
+void amdgpu_vm_get_task_info(struct amdgpu_device *adev, unsigned int pasid,
+ struct amdgpu_task_info *task_info)
+{
+ struct amdgpu_vm *vm;
+
+ spin_lock(&adev->vm_manager.pasid_lock);
+
+ vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
+ if (vm)
+ *task_info = vm->task_info;
+
+ spin_unlock(&adev->vm_manager.pasid_lock);
+}
+
+/**
+ * amdgpu_vm_set_task_info - Sets VMs task info.
+ *
+ * @vm: vm for which to set the info
+ */
+void amdgpu_vm_set_task_info(struct amdgpu_vm *vm)
+{
+ if (!vm->task_info.pid) {
+ vm->task_info.pid = current->pid;
+ get_task_comm(vm->task_info.task_name, current);
+
+ if (current->group_leader->mm == current->mm) {
+ vm->task_info.tgid = current->group_leader->pid;
+ get_task_comm(vm->task_info.process_name, current->group_leader);
+ }
+ }
+}
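
A short usage sketch (illustrative; the callers and the error message are made up for the example): the task info is recorded once per VM on the submission path and looked up by PASID when reporting a fault.

static void example_record_submitter(struct amdgpu_vm *vm)
{
	/* fills pid/tgid and the task/process names on first use */
	amdgpu_vm_set_task_info(vm);
}

static void example_report_fault(struct amdgpu_device *adev,
				 unsigned int pasid, u64 addr)
{
	struct amdgpu_task_info task_info = {};

	amdgpu_vm_get_task_info(adev, pasid, &task_info);
	dev_err(adev->dev,
		"VM fault at 0x%016llx, process %s pid %d thread %s\n",
		addr, task_info.process_name, task_info.pid,
		task_info.task_name);
}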
+
+/**
+ * amdgpu_vm_add_fault - Add a page fault record to fault hash table
+ *
+ * @fault_hash: fault hash table
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a retry page fault interrupt is
+ * received. If this is a new page fault, it will be added to a hash
+ * table. The return value indicates whether this is a new fault, or
+ * a fault that was already known and is already being handled.
+ *
+ * If there are too many pending page faults, this will fail. Retry
+ * interrupts should be ignored in this case until there is enough
+ * free space.
+ *
+ * Returns:
+ * 0 if the fault was added, 1 if the fault was already known, or -ENOSPC
+ * if there are too many pending faults.
+ */
+int amdgpu_vm_add_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
+{
+ unsigned long flags;
+ int r = -ENOSPC;
+
+ if (WARN_ON_ONCE(!fault_hash))
+ /* Should be allocated in amdgpu_vm_init */
+ return r;
+
+ spin_lock_irqsave(&fault_hash->lock, flags);
+
+ /* Only let the hash table fill up to 50% for best performance */
+ if (fault_hash->count >= (1 << (AMDGPU_PAGEFAULT_HASH_BITS-1)))
+ goto unlock_out;
+
+ r = chash_table_copy_in(&fault_hash->hash, key, NULL);
+ if (!r)
+ fault_hash->count++;
+
+ /* chash_table_copy_in should never fail unless we're losing count */
+ WARN_ON_ONCE(r < 0);
+
+unlock_out:
+ spin_unlock_irqrestore(&fault_hash->lock, flags);
+ return r;
+}
+
+/**
+ * amdgpu_vm_clear_fault - Remove a page fault record
+ *
+ * @fault_hash: fault hash table
+ * @key: 64-bit encoding of PASID and address
+ *
+ * This should be called when a page fault has been handled. Any
+ * future interrupt with this key will be processed as a new
+ * page fault.
+ */
+void amdgpu_vm_clear_fault(struct amdgpu_retryfault_hashtable *fault_hash, u64 key)
+{
+ unsigned long flags;
+ int r;
+
+ if (!fault_hash)
+ return;
+
+ spin_lock_irqsave(&fault_hash->lock, flags);
+
+ r = chash_table_remove(&fault_hash->hash, key, NULL);
+ if (!WARN_ON_ONCE(r < 0)) {
+ fault_hash->count--;
+ WARN_ON_ONCE(fault_hash->count < 0);
+ }
+
+ spin_unlock_irqrestore(&fault_hash->lock, flags);
+}
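
A hedged sketch of how the two helpers fit together on the retry-fault path (the key encoding and the surrounding handler are assumptions for the example, not taken from the interrupt code):

/* Returns true when the fault is new and should actually be handled. */
static bool example_filter_retry_fault(struct amdgpu_vm *vm, u64 key)
{
	int r = amdgpu_vm_add_fault(vm->fault_hash, key);

	if (r == 1)
		return false;	/* already known, drop this interrupt */
	if (r == -ENOSPC)
		return false;	/* hash over 50% full, ignore for now */
	return true;		/* r == 0: new fault */
}

static void example_retry_fault_done(struct amdgpu_vm *vm, u64 key)
{
	/* the next interrupt with this key counts as a new fault again */
	amdgpu_vm_clear_fault(vm->fault_hash, key);
}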