diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 50
1 file changed, 22 insertions(+), 28 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index 291977b93b1d..f5daadcec865 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -34,6 +34,7 @@  #include <drm/amdgpu_drm.h>  #include <drm/drm_drv.h>  #include <drm/ttm/ttm_tt.h> +#include <drm/drm_exec.h>  #include "amdgpu.h"  #include "amdgpu_trace.h"  #include "amdgpu_amdkfd.h" @@ -111,9 +112,9 @@ struct amdgpu_prt_cb {  };  /** - * struct amdgpu_vm_tlb_seq_cb - Helper to increment the TLB flush sequence + * struct amdgpu_vm_tlb_seq_struct - Helper to increment the TLB flush sequence   */ -struct amdgpu_vm_tlb_seq_cb { +struct amdgpu_vm_tlb_seq_struct {  	/**  	 * @vm: pointer to the amdgpu_vm structure to set the fence sequence on  	 */ @@ -339,25 +340,20 @@ void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,  }  /** - * amdgpu_vm_get_pd_bo - add the VM PD to a validation list + * amdgpu_vm_lock_pd - lock PD in drm_exec   *   * @vm: vm providing the BOs - * @validated: head of validation list - * @entry: entry to add + * @exec: drm execution context + * @num_fences: number of extra fences to reserve   * - * Add the page directory to the list of BOs to - * validate for command submission. + * Lock the VM root PD in the DRM execution context.   
*/ -void amdgpu_vm_get_pd_bo(struct amdgpu_vm *vm, -			 struct list_head *validated, -			 struct amdgpu_bo_list_entry *entry) +int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, +		      unsigned int num_fences)  { -	entry->priority = 0; -	entry->tv.bo = &vm->root.bo->tbo; -	/* Two for VM updates, one for TTM and one for the CS job */ -	entry->tv.num_shared = 4; -	entry->user_pages = NULL; -	list_add(&entry->tv.head, validated); +	/* We need at least two fences for the VM PD/PT updates */ +	return drm_exec_prepare_obj(exec, &vm->root.bo->tbo.base, +				    2 + num_fences);  }  /** @@ -833,7 +829,7 @@ error:  static void amdgpu_vm_tlb_seq_cb(struct dma_fence *fence,  				 struct dma_fence_cb *cb)  { -	struct amdgpu_vm_tlb_seq_cb *tlb_cb; +	struct amdgpu_vm_tlb_seq_struct *tlb_cb;  	tlb_cb = container_of(cb, typeof(*tlb_cb), cb);  	atomic64_inc(&tlb_cb->vm->tlb_seq); @@ -871,7 +867,7 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm,  			   struct dma_fence **fence)  {  	struct amdgpu_vm_update_params params; -	struct amdgpu_vm_tlb_seq_cb *tlb_cb; +	struct amdgpu_vm_tlb_seq_struct *tlb_cb;  	struct amdgpu_res_cursor cursor;  	enum amdgpu_sync_mode sync_mode;  	int r, idx; @@ -2121,13 +2117,14 @@ long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout)   *   * @adev: amdgpu_device pointer   * @vm: requested vm + * @xcp_id: GPU partition selection id   *   * Init @vm fields.   *   * Returns:   * 0 for success, error for failure.   
*/ -int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm) +int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id)  {  	struct amdgpu_bo *root_bo;  	struct amdgpu_bo_vm *root; @@ -2177,7 +2174,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm)  	vm->evicting = false;  	r = amdgpu_vm_pt_create(adev, vm, adev->vm_manager.root_level, -				false, &root); +				false, &root, xcp_id);  	if (r)  		goto error_free_delayed;  	root_bo = &root->bo; @@ -2279,16 +2276,13 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)  			goto unreserve_bo;  		vm->update_funcs = &amdgpu_vm_cpu_funcs; +		r = amdgpu_vm_pt_map_tables(adev, vm); +		if (r) +			goto unreserve_bo; +  	} else {  		vm->update_funcs = &amdgpu_vm_sdma_funcs;  	} -	/* -	 * Make sure root PD gets mapped. As vm_update_mode could be changed -	 * when turning a GFX VM into a compute VM. -	 */ -	r = vm->update_funcs->map_table(to_amdgpu_bo_vm(vm->root.bo)); -	if (r) -		goto unreserve_bo;  	dma_fence_put(vm->last_update);  	vm->last_update = dma_fence_get_stub(); @@ -2604,7 +2598,7 @@ bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid,  		/* Intentionally setting invalid PTE flag  		 * combination to force a no-retry-fault  		 */ -		flags = AMDGPU_PTE_SNOOPED | AMDGPU_PTE_PRT; +		flags = AMDGPU_VM_NORETRY_FLAGS;  		value = 0;  	} else if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_NEVER) {  		/* Redirect the access to the dummy page */  |