Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 45 |
1 file changed, 18 insertions, 27 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 2616e2eafdeb..dee446278417 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -41,6 +41,7 @@
 #include <linux/swap.h>
 #include <linux/swiotlb.h>
 #include <linux/dma-buf.h>
+#include <linux/sizes.h>
 
 #include <drm/ttm/ttm_bo_api.h>
 #include <drm/ttm/ttm_bo_driver.h>
@@ -1522,11 +1523,8 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
 	struct dma_fence *f;
 	int i;
 
-	/* Don't evict VM page tables while they are busy, otherwise we can't
-	 * cleanly handle page faults.
-	 */
 	if (bo->type == ttm_bo_type_kernel &&
-	    !dma_resv_test_signaled_rcu(bo->base.resv, true))
+	    !amdgpu_vm_evictable(ttm_to_amdgpu_bo(bo)))
 		return false;
 
 	/* If bo is a KFD BO, check if the bo belongs to the current process.
@@ -1717,12 +1715,17 @@ static int amdgpu_ttm_training_reserve_vram_fini(struct amdgpu_device *adev)
 	amdgpu_bo_free_kernel(&ctx->c2p_bo, NULL, NULL);
 	ctx->c2p_bo = NULL;
 
-	amdgpu_bo_free_kernel(&ctx->p2c_bo, NULL, NULL);
-	ctx->p2c_bo = NULL;
-
 	return 0;
 }
 
+static u64 amdgpu_ttm_training_get_c2p_offset(u64 vram_size)
+{
+       if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1) )
+               vram_size -= SZ_1M;
+
+       return ALIGN(vram_size, SZ_1M);
+}
+
 /**
  * amdgpu_ttm_training_reserve_vram_init - create bo vram reservation from memory training
  *
@@ -1741,7 +1744,7 @@ static int amdgpu_ttm_training_reserve_vram_init(struct amdgpu_device *adev)
 		return 0;
 	}
 
-	ctx->c2p_train_data_offset = adev->fw_vram_usage.mem_train_fb_loc;
+	ctx->c2p_train_data_offset = amdgpu_ttm_training_get_c2p_offset(adev->gmc.mc_vram_size);
 	ctx->p2c_train_data_offset = (adev->gmc.mc_vram_size - GDDR6_MEM_TRAINING_OFFSET);
 	ctx->train_data_size = GDDR6_MEM_TRAINING_DATA_SIZE_IN_BYTES;
 
@@ -1751,17 +1754,6 @@
 		  ctx->c2p_train_data_offset);
 
 	ret = amdgpu_bo_create_kernel_at(adev,
-					 ctx->p2c_train_data_offset,
-					 ctx->train_data_size,
-					 AMDGPU_GEM_DOMAIN_VRAM,
-					 &ctx->p2c_bo,
-					 NULL);
-	if (ret) {
-		DRM_ERROR("alloc p2c_bo failed(%d)!\n", ret);
-		goto Err_out;
-	}
-
-	ret = amdgpu_bo_create_kernel_at(adev,
 					 ctx->c2p_train_data_offset,
 					 ctx->train_data_size,
 					 AMDGPU_GEM_DOMAIN_VRAM,
@@ -1769,15 +1761,12 @@
 					 NULL);
 	if (ret) {
 		DRM_ERROR("alloc c2p_bo failed(%d)!\n", ret);
-		goto Err_out;
+		amdgpu_ttm_training_reserve_vram_fini(adev);
+		return ret;
 	}
 
 	ctx->init = PSP_MEM_TRAIN_RESERVE_SUCCESS;
 	return 0;
-
-Err_out:
-	amdgpu_ttm_training_reserve_vram_fini(adev);
-	return ret;
 }
 
 /**
@@ -1990,11 +1979,13 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
 
 	if (enable) {
 		struct amdgpu_ring *ring;
-		struct drm_sched_rq *rq;
+		struct drm_gpu_scheduler *sched;
 
 		ring = adev->mman.buffer_funcs_ring;
-		rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_KERNEL];
-		r = drm_sched_entity_init(&adev->mman.entity, &rq, 1, NULL);
+		sched = &ring->sched;
+		r = drm_sched_entity_init(&adev->mman.entity,
+				          DRM_SCHED_PRIORITY_KERNEL, &sched,
+					  1, NULL);
 		if (r) {
 			DRM_ERROR("Failed setting up TTM BO move entity (%d)\n",
 				  r);
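The most substantive change above is the new amdgpu_ttm_training_get_c2p_offset() helper: instead of taking the memory-training C2P buffer location from adev->fw_vram_usage.mem_train_fb_loc, the driver now derives it from the VRAM size. For a VRAM size that is an exact multiple of 1 MiB (the common case), the helper returns vram_size - SZ_1M, i.e. the start of the last megabyte of VRAM; if the size sits up to 4 KiB above a megabyte boundary, it returns that boundary instead. The following is a minimal stand-alone sketch of the same arithmetic, not part of the patch: SZ_1M, SZ_4K and ALIGN() are redefined locally only so it builds in user space (in the kernel they come from <linux/sizes.h>, hence the new include, and from the kernel's own ALIGN() macro), and the 8 GiB board size is a hypothetical example.

```c
#include <stdint.h>
#include <stdio.h>

/* Local stand-ins for the kernel's SZ_* constants and ALIGN() macro. */
#define SZ_4K	0x00001000ULL
#define SZ_1M	0x00100000ULL
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uint64_t)(a) - 1))

/* Mirrors amdgpu_ttm_training_get_c2p_offset() introduced by the patch. */
static uint64_t c2p_offset(uint64_t vram_size)
{
	/* Step back a full megabyte when the size is at or no more than
	 * 4 KiB above a 1 MiB boundary, then round up to the next boundary. */
	if ((vram_size & (SZ_1M - 1)) < (SZ_4K + 1))
		vram_size -= SZ_1M;

	return ALIGN(vram_size, SZ_1M);
}

int main(void)
{
	uint64_t vram = 8ULL << 30;	/* hypothetical 8 GiB, 1 MiB-aligned board */

	/* Prints 0x1fff00000: exactly 1 MiB below the top of VRAM. */
	printf("c2p training offset: 0x%llx\n",
	       (unsigned long long)c2p_offset(vram));
	return 0;
}
```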