Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c  45
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c   3
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_svm.c      30
3 files changed, 31 insertions, 47 deletions
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 67541c30327a..e48acdd03c1a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -1393,7 +1393,6 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 	long err = 0;
 	int i;
 	uint32_t *devices_arr = NULL;
-	bool table_freed = false;
 
 	dev = kfd_device_by_id(GET_GPU_ID(args->handle));
 	if (!dev)
@@ -1451,8 +1450,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 			goto get_mem_obj_from_handle_failed;
 		}
 		err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(
-			peer->kgd, (struct kgd_mem *)mem,
-			peer_pdd->drm_priv, &table_freed);
+			peer->kgd, (struct kgd_mem *)mem, peer_pdd->drm_priv);
 		if (err) {
 			pr_err("Failed to map to gpu %d/%d\n",
 			       i, args->n_devices);
@@ -1470,17 +1468,16 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
 	}
 
 	/* Flush TLBs after waiting for the page table updates to complete */
-	if (table_freed) {
-		for (i = 0; i < args->n_devices; i++) {
-			peer = kfd_device_by_id(devices_arr[i]);
-			if (WARN_ON_ONCE(!peer))
-				continue;
-			peer_pdd = kfd_get_process_device_data(peer, p);
-			if (WARN_ON_ONCE(!peer_pdd))
-				continue;
-			kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
-		}
+	for (i = 0; i < args->n_devices; i++) {
+		peer = kfd_device_by_id(devices_arr[i]);
+		if (WARN_ON_ONCE(!peer))
+			continue;
+		peer_pdd = kfd_get_process_device_data(peer, p);
+		if (WARN_ON_ONCE(!peer_pdd))
+			continue;
+		kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
 	}
+
 	kfree(devices_arr);
 
 	return err;
@@ -1568,27 +1565,10 @@ static int kfd_ioctl_unmap_memory_from_gpu(struct file *filep,
 		}
 		args->n_success = i+1;
 	}
 
-	mutex_unlock(&p->mutex);
-
-	err = amdgpu_amdkfd_gpuvm_sync_memory(dev->kgd, (struct kgd_mem *) mem, true);
-	if (err) {
-		pr_debug("Sync memory failed, wait interrupted by user signal\n");
-		goto sync_memory_failed;
-	}
-
-	/* Flush TLBs after waiting for the page table updates to complete */
-	for (i = 0; i < args->n_devices; i++) {
-		peer = kfd_device_by_id(devices_arr[i]);
-		if (WARN_ON_ONCE(!peer))
-			continue;
-		peer_pdd = kfd_get_process_device_data(peer, p);
-		if (WARN_ON_ONCE(!peer_pdd))
-			continue;
-		kfd_flush_tlb(peer_pdd, TLB_FLUSH_HEAVYWEIGHT);
-	}
-
 	kfree(devices_arr);
+	mutex_unlock(&p->mutex);
+
 	return 0;
 
 bind_process_to_device_failed:
@@ -1596,7 +1576,6 @@ get_mem_obj_from_handle_failed:
 unmap_memory_from_gpu_failed:
 	mutex_unlock(&p->mutex);
 copy_from_user_failed:
-sync_memory_failed:
 	kfree(devices_arr);
 	return err;
 }
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 21ec8a18cad2..8a2c6fc438c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -714,8 +714,7 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
 	if (err)
 		goto err_alloc_mem;
 
-	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem,
-			pdd->drm_priv, NULL);
+	err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
 	if (err)
 		goto err_map_mem;
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
index 9a71d8919bd6..c7b364e4a287 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c
@@ -2375,21 +2375,27 @@ static bool svm_range_skip_recover(struct svm_range *prange)
 
 static void
 svm_range_count_fault(struct amdgpu_device *adev, struct kfd_process *p,
-		      struct svm_range *prange, int32_t gpuidx)
+		      int32_t gpuidx)
 {
 	struct kfd_process_device *pdd;
 
-	if (gpuidx == MAX_GPU_INSTANCE)
-		/* fault is on different page of same range
-		 * or fault is skipped to recover later
-		 */
-		pdd = svm_range_get_pdd_by_adev(prange, adev);
-	else
-		/* fault recovered
-		 * or fault cannot recover because GPU no access on the range
-		 */
-		pdd = kfd_process_device_from_gpuidx(p, gpuidx);
+	/* fault is on different page of same range
+	 * or fault is skipped to recover later
+	 * or fault is on invalid virtual address
+	 */
+	if (gpuidx == MAX_GPU_INSTANCE) {
+		uint32_t gpuid;
+		int r;
+
+		r = kfd_process_gpuid_from_kgd(p, adev, &gpuid, &gpuidx);
+		if (r < 0)
+			return;
+	}
+
+	/* fault is recovered
+	 * or fault cannot recover because GPU no access on the range
+	 */
+	pdd = kfd_process_device_from_gpuidx(p, gpuidx);
 	if (pdd)
 		WRITE_ONCE(pdd->faults, pdd->faults + 1);
 }
@@ -2525,7 +2531,7 @@ out_unlock_svms:
 	mutex_unlock(&svms->lock);
 	mmap_read_unlock(mm);
 
-	svm_range_count_fault(adev, p, prange, gpuidx);
+	svm_range_count_fault(adev, p, gpuidx);
 
 	mmput(mm);
 out:
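Note on the kfd_svm.c hunk above: the reworked svm_range_count_fault() no longer needs the svm_range to find the per-device fault counter. When the caller passes the MAX_GPU_INSTANCE sentinel, it resolves the process-local GPU index from the faulting device via kfd_process_gpuid_from_kgd() and simply returns if the device does not belong to the process; otherwise it counts against the index it was given. The standalone sketch below models only that lookup-then-count pattern in userspace C; every type and helper in it (fake_pdd, fake_process, lookup_gpuidx_from_dev, count_fault) is an illustrative stand-in, not a KFD API.

/* Standalone model of the "resolve gpuidx, then count" pattern.
 * All types and helpers are illustrative stand-ins, not KFD code.
 */
#include <stdint.h>
#include <stdio.h>

#define MAX_GPU_INSTANCE 64	/* same sentinel role as in kfd_svm.c */

struct fake_pdd {		/* stands in for struct kfd_process_device */
	uint64_t faults;
};

struct fake_process {		/* stands in for struct kfd_process */
	struct fake_pdd pdds[MAX_GPU_INSTANCE];
	int n_pdds;
};

/* Stand-in for kfd_process_gpuid_from_kgd(): map a device id to a
 * process-local GPU index; fails if the device is not attached to
 * this process.
 */
static int lookup_gpuidx_from_dev(struct fake_process *p, int dev_id,
				  int32_t *gpuidx)
{
	if (dev_id < 0 || dev_id >= p->n_pdds)
		return -1;
	*gpuidx = dev_id;	/* identity mapping, for illustration only */
	return 0;
}

/* Mirrors the reworked control flow: when the caller could not determine
 * gpuidx (sentinel value), fall back to resolving it from the faulting
 * device before bumping the per-device counter; a known gpuidx is trusted,
 * as in the kernel function.
 */
static void count_fault(struct fake_process *p, int dev_id, int32_t gpuidx)
{
	if (gpuidx == MAX_GPU_INSTANCE) {
		if (lookup_gpuidx_from_dev(p, dev_id, &gpuidx) < 0)
			return;		/* unknown device: nothing to count */
	}
	p->pdds[gpuidx].faults++;
}

int main(void)
{
	struct fake_process p = { .n_pdds = 2 };

	count_fault(&p, 1, MAX_GPU_INSTANCE);	/* index resolved via fallback */
	count_fault(&p, 1, 1);			/* index already known */
	printf("faults on gpu 1: %llu\n",
	       (unsigned long long)p.pdds[1].faults);
	return 0;
}

Compiled with any C compiler (e.g. cc sketch.c && ./a.out) this prints "faults on gpu 1: 2"; the point of the fallback lookup is that the counter update stays in one place regardless of how the GPU index was obtained.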