diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
| -rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 43 | 
1 file changed, 23 insertions(+), 20 deletions(-)
| diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c index 365e3fb6a9e5..8516c814bc9b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c @@ -294,12 +294,8 @@ static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,  	}  	for (i = 0; i < p->gang_size; ++i) { -		ret = amdgpu_job_alloc(p->adev, num_ibs[i], &p->jobs[i], vm); -		if (ret) -			goto free_all_kdata; - -		ret = drm_sched_job_init(&p->jobs[i]->base, p->entities[i], -					 &fpriv->vm); +		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm, +				       num_ibs[i], &p->jobs[i]);  		if (ret)  			goto free_all_kdata;  	} @@ -433,7 +429,7 @@ static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,  			dma_fence_put(old);  		} -		r = amdgpu_sync_fence(&p->gang_leader->sync, fence); +		r = amdgpu_sync_fence(&p->sync, fence);  		dma_fence_put(fence);  		if (r)  			return r; @@ -455,9 +451,20 @@ static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,  		return r;  	} -	r = amdgpu_sync_fence(&p->gang_leader->sync, fence); -	dma_fence_put(fence); +	r = amdgpu_sync_fence(&p->sync, fence); +	if (r) +		goto error; +	/* +	 * When we have an explicit dependency it might be necessary to insert a +	 * pipeline sync to make sure that all caches etc are flushed and the +	 * next job actually sees the results from the previous one. 
+	 */ +	if (fence->context == p->gang_leader->base.entity->fence_context) +		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence); + +error: +	dma_fence_put(fence);  	return r;  } @@ -1106,7 +1113,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)  	if (r)  		return r; -	r = amdgpu_sync_fence(&job->sync, fpriv->prt_va->last_pt_update); +	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);  	if (r)  		return r; @@ -1117,7 +1124,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)  		if (r)  			return r; -		r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); +		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);  		if (r)  			return r;  	} @@ -1136,7 +1143,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)  		if (r)  			return r; -		r = amdgpu_sync_fence(&job->sync, bo_va->last_pt_update); +		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);  		if (r)  			return r;  	} @@ -1149,7 +1156,7 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)  	if (r)  		return r; -	r = amdgpu_sync_fence(&job->sync, vm->last_update); +	r = amdgpu_sync_fence(&p->sync, vm->last_update);  	if (r)  		return r; @@ -1181,7 +1188,6 @@ static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)  static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)  {  	struct amdgpu_fpriv *fpriv = p->filp->driver_priv; -	struct amdgpu_job *leader = p->gang_leader;  	struct amdgpu_bo_list_entry *e;  	unsigned int i;  	int r; @@ -1193,17 +1199,14 @@ static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)  		sync_mode = amdgpu_bo_explicit_sync(bo) ?  			
AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER; -		r = amdgpu_sync_resv(p->adev, &leader->sync, resv, sync_mode, +		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,  				     &fpriv->vm);  		if (r)  			return r;  	}  	for (i = 0; i < p->gang_size; ++i) { -		if (p->jobs[i] == leader) -			continue; - -		r = amdgpu_sync_clone(&leader->sync, &p->jobs[i]->sync); +		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);  		if (r)  			return r;  	} @@ -1251,7 +1254,7 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,  			continue;  		fence = &p->jobs[i]->base.s_fence->scheduled; -		r = amdgpu_sync_fence(&leader->sync, fence); +		r = drm_sched_job_add_dependency(&leader->base, fence);  		if (r)  			goto error_cleanup;  	} |