Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c	114
1 file changed, 101 insertions(+), 13 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index a11e44340b23..c184468e2b2b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -23,13 +23,41 @@
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_auth.h>
 #include "amdgpu.h"
+#include "amdgpu_sched.h"
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+				      enum amd_sched_priority priority)
+{
+	/* NORMAL and below are accessible by everyone */
+	if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+		return 0;
+
+	if (capable(CAP_SYS_NICE))
+		return 0;
+
+	if (drm_is_current_master(filp))
+		return 0;
+
+	return -EACCES;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+			   enum amd_sched_priority priority,
+			   struct drm_file *filp,
+			   struct amdgpu_ctx *ctx)
 {
 	unsigned i, j;
 	int r;
 
+	if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+		return -EINVAL;
+
+	r = amdgpu_ctx_priority_permit(filp, priority);
+	if (r)
+		return r;
+
 	memset(ctx, 0, sizeof(*ctx));
 	ctx->adev = adev;
 	kref_init(&ctx->refcount);
@@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
 	if (!ctx->fences)
 		return -ENOMEM;
 
+	mutex_init(&ctx->lock);
+
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		ctx->rings[i].sequence = 1;
 		ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
 	}
 
 	ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+	ctx->init_priority = priority;
+	ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
 
 	/* create context entity for each ring */
 	for (i = 0; i < adev->num_rings; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
 		struct amd_sched_rq *rq;
 
-		rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+		rq = &ring->sched.sched_rq[priority];
 
 		if (ring == &adev->gfx.kiq.ring)
 			continue;
@@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
 				      &ctx->rings[i].entity);
 
 	amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+	mutex_destroy(&ctx->lock);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 			    struct amdgpu_fpriv *fpriv,
+			    struct drm_file *filp,
+			    enum amd_sched_priority priority,
 			    uint32_t *id)
 {
 	struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
 		kfree(ctx);
 		return r;
 	}
+
 	*id = (uint32_t)r;
-	r = amdgpu_ctx_init(adev, ctx);
+	r = amdgpu_ctx_init(adev, priority, filp, ctx);
 	if (r) {
 		idr_remove(&mgr->ctx_handles, *id);
 		*id = 0;
@@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 {
 	int r;
 	uint32_t id;
+	enum amd_sched_priority priority;
 
 	union drm_amdgpu_ctx *args = data;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -200,10 +239,16 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 
 	r = 0;
 	id = args->in.ctx_id;
+	priority = amdgpu_to_sched_priority(args->in.priority);
+
+	/* For backwards compatibility reasons, we need to accept
+	 * ioctls with garbage in the priority field */
+	if (priority == AMD_SCHED_PRIORITY_INVALID)
+		priority = AMD_SCHED_PRIORITY_NORMAL;
 
 	switch (args->in.op) {
 	case AMDGPU_CTX_OP_ALLOC_CTX:
-		r = amdgpu_ctx_alloc(adev, fpriv, &id);
+		r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
 		args->out.alloc.ctx_id = id;
 		break;
 	case AMDGPU_CTX_OP_FREE_CTX:
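
A minimal user-space sketch (not part of this diff) of how the new allocation path is exercised, assuming the companion amdgpu_drm.h uapi change from the same series (the __s32 priority field in struct drm_amdgpu_ctx_in and the AMDGPU_CTX_PRIORITY_* constants); the helper name and error handling are illustrative only:

/* Hypothetical helper: ask the kernel for a high-priority context.
 * Unless the caller has CAP_SYS_NICE or is the current DRM master,
 * amdgpu_ctx_priority_permit() above rejects this with -EACCES. */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/amdgpu_drm.h>

static int alloc_high_prio_ctx(int fd, uint32_t *ctx_id)
{
	union drm_amdgpu_ctx args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

	if (ioctl(fd, DRM_IOCTL_AMDGPU_CTX, &args))
		return -errno;

	*ctx_id = args.out.alloc.ctx_id;
	return 0;
}

Old binaries keep working because amdgpu_to_sched_priority() maps unrecognized values to AMD_SCHED_PRIORITY_INVALID, which the ioctl handler above silently downgrades to AMD_SCHED_PRIORITY_NORMAL.
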
@@ -246,8 +291,8 @@ int amdgpu_ctx_put(struct amdgpu_ctx *ctx)
 	return 0;
 }
 
-uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
-			      struct dma_fence *fence)
+int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
+			      struct dma_fence *fence, uint64_t* handler)
 {
 	struct amdgpu_ctx_ring *cring = & ctx->rings[ring->idx];
 	uint64_t seq = cring->sequence;
@@ -256,12 +301,8 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 	idx = seq & (amdgpu_sched_jobs - 1);
 	other = cring->fences[idx];
 
-	if (other) {
-		signed long r;
-		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
-		if (r < 0)
-			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
-	}
+	if (other)
+		BUG_ON(!dma_fence_is_signaled(other));
 
 	dma_fence_get(fence);
 
@@ -271,8 +312,10 @@ uint64_t amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 	spin_unlock(&ctx->ring_lock);
 	dma_fence_put(other);
+	if (handler)
+		*handler = seq;
 
-	return seq;
+	return 0;
 }
 
 struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 				       struct amdgpu_ring *ring, uint64_t seq)
@@ -303,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
 	return fence;
 }
 
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+				  enum amd_sched_priority priority)
+{
+	int i;
+	struct amdgpu_device *adev = ctx->adev;
+	struct amd_sched_rq *rq;
+	struct amd_sched_entity *entity;
+	struct amdgpu_ring *ring;
+	enum amd_sched_priority ctx_prio;
+
+	ctx->override_priority = priority;
+
+	ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+			ctx->init_priority : ctx->override_priority;
+
+	for (i = 0; i < adev->num_rings; i++) {
+		ring = adev->rings[i];
+		entity = &ctx->rings[i].entity;
+		rq = &ring->sched.sched_rq[ctx_prio];
+
+		if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+			continue;
+
+		amd_sched_entity_set_rq(entity, rq);
+	}
+}
+
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+{
+	struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+	unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
+	struct dma_fence *other = cring->fences[idx];
+
+	if (other) {
+		signed long r;
+		r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+		if (r < 0) {
+			DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+			return r;
+		}
+	}
+
+	return 0;
+}
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 {
 	mutex_init(&mgr->lock);
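
A hedged sketch (not part of this diff) of the call ordering the new helpers imply: amdgpu_ctx_add_fence() now BUG()s if the fence slot it is about to reuse has not signaled, so the potentially blocking wait has to happen earlier on the submission path via amdgpu_ctx_wait_prev_fence(), outside ctx->ring_lock. Only the two amdgpu_ctx_* calls below come from this file; the wrapper name is hypothetical.

/* Illustrative pairing of the two helpers on a submit path. */
static int example_push_job(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
			    struct dma_fence *fence, uint64_t *seq)
{
	int r;

	/* May sleep: waits for the fence occupying the ring slot that
	 * the next submission will overwrite. */
	r = amdgpu_ctx_wait_prev_fence(ctx, ring->idx);
	if (r)
		return r;

	/* Non-blocking now: asserts the old slot fence has signaled,
	 * installs the new fence and returns its sequence number
	 * through *seq. */
	return amdgpu_ctx_add_fence(ctx, ring, fence, seq);
}

amdgpu_ctx_priority_override() is the runtime counterpart of the init-time priority: it re-targets each ring entity's run queue at the effective priority, and the new "amdgpu_sched.h" include suggests it is driven by a separate scheduler ioctl added elsewhere in this series.
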