diff options
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c | 116
1 file changed, 114 insertions(+), 2 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c index 49de3a3eebc7..80d6e132e409 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.c @@ -50,6 +50,26 @@   */  /** + * amdgpu_ring_max_ibs - Return max IBs that fit in a single submission. + * + * @type: ring type for which to return the limit. + */ +unsigned int amdgpu_ring_max_ibs(enum amdgpu_ring_type type) +{ +	switch (type) { +	case AMDGPU_RING_TYPE_GFX: +		/* Need to keep at least 192 on GFX7+ for old radv. */ +		return 192; +	case AMDGPU_RING_TYPE_COMPUTE: +		return 125; +	case AMDGPU_RING_TYPE_VCN_JPEG: +		return 16; +	default: +		return 49; +	} +} + +/**   * amdgpu_ring_alloc - allocate space on the ring buffer   *   * @ring: amdgpu_ring structure holding ring information @@ -58,7 +78,7 @@   * Allocate @ndw dwords in the ring buffer (all asics).   * Returns 0 on success, error on failure.   */ -int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned ndw) +int amdgpu_ring_alloc(struct amdgpu_ring *ring, unsigned int ndw)  {  	/* Align requested size with padding so unlock_commit can  	 * pad safely */ @@ -182,6 +202,7 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,  	int sched_hw_submission = amdgpu_sched_hw_submission;  	u32 *num_sched;  	u32 hw_ip; +	unsigned int max_ibs_dw;  	/* Set the hw submission limit higher for KIQ because  	 * it's used for a number of gfx/compute tasks by both @@ -290,6 +311,13 @@ int amdgpu_ring_init(struct amdgpu_device *adev, struct amdgpu_ring *ring,  		return r;  	} +	max_ibs_dw = ring->funcs->emit_frame_size + +		     amdgpu_ring_max_ibs(ring->funcs->type) * ring->funcs->emit_ib_size; +	max_ibs_dw = (max_ibs_dw + ring->funcs->align_mask) & ~ring->funcs->align_mask; + +	if (WARN_ON(max_ibs_dw > max_dw)) +		max_dw = max_ibs_dw; +  	ring->ring_size = roundup_pow_of_two(max_dw * 4 * sched_hw_submission);  	ring->buf_mask = (ring->ring_size / 4) - 1; @@ 
-361,6 +389,8 @@ void amdgpu_ring_fini(struct amdgpu_ring *ring)  		amdgpu_bo_free_kernel(&ring->ring_obj,  				      &ring->gpu_addr,  				      (void **)&ring->ring); +	} else { +		kfree(ring->fence_drv.fences);  	}  	dma_fence_put(ring->vmid_wait); @@ -403,11 +433,18 @@ void amdgpu_ring_emit_reg_write_reg_wait_helper(struct amdgpu_ring *ring,  bool amdgpu_ring_soft_recovery(struct amdgpu_ring *ring, unsigned int vmid,  			       struct dma_fence *fence)  { +	unsigned long flags; +  	ktime_t deadline = ktime_add_us(ktime_get(), 10000);  	if (amdgpu_sriov_vf(ring->adev) || !ring->funcs->soft_recovery || !fence)  		return false; +	spin_lock_irqsave(fence->lock, flags); +	if (!dma_fence_is_signaled_locked(fence)) +		dma_fence_set_error(fence, -ENODATA); +	spin_unlock_irqrestore(fence->lock, flags); +  	atomic_inc(&ring->adev->gpu_reset_counter);  	while (!dma_fence_is_signaled(fence) &&  	       ktime_to_ns(ktime_sub(deadline, ktime_get())) > 0) @@ -478,6 +515,70 @@ static const struct file_operations amdgpu_debugfs_ring_fops = {  	.llseek = default_llseek  }; +static ssize_t amdgpu_debugfs_mqd_read(struct file *f, char __user *buf, +				       size_t size, loff_t *pos) +{ +	struct amdgpu_ring *ring = file_inode(f)->i_private; +	volatile u32 *mqd; +	int r; +	uint32_t value, result; + +	if (*pos & 3 || size & 3) +		return -EINVAL; + +	result = 0; + +	r = amdgpu_bo_reserve(ring->mqd_obj, false); +	if (unlikely(r != 0)) +		return r; + +	r = amdgpu_bo_kmap(ring->mqd_obj, (void **)&mqd); +	if (r) { +		amdgpu_bo_unreserve(ring->mqd_obj); +		return r; +	} + +	while (size) { +		if (*pos >= ring->mqd_size) +			goto done; + +		value = mqd[*pos/4]; +		r = put_user(value, (uint32_t *)buf); +		if (r) +			goto done; +		buf += 4; +		result += 4; +		size -= 4; +		*pos += 4; +	} + +done: +	amdgpu_bo_kunmap(ring->mqd_obj); +	mqd = NULL; +	amdgpu_bo_unreserve(ring->mqd_obj); +	if (r) +		return r; + +	return result; +} + +static const struct file_operations amdgpu_debugfs_mqd_fops = { 
+	.owner = THIS_MODULE, +	.read = amdgpu_debugfs_mqd_read, +	.llseek = default_llseek +}; + +static int amdgpu_debugfs_ring_error(void *data, u64 val) +{ +	struct amdgpu_ring *ring = data; + +	amdgpu_fence_driver_set_error(ring, val); +	return 0; +} + +DEFINE_DEBUGFS_ATTRIBUTE_SIGNED(amdgpu_debugfs_error_fops, NULL, +				amdgpu_debugfs_ring_error, "%lld\n"); +  #endif  void amdgpu_debugfs_ring_init(struct amdgpu_device *adev, @@ -489,10 +590,21 @@ void amdgpu_debugfs_ring_init(struct amdgpu_device *adev,  	char name[32];  	sprintf(name, "amdgpu_ring_%s", ring->name); -	debugfs_create_file_size(name, S_IFREG | S_IRUGO, root, ring, +	debugfs_create_file_size(name, S_IFREG | 0444, root, ring,  				 &amdgpu_debugfs_ring_fops,  				 ring->ring_size + 12); +	if (ring->mqd_obj) { +		sprintf(name, "amdgpu_mqd_%s", ring->name); +		debugfs_create_file_size(name, S_IFREG | 0444, root, ring, +					 &amdgpu_debugfs_mqd_fops, +					 ring->mqd_size); +	} + +	sprintf(name, "amdgpu_error_%s", ring->name); +	debugfs_create_file(name, 0200, root, ring, +			    &amdgpu_debugfs_error_fops); +  #endif  }  |