author     Andres Rodriguez <andresx7@gmail.com>  2017-02-16 00:47:32 -0500
committer  Alex Deucher <alexander.deucher@amd.com>  2017-05-31 16:49:01 -0400
commit     effd924d2f3b9c52d5bd8137c3803e83f719a290 (patch)
tree       2705aaf11786cd79e5f17f4283cc7ff511d9f616 /drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
parent     ecd910eb1f091dd25f4a737a3bc50c0c8892eac7 (diff)
drm/amdgpu: untie user ring ids from kernel ring ids v6
Add amdgpu_queue_mgr, a mechanism that allows disjointing usermode's
ring ids from the kernel's ring ids.

The queue manager maintains a per-file descriptor map of user ring ids
to amdgpu_ring pointers. Once a map is created it is permanent (this is
required to maintain FIFO execution guarantees for a context's ring).

Different queue map policies can be configured for each HW IP.
Currently all HW IPs use the identity mapper, i.e. kernel ring id is
equal to the user ring id.

The purpose of this mechanism is to distribute the load across multiple
queues more effectively for HW IPs that support multiple rings.
Userspace clients are unable to check whether a specific resource is in
use by a different client. Therefore, it is up to the kernel driver to
make the optimal choice.

v2: remove amdgpu_queue_mapper_funcs
v3: made amdgpu_queue_mgr per context instead of per-fd
v4: add context_put on error paths
v5: rebase and include new IPs UVD_ENC & VCN_*
v6: drop unused amdgpu_ring_is_valid_index (Alex)

Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
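As an illustration of the identity mapping policy the message describes,
here is a minimal standalone C sketch. Every name in it (sketch_ring,
sketch_queue_mapper, sketch_map, SKETCH_MAX_RINGS) is invented for this
example; the real implementation lives in amdgpu_queue_mgr.c, which is
not part of this diff:

#include <errno.h>
#include <stddef.h>

#define SKETCH_MAX_RINGS 16	/* arbitrary bound, for the sketch only */

struct sketch_ring {
	unsigned kernel_ring_id;
};

struct sketch_queue_mapper {
	/* user ring id -> kernel ring; an entry is never rebound once set,
	 * which preserves FIFO execution guarantees for a context's ring */
	struct sketch_ring *queue_map[SKETCH_MAX_RINGS];
};

static int sketch_map(struct sketch_queue_mapper *mapper,
		      struct sketch_ring *rings, unsigned num_rings,
		      unsigned user_ring, struct sketch_ring **out_ring)
{
	if (user_ring >= num_rings || user_ring >= SKETCH_MAX_RINGS)
		return -EINVAL;

	/* identity policy: kernel ring id == user ring id */
	if (mapper->queue_map[user_ring] == NULL)
		mapper->queue_map[user_ring] = &rings[user_ring];

	*out_ring = mapper->queue_map[user_ring];
	return 0;
}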
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c  128
1 file changed, 26 insertions(+), 102 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 9a0b2e5f3c11..b17635c1b711 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -30,90 +30,6 @@
#include "amdgpu.h"
#include "amdgpu_trace.h"
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
- u32 ip_instance, u32 ring,
- struct amdgpu_ring **out_ring)
-{
- /* Right now all IPs have only one instance - multiple rings. */
- if (ip_instance != 0) {
- DRM_ERROR("invalid ip instance: %d\n", ip_instance);
- return -EINVAL;
- }
-
- switch (ip_type) {
- default:
- DRM_ERROR("unknown ip type: %d\n", ip_type);
- return -EINVAL;
- case AMDGPU_HW_IP_GFX:
- if (ring < adev->gfx.num_gfx_rings) {
- *out_ring = &adev->gfx.gfx_ring[ring];
- } else {
- DRM_ERROR("only %d gfx rings are supported now\n",
- adev->gfx.num_gfx_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_COMPUTE:
- if (ring < adev->gfx.num_compute_rings) {
- *out_ring = &adev->gfx.compute_ring[ring];
- } else {
- DRM_ERROR("only %d compute rings are supported now\n",
- adev->gfx.num_compute_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_DMA:
- if (ring < adev->sdma.num_instances) {
- *out_ring = &adev->sdma.instance[ring].ring;
- } else {
- DRM_ERROR("only %d SDMA rings are supported\n",
- adev->sdma.num_instances);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_UVD:
- *out_ring = &adev->uvd.ring;
- break;
- case AMDGPU_HW_IP_VCE:
- if (ring < adev->vce.num_rings){
- *out_ring = &adev->vce.ring[ring];
- } else {
- DRM_ERROR("only %d VCE rings are supported\n", adev->vce.num_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_UVD_ENC:
- if (ring < adev->uvd.num_enc_rings){
- *out_ring = &adev->uvd.ring_enc[ring];
- } else {
- DRM_ERROR("only %d UVD ENC rings are supported\n",
- adev->uvd.num_enc_rings);
- return -EINVAL;
- }
- break;
- case AMDGPU_HW_IP_VCN_DEC:
- *out_ring = &adev->vcn.ring_dec;
- break;
- case AMDGPU_HW_IP_VCN_ENC:
- if (ring < adev->vcn.num_enc_rings){
- *out_ring = &adev->vcn.ring_enc[ring];
- } else {
- DRM_ERROR("only %d VCN ENC rings are supported\n",
- adev->vcn.num_enc_rings);
- return -EINVAL;
- }
- break;
- }
-
- if (!(*out_ring && (*out_ring)->adev)) {
- DRM_ERROR("Ring %d is not initialized on IP %d\n",
- ring, ip_type);
- return -EINVAL;
- }
-
- return 0;
-}
-
static int amdgpu_cs_user_fence_chunk(struct amdgpu_cs_parser *p,
struct drm_amdgpu_cs_chunk_fence *data,
uint32_t *offset)
@@ -928,9 +844,8 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
return -EINVAL;
}
- r = amdgpu_cs_get_ring(adev, chunk_ib->ip_type,
- chunk_ib->ip_instance, chunk_ib->ring,
- &ring);
+ r = amdgpu_queue_mgr_map(adev, &parser->ctx->queue_mgr, chunk_ib->ip_type,
+ chunk_ib->ip_instance, chunk_ib->ring, &ring);
if (r)
return r;
@@ -1032,16 +947,19 @@ static int amdgpu_cs_dependencies(struct amdgpu_device *adev,
struct amdgpu_ctx *ctx;
struct dma_fence *fence;
- r = amdgpu_cs_get_ring(adev, deps[j].ip_type,
- deps[j].ip_instance,
- deps[j].ring, &ring);
- if (r)
- return r;
-
ctx = amdgpu_ctx_get(fpriv, deps[j].ctx_id);
if (ctx == NULL)
return -EINVAL;
+ r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
+ deps[j].ip_type,
+ deps[j].ip_instance,
+ deps[j].ring, &ring);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return r;
+ }
+
fence = amdgpu_ctx_get_fence(ctx, ring,
deps[j].handle);
if (IS_ERR(fence)) {
@@ -1177,15 +1095,19 @@ int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
if (amdgpu_kms_vram_lost(adev, fpriv))
return -ENODEV;
- r = amdgpu_cs_get_ring(adev, wait->in.ip_type, wait->in.ip_instance,
- wait->in.ring, &ring);
- if (r)
- return r;
ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
if (ctx == NULL)
return -EINVAL;
+ r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
+ wait->in.ip_type, wait->in.ip_instance,
+ wait->in.ring, &ring);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return r;
+ }
+
fence = amdgpu_ctx_get_fence(ctx, ring, wait->in.handle);
if (IS_ERR(fence))
r = PTR_ERR(fence);
@@ -1221,15 +1143,17 @@ static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
struct dma_fence *fence;
int r;
- r = amdgpu_cs_get_ring(adev, user->ip_type, user->ip_instance,
- user->ring, &ring);
- if (r)
- return ERR_PTR(r);
-
ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
if (ctx == NULL)
return ERR_PTR(-EINVAL);
+ r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, user->ip_type,
+ user->ip_instance, user->ring, &ring);
+ if (r) {
+ amdgpu_ctx_put(ctx);
+ return ERR_PTR(r);
+ }
+
fence = amdgpu_ctx_get_fence(ctx, ring, user->seq_no);
amdgpu_ctx_put(ctx);
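One pattern recurs in the hunks above: because the ring map now lives in
the per-context queue_mgr, the context must be looked up before
amdgpu_queue_mgr_map can be called, and the context reference must be
dropped on every mapping failure (the v4 changelog item). A condensed
sketch of that ordering follows; the wrapper name sketch_resolve_fence
is hypothetical, but the calls it makes are exactly the ones visible in
this diff:

/* Assumes the kernel headers already included by amdgpu_cs.c. */
static int sketch_resolve_fence(struct amdgpu_device *adev,
				struct amdgpu_fpriv *fpriv,
				u32 ctx_id, u32 ip_type, u32 ip_instance,
				u32 user_ring, u64 handle,
				struct dma_fence **out_fence)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ring *ring;
	int r;

	ctx = amdgpu_ctx_get(fpriv, ctx_id);	/* takes a reference */
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr, ip_type,
				 ip_instance, user_ring, &ring);
	if (r) {
		amdgpu_ctx_put(ctx);	/* v4 fix: drop the ref on error */
		return r;
	}

	*out_fence = amdgpu_ctx_get_fence(ctx, ring, handle);
	amdgpu_ctx_put(ctx);
	return IS_ERR(*out_fence) ? PTR_ERR(*out_fence) : 0;
}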