| author | Andres Rodriguez <andresx7@gmail.com> | 2017-02-16 00:47:32 -0500 |
|---|---|---|
| committer | Alex Deucher <alexander.deucher@amd.com> | 2017-05-31 16:49:01 -0400 |
| commit | effd924d2f3b9c52d5bd8137c3803e83f719a290 | |
| tree | 2705aaf11786cd79e5f17f4283cc7ff511d9f616 /drivers/gpu/drm/amd/amdgpu/amdgpu.h | |
| parent | ecd910eb1f091dd25f4a737a3bc50c0c8892eac7 | |
drm/amdgpu: untie user ring ids from kernel ring ids v6
Add amdgpu_queue_mgr, a mechanism that decouples usermode ring ids
from the kernel's ring ids.
The queue manager maintains a per-context map of user ring ids to
amdgpu_ring pointers. Once a map is created it is permanent (this is
required to maintain FIFO execution guarantees for a context's ring).
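As a minimal sketch of that guarantee, assuming a hypothetical helper
name (the mapper implementation itself lives outside this header-only
diff), the cached lookup could behave like this:

```c
/* Hypothetical sketch: once set, a user ring id always resolves to the
 * same amdgpu_ring, preserving FIFO ordering for that context's ring. */
static struct amdgpu_ring *
queue_mgr_lookup_cached(struct amdgpu_queue_mapper *mapper, int user_ring)
{
	struct amdgpu_ring *ring;

	mutex_lock(&mapper->lock);
	ring = mapper->queue_map[user_ring];	/* NULL until first mapped */
	mutex_unlock(&mapper->lock);
	return ring;
}
```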
Different queue map policies can be configured for each HW IP.
Currently all HW IPs use the identity mapper, i.e. kernel ring id is
equal to the user ring id.
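A hedged sketch of what the identity policy amounts to; the helper name
is hypothetical and only two HW IPs are shown, though the ring-array
fields follow existing amdgpu naming:

```c
/* Illustrative identity policy: user ring id N selects kernel ring N
 * of the requested HW IP, i.e. a straight passthrough. */
static int queue_mgr_identity_map(struct amdgpu_device *adev,
				  struct amdgpu_queue_mapper *mapper,
				  int user_ring,
				  struct amdgpu_ring **out_ring)
{
	switch (mapper->hw_ip) {
	case AMDGPU_HW_IP_GFX:
		*out_ring = &adev->gfx.gfx_ring[user_ring];
		break;
	case AMDGPU_HW_IP_COMPUTE:
		*out_ring = &adev->gfx.compute_ring[user_ring];
		break;
	default:
		return -EINVAL;	/* remaining IPs elided for brevity */
	}
	return 0;
}
```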
The purpose of this mechanism is to distribute the load across multiple
queues more effectively for HW IPs that support multiple rings.
Userspace clients are unable to check whether a specific resource is in
use by a different client. Therefore, it is up to the kernel driver to
make the optimal choice.
v2: remove amdgpu_queue_mapper_funcs
v3: made amdgpu_queue_mgr per context instead of per-fd
v4: add context_put on error paths
v5: rebase and include new IPs UVD_ENC & VCN_*
v6: drop unused amdgpu_ring_is_valid_index (Alex)
Reviewed-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Andres Rodriguez <andresx7@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu.h')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu.h | 27
1 file changed, 24 insertions, 3 deletions
```diff
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 1135f19c9e9c..aad1d7bf695a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -777,6 +777,29 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct amdgpu_ring *ring,
 		      struct dma_fence **f);
 
 /*
+ * Queue manager
+ */
+struct amdgpu_queue_mapper {
+	int		hw_ip;
+	struct mutex	lock;
+	/* protected by lock */
+	struct amdgpu_ring *queue_map[AMDGPU_MAX_RINGS];
+};
+
+struct amdgpu_queue_mgr {
+	struct amdgpu_queue_mapper mapper[AMDGPU_MAX_IP_NUM];
+};
+
+int amdgpu_queue_mgr_init(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_fini(struct amdgpu_device *adev,
+			  struct amdgpu_queue_mgr *mgr);
+int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
+			 struct amdgpu_queue_mgr *mgr,
+			 int hw_ip, int instance, int ring,
+			 struct amdgpu_ring **out_ring);
+
+/*
  * context related structures
  */
 
@@ -789,6 +812,7 @@ struct amdgpu_ctx_ring {
 struct amdgpu_ctx {
 	struct kref		refcount;
 	struct amdgpu_device	*adev;
+	struct amdgpu_queue_mgr queue_mgr;
 	unsigned		reset_counter;
 	spinlock_t		ring_lock;
 	struct dma_fence	**fences;
@@ -1909,9 +1933,6 @@ bool amdgpu_need_post(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
-int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
-		       u32 ip_instance, u32 ring,
-		       struct amdgpu_ring **out_ring);
 void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes);
 void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
 bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
```
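For context, a sketch of how a caller in the command-submission path
might consume the new entry point in place of the removed
amdgpu_cs_get_ring(); the wrapper below is hypothetical, since the
amdgpu_cs.c side of the change is outside this diffstat:

```c
/* Hypothetical caller: resolve a user-visible (ip, instance, ring)
 * triple to a kernel ring via the per-context queue manager. */
static int cs_resolve_user_ring(struct amdgpu_device *adev,
				struct amdgpu_ctx *ctx,
				u32 ip_type, u32 ip_instance, u32 user_ring,
				struct amdgpu_ring **out_ring)
{
	int r;

	r = amdgpu_queue_mgr_map(adev, &ctx->queue_mgr,
				 ip_type, ip_instance, user_ring, out_ring);
	if (r)
		/* per the v4 note, a caller holding a context reference
		 * would drop it on this error path, e.g. amdgpu_ctx_put() */
		return r;
	return 0;
}
```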