author    | Monk Liu <Monk.Liu@amd.com>              | 2020-07-27 15:20:12 +0800
committer | Alex Deucher <alexander.deucher@amd.com> | 2020-08-04 17:27:29 -0400
commit    | a300de40f66b87fa90703c94ffb22917f98eb902 (patch)
tree      | e1690029e39055ed5af5896a15ede13c51eba6a0 /drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
parent    | 9b856defbe355f886f99777e667506841bd267a8 (diff)
drm/amdgpu: introduce a new parameter to configure how many KCQs we want (v5)
what:
saving and restoring the MQDs of the KCQs (kernel compute queues)
costs a lot of clock cycles during world switch, which significantly
hurts multi-VF performance
how:
introduce a parameter to control the number of KCQs, so the
performance drop can be avoided when no kernel compute queue is
needed (a hedged sketch of such a parameter is shown below, after the tags)
notes:
this parameter only affects gfx 8/9/10
v2:
refine namings
v3:
choose queues for each ring so that they spread across the pipes as
evenly as possible
v4:
fix indentation
some cleanups in gfx_compute_queue_acquire()
v5:
further indentation fixes
more cleanups in gfx_compute_queue_acquire()
TODO:
in the future we will let the hypervisor driver set this parameter
automatically, so there will be no need for the user to configure it
through modprobe in the virtual machine
Signed-off-by: Monk Liu <Monk.Liu@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Acked-by: Christian König <christian.koenig@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
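The diffstat below is limited to amdgpu_gfx.c, so the declaration of the new parameter itself is not visible on this page. As a hedged, illustrative sketch only (the parameter name num_kcq, the backing variable, and the clamp are assumptions, not taken from this diff), a knob of this kind is typically wired up along these lines:

```c
#include <linux/module.h>
#include <linux/moduleparam.h>

/*
 * Illustrative sketch, not part of this commit's visible diff.
 * Assumed: the parameter is called num_kcq, is backed by amdgpu_num_kcq,
 * and the sanitized value later feeds adev->gfx.num_compute_rings, which
 * amdgpu_gfx_compute_queue_acquire() uses as an upper bound.
 */
static int amdgpu_num_kcq = -1;	/* -1: let the driver pick the default */
module_param_named(num_kcq, amdgpu_num_kcq, int, 0444);
MODULE_PARM_DESC(num_kcq,
		 "number of kernel compute queues to use (0..8, -1 = default)");

/* Clamp the user-supplied value to the range the MEC hardware supports. */
static int amdgpu_gfx_sanitize_num_kcq(int requested)
{
	if (requested < 0 || requested > 8)
		return 8;	/* fall back to the historical default of 8 KCQs */
	return requested;
}
```

With a knob like this, a virtual function that needs no kernel compute queues could be loaded with num_kcq=0, leaving the MEC queue bitmap empty and skipping the KCQ MQD save/restore during world switch, which is the multi-VF cost the commit message describes.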
Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c')
-rw-r--r-- | drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 49
1 file changed, 19 insertions, 30 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
index 8eff0173360d..0cd9de69932b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
@@ -202,40 +202,29 @@ bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
 void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
 {
-	int i, queue, pipe, mec;
+	int i, queue, pipe;
 	bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
-
-	/* policy for amdgpu compute queue ownership */
-	for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
-		queue = i % adev->gfx.mec.num_queue_per_pipe;
-		pipe = (i / adev->gfx.mec.num_queue_per_pipe)
-			% adev->gfx.mec.num_pipe_per_mec;
-		mec = (i / adev->gfx.mec.num_queue_per_pipe)
-			/ adev->gfx.mec.num_pipe_per_mec;
-
-		/* we've run out of HW */
-		if (mec >= adev->gfx.mec.num_mec)
-			break;
-
-		if (multipipe_policy) {
-			/* policy: amdgpu owns the first two queues of the first MEC */
-			if (mec == 0 && queue < 2)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
-		} else {
-			/* policy: amdgpu owns all queues in the first pipe */
-			if (mec == 0 && pipe == 0)
-				set_bit(i, adev->gfx.mec.queue_bitmap);
+	int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
+				     adev->gfx.mec.num_queue_per_pipe,
+				     adev->gfx.num_compute_rings);
+
+	if (multipipe_policy) {
+		/* policy: make queues evenly cross all pipes on MEC1 only */
+		for (i = 0; i < max_queues_per_mec; i++) {
+			pipe = i % adev->gfx.mec.num_pipe_per_mec;
+			queue = (i / adev->gfx.mec.num_pipe_per_mec) %
+				adev->gfx.mec.num_queue_per_pipe;
+
+			set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
+				adev->gfx.mec.queue_bitmap);
 		}
+	} else {
+		/* policy: amdgpu owns all queues in the given pipe */
+		for (i = 0; i < max_queues_per_mec; ++i)
+			set_bit(i, adev->gfx.mec.queue_bitmap);
 	}
-
-	/* update the number of active compute rings */
-	adev->gfx.num_compute_rings =
-		bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
-
-	/* If you hit this case and edited the policy, you probably just
-	 * need to increase AMDGPU_MAX_COMPUTE_RINGS */
-	if (WARN_ON(adev->gfx.num_compute_rings > AMDGPU_MAX_COMPUTE_RINGS))
-		adev->gfx.num_compute_rings = AMDGPU_MAX_COMPUTE_RINGS;
+	dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
 }
 
 void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
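To see how the new multipipe policy spreads the acquired queues, note that the bit index it sets is pipe * num_queue_per_pipe + queue while i walks round-robin over the pipes. The following stand-alone C sketch reproduces only that arithmetic; the pipe/queue counts are made-up example values (not read from hardware) and the macro names are invented for illustration:

```c
#include <stdio.h>

/* Example values only; the driver reads these from adev->gfx.mec.* */
#define NUM_PIPE_PER_MEC   4
#define NUM_QUEUE_PER_PIPE 8

int main(void)
{
	int num_compute_rings = 8;	/* e.g. what the new parameter requests */
	int max_queues = NUM_PIPE_PER_MEC * NUM_QUEUE_PER_PIPE;
	int i, pipe, queue;

	/* mirrors min(pipes * queues_per_pipe, num_compute_rings) above */
	if (num_compute_rings < max_queues)
		max_queues = num_compute_rings;

	for (i = 0; i < max_queues; i++) {
		pipe = i % NUM_PIPE_PER_MEC;
		queue = (i / NUM_PIPE_PER_MEC) % NUM_QUEUE_PER_PIPE;
		/* same bit index the kernel sets in mec.queue_bitmap */
		printf("ring %d -> pipe %d, queue %d (bit %d)\n",
		       i, pipe, queue, pipe * NUM_QUEUE_PER_PIPE + queue);
	}
	return 0;
}
```

With 8 rings and 4 pipes this picks queue 0 and queue 1 on every pipe; with only 2 rings requested it picks queue 0 on pipe 0 and pipe 1, so the selected queues cross the pipes of MEC1 as evenly as possible, matching the v3 note in the commit message.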