From 8dc1db3172ae2f17ae71e33b608a33411ce8a1aa Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 14 Sep 2022 16:39:48 +0800 Subject: drm/amdkfd: Introduce kfd_node struct (v5) Introduce a new structure, kfd_node, which will now represent a compute node. kfd_node is carved out of kfd_dev structure. kfd_dev struct now will become the parent of kfd_node, and will store common resources such as doorbells, GTT sub-alloctor etc. kfd_node struct will store all resources specific to a compute node, such as device queue manager, interrupt handling etc. This is the first step in adding compute partition support in KFD. v2: introduce kfd_node struct to gc v11 (Hawking) v3: make reference to kfd_dev struct through kfd_node (Morris) v4: use kfd_node instead for kfd isr/mqd functions (Morris) v5: rebase (Alex) Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Hawking Zhang Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 100 ++++++++++----------- 1 file changed, 50 insertions(+), 50 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 7a95698d83f7..34977d89f01c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -74,31 +74,31 @@ enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type) static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe) { int i; - int pipe_offset = (mec * dqm->dev->shared_resources.num_pipe_per_mec - + pipe) * dqm->dev->shared_resources.num_queue_per_pipe; + int pipe_offset = (mec * dqm->dev->kfd->shared_resources.num_pipe_per_mec + + pipe) * dqm->dev->kfd->shared_resources.num_queue_per_pipe; /* queue is available for KFD usage if bit is 1 */ - for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i) + for (i = 0; i < dqm->dev->kfd->shared_resources.num_queue_per_pipe; ++i) if (test_bit(pipe_offset + i, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) return true; return false; } unsigned int get_cp_queues_num(struct device_queue_manager *dqm) { - return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap, + return bitmap_weight(dqm->dev->kfd->shared_resources.cp_queue_bitmap, KGD_MAX_QUEUES); } unsigned int get_queues_per_pipe(struct device_queue_manager *dqm) { - return dqm->dev->shared_resources.num_queue_per_pipe; + return dqm->dev->kfd->shared_resources.num_queue_per_pipe; } unsigned int get_pipes_per_mec(struct device_queue_manager *dqm) { - return dqm->dev->shared_resources.num_pipe_per_mec; + return dqm->dev->kfd->shared_resources.num_pipe_per_mec; } static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) @@ -110,18 +110,18 @@ static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm) unsigned int get_num_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_sdma_engines(dqm->dev) * - dqm->dev->device_info.num_sdma_queues_per_engine; + dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) { return kfd_get_num_xgmi_sdma_engines(dqm->dev) * - dqm->dev->device_info.num_sdma_queues_per_engine; + dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } static inline uint64_t 
get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm) { - return dqm->dev->device_info.reserved_sdma_queues_bitmap; + return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; } void program_sh_mem_settings(struct device_queue_manager *dqm, @@ -330,7 +330,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q, uint32_t const *restore_id) { - struct kfd_dev *dev = qpd->dqm->dev; + struct kfd_node *dev = qpd->dqm->dev; if (!KFD_IS_SOC15(dev)) { /* On pre-SOC15 chips we need to use the queue ID to @@ -349,7 +349,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, * for a SDMA engine is 512. */ - uint32_t *idx_offset = dev->shared_resources.sdma_doorbell_idx; + uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx; uint32_t valid_id = idx_offset[q->properties.sdma_engine_id] + (q->properties.sdma_queue_id & 1) * KFD_QUEUE_DOORBELL_MIRROR_OFFSET @@ -382,7 +382,7 @@ static int allocate_doorbell(struct qcm_process_device *qpd, } q->properties.doorbell_off = - kfd_get_doorbell_dw_offset_in_bar(dev, qpd_to_pdd(qpd), + kfd_get_doorbell_dw_offset_in_bar(dev->kfd, qpd_to_pdd(qpd), q->doorbell_id); return 0; } @@ -391,7 +391,7 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, struct queue *q) { unsigned int old; - struct kfd_dev *dev = qpd->dqm->dev; + struct kfd_node *dev = qpd->dqm->dev; if (!KFD_IS_SOC15(dev) || q->properties.type == KFD_QUEUE_TYPE_SDMA || @@ -441,7 +441,7 @@ static int allocate_vmid(struct device_queue_manager *dqm, program_sh_mem_settings(dqm, qpd); - if (KFD_IS_SOC15(dqm->dev) && dqm->dev->cwsr_enabled) + if (KFD_IS_SOC15(dqm->dev) && dqm->dev->kfd->cwsr_enabled) program_trap_handler_settings(dqm, qpd); /* qpd->page_table_base is set earlier when register_process() @@ -460,7 +460,7 @@ static int allocate_vmid(struct device_queue_manager *dqm, return 0; } -static int flush_texture_cache_nocpsch(struct kfd_dev *kdev, +static int flush_texture_cache_nocpsch(struct kfd_node *kdev, struct qcm_process_device *qpd) { const struct packet_manager_funcs *pmf = qpd->dqm->packet_mgr.pmf; @@ -661,7 +661,7 @@ static inline void deallocate_hqd(struct device_queue_manager *dqm, #define SQ_IND_CMD_CMD_KILL 0x00000003 #define SQ_IND_CMD_MODE_BROADCAST 0x00000001 -static int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p) +static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process *p) { int status = 0; unsigned int vmid; @@ -837,7 +837,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, /* Make sure the queue is unmapped before updating the MQD */ if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false); else if (prev_active) @@ -858,7 +858,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, } retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, - (dqm->dev->cwsr_enabled ? + (dqm->dev->kfd->cwsr_enabled ? 
KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -895,7 +895,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, } if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = map_queues_cpsch(dqm); else if (q->properties.is_active) retval = add_queue_mes(dqm, q, &pdd->qpd); @@ -951,7 +951,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, continue; retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd, - (dqm->dev->cwsr_enabled ? + (dqm->dev->kfd->cwsr_enabled ? KFD_PREEMPT_TYPE_WAVEFRONT_SAVE : KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN), KFD_UNMAP_LATENCY_MS, q->pipe, q->queue); @@ -993,7 +993,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, q->properties.is_active = false; decrement_queue_count(dqm, qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = remove_queue_mes(dqm, q, qpd); if (retval) { pr_err("Failed to evict queue %d\n", @@ -1003,7 +1003,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, } } pdd->last_evict_timestamp = get_jiffies_64(); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, qpd->is_debug ? KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : @@ -1132,7 +1132,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, q->properties.is_active = true; increment_queue_count(dqm, &pdd->qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = add_queue_mes(dqm, q, qpd); if (retval) { pr_err("Failed to restore queue %d\n", @@ -1141,7 +1141,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, } } } - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); qpd->evicted = 0; @@ -1282,7 +1282,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) if (test_bit(pipe_offset + queue, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) dqm->allocated_queues[pipe] |= 1 << queue; } @@ -1426,14 +1426,14 @@ static int set_sched_resources(struct device_queue_manager *dqm) int i, mec; struct scheduling_resources res; - res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap; + res.vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; res.queue_mask = 0; for (i = 0; i < KGD_MAX_QUEUES; ++i) { - mec = (i / dqm->dev->shared_resources.num_queue_per_pipe) - / dqm->dev->shared_resources.num_pipe_per_mec; + mec = (i / dqm->dev->kfd->shared_resources.num_queue_per_pipe) + / dqm->dev->kfd->shared_resources.num_pipe_per_mec; - if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap)) + if (!test_bit(i, dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; /* only acquire queues from the first MEC */ @@ -1489,7 +1489,7 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm_lock(dqm); - if (!dqm->dev->shared_resources.enable_mes) { + if (!dqm->dev->kfd->shared_resources.enable_mes) { retval = pm_init(&dqm->packet_mgr, dqm); if (retval) goto fail_packet_manager_init; @@ -1516,14 +1516,14 @@ static int start_cpsch(struct device_queue_manager *dqm) 
dqm->is_hws_hang = false; dqm->is_resetting = false; dqm->sched_running = true; - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); dqm_unlock(dqm); return 0; fail_allocate_vidmem: fail_set_sched_resources: - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_uninit(&dqm->packet_mgr, false); fail_packet_manager_init: dqm_unlock(dqm); @@ -1541,7 +1541,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) } if (!dqm->is_hws_hang) { - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false); else remove_all_queues_mes(dqm); @@ -1550,11 +1550,11 @@ static int stop_cpsch(struct device_queue_manager *dqm) hanging = dqm->is_hws_hang || dqm->is_resetting; dqm->sched_running = false; - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_release_ib(&dqm->packet_mgr); kfd_gtt_sa_free(dqm->dev, dqm->fence_mem); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) pm_uninit(&dqm->packet_mgr, hanging); dqm_unlock(dqm); @@ -1673,7 +1673,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (q->properties.is_active) { increment_queue_count(dqm, qpd, q); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); else @@ -1893,7 +1893,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, list_del(&q->list); qpd->queue_count--; if (q->properties.is_active) { - if (!dqm->dev->shared_resources.enable_mes) { + if (!dqm->dev->kfd->shared_resources.enable_mes) { decrement_queue_count(dqm, qpd, q); retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); @@ -2056,7 +2056,7 @@ static int get_wave_state(struct device_queue_manager *dqm, mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE || - q->properties.is_active || !q->device->cwsr_enabled || + q->properties.is_active || !q->device->kfd->cwsr_enabled || !mqd_mgr->get_wave_state) { dqm_unlock(dqm); return -EINVAL; @@ -2105,7 +2105,7 @@ static int checkpoint_mqd(struct device_queue_manager *dqm, dqm_lock(dqm); - if (q->properties.is_active || !q->device->cwsr_enabled) { + if (q->properties.is_active || !q->device->kfd->cwsr_enabled) { r = -EINVAL; goto dqm_unlock; } @@ -2158,7 +2158,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, if (q->properties.is_active) { decrement_queue_count(dqm, qpd, q); - if (dqm->dev->shared_resources.enable_mes) { + if (dqm->dev->kfd->shared_resources.enable_mes) { retval = remove_queue_mes(dqm, q, qpd); if (retval) pr_err("Failed to remove queue %d\n", @@ -2180,7 +2180,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, } } - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, filter, 0); if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { @@ -2242,11 +2242,11 @@ out_free: static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) { int retval; - struct kfd_dev *dev = dqm->dev; + struct kfd_node *dev = dqm->dev; struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd; uint32_t size = 
dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * get_num_all_sdma_engines(dqm) * - dev->device_info.num_sdma_queues_per_engine + + dev->kfd->device_info.num_sdma_queues_per_engine + dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, @@ -2256,7 +2256,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) return retval; } -struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) +struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) { struct device_queue_manager *dqm; @@ -2373,7 +2373,7 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev) if (init_mqd_managers(dqm)) goto out_free; - if (!dev->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { + if (!dev->kfd->shared_resources.enable_mes && allocate_hiq_sdma_mqd(dqm)) { pr_err("Failed to allocate hiq sdma mqd trunk buffer\n"); goto out_free; } @@ -2386,7 +2386,7 @@ out_free: return NULL; } -static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, +static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd) { WARN(!mqd, "No hiq sdma mqd trunk to free"); @@ -2397,7 +2397,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev, void device_queue_manager_uninit(struct device_queue_manager *dqm) { dqm->ops.uninitialize(dqm); - if (!dqm->dev->shared_resources.enable_mes) + if (!dqm->dev->kfd->shared_resources.enable_mes) deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd); kfree(dqm); } @@ -2479,7 +2479,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { if (!test_bit(pipe_offset + queue, - dqm->dev->shared_resources.cp_queue_bitmap)) + dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; r = dqm->dev->kfd2kgd->hqd_dump( @@ -2497,7 +2497,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { for (queue = 0; - queue < dqm->dev->device_info.num_sdma_queues_per_engine; + queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine; queue++) { r = dqm->dev->kfd2kgd->hqd_sdma_dump( dqm->dev->adev, pipe, queue, &dump, &n_regs); -- cgit From 74c5b85da75475c73a8f040397610fbfcc2c3e78 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 16:33:38 -0400 Subject: drm/amdkfd: Add spatial partitioning support in KFD This patch introduces multi-partition support in KFD. This patch includes: - Support for maximum 8 spatial partitions in KFD. - Initialize one HIQ per partition. - Management of VMID range depending on partition mode. - Management of doorbell aperture space between all partitions. - Each partition does its own queue management, interrupt handling, SMI event reporting. - IOMMU, if enabled with multiple partitions, will only work on first partition. - SPM is only supported on the first partition. - Currently, there is no support for resetting individual partitions. All partitions will reset together. 
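As a rough sketch of the resulting layout (condensed from the kfd_priv.h
and kfd_device.c hunks below; only the partitioning-related fields are
shown), the shared kfd_dev now carries an array of per-partition nodes
and callers loop over them:

    #define MAX_KFD_NODES 8

    struct kfd_dev {
            /* ... common resources: doorbells, GTT sub-allocator, ... */
            struct kfd_node *nodes[MAX_KFD_NODES];
            unsigned int num_nodes;
    };

    /* typical per-partition iteration, as in kgd2kfd_suspend() below */
    for (i = 0; i < kfd->num_nodes; i++)
            kfd->nodes[i]->dqm->ops.stop(kfd->nodes[i]->dqm);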
Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 214 ++++++++++++++++----- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 13 +- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 16 +- drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c | 24 +-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 8 +- 7 files changed, 208 insertions(+), 77 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 23d9a7f77055..37c6dc5c37bf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -567,23 +567,27 @@ kfd_interrupt_error: return err; } -static void kfd_cleanup_node(struct kfd_dev *kfd) +static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes) { - struct kfd_node *knode = kfd->node; - - device_queue_manager_uninit(knode->dqm); - kfd_interrupt_exit(knode); - kfd_topology_remove_device(knode); - if (knode->gws) - amdgpu_amdkfd_free_gws(knode->adev, knode->gws); - kfree(knode); - kfd->node = NULL; + struct kfd_node *knode; + unsigned int i; + + for (i = 0; i < num_nodes; i++) { + knode = kfd->nodes[i]; + device_queue_manager_uninit(knode->dqm); + kfd_interrupt_exit(knode); + kfd_topology_remove_device(knode); + if (knode->gws) + amdgpu_amdkfd_free_gws(knode->adev, knode->gws); + kfree(knode); + kfd->nodes[i] = NULL; + } } bool kgd2kfd_device_init(struct kfd_dev *kfd, const struct kgd2kfd_shared_resources *gpu_resources) { - unsigned int size, map_process_packet_size; + unsigned int size, map_process_packet_size, i; struct kfd_node *node; uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd; unsigned int max_proc_per_quantum; @@ -596,9 +600,18 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, KGD_ENGINE_SDMA1); kfd->shared_resources = *gpu_resources; - first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; - last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; - vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; + if (kfd->adev->gfx.num_xcd == 0 || kfd->adev->gfx.num_xcd == 1 || + kfd->adev->gfx.num_xcc_per_xcp == 0) + kfd->num_nodes = 1; + else + kfd->num_nodes = + kfd->adev->gfx.num_xcd/kfd->adev->gfx.num_xcc_per_xcp; + if (kfd->num_nodes == 0) { + dev_err(kfd_device, + "KFD num nodes cannot be 0, GC inst: %d, num_xcc_in_node: %d\n", + kfd->adev->gfx.num_xcd, kfd->adev->gfx.num_xcc_per_xcp); + goto out; + } /* Allow BIF to recode atomics to PCIe 3.0 AtomicOps. * 32 and 64-bit requests are possible and must be @@ -617,6 +630,26 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, return false; } + first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1; + last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1; + vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1; + + /* For GFX9.4.3, we need special handling for VMIDs depending on + * partition mode. + * In CPX mode, the VMID range needs to be shared between XCDs. + * Additionally, there are 13 VMIDs (3-15) available for KFD. To + * divide them equally, we change starting VMID to 4 and not use + * VMID 3. + * If the VMID range changes for GFX9.4.3, then this code MUST be + * revisited. 
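+ * As a worked example, with the usual compute_vmid_bitmap covering
+ * VMIDs 3-15: vmid_num_kfd starts at 13, halving gives 6 VMIDs per
+ * XCD, and first_vmid_kfd becomes 15 + 1 - 6*2 = 4, so the first
+ * XCD is later assigned VMIDs 4-9 and the second VMIDs 10-15,
+ * leaving VMID 3 unused.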
+ */ + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && + kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + kfd->num_nodes != 1) { + vmid_num_kfd /= 2; + first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2; + } + /* Verify module parameters regarding mapped process number*/ if (hws_max_conc_proc >= 0) max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd); @@ -682,6 +715,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); + /* TODO: Needs to be updated for memory partitioning */ svm_migrate_init(kfd->adev); /* Allocate the KFD node */ @@ -700,12 +734,51 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->max_proc_per_quantum = max_proc_per_quantum; atomic_set(&node->sram_ecc_flag, 0); - /* Initialize the KFD node */ - if (kfd_init_node(node)) { - dev_err(kfd_device, "Error initializing KFD node\n"); - goto node_init_error; + dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", + kfd->num_nodes); + for (i = 0; i < kfd->num_nodes; i++) { + node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL); + if (!node) + goto node_alloc_error; + + node->adev = kfd->adev; + node->kfd = kfd; + node->kfd2kgd = kfd->kfd2kgd; + node->vm_info.vmid_num_kfd = vmid_num_kfd; + node->num_xcc_per_node = max(1U, kfd->adev->gfx.num_xcc_per_xcp); + node->start_xcc_id = node->num_xcc_per_node * i; + + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && + kfd->adev->gfx.partition_mode == AMDGPU_CPX_PARTITION_MODE && + kfd->num_nodes != 1) { + /* For GFX9.4.3 and CPX mode, first XCD gets VMID range + * 4-9 and second XCD gets VMID range 10-15. + */ + + node->vm_info.first_vmid_kfd = (i%2 == 0) ? + first_vmid_kfd : + first_vmid_kfd+vmid_num_kfd; + node->vm_info.last_vmid_kfd = (i%2 == 0) ? + last_vmid_kfd-vmid_num_kfd : + last_vmid_kfd; + node->compute_vmid_bitmap = + ((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) - + ((0x1 << (node->vm_info.first_vmid_kfd)) - 1); + } else { + node->vm_info.first_vmid_kfd = first_vmid_kfd; + node->vm_info.last_vmid_kfd = last_vmid_kfd; + node->compute_vmid_bitmap = + gpu_resources->compute_vmid_bitmap; + } + node->max_proc_per_quantum = max_proc_per_quantum; + atomic_set(&node->sram_ecc_flag, 0); + /* Initialize the KFD node */ + if (kfd_init_node(node)) { + dev_err(kfd_device, "Error initializing KFD node\n"); + goto node_init_error; + } + kfd->nodes[i] = node; } - kfd->node = node; if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; @@ -722,9 +795,9 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, goto out; kfd_resume_iommu_error: - kfd_cleanup_node(kfd); node_init_error: node_alloc_error: + kfd_cleanup_nodes(kfd, i); device_iommu_error: kfd_doorbell_fini(kfd); kfd_doorbell_error: @@ -742,7 +815,9 @@ out: void kgd2kfd_device_exit(struct kfd_dev *kfd) { if (kfd->init_complete) { - kfd_cleanup_node(kfd); + /* Cleanup KFD nodes */ + kfd_cleanup_nodes(kfd, kfd->num_nodes); + /* Cleanup common/shared resources */ kfd_doorbell_fini(kfd); ida_destroy(&kfd->doorbell_ida); kfd_gtt_sa_fini(kfd); @@ -754,18 +829,23 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd) int kgd2kfd_pre_reset(struct kfd_dev *kfd) { - struct kfd_node *node = kfd->node; + struct kfd_node *node; + int i; if (!kfd->init_complete) return 0; - kfd_smi_event_update_gpu_reset(node, false); - - node->dqm->ops.pre_reset(node->dqm); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + kfd_smi_event_update_gpu_reset(node, false); + node->dqm->ops.pre_reset(node->dqm); + } kgd2kfd_suspend(kfd, false); - kfd_signal_reset_event(node); + for (i = 0; i < 
kfd->num_nodes; i++) + kfd_signal_reset_event(kfd->nodes[i]); + return 0; } @@ -778,19 +858,25 @@ int kgd2kfd_pre_reset(struct kfd_dev *kfd) int kgd2kfd_post_reset(struct kfd_dev *kfd) { int ret; - struct kfd_node *node = kfd->node; + struct kfd_node *node; + int i; if (!kfd->init_complete) return 0; - ret = kfd_resume(node); - if (ret) - return ret; - atomic_dec(&kfd_locked); + for (i = 0; i < kfd->num_nodes; i++) { + ret = kfd_resume(kfd->nodes[i]); + if (ret) + return ret; + } - atomic_set(&node->sram_ecc_flag, 0); + atomic_dec(&kfd_locked); - kfd_smi_event_update_gpu_reset(node, true); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + atomic_set(&node->sram_ecc_flag, 0); + kfd_smi_event_update_gpu_reset(node, true); + } return 0; } @@ -802,7 +888,8 @@ bool kfd_is_locked(void) void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) { - struct kfd_node *node = kfd->node; + struct kfd_node *node; + int i; if (!kfd->init_complete) return; @@ -814,21 +901,25 @@ void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm) kfd_suspend_all_processes(); } - node->dqm->ops.stop(node->dqm); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + node->dqm->ops.stop(node->dqm); + } kfd_iommu_suspend(kfd); } int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm) { - int ret, count; - struct kfd_node *node = kfd->node; + int ret, count, i; if (!kfd->init_complete) return 0; - ret = kfd_resume(node); - if (ret) - return ret; + for (i = 0; i < kfd->num_nodes; i++) { + ret = kfd_resume(kfd->nodes[i]); + if (ret) + return ret; + } /* for runtime resume, skip unlocking kfd */ if (!run_pm) { @@ -892,10 +983,10 @@ static inline void kfd_queue_work(struct workqueue_struct *wq, /* This is called directly from KGD at ISR. */ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) { - uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE]; + uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i; bool is_patched = false; unsigned long flags; - struct kfd_node *node = kfd->node; + struct kfd_node *node; if (!kfd->init_complete) return; @@ -905,16 +996,22 @@ void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry) return; } - spin_lock_irqsave(&node->interrupt_lock, flags); - - if (node->interrupts_active - && interrupt_is_wanted(node, ih_ring_entry, - patched_ihre, &is_patched) - && enqueue_ih_ring_entry(node, - is_patched ? patched_ihre : ih_ring_entry)) - kfd_queue_work(node->ih_wq, &node->interrupt_work); + for (i = 0; i < kfd->num_nodes; i++) { + node = kfd->nodes[i]; + spin_lock_irqsave(&node->interrupt_lock, flags); + + if (node->interrupts_active + && interrupt_is_wanted(node, ih_ring_entry, + patched_ihre, &is_patched) + && enqueue_ih_ring_entry(node, + is_patched ? patched_ihre : ih_ring_entry)) { + kfd_queue_work(node->ih_wq, &node->interrupt_work); + spin_unlock_irqrestore(&node->interrupt_lock, flags); + return; + } + spin_unlock_irqrestore(&node->interrupt_lock, flags); + } - spin_unlock_irqrestore(&node->interrupt_lock, flags); } int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger) @@ -1181,8 +1278,13 @@ int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj) void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd) { + /* + * TODO: Currently update SRAM ECC flag for first node. + * This needs to be updated later when we can + * identify SRAM ECC error on other nodes also. 
+ */ if (kfd) - atomic_inc(&kfd->node->sram_ecc_flag); + atomic_inc(&kfd->nodes[0]->sram_ecc_flag); } void kfd_inc_compute_active(struct kfd_node *node) @@ -1202,8 +1304,14 @@ void kfd_dec_compute_active(struct kfd_node *node) void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { + /* + * TODO: For now, raise the throttling event only on first node. + * This will need to change after we are able to determine + * which node raised the throttling event. + */ if (kfd && kfd->init_complete) - kfd_smi_event_update_thermal_throttling(kfd->node, throttle_bitmask); + kfd_smi_event_update_thermal_throttling(kfd->nodes[0], + throttle_bitmask); } /* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 34977d89f01c..6ee17100c333 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1426,7 +1426,7 @@ static int set_sched_resources(struct device_queue_manager *dqm) int i, mec; struct scheduling_resources res; - res.vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; + res.vmid_mask = dqm->dev->compute_vmid_bitmap; res.queue_mask = 0; for (i = 0; i < KGD_MAX_QUEUES; ++i) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c index 6eee9a0944f3..808ee010520a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c @@ -121,6 +121,12 @@ int kfd_iommu_bind_process_to_device(struct kfd_process_device *pdd) return -EINVAL; } + if (!kfd_is_first_node(dev)) { + dev_warn_once(kfd_device, + "IOMMU supported only on first node\n"); + return 0; + } + err = amd_iommu_bind_pasid(dev->adev->pdev, p->pasid, p->lead_thread); if (!err) pdd->bound = PDD_BOUND; @@ -138,7 +144,8 @@ void kfd_iommu_unbind_process(struct kfd_process *p) int i; for (i = 0; i < p->n_pdds; i++) - if (p->pdds[i]->bound == PDD_BOUND) + if ((p->pdds[i]->bound == PDD_BOUND) && + (kfd_is_first_node((p->pdds[i]->dev)))) amd_iommu_unbind_pasid(p->pdds[i]->dev->adev->pdev, p->pasid); } @@ -281,7 +288,7 @@ void kfd_iommu_suspend(struct kfd_dev *kfd) if (!kfd->use_iommu_v2) return; - kfd_unbind_processes_from_device(kfd->node); + kfd_unbind_processes_from_device(kfd->nodes[0]); amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); @@ -312,7 +319,7 @@ int kfd_iommu_resume(struct kfd_dev *kfd) amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, iommu_invalid_ppr_cb); - err = kfd_bind_processes_to_device(kfd->node); + err = kfd_bind_processes_to_device(kfd->nodes[0]); if (err) { amd_iommu_set_invalidate_ctx_cb(kfd->adev->pdev, NULL); amd_iommu_set_invalid_ppr_cb(kfd->adev->pdev, NULL); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 1e187677c90a..5f4dc2a45bd0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -423,7 +423,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->node->id, prange->prefetch_loc, + 0, adev->kfd.dev->nodes[0]->id, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -456,7 +456,7 @@ svm_migrate_vma_to_vram(struct amdgpu_device *adev, struct svm_range 
*prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - 0, adev->kfd.dev->node->id, trigger); + 0, adev->kfd.dev->nodes[0]->id, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); svm_range_free_dma_mappings(prange); @@ -701,7 +701,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_start(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->node->id, 0, prange->prefetch_loc, + adev->kfd.dev->nodes[0]->id, 0, prange->prefetch_loc, prange->preferred_loc, trigger); r = migrate_vma_setup(&migrate); @@ -737,7 +737,7 @@ svm_migrate_vma_to_ram(struct amdgpu_device *adev, struct svm_range *prange, kfd_smi_event_migration_end(adev->kfd.dev, p->lead_thread->pid, start >> PAGE_SHIFT, end >> PAGE_SHIFT, - adev->kfd.dev->node->id, 0, trigger); + adev->kfd.dev->nodes[0]->id, 0, trigger); svm_range_dma_unmap(adev->dev, scratch, 0, npages); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index fdb97e5d0c01..873b49238dc1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -255,6 +255,8 @@ struct kfd_vmid_info { uint32_t vmid_num_kfd; }; +#define MAX_KFD_NODES 8 + struct kfd_dev; struct kfd_node { @@ -267,6 +269,10 @@ struct kfd_node { */ struct kfd_vmid_info vm_info; unsigned int id; /* topology stub index */ + unsigned int num_xcc_per_node; + unsigned int start_xcc_id; /* Starting XCC instance + * number for the node + */ /* Interrupts */ struct kfifo ih_fifo; struct workqueue_struct *ih_wq; @@ -300,6 +306,8 @@ struct kfd_node { /* Maximum process number mapped to HW scheduler */ unsigned int max_proc_per_quantum; + unsigned int compute_vmid_bitmap; + struct kfd_dev *kfd; }; @@ -368,7 +376,8 @@ struct kfd_dev { /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ struct dev_pagemap pgmap; - struct kfd_node *node; + struct kfd_node *nodes[MAX_KFD_NODES]; + unsigned int num_nodes; }; enum kfd_mempool { @@ -1397,6 +1406,11 @@ static inline int kfd_devcgroup_check_permission(struct kfd_node *kfd) #endif } +static inline bool kfd_is_first_node(struct kfd_node *node) +{ + return (node == node->kfd->nodes[0]); +} + /* Debugfs */ #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c index a0bf6558f4ac..b703da59e067 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_smi_events.c @@ -254,17 +254,17 @@ void kfd_smi_event_page_fault_start(struct kfd_dev *dev, pid_t pid, unsigned long address, bool write_fault, ktime_t ts) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_START, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_START, "%lld -%d @%lx(%x) %c\n", ktime_to_ns(ts), pid, - address, dev->node->id, write_fault ? 'W' : 'R'); + address, dev->nodes[0]->id, write_fault ? 'W' : 'R'); } void kfd_smi_event_page_fault_end(struct kfd_dev *dev, pid_t pid, unsigned long address, bool migration) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_PAGE_FAULT_END, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_PAGE_FAULT_END, "%lld -%d @%lx(%x) %c\n", ktime_get_boottime_ns(), - pid, address, dev->node->id, migration ? 'M' : 'U'); + pid, address, dev->nodes[0]->id, migration ? 
'M' : 'U'); } void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, @@ -273,7 +273,7 @@ void kfd_smi_event_migration_start(struct kfd_dev *dev, pid_t pid, uint32_t prefetch_loc, uint32_t preferred_loc, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_START, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_START, "%lld -%d @%lx(%lx) %x->%x %x:%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, prefetch_loc, preferred_loc, trigger); @@ -283,7 +283,7 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, unsigned long start, unsigned long end, uint32_t from, uint32_t to, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_MIGRATE_END, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_MIGRATE_END, "%lld -%d @%lx(%lx) %x->%x %d\n", ktime_get_boottime_ns(), pid, start, end - start, from, to, trigger); @@ -292,16 +292,16 @@ void kfd_smi_event_migration_end(struct kfd_dev *dev, pid_t pid, void kfd_smi_event_queue_eviction(struct kfd_dev *dev, pid_t pid, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_EVICTION, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_EVICTION, "%lld -%d %x %d\n", ktime_get_boottime_ns(), pid, - dev->node->id, trigger); + dev->nodes[0]->id, trigger); } void kfd_smi_event_queue_restore(struct kfd_dev *dev, pid_t pid) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_QUEUE_RESTORE, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_QUEUE_RESTORE, "%lld -%d %x\n", ktime_get_boottime_ns(), pid, - dev->node->id); + dev->nodes[0]->id); } void kfd_smi_event_queue_restore_rescheduled(struct mm_struct *mm) @@ -328,9 +328,9 @@ void kfd_smi_event_unmap_from_gpu(struct kfd_dev *dev, pid_t pid, unsigned long address, unsigned long last, uint32_t trigger) { - kfd_smi_event_add(pid, dev->node, KFD_SMI_EVENT_UNMAP_FROM_GPU, + kfd_smi_event_add(pid, dev->nodes[0], KFD_SMI_EVENT_UNMAP_FROM_GPU, "%lld -%d @%lx(%lx) %x %d\n", ktime_get_boottime_ns(), - pid, address, last - address + 1, dev->node->id, trigger); + pid, address, last - address + 1, dev->nodes[0]->id, trigger); } int kfd_smi_event_open(struct kfd_node *dev, uint32_t *fd) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 06a11186d947..94af37df3ed2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -555,7 +555,8 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->gpu->kfd->sdma_fw_version); sysfs_show_64bit_prop(buffer, offs, "unique_id", dev->gpu->adev->unique_id); - + sysfs_show_32bit_prop(buffer, offs, "num_xcc", + dev->gpu->num_xcc_per_node); } return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute", @@ -1160,7 +1161,7 @@ void kfd_topology_shutdown(void) static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) { uint32_t hashout; - uint32_t buf[7]; + uint32_t buf[8]; uint64_t local_mem_size; int i; @@ -1177,8 +1178,9 @@ static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) buf[4] = gpu->adev->pdev->bus->number; buf[5] = lower_32_bits(local_mem_size); buf[6] = upper_32_bits(local_mem_size); + buf[7] = gpu->start_xcc_id | (gpu->num_xcc_per_node << 16); - for (i = 0, hashout = 0; i < 7; i++) + for (i = 0, hashout = 0; i < 8; i++) hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH); return hashout; -- cgit From 2f77b9a242a2e01822efc80c8b63eaa31df0f8b4 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 21:45:50 -0400 
Subject: drm/amdkfd: Update MQD management on multi XCC setup Update MQD management for both HIQ and user-mode compute queues on a multi XCC setup. MQDs needs to be allocated, initialized, loaded and destroyed for each XCC in the KFD node. v2: squash in fix "drm/amdkfd: Fix SDMA+HIQ HQD allocation on GFX9.4.3" Signed-off-by: Mukul Joshi Signed-off-by: Amber Lin Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 51 ++-- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 28 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 8 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 3 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 3 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 292 ++++++++++++++++++--- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 3 + .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 16 +- drivers/gpu/drm/amd/include/v9_structs.h | 30 ++- 10 files changed, 380 insertions(+), 57 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index f61f07575f63..6bbe3b89aef5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -800,6 +800,41 @@ static void amdgpu_ttm_tt_unpin_userptr(struct ttm_device *bdev, sg_free_table(ttm->sg); } +/* + * total_pages is constructed as MQD0+CtrlStack0 + MQD1+CtrlStack1 + ... + * MQDn+CtrlStackn where n is the number of XCCs per partition. + * pages_per_xcc is the size of one MQD+CtrlStack. The first page is MQD + * and uses memory type default, UC. The rest of pages_per_xcc are + * Ctrl stack and modify their memory type to NC. + */ +static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev, + struct ttm_tt *ttm, uint64_t flags) +{ + struct amdgpu_ttm_tt *gtt = (void *)ttm; + uint64_t total_pages = ttm->num_pages; + int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp); + uint64_t page_idx, pages_per_xcc = total_pages / num_xcc; + int i; + uint64_t ctrl_flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | + AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); + + for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) { + /* MQD page: use default flags */ + amdgpu_gart_bind(adev, + gtt->offset + (page_idx << PAGE_SHIFT), + 1, >t->ttm.dma_address[page_idx], flags); + /* + * Ctrl pages - modify the memory type to NC (ctrl_flags) from + * the second page of the BO onward. + */ + amdgpu_gart_bind(adev, + gtt->offset + ((page_idx + 1) << PAGE_SHIFT), + pages_per_xcc - 1, + >t->ttm.dma_address[page_idx + 1], + ctrl_flags); + } +} + static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, struct ttm_buffer_object *tbo, uint64_t flags) @@ -812,21 +847,7 @@ static void amdgpu_ttm_gart_bind(struct amdgpu_device *adev, flags |= AMDGPU_PTE_TMZ; if (abo->flags & AMDGPU_GEM_CREATE_CP_MQD_GFX9) { - uint64_t page_idx = 1; - - amdgpu_gart_bind(adev, gtt->offset, page_idx, - gtt->ttm.dma_address, flags); - - /* The memory type of the first page defaults to UC. Now - * modify the memory type to NC from the second page of - * the BO onward. 
- */ - flags &= ~AMDGPU_PTE_MTYPE_VG10_MASK; - flags |= AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); - - amdgpu_gart_bind(adev, gtt->offset + (page_idx << PAGE_SHIFT), - ttm->num_pages - page_idx, - &(gtt->ttm.dma_address[page_idx]), flags); + amdgpu_ttm_gart_bind_gfx9_mqd(adev, ttm, flags); } else { amdgpu_gart_bind(adev, gtt->offset, ttm->num_pages, gtt->ttm.dma_address, flags); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 6ee17100c333..9afd3295ca85 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2247,7 +2247,8 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size * get_num_all_sdma_engines(dqm) * dev->kfd->device_info.num_sdma_queues_per_engine + - dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; + (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * + dqm->dev->num_xcc_per_node); retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 61f6dd68c84b..074f6075ccc7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -76,7 +76,8 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev, q->sdma_queue_id) * dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; - offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; + offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * + dev->num_xcc_per_node; mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem + offset); @@ -246,3 +247,28 @@ bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd, { return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd); } + +uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev) +{ + return dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size; +} + +void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, struct kfd_mem_obj *mqd_mem_obj, + uint32_t virtual_xcc_id) +{ + uint64_t offset; + + offset = kfd_hiq_mqd_stride(dev) * virtual_xcc_id; + + mqd_mem_obj->gtt_mem = (virtual_xcc_id == 0) ? 
+ dev->dqm->hiq_sdma_mqd.gtt_mem : NULL; + mqd_mem_obj->gpu_addr = dev->dqm->hiq_sdma_mqd.gpu_addr + offset; + mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t) + dev->dqm->hiq_sdma_mqd.cpu_ptr + offset); +} + +uint64_t kfd_mqd_stride(struct mqd_manager *mm, + struct queue_properties *q) +{ + return mm->mqd_size; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index 46fc3f273d0d..eb18be74f559 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -119,6 +119,8 @@ struct mqd_manager { int (*debugfs_show_mqd)(struct seq_file *m, void *data); #endif uint32_t (*read_doorbell_id)(void *mqd); + uint64_t (*mqd_stride)(struct mqd_manager *mm, + struct queue_properties *p); struct mutex mqd_mutex; struct kfd_node *dev; @@ -164,4 +166,10 @@ bool kfd_is_occupied_sdma(struct mqd_manager *mm, void *mqd, uint64_t queue_address, uint32_t pipe_id, uint32_t queue_id); +void kfd_get_hiq_xcc_mqd(struct kfd_node *dev, + struct kfd_mem_obj *mqd_mem_obj, uint32_t virtual_xcc_id); + +uint64_t kfd_hiq_mqd_stride(struct kfd_node *dev); +uint64_t kfd_mqd_stride(struct mqd_manager *mm, + struct queue_properties *q); #endif /* KFD_MQD_MANAGER_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index 03e04d5e5a11..ca1966466759 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -428,6 +428,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct cik_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -442,6 +443,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct cik_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -457,6 +459,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type, mqd->checkpoint_mqd = checkpoint_mqd_sdma; mqd->restore_mqd = restore_mqd_sdma; mqd->mqd_size = sizeof(struct cik_sdma_rlc_registers); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index 7a93be0ebb19..c9565ea99df5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -432,6 +432,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->get_wave_state = get_wave_state; mqd->checkpoint_mqd = checkpoint_mqd; mqd->restore_mqd = restore_mqd; + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -447,6 +448,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct v10_compute_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -478,6 +480,7 @@ struct mqd_manager *mqd_manager_init_v10(enum KFD_MQD_TYPE type, mqd->checkpoint_mqd = checkpoint_mqd_sdma; mqd->restore_mqd = restore_mqd_sdma; 
mqd->mqd_size = sizeof(struct v10_sdma_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 943a738e73f9..c677322057dd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -33,6 +33,21 @@ #include "sdma0/sdma0_4_0_sh_mask.h" #include "amdgpu_amdkfd.h" +static void update_mqd(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, + struct mqd_update_info *minfo); + +static uint64_t mqd_stride_v9(struct mqd_manager *mm, + struct queue_properties *q) +{ + if (mm->dev->kfd->cwsr_enabled && + q->type == KFD_QUEUE_TYPE_COMPUTE) + return ALIGN(q->ctl_stack_size, PAGE_SIZE) + + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE); + + return mm->mqd_size; +} + static inline struct v9_mqd *get_mqd(void *mqd) { return (struct v9_mqd *)mqd; @@ -110,8 +125,9 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, if (!mqd_mem_obj) return NULL; retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev, - ALIGN(q->ctl_stack_size, PAGE_SIZE) + - ALIGN(sizeof(struct v9_mqd), PAGE_SIZE), + (ALIGN(q->ctl_stack_size, PAGE_SIZE) + + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) * + node->num_xcc_per_node, &(mqd_mem_obj->gtt_mem), &(mqd_mem_obj->gpu_addr), (void *)&(mqd_mem_obj->cpu_ptr), true); @@ -165,24 +181,9 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | 1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; - if (q->format == KFD_QUEUE_FORMAT_AQL) { + if (q->format == KFD_QUEUE_FORMAT_AQL) m->cp_hqd_aql_control = 1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT; - if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { - /* On GC 9.4.3, DW 41 is re-purposed as - * compute_tg_chunk_size. 
- * TODO: review this setting when active CUs in the - * partition play a role - */ - m->compute_static_thread_mgmt_se6 = 1; - } - } else { - /* PM4 queue */ - if (KFD_GC_VERSION(mm->dev) == IP_VERSION(9, 4, 3)) { - m->compute_static_thread_mgmt_se6 = 0; - /* TODO: program pm4_target_xcc */ - } - } if (q->tba_addr) { m->compute_pgm_rsrc2 |= @@ -205,7 +206,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, *mqd = m; if (gart_addr) *gart_addr = addr; - mm->update_mqd(mm, m, q, NULL); + update_mqd(mm, m, q, NULL); } static int load_mqd(struct mqd_manager *mm, void *mqd, @@ -269,13 +270,10 @@ static void update_mqd(struct mqd_manager *mm, void *mqd, m->cp_hqd_vmid = q->vmid; if (q->format == KFD_QUEUE_FORMAT_AQL) { - m->cp_hqd_pq_control |= + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK | 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT | 1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT | 1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT; - if (KFD_GC_VERSION(mm->dev) != IP_VERSION(9, 4, 3)) - m->cp_hqd_pq_control |= - CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; m->cp_hqd_pq_doorbell_control |= 1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT; } @@ -466,6 +464,224 @@ static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd, qp->is_active = 0; } +static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + struct v9_mqd *m; + int xcc = 0; + struct kfd_mem_obj xcc_mqd_mem_obj; + uint64_t xcc_gart_addr = 0; + + memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc); + + init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); + + m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK | + 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT | + 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; + m->cp_mqd_stride_size = kfd_hiq_mqd_stride(mm->dev); + if (xcc == 0) { + /* Set no_update_rptr = 0 in Master XCC */ + m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; + + /* Set the MQD pointer and gart address to XCC0 MQD */ + *mqd = m; + *gart_addr = xcc_gart_addr; + } + } +} + +static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + struct queue_properties *p, struct mm_struct *mms) +{ + int xcc, err; + void *xcc_mqd; + uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + hiq_mqd_size * xcc; + err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd, + pipe_id, queue_id, + p->doorbell_off); + if (err) { + pr_debug("Failed to load HIQ MQD for XCC: %d\n", xcc); + break; + } + } + + return err; +} + +static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id) +{ + int xcc = 0, err; + void *xcc_mqd; + uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + hiq_mqd_size * xcc; + err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, + type, timeout, pipe_id, + queue_id); + if (err) { + pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + break; + } + } + + return err; +} + +static void get_xcc_mqd(struct kfd_mem_obj *mqd_mem_obj, + struct kfd_mem_obj *xcc_mqd_mem_obj, + uint64_t offset) +{ + xcc_mqd_mem_obj->gtt_mem = (offset == 0) ? 
+ mqd_mem_obj->gtt_mem : NULL; + xcc_mqd_mem_obj->gpu_addr = mqd_mem_obj->gpu_addr + offset; + xcc_mqd_mem_obj->cpu_ptr = (uint32_t *)((uintptr_t)mqd_mem_obj->cpu_ptr + + offset); +} + +static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, + struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, + struct queue_properties *q) +{ + struct v9_mqd *m; + int xcc = 0; + struct kfd_mem_obj xcc_mqd_mem_obj; + uint64_t xcc_gart_addr = 0; + uint64_t offset = mm->mqd_stride(mm, q); + + memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc); + + init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); + + m->cp_mqd_stride_size = offset; + if (q->format == KFD_QUEUE_FORMAT_AQL) { + m->compute_tg_chunk_size = 1; + + switch (xcc) { + case 0: + /* Master XCC */ + m->cp_hqd_pq_control &= + ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; + m->compute_current_logic_xcc_id = + mm->dev->num_xcc_per_node - 1; + break; + default: + m->compute_current_logic_xcc_id = + xcc - 1; + break; + } + } else { + /* PM4 Queue */ + m->compute_current_logic_xcc_id = 0; + m->compute_tg_chunk_size = 0; + } + + if (xcc == 0) { + /* Set the MQD pointer and gart address to XCC0 MQD */ + *mqd = m; + *gart_addr = xcc_gart_addr; + } + } +} + +static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, struct mqd_update_info *minfo) +{ + struct v9_mqd *m; + int xcc = 0; + uint64_t size = mm->mqd_stride(mm, q); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + m = get_mqd(mqd + size * xcc); + update_mqd(mm, m, q, minfo); + + if (q->format == KFD_QUEUE_FORMAT_AQL) { + switch (xcc) { + case 0: + /* Master XCC */ + m->cp_hqd_pq_control &= + ~CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK; + m->compute_current_logic_xcc_id = + mm->dev->num_xcc_per_node - 1; + break; + default: + m->compute_current_logic_xcc_id = + xcc - 1; + break; + } + m->compute_tg_chunk_size = 1; + } else { + /* PM4 Queue */ + m->compute_current_logic_xcc_id = 0; + m->compute_tg_chunk_size = 0; + } + } +} + +static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, + enum kfd_preempt_type type, unsigned int timeout, + uint32_t pipe_id, uint32_t queue_id) +{ + int xcc = 0, err; + void *xcc_mqd; + struct v9_mqd *m; + uint64_t mqd_offset; + + m = get_mqd(mqd); + mqd_offset = m->cp_mqd_stride_size; + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + mqd_offset * xcc; + err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, + type, timeout, pipe_id, + queue_id); + if (err) { + pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + break; + } + } + + return err; +} + +static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, + uint32_t pipe_id, uint32_t queue_id, + struct queue_properties *p, struct mm_struct *mms) +{ + /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ + uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); + int xcc = 0, err; + void *xcc_mqd; + uint64_t mqd_stride_size = mm->mqd_stride(mm, p); + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + mqd_stride_size * xcc; + err = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, xcc_mqd, + pipe_id, queue_id, + (uint32_t __user *)p->write_ptr, + wptr_shift, 0, mms); + if (err) { + pr_debug("Load MQD failed for xcc: %d\n", xcc); + break; + } + } + + return err; +} + #if defined(CONFIG_DEBUG_FS) static int debugfs_show_mqd(struct seq_file *m, void *data) @@ -501,34 +717,49 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, switch (type) { case KFD_MQD_TYPE_CP: mqd->allocate_mqd = allocate_mqd; - mqd->init_mqd = init_mqd; mqd->free_mqd = kfd_free_mqd_cp; - mqd->load_mqd = load_mqd; - mqd->update_mqd = update_mqd; - mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->get_wave_state = get_wave_state; mqd->get_checkpoint_info = get_checkpoint_info; mqd->checkpoint_mqd = checkpoint_mqd; mqd->restore_mqd = restore_mqd; mqd->mqd_size = sizeof(struct v9_mqd); + mqd->mqd_stride = mqd_stride_v9; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif + if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) { + mqd->init_mqd = init_mqd_v9_4_3; + mqd->load_mqd = load_mqd_v9_4_3; + mqd->update_mqd = update_mqd_v9_4_3; + mqd->destroy_mqd = destroy_mqd_v9_4_3; + } else { + mqd->init_mqd = init_mqd; + mqd->load_mqd = load_mqd; + mqd->update_mqd = update_mqd; + mqd->destroy_mqd = kfd_destroy_mqd_cp; + } break; case KFD_MQD_TYPE_HIQ: mqd->allocate_mqd = allocate_hiq_mqd; - mqd->init_mqd = init_mqd_hiq; mqd->free_mqd = free_mqd_hiq_sdma; - mqd->load_mqd = kfd_hiq_load_mqd_kiq; mqd->update_mqd = update_mqd; - mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct v9_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif mqd->read_doorbell_id = read_doorbell_id; + if (KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 3)) { + mqd->init_mqd = init_mqd_hiq_v9_4_3; + mqd->load_mqd = hiq_load_mqd_kiq_v9_4_3; + mqd->destroy_mqd = destroy_hiq_mqd_v9_4_3; + } else { + mqd->init_mqd = init_mqd_hiq; + mqd->load_mqd = kfd_hiq_load_mqd_kiq; + mqd->destroy_mqd = kfd_destroy_mqd_cp; + } break; case KFD_MQD_TYPE_DIQ: mqd->allocate_mqd = allocate_mqd; @@ -554,6 +785,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->checkpoint_mqd = checkpoint_mqd_sdma; mqd->restore_mqd = restore_mqd_sdma; mqd->mqd_size = sizeof(struct v9_sdma_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index f6b4a5686dcb..8736a3cdbe1e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -486,6 +486,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct vi_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -500,6 +501,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, mqd->destroy_mqd = kfd_destroy_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; mqd->mqd_size = sizeof(struct vi_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if 
defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd; #endif @@ -515,6 +517,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, mqd->checkpoint_mqd = checkpoint_mqd_sdma; mqd->restore_mqd = restore_mqd_sdma; mqd->mqd_size = sizeof(struct vi_sdma_mqd); + mqd->mqd_stride = kfd_mqd_stride; #if defined(CONFIG_DEBUG_FS) mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 5602498e713f..b1fb017b2ef8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -927,7 +927,9 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) struct queue *q; enum KFD_MQD_TYPE mqd_type; struct mqd_manager *mqd_mgr; - int r = 0; + int r = 0, xcc, num_xccs = 1; + void *mqd; + uint64_t size = 0; list_for_each_entry(pqn, &pqm->queues, process_queue_list) { if (pqn->q) { @@ -943,6 +945,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) seq_printf(m, " Compute queue on device %x\n", q->device->id); mqd_type = KFD_MQD_TYPE_CP; + num_xccs = q->device->num_xcc_per_node; break; default: seq_printf(m, @@ -951,6 +954,8 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) continue; } mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type]; + size = mqd_mgr->mqd_stride(mqd_mgr, + &q->properties); } else if (pqn->kq) { q = pqn->kq->queue; mqd_mgr = pqn->kq->mqd_mgr; @@ -972,9 +977,12 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) continue; } - r = mqd_mgr->debugfs_show_mqd(m, q->mqd); - if (r != 0) - break; + for (xcc = 0; xcc < num_xccs; xcc++) { + mqd = q->mqd + size * xcc; + r = mqd_mgr->debugfs_show_mqd(m, mqd); + if (r != 0) + break; + } } return r; diff --git a/drivers/gpu/drm/amd/include/v9_structs.h b/drivers/gpu/drm/amd/include/v9_structs.h index a0c672889fe4..a2f81b9c38af 100644 --- a/drivers/gpu/drm/amd/include/v9_structs.h +++ b/drivers/gpu/drm/amd/include/v9_structs.h @@ -196,10 +196,20 @@ struct v9_mqd { uint32_t compute_wave_restore_addr_lo; uint32_t compute_wave_restore_addr_hi; uint32_t compute_wave_restore_control; - uint32_t compute_static_thread_mgmt_se4; - uint32_t compute_static_thread_mgmt_se5; - uint32_t compute_static_thread_mgmt_se6; - uint32_t compute_static_thread_mgmt_se7; + union { + struct { + uint32_t compute_static_thread_mgmt_se4; + uint32_t compute_static_thread_mgmt_se5; + uint32_t compute_static_thread_mgmt_se6; + uint32_t compute_static_thread_mgmt_se7; + }; + struct { + uint32_t compute_current_logic_xcc_id; // offset: 39 (0x27) + uint32_t compute_restart_cg_tg_id; // offset: 40 (0x28) + uint32_t compute_tg_chunk_size; // offset: 41 (0x29) + uint32_t compute_restore_tg_chunk_size; // offset: 42 (0x2A) + }; + }; uint32_t reserved_43; uint32_t reserved_44; uint32_t reserved_45; @@ -382,8 +392,16 @@ struct v9_mqd { uint32_t iqtimer_pkt_dw29; uint32_t iqtimer_pkt_dw30; uint32_t iqtimer_pkt_dw31; - uint32_t reserved_225; - uint32_t reserved_226; + union { + struct { + uint32_t reserved_225; + uint32_t reserved_226; + }; + struct { + uint32_t pm4_target_xcc_in_xcp; // offset: 225 (0xE1) + uint32_t cp_mqd_stride_size; // offset: 226 (0xE2) + }; + }; uint32_t reserved_227; uint32_t set_resources_header; uint32_t set_resources_dw1; -- cgit From e2069a7b0880ccdc6fa6530b6091e47168705425 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:20:52 -0400 Subject: drm/amdkfd: Add XCC instance to kgd2kfd interface (v3) Gfx 9 starts to have multiple XCC 
instances in one device. Add instance parameter to kgd2kfd functions where XCC instance was hard coded as 0. Also, update code to pass the correct instance number when running on a multi-XCC setup. v2: introduce the XCC instance to gfx v11 (Morris) v3: rebase (Alex) Signed-off-by: Amber Lin Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Tested-by: Amber Lin Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 38 ++--- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 22 +-- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 27 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c | 19 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 17 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 17 +-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 160 +++++++++++---------- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 29 ++-- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 103 ++++++++----- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 6 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 15 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2 +- drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 25 ++-- 17 files changed, 270 insertions(+), 218 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 562e1a04160f..49d8087e469e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -33,7 +33,7 @@ #include "soc15.h" static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, - u32 pasid, unsigned int vmid) + u32 pasid, unsigned int vmid, uint32_t inst) { unsigned long timeout; @@ -47,11 +47,11 @@ static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, uint32_t pasid_mapping = (pasid == 0) ? 
0 : (uint32_t)pasid | ATC_VMID0_PASID_MAPPING__VALID_MASK; - WREG32(SOC15_REG_OFFSET(ATHUB, 0, + WREG32(SOC15_REG_OFFSET(ATHUB, inst, regATC_VMID0_PASID_MAPPING) + vmid, pasid_mapping); timeout = jiffies + msecs_to_jiffies(10); - while (!(RREG32(SOC15_REG_OFFSET(ATHUB, 0, + while (!(RREG32(SOC15_REG_OFFSET(ATHUB, inst, regATC_VMID_PASID_MAPPING_UPDATE_STATUS)) & (1U << vmid))) { if (time_after(jiffies, timeout)) { @@ -61,13 +61,13 @@ static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, cpu_relax(); } - WREG32(SOC15_REG_OFFSET(ATHUB, 0, + WREG32(SOC15_REG_OFFSET(ATHUB, inst, regATC_VMID_PASID_MAPPING_UPDATE_STATUS), 1U << vmid); - WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT) + vmid, + WREG32(SOC15_REG_OFFSET(OSSSYS, inst, mmIH_VMID_0_LUT) + vmid, pasid_mapping); - WREG32(SOC15_REG_OFFSET(OSSSYS, 0, mmIH_VMID_0_LUT_MM) + vmid, + WREG32(SOC15_REG_OFFSET(OSSSYS, inst, mmIH_VMID_0_LUT_MM) + vmid, pasid_mapping); return 0; @@ -81,7 +81,7 @@ static inline struct v9_mqd *get_mqd(void *mqd) static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct v9_mqd *m; uint32_t *mqd_hqd; @@ -89,12 +89,12 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, m = get_mqd(mqd); - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); /* HQD registers extend to CP_HQD_AQL_DISPATCH_ID_HI */ mqd_hqd = &m->cp_mqd_base_addr_lo; - hqd_base = SOC15_REG_OFFSET(GC, 0, regCP_MQD_BASE_ADDR); - hqd_end = SOC15_REG_OFFSET(GC, 0, regCP_HQD_AQL_DISPATCH_ID_HI); + hqd_base = SOC15_REG_OFFSET(GC, inst, regCP_MQD_BASE_ADDR); + hqd_end = SOC15_REG_OFFSET(GC, inst, regCP_HQD_AQL_DISPATCH_ID_HI); for (reg = hqd_base; reg <= hqd_end; reg++) WREG32_RLC(reg, mqd_hqd[reg - hqd_base]); @@ -103,7 +103,7 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, /* Activate doorbell logic before triggering WPTR poll. 
*/ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_DOORBELL_CONTROL), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_DOORBELL_CONTROL), data); if (wptr) { @@ -133,29 +133,29 @@ static int kgd_gfx_v9_4_3_hqd_load(struct amdgpu_device *adev, void *mqd, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_LO), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_LO), lower_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_HI), upper_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_PQ_WPTR_POLL_ADDR), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_POLL_ADDR), lower_32_bits((uintptr_t)wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); - WREG32(SOC15_REG_OFFSET(GC, 0, regCP_PQ_WPTR_POLL_CNTL1), + WREG32(SOC15_REG_OFFSET(GC, inst, regCP_PQ_WPTR_POLL_CNTL1), (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_EOP_RPTR), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_EOP_RPTR), REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, regCP_HQD_ACTIVE), data); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, regCP_HQD_ACTIVE), data); - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return 0; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index f599e1e74fcc..7b60268d93c0 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -79,7 +79,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -91,7 +91,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi } static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { /* * We have to assume that there is no outstanding mapping. 
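The KFD side of this change, in the kfd_device_queue_manager.c hunks further down, shows how the new argument is consumed: each kgd2kfd callback that gains an "inst" parameter is now invoked once per XCC in the compute partition, with the physical instance derived from the node's first XCC. A rough sketch of that calling pattern (num_xcc_per_node and start_xcc_id are kfd_node fields introduced by this series), mirroring the program_sh_mem_settings() hunk below:

	int xcc;

	for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++)
		dqm->dev->kfd2kgd->program_sh_mem_settings(
			dqm->dev->adev, qpd->vmid,
			qpd->sh_mem_config,
			qpd->sh_mem_ape1_base,
			qpd->sh_mem_ape1_limit,
			qpd->sh_mem_bases,
			dqm->dev->start_xcc_id + xcc);	/* physical XCC instance */

On GFX IPs without multiple XCCs (v7, v8, v10, v10.3, v11) the new parameter is accepted but left unused; only the gfx v9 / v9.4.3 backends route it into SOC15_REG_OFFSET() and soc15_grbm_select() so that the correct XCC's register bank is selected.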
@@ -135,7 +135,8 @@ static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, * but still works */ -static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -205,7 +206,7 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct v10_compute_mqd *m; uint32_t *mqd_hqd; @@ -286,7 +287,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, static int kgd_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off) + uint32_t doorbell_off, uint32_t inst) { struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v10_compute_mqd *m; @@ -338,7 +339,7 @@ out_unlock: static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS 56 @@ -469,7 +470,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -510,7 +511,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { enum hqd_dequeue_request_type type; unsigned long end_jiffies; @@ -673,7 +674,7 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; @@ -709,7 +710,8 @@ static void set_vm_context_page_table_base(struct amdgpu_device *adev, } static void program_trap_handler_settings(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, + uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 5c4152ae44da..52d0d35fb58d 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -80,7 +80,7 @@ static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t v uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -93,7 +93,7 @@ static void program_sh_mem_settings_v10_3(struct amdgpu_device *adev, uint32_t v /* ATC is defeatured on Sienna_Cichlid */ static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT; @@ -105,7 +105,8 @@ static int set_pasid_vmid_mapping_v10_3(struct amdgpu_device *adev, unsigned int return 0; } -static int 
init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id) +static int init_interrupts_v10_3(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -177,7 +178,7 @@ static inline struct v10_sdma_mqd *get_sdma_mqd(void *mqd) static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct v10_compute_mqd *m; uint32_t *mqd_hqd; @@ -273,7 +274,7 @@ static int hqd_load_v10_3(struct amdgpu_device *adev, void *mqd, static int hiq_mqd_load_v10_3(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off) + uint32_t doorbell_off, uint32_t inst) { struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v10_compute_mqd *m; @@ -325,7 +326,7 @@ out_unlock: static int hqd_dump_v10_3(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS 56 @@ -456,7 +457,7 @@ static int hqd_sdma_dump_v10_3(struct amdgpu_device *adev, static bool hqd_is_occupied_v10_3(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -498,7 +499,7 @@ static bool hqd_sdma_is_occupied_v10_3(struct amdgpu_device *adev, static int hqd_destroy_v10_3(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { enum hqd_dequeue_request_type type; unsigned long end_jiffies; @@ -586,7 +587,7 @@ static int hqd_sdma_destroy_v10_3(struct amdgpu_device *adev, void *mqd, static int wave_control_execute_v10_3(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; @@ -628,7 +629,8 @@ static void set_vm_context_page_table_base_v10_3(struct amdgpu_device *adev, } static void program_trap_handler_settings_v10_3(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, + uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -765,7 +767,7 @@ uint32_t set_wave_launch_mode_v10_3(struct amdgpu_device *adev, * deq_retry_wait_time -- Wait Count for Global Wave Syncs. 
*/ void get_iq_wait_times_v10_3(struct amdgpu_device *adev, - uint32_t *wait_times) + uint32_t *wait_times, uint32_t inst) { *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); @@ -775,7 +777,8 @@ void build_grace_period_packet_info_v10_3(struct amdgpu_device *adev, uint32_t wait_times, uint32_t grace_period, uint32_t *reg_offset, - uint32_t *reg_data) + uint32_t *reg_data, + uint32_t inst) { *reg_data = wait_times; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c index 5cdb7289d35b..7deff8a547fb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v11.c @@ -78,7 +78,7 @@ static void program_sh_mem_settings_v11(struct amdgpu_device *adev, uint32_t vmi uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -89,7 +89,7 @@ static void program_sh_mem_settings_v11(struct amdgpu_device *adev, uint32_t vmi } static int set_pasid_vmid_mapping_v11(struct amdgpu_device *adev, unsigned int pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { uint32_t value = pasid << IH_VMID_0_LUT__PASID__SHIFT; @@ -101,7 +101,8 @@ static int set_pasid_vmid_mapping_v11(struct amdgpu_device *adev, unsigned int p return 0; } -static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id) +static int init_interrupts_v11(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -162,7 +163,7 @@ static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd) static int hqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm) + struct mm_struct *mm, uint32_t inst) { struct v11_compute_mqd *m; uint32_t *mqd_hqd; @@ -258,7 +259,7 @@ static int hqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, static int hiq_mqd_load_v11(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off) + uint32_t doorbell_off, uint32_t inst) { struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; struct v11_compute_mqd *m; @@ -310,7 +311,7 @@ out_unlock: static int hqd_dump_v11(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS 56 @@ -445,7 +446,7 @@ static int hqd_sdma_dump_v11(struct amdgpu_device *adev, } static bool hqd_is_occupied_v11(struct amdgpu_device *adev, uint64_t queue_address, - uint32_t pipe_id, uint32_t queue_id) + uint32_t pipe_id, uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -486,7 +487,7 @@ static bool hqd_sdma_is_occupied_v11(struct amdgpu_device *adev, void *mqd) static int hqd_destroy_v11(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { enum hqd_dequeue_request_type type; unsigned long end_jiffies; @@ -571,7 +572,7 @@ static int hqd_sdma_destroy_v11(struct amdgpu_device *adev, void *mqd, static int wave_control_execute_v11(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c 
b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c index e83cb1c09610..6bf448ab3dff 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c @@ -78,7 +78,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -91,7 +91,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi } static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { /* * We have to assume that there is no outstanding mapping. @@ -114,7 +114,8 @@ static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, return 0; } -static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -158,7 +159,7 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd) static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct cik_mqd *m; uint32_t *mqd_hqd; @@ -202,7 +203,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS (35+4) @@ -318,7 +319,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -358,7 +359,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t temp; enum hqd_dequeue_request_type type; @@ -494,7 +495,7 @@ static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c index 870f352837fc..cd06e4a6d1da 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c @@ -72,7 +72,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { lock_srbm(adev, 0, 0, 0, vmid); @@ -85,7 +85,7 @@ static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmi } static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { /* * We have to assume that there is no outstanding mapping. 
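A related detail worth keeping in mind while reading the MQD-manager hunks below: on a multi-XCC node the per-XCC MQDs sit back to back in one allocation, so the gfx 9.4.3 helpers stride through that buffer and issue one kgd2kfd call per XCC, while the common helpers keep passing instance 0. Condensed fragments of the two shapes, taken from the kfd_mqd_manager*.c hunks below:

	/* common helper, single-XCC devices: always instance 0 */
	return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
					      queue_id, p->doorbell_off, 0);

	/* gfx 9.4.3 helper: one call per XCC, MQDs packed at a fixed stride */
	for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) {
		xcc_mqd = mqd + hiq_mqd_size * xcc;
		err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd,
						     pipe_id, queue_id,
						     p->doorbell_off,
						     start_inst + xcc);
		if (err)
			break;
	}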
@@ -109,7 +109,8 @@ static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, return 0; } -static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) +static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -153,7 +154,7 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd) static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst) { struct vi_mqd *m; uint32_t *mqd_hqd; @@ -226,7 +227,7 @@ static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd, static int kgd_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS (54+4) @@ -350,7 +351,7 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, static bool kgd_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; @@ -390,7 +391,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t temp; enum hqd_dequeue_request_type type; @@ -540,7 +541,7 @@ static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, static int kgd_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index d36219ecd3dd..7918a00cbb5a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -46,26 +46,26 @@ enum hqd_dequeue_request_type { SAVE_WAVES }; -static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, - uint32_t queue, uint32_t vmid) +static void kgd_gfx_v9_lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe, + uint32_t queue, uint32_t vmid, uint32_t inst) { mutex_lock(&adev->srbm_mutex); - soc15_grbm_select(adev, mec, pipe, queue, vmid, 0); + soc15_grbm_select(adev, mec, pipe, queue, vmid, inst); } -static void unlock_srbm(struct amdgpu_device *adev) +static void kgd_gfx_v9_unlock_srbm(struct amdgpu_device *adev, uint32_t inst) { - soc15_grbm_select(adev, 0, 0, 0, 0, 0); + soc15_grbm_select(adev, 0, 0, 0, 0, inst); mutex_unlock(&adev->srbm_mutex); } void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(adev, mec, pipe, queue_id, 0); + kgd_gfx_v9_lock_srbm(adev, mec, pipe, queue_id, 0, inst); } uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev, @@ -77,28 +77,28 @@ uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev, return 1ull << bit; } -void kgd_gfx_v9_release_queue(struct amdgpu_device *adev) +void kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst) { - unlock_srbm(adev); + 
kgd_gfx_v9_unlock_srbm(adev, inst); } void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases) + uint32_t sh_mem_bases, uint32_t inst) { - lock_srbm(adev, 0, 0, 0, vmid); + kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG), sh_mem_config); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmSH_MEM_BASES), sh_mem_bases); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmSH_MEM_CONFIG), sh_mem_config); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmSH_MEM_BASES), sh_mem_bases); /* APE1 no longer exists on GFX9 */ - unlock_srbm(adev); + kgd_gfx_v9_unlock_srbm(adev, inst); } int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid) + unsigned int vmid, uint32_t inst) { /* * We have to assume that there is no outstanding mapping. @@ -156,7 +156,8 @@ int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, * but still works */ -int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst) { uint32_t mec; uint32_t pipe; @@ -164,13 +165,13 @@ int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id) mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); - lock_srbm(adev, mec, pipe, 0, 0); + kgd_gfx_v9_lock_srbm(adev, mec, pipe, 0, 0, inst); - WREG32_SOC15(GC, 0, mmCPC_INT_CNTL, + WREG32_SOC15(GC, inst, mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK | CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK); - unlock_srbm(adev); + kgd_gfx_v9_unlock_srbm(adev, inst); return 0; } @@ -220,7 +221,8 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, - uint32_t wptr_mask, struct mm_struct *mm) + uint32_t wptr_mask, struct mm_struct *mm, + uint32_t inst) { struct v9_mqd *m; uint32_t *mqd_hqd; @@ -228,21 +230,22 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, m = get_mqd(mqd); - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); /* HQD registers extend from CP_MQD_BASE_ADDR to CP_HQD_EOP_WPTR_MEM. */ mqd_hqd = &m->cp_mqd_base_addr_lo; - hqd_base = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); + hqd_base = SOC15_REG_OFFSET(GC, inst, mmCP_MQD_BASE_ADDR); for (reg = hqd_base; - reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + reg <= SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_HI); reg++) WREG32_RLC(reg, mqd_hqd[reg - hqd_base]); /* Activate doorbell logic before triggering WPTR poll. 
*/ data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control, CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_DOORBELL_CONTROL), data); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_DOORBELL_CONTROL), + data); if (wptr) { /* Don't read wptr with get_user because the user @@ -271,43 +274,43 @@ int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, guessed_wptr += m->cp_hqd_pq_wptr_lo & ~(queue_size - 1); guessed_wptr += (uint64_t)m->cp_hqd_pq_wptr_hi << 32; - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_LO), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_LO), lower_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_HI), upper_32_bits(guessed_wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_POLL_ADDR), lower_32_bits((uintptr_t)wptr)); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_POLL_ADDR_HI), upper_32_bits((uintptr_t)wptr)); WREG32_SOC15(GC, 0, mmCP_PQ_WPTR_POLL_CNTL1, (uint32_t)kgd_gfx_v9_get_queue_mask(adev, pipe_id, queue_id)); } /* Start the EOP fetcher */ - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_EOP_RPTR), + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_EOP_RPTR), REG_SET_FIELD(m->cp_hqd_eop_rptr, CP_HQD_EOP_RPTR, INIT_FETCHER, 1)); data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1); - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_ACTIVE), data); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_ACTIVE), data); - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return 0; } int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off) + uint32_t doorbell_off, uint32_t inst) { - struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring; + struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[inst].ring; struct v9_mqd *m; uint32_t mec, pipe; int r; m = get_mqd(mqd); - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1; pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec); @@ -315,7 +318,7 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, pr_debug("kfd: set HIQ, mec:%d, pipe:%d, queue:%d.\n", mec, pipe, queue_id); - spin_lock(&adev->gfx.kiq[0].ring_lock); + spin_lock(&adev->gfx.kiq[inst].ring_lock); r = amdgpu_ring_alloc(kiq_ring, 7); if (r) { pr_err("Failed to alloc KIQ (%d).\n", r); @@ -342,15 +345,15 @@ int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, amdgpu_ring_commit(kiq_ring); out_unlock: - spin_unlock(&adev->gfx.kiq[0].ring_lock); - kgd_gfx_v9_release_queue(adev); + spin_unlock(&adev->gfx.kiq[inst].ring_lock); + kgd_gfx_v9_release_queue(adev, inst); return r; } int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs) + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst) { uint32_t i = 0, reg; #define HQD_N_REGS 56 @@ -365,13 +368,13 @@ int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, if (*dump == NULL) return -ENOMEM; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); - for (reg = SOC15_REG_OFFSET(GC, 0, mmCP_MQD_BASE_ADDR); - reg <= SOC15_REG_OFFSET(GC, 0, mmCP_HQD_PQ_WPTR_HI); reg++) + 
for (reg = SOC15_REG_OFFSET(GC, inst, mmCP_MQD_BASE_ADDR); + reg <= SOC15_REG_OFFSET(GC, inst, mmCP_HQD_PQ_WPTR_HI); reg++) DUMP_REG(reg); - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); WARN_ON_ONCE(i != HQD_N_REGS); *n_regs = i; @@ -481,23 +484,23 @@ static int kgd_hqd_sdma_dump(struct amdgpu_device *adev, bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { uint32_t act; bool retval = false; uint32_t low, high; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); - act = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); + act = RREG32_SOC15(GC, inst, mmCP_HQD_ACTIVE); if (act) { low = lower_32_bits(queue_address >> 8); high = upper_32_bits(queue_address >> 8); - if (low == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE) && - high == RREG32_SOC15(GC, 0, mmCP_HQD_PQ_BASE_HI)) + if (low == RREG32_SOC15(GC, inst, mmCP_HQD_PQ_BASE) && + high == RREG32_SOC15(GC, inst, mmCP_HQD_PQ_BASE_HI)) retval = true; } - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return retval; } @@ -522,7 +525,7 @@ static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id) + uint32_t queue_id, uint32_t inst) { enum hqd_dequeue_request_type type; unsigned long end_jiffies; @@ -532,10 +535,10 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, if (amdgpu_in_reset(adev)) return -EIO; - kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id); + kgd_gfx_v9_acquire_queue(adev, pipe_id, queue_id, inst); if (m->cp_hqd_vmid == 0) - WREG32_FIELD15_RLC(GC, 0, RLC_CP_SCHEDULERS, scheduler1, 0); + WREG32_FIELD15_RLC(GC, inst, RLC_CP_SCHEDULERS, scheduler1, 0); switch (reset_type) { case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN: @@ -552,22 +555,22 @@ int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, break; } - WREG32_RLC(SOC15_REG_OFFSET(GC, 0, mmCP_HQD_DEQUEUE_REQUEST), type); + WREG32_RLC(SOC15_REG_OFFSET(GC, inst, mmCP_HQD_DEQUEUE_REQUEST), type); end_jiffies = (utimeout * HZ / 1000) + jiffies; while (true) { - temp = RREG32_SOC15(GC, 0, mmCP_HQD_ACTIVE); + temp = RREG32_SOC15(GC, inst, mmCP_HQD_ACTIVE); if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK)) break; if (time_after(jiffies, end_jiffies)) { pr_err("cp queue preemption time out.\n"); - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return -ETIME; } usleep_range(500, 1000); } - kgd_gfx_v9_release_queue(adev); + kgd_gfx_v9_release_queue(adev, inst); return 0; } @@ -624,14 +627,14 @@ bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd) + uint32_t sq_cmd, uint32_t inst) { uint32_t data = 0; mutex_lock(&adev->grbm_idx_mutex); - WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, gfx_index_val); - WREG32_SOC15(GC, 0, mmSQ_CMD, sq_cmd); + WREG32_SOC15_RLC_SHADOW(GC, inst, mmGRBM_GFX_INDEX, gfx_index_val); + WREG32_SOC15(GC, inst, mmSQ_CMD, sq_cmd); data = REG_SET_FIELD(data, GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES, 1); @@ -640,7 +643,7 @@ int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, data = REG_SET_FIELD(data, GRBM_GFX_INDEX, SE_BROADCAST_WRITES, 1); - WREG32_SOC15_RLC_SHADOW(GC, 0, mmGRBM_GFX_INDEX, data); + 
WREG32_SOC15_RLC_SHADOW(GC, inst, mmGRBM_GFX_INDEX, data); mutex_unlock(&adev->grbm_idx_mutex); return 0; @@ -685,7 +688,7 @@ static void unlock_spi_csq_mutexes(struct amdgpu_device *adev) * is being collected */ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, - int *wave_cnt, int *vmid) + int *wave_cnt, int *vmid, uint32_t inst) { int pipe_idx; int queue_slot; @@ -700,12 +703,12 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, *wave_cnt = 0; pipe_idx = queue_idx / adev->gfx.mec.num_queue_per_pipe; queue_slot = queue_idx % adev->gfx.mec.num_queue_per_pipe; - soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, 0); - reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, 0, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + + soc15_grbm_select(adev, 1, pipe_idx, queue_slot, 0, inst); + reg_val = RREG32_SOC15_IP(GC, SOC15_REG_OFFSET(GC, inst, mmSPI_CSQ_WF_ACTIVE_COUNT_0) + queue_slot); *wave_cnt = reg_val & SPI_CSQ_WF_ACTIVE_COUNT_0__COUNT_MASK; if (*wave_cnt != 0) - *vmid = (RREG32_SOC15(GC, 0, mmCP_HQD_VMID) & + *vmid = (RREG32_SOC15(GC, inst, mmCP_HQD_VMID) & CP_HQD_VMID__VMID_MASK) >> CP_HQD_VMID__VMID__SHIFT; } @@ -756,7 +759,7 @@ static void get_wave_count(struct amdgpu_device *adev, int queue_idx, * Reading registers referenced above involves programming GRBM appropriately */ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, - int *pasid_wave_cnt, int *max_waves_per_cu) + int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst) { int qidx; int vmid; @@ -772,7 +775,7 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, DECLARE_BITMAP(cp_queue_bitmap, KGD_MAX_QUEUES); lock_spi_csq_mutexes(adev); - soc15_grbm_select(adev, 1, 0, 0, 0, 0); + soc15_grbm_select(adev, 1, 0, 0, 0, inst); /* * Iterate through the shader engines and arrays of the device @@ -787,8 +790,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, for (se_idx = 0; se_idx < se_cnt; se_idx++) { for (sh_idx = 0; sh_idx < sh_cnt; sh_idx++) { - amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, 0); - queue_map = RREG32_SOC15(GC, 0, mmSPI_CSQ_WF_ACTIVE_STATUS); + amdgpu_gfx_select_se_sh(adev, se_idx, sh_idx, 0xffffffff, inst); + queue_map = RREG32_SOC15(GC, inst, mmSPI_CSQ_WF_ACTIVE_STATUS); /* * Assumption: queue map encodes following schema: four @@ -808,10 +811,11 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, continue; /* Get number of waves in flight and aggregate them */ - get_wave_count(adev, qidx, &wave_cnt, &vmid); + get_wave_count(adev, qidx, &wave_cnt, &vmid, + inst); if (wave_cnt != 0) { pasid_tmp = - RREG32(SOC15_REG_OFFSET(OSSSYS, 0, + RREG32(SOC15_REG_OFFSET(OSSSYS, inst, mmIH_VMID_0_LUT) + vmid); if (pasid_tmp == pasid) vmid_wave_cnt += wave_cnt; @@ -820,8 +824,8 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, } } - amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); - soc15_grbm_select(adev, 0, 0, 0, 0, 0); + amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, inst); + soc15_grbm_select(adev, 0, 0, 0, 0, inst); unlock_spi_csq_mutexes(adev); /* Update the output parameters and return */ @@ -831,27 +835,27 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, } void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr) + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst) { - lock_srbm(adev, 0, 0, 0, vmid); + 
kgd_gfx_v9_lock_srbm(adev, 0, 0, 0, vmid, inst); /* * Program TBA registers */ - WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_LO, + WREG32_SOC15(GC, inst, mmSQ_SHADER_TBA_LO, lower_32_bits(tba_addr >> 8)); - WREG32_SOC15(GC, 0, mmSQ_SHADER_TBA_HI, + WREG32_SOC15(GC, inst, mmSQ_SHADER_TBA_HI, upper_32_bits(tba_addr >> 8)); /* * Program TMA registers */ - WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_LO, + WREG32_SOC15(GC, inst, mmSQ_SHADER_TMA_LO, lower_32_bits(tma_addr >> 8)); - WREG32_SOC15(GC, 0, mmSQ_SHADER_TMA_HI, + WREG32_SOC15(GC, inst, mmSQ_SHADER_TMA_HI, upper_32_bits(tma_addr >> 8)); - unlock_srbm(adev); + kgd_gfx_v9_unlock_srbm(adev, inst); } const struct kfd2kgd_calls gfx_v9_kfd2kgd = { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 491273a02e30..a241299f4fbc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -25,41 +25,42 @@ void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, - uint32_t sh_mem_bases); + uint32_t sh_mem_bases, uint32_t inst); int kgd_gfx_v9_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid); -int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id); + unsigned int vmid, uint32_t inst); +int kgd_gfx_v9_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst); int kgd_gfx_v9_hqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); + struct mm_struct *mm, uint32_t inst); int kgd_gfx_v9_hiq_mqd_load(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off); + uint32_t doorbell_off, uint32_t inst); int kgd_gfx_v9_hqd_dump(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst); bool kgd_gfx_v9_hqd_is_occupied(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); int kgd_gfx_v9_hqd_destroy(struct amdgpu_device *adev, void *mqd, enum kfd_preempt_type reset_type, unsigned int utimeout, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); int kgd_gfx_v9_wave_control_execute(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd); + uint32_t sq_cmd, uint32_t inst); bool kgd_gfx_v9_get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); - void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base); void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, - int *pasid_wave_cnt, int *max_waves_per_cu); + int *pasid_wave_cnt, int *max_waves_per_cu, uint32_t inst); void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, + uint32_t inst); void kgd_gfx_v9_acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); uint64_t kgd_gfx_v9_get_queue_mask(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id); -void kgd_gfx_v9_release_queue(struct amdgpu_device *adev); +void 
kgd_gfx_v9_release_queue(struct amdgpu_device *adev, uint32_t inst); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 9afd3295ca85..33a9394f9e58 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -127,12 +127,16 @@ static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manag void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - return dqm->dev->kfd2kgd->program_sh_mem_settings( + int xcc = 0; + + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + dqm->dev->kfd2kgd->program_sh_mem_settings( dqm->dev->adev, qpd->vmid, qpd->sh_mem_config, qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit, - qpd->sh_mem_bases); + qpd->sh_mem_bases, + dqm->dev->start_xcc_id + xcc); } static void kfd_hws_hang(struct device_queue_manager *dqm) @@ -405,10 +409,14 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, static void program_trap_handler_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { + int xcc = 0; + if (dqm->dev->kfd2kgd->program_trap_handler_settings) - dqm->dev->kfd2kgd->program_trap_handler_settings( + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + dqm->dev->kfd2kgd->program_trap_handler_settings( dqm->dev->adev, qpd->vmid, - qpd->tba_addr, qpd->tma_addr); + qpd->tba_addr, qpd->tma_addr, + dqm->dev->start_xcc_id + xcc); } static int allocate_vmid(struct device_queue_manager *dqm, @@ -671,6 +679,7 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process struct kfd_process_device *pdd; int first_vmid_to_scan = dev->vm_info.first_vmid_kfd; int last_vmid_to_scan = dev->vm_info.last_vmid_kfd; + int xcc = 0; reg_sq_cmd.u32All = 0; reg_gfx_index.u32All = 0; @@ -715,9 +724,11 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL; reg_sq_cmd.bits.vm_id = vmid; - dev->kfd2kgd->wave_control_execute(dev->adev, + for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++) + dev->kfd2kgd->wave_control_execute(dev->adev, reg_gfx_index.u32All, - reg_sq_cmd.u32All); + reg_sq_cmd.u32All, + dev->start_xcc_id + xcc); return 0; } @@ -1229,17 +1240,32 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) { - return dqm->dev->kfd2kgd->set_pasid_vmid_mapping( - dqm->dev->adev, pasid, vmid); + int xcc = 0, ret; + + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { + ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping( + dqm->dev->adev, pasid, vmid, + dqm->dev->start_xcc_id + xcc); + if (ret) + break; + } + + return ret; } static void init_interrupts(struct device_queue_manager *dqm) { - unsigned int i; + unsigned int i, xcc; - for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) - if (is_pipe_enabled(dqm, 0, i)) - dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i); + for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { + if (is_pipe_enabled(dqm, 0, i)) { + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + dqm->dev->kfd2kgd->init_interrupts( + dqm->dev->adev, i, + dqm->dev->start_xcc_id + + xcc); + } + } } static void init_sdma_bitmaps(struct device_queue_manager *dqm) @@ -2455,44 +2481,49 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) struct device_queue_manager *dqm = data; uint32_t (*dump)[2], n_regs; int pipe, queue; - int r = 0; + int r = 0, xcc; + uint32_t inst; if (!dqm->sched_running) { seq_puts(m, 
" Device is stopped\n"); return 0; } - r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, + for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { + inst = dqm->dev->start_xcc_id + xcc; + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, - &dump, &n_regs); - if (!r) { - seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n", - KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, - KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), - KFD_CIK_HIQ_QUEUE); - seq_reg_dump(m, dump, n_regs); + &dump, &n_regs, inst); + if (!r) { + seq_printf(m, + " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n", + inst, KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, + KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), + KFD_CIK_HIQ_QUEUE); + seq_reg_dump(m, dump, n_regs); - kfree(dump); - } + kfree(dump); + } - for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { - int pipe_offset = pipe * get_queues_per_pipe(dqm); + for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) { + int pipe_offset = pipe * get_queues_per_pipe(dqm); - for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { - if (!test_bit(pipe_offset + queue, + for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) { + if (!test_bit(pipe_offset + queue, dqm->dev->kfd->shared_resources.cp_queue_bitmap)) - continue; + continue; - r = dqm->dev->kfd2kgd->hqd_dump( - dqm->dev->adev, pipe, queue, &dump, &n_regs); - if (r) - break; + r = dqm->dev->kfd2kgd->hqd_dump( + dqm->dev->adev, pipe, queue, &dump, &n_regs, inst); + if (r) + break; - seq_printf(m, " CP Pipe %d, Queue %d\n", - pipe, queue); - seq_reg_dump(m, dump, n_regs); + seq_printf(m, " Inst %d, CP Pipe %d, Queue %d\n", + inst, pipe, queue); + seq_reg_dump(m, dump, n_regs); - kfree(dump); + kfree(dump); + } } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index 074f6075ccc7..d81125421aaf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -190,7 +190,7 @@ int kfd_hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd, struct queue_properties *p, struct mm_struct *mms) { return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id, - queue_id, p->doorbell_off); + queue_id, p->doorbell_off, 0); } int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd, @@ -198,7 +198,7 @@ int kfd_destroy_mqd_cp(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, mqd, type, timeout, - pipe_id, queue_id); + pipe_id, queue_id, 0); } void kfd_free_mqd_cp(struct mqd_manager *mm, void *mqd, @@ -217,7 +217,7 @@ bool kfd_is_occupied_cp(struct mqd_manager *mm, void *mqd, uint32_t queue_id) { return mm->dev->kfd2kgd->hqd_is_occupied(mm->dev->adev, queue_address, - pipe_id, queue_id); + pipe_id, queue_id, 0); } int kfd_load_mqd_sdma(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index ca1966466759..eb11940bec34 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -167,7 +167,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, wptr_mask, mms); + wptr_shift, wptr_mask, mms, 0); } static void __update_mqd(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index c9565ea99df5..d54c6fdebbb6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -151,7 +151,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms); + wptr_shift, 0, mms, 0); return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index dff171b54b5c..338d639c1898 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -184,7 +184,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms); + wptr_shift, 0, mms, 0); return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index b46c984b3a17..b53cd8f9620b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -218,7 +218,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms); + wptr_shift, 0, mms, 0); } static void update_mqd(struct mqd_manager *mm, void *mqd, @@ -501,13 +501,15 @@ static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd, { int xcc, err; void *xcc_mqd; + uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { xcc_mqd = mqd + hiq_mqd_size * xcc; err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd, pipe_id, queue_id, - p->doorbell_off); + p->doorbell_off, + start_inst+xcc); if (err) { pr_debug("Failed to load HIQ MQD for XCC: %d\n", xcc); break; @@ -523,13 +525,14 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, { int xcc = 0, err; void *xcc_mqd; + uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { xcc_mqd = mqd + hiq_mqd_size * xcc; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, timeout, pipe_id, - queue_id); + queue_id, start_inst+xcc); if (err) { pr_debug("Destroy MQD failed for xcc: %d\n", xcc); break; @@ -641,6 +644,7 @@ static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, void *xcc_mqd; struct v9_mqd *m; uint64_t mqd_offset; + uint32_t start_inst = mm->dev->start_xcc_id; m = get_mqd(mqd); mqd_offset = m->cp_mqd_stride_size; @@ -649,7 +653,7 @@ static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, xcc_mqd = mqd + mqd_offset * xcc; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, timeout, pipe_id, - queue_id); + queue_id, start_inst+xcc); if (err) { pr_debug("Destroy MQD failed for xcc: %d\n", xcc); break; @@ -667,6 +671,7 @@ static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); int xcc = 0, err; void *xcc_mqd; + uint32_t start_inst = mm->dev->start_xcc_id; uint64_t mqd_stride_size = mm->mqd_stride(mm, p); for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { @@ -674,7 +679,7 @@ static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, err = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, xcc_mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms); + wptr_shift, 0, mms, start_inst+xcc); if (err) { pr_debug("Load MQD failed for xcc: %d\n", xcc); break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index 8736a3cdbe1e..ebf963f42b51 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -165,7 +165,7 @@ static int load_mqd(struct mqd_manager *mm, void *mqd, return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, (uint32_t __user *)p->write_ptr, - wptr_shift, wptr_mask, mms); + wptr_shift, wptr_mask, mms, 0); } static void __update_mqd(struct mqd_manager *mm, void *mqd, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 66e021889c64..888590dfa646 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -290,7 +290,7 @@ static int kfd_get_cu_occupancy(struct attribute *attr, char *buffer) wave_cnt = 0; max_waves_per_cu = 0; dev->kfd2kgd->get_cu_occupancy(dev->adev, proc->pasid, &wave_cnt, - &max_waves_per_cu); + &max_waves_per_cu, 0); /* Translate wave count to number of compute units */ cu_cnt = (wave_cnt + (max_waves_per_cu - 1)) / max_waves_per_cu; diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h index 5cb3e8634739..8cb3dbcae3e4 100644 --- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h +++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h @@ -230,28 +230,30 @@ struct kfd2kgd_calls { /* Register access functions */ void (*program_sh_mem_settings)(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, - uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases); + uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases, + uint32_t inst); int (*set_pasid_vmid_mapping)(struct amdgpu_device *adev, u32 pasid, - unsigned int vmid); + unsigned int vmid, uint32_t inst); - int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id); + int (*init_interrupts)(struct amdgpu_device *adev, uint32_t pipe_id, + uint32_t inst); int (*hqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, uint32_t __user *wptr, uint32_t wptr_shift, uint32_t wptr_mask, - struct mm_struct *mm); + struct mm_struct *mm, uint32_t inst); int (*hiq_mqd_load)(struct amdgpu_device *adev, void *mqd, uint32_t pipe_id, uint32_t queue_id, - uint32_t doorbell_off); + uint32_t doorbell_off, uint32_t inst); int (*hqd_sdma_load)(struct amdgpu_device *adev, void *mqd, uint32_t __user *wptr, struct mm_struct *mm); int (*hqd_dump)(struct amdgpu_device *adev, uint32_t pipe_id, uint32_t queue_id, - uint32_t (**dump)[2], uint32_t *n_regs); + uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst); int (*hqd_sdma_dump)(struct amdgpu_device *adev, uint32_t engine_id, uint32_t queue_id, @@ -259,12 +261,12 @@ struct kfd2kgd_calls { bool (*hqd_is_occupied)(struct amdgpu_device *adev, uint64_t queue_address, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); int (*hqd_destroy)(struct amdgpu_device *adev, void 
*mqd, enum kfd_preempt_type reset_type, unsigned int timeout, uint32_t pipe_id, - uint32_t queue_id); + uint32_t queue_id, uint32_t inst); bool (*hqd_sdma_is_occupied)(struct amdgpu_device *adev, void *mqd); @@ -273,7 +275,7 @@ struct kfd2kgd_calls { int (*wave_control_execute)(struct amdgpu_device *adev, uint32_t gfx_index_val, - uint32_t sq_cmd); + uint32_t sq_cmd, uint32_t inst); bool (*get_atc_vmid_pasid_mapping_info)(struct amdgpu_device *adev, uint8_t vmid, uint16_t *p_pasid); @@ -290,9 +292,10 @@ struct kfd2kgd_calls { uint32_t (*read_vmid_from_vmfault_reg)(struct amdgpu_device *adev); void (*get_cu_occupancy)(struct amdgpu_device *adev, int pasid, - int *wave_cnt, int *max_waves_per_cu); + int *wave_cnt, int *max_waves_per_cu, uint32_t inst); void (*program_trap_handler_settings)(struct amdgpu_device *adev, - uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr); + uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, + uint32_t inst); }; #endif /* KGD_KFD_INTERFACE_H_INCLUDED */ -- cgit From 7fe51e6fd2368b358441a1f6e0c94f4cd7e0720f Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:25:25 -0400 Subject: drm/amdkfd: Update context save handling for multi XCC setup (v2) Context save handling needs to be updated for a multi XCC setup: - On a multi XCC setup, KFD needs to report context save base address and size for each XCC in MQD. - Thunk will allocate a large context save area covering all XCCs which will be equal to: num_of_xccs in a partition * size of context save area for 1 XCC. However, it will report only the size of context save area for 1 XCC only in the ioctl call. - Driver then setups the MQD correctly using the size passed from Thunk and information about number of XCCs in a partition. - Update get_wave_state function to return context save area for all XCCs in the partition. v2: update the get_wave_state function for mqd manager v11 (Morris) Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Morris Zhang Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 1 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 62 +++++++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 1 + 6 files changed, 67 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 33a9394f9e58..01ea038cab6d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -2095,8 +2095,8 @@ static int get_wave_state(struct device_queue_manager *dqm, * and the queue should be protected against destruction by the process * lock. 
*/ - return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack, - ctl_stack_used_size, save_area_used_size); + return mqd_mgr->get_wave_state(mqd_mgr, q->mqd, &q->properties, + ctl_stack, ctl_stack_used_size, save_area_used_size); } static void get_queue_checkpoint_info(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h index eb18be74f559..23158db7da03 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h @@ -97,6 +97,7 @@ struct mqd_manager { uint32_t queue_id); int (*get_wave_state)(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index d54c6fdebbb6..772c09b5821b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -227,6 +227,7 @@ static uint32_t read_doorbell_id(void *mqd) } static int get_wave_state(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 338d639c1898..632344b95d90 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -260,6 +260,7 @@ static uint32_t read_doorbell_id(void *mqd) } static int get_wave_state(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index b53cd8f9620b..5c9b3392758e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -295,6 +295,7 @@ static uint32_t read_doorbell_id(void *mqd) } static int get_wave_state(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) @@ -561,6 +562,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, int xcc = 0; struct kfd_mem_obj xcc_mqd_mem_obj; uint64_t xcc_gart_addr = 0; + uint64_t xcc_ctx_save_restore_area_address; uint64_t offset = mm->mqd_stride(mm, q); memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); @@ -570,6 +572,23 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); m->cp_mqd_stride_size = offset; + + /* + * Update the CWSR address for each XCC if CWSR is enabled + * and CWSR area is allocated in thunk + */ + if (mm->dev->kfd->cwsr_enabled && + q->ctx_save_restore_area_address) { + xcc_ctx_save_restore_area_address = + q->ctx_save_restore_area_address + + (xcc * q->ctx_save_restore_area_size); + + m->cp_hqd_ctx_save_base_addr_lo = + lower_32_bits(xcc_ctx_save_restore_area_address); + m->cp_hqd_ctx_save_base_addr_hi = + upper_32_bits(xcc_ctx_save_restore_area_address); + } + if (q->format == KFD_QUEUE_FORMAT_AQL) { m->compute_tg_chunk_size = 1; @@ -689,6 +708,46 @@ static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, return err; } +static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, + void __user *ctl_stack, + u32 *ctl_stack_used_size, 
+ u32 *save_area_used_size) +{ + int xcc, err = 0; + void *xcc_mqd; + void __user *xcc_ctl_stack; + uint64_t mqd_stride_size = mm->mqd_stride(mm, q); + u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0; + + for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + xcc_mqd = mqd + mqd_stride_size * xcc; + xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack + + q->ctx_save_restore_area_size * xcc); + + err = get_wave_state(mm, xcc_mqd, q, xcc_ctl_stack, + &tmp_ctl_stack_used_size, + &tmp_save_area_used_size); + if (err) + break; + + /* + * Set the ctl_stack_used_size and save_area_used_size to + * ctl_stack_used_size and save_area_used_size of XCC 0 when + * passing the info the user-space. + * For multi XCC, user-space would have to look at the header + * info of each Control stack area to determine the control + * stack size and save area used. + */ + if (xcc == 0) { + *ctl_stack_used_size = tmp_ctl_stack_used_size; + *save_area_used_size = tmp_save_area_used_size; + } + } + + return err; +} + #if defined(CONFIG_DEBUG_FS) static int debugfs_show_mqd(struct seq_file *m, void *data) @@ -726,7 +785,6 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->allocate_mqd = allocate_mqd; mqd->free_mqd = kfd_free_mqd_cp; mqd->is_occupied = kfd_is_occupied_cp; - mqd->get_wave_state = get_wave_state; mqd->get_checkpoint_info = get_checkpoint_info; mqd->checkpoint_mqd = checkpoint_mqd; mqd->restore_mqd = restore_mqd; @@ -740,11 +798,13 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type, mqd->load_mqd = load_mqd_v9_4_3; mqd->update_mqd = update_mqd_v9_4_3; mqd->destroy_mqd = destroy_mqd_v9_4_3; + mqd->get_wave_state = get_wave_state_v9_4_3; } else { mqd->init_mqd = init_mqd; mqd->load_mqd = load_mqd; mqd->update_mqd = update_mqd; mqd->destroy_mqd = kfd_destroy_mqd_cp; + mqd->get_wave_state = get_wave_state; } break; case KFD_MQD_TYPE_HIQ: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index ebf963f42b51..fe69492b1bb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -261,6 +261,7 @@ static void update_mqd_tonga(struct mqd_manager *mm, void *mqd, } static int get_wave_state(struct mqd_manager *mm, void *mqd, + struct queue_properties *q, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size) -- cgit From 5e4060123687c4f2c9fb855874f77b14f07526d6 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:34:38 -0400 Subject: drm/amdkfd: Call DQM stop during DQM uninitialize During DQM tear down, call DQM stop to unitialize HIQ and associated memory allocated during packet manager init. 
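A minimal sketch of the resulting teardown path (illustrative only, not part of
the patch; the names follow the existing DQM code and error handling is
omitted):

	void device_queue_manager_uninit(struct device_queue_manager *dqm)
	{
		/* stop() releases the HIQ and the packet-manager state set up
		 * in start(); it checks sched_running under the DQM lock, so
		 * calling it here is harmless if the device was already
		 * stopped.
		 */
		dqm->ops.stop(dqm);
		dqm->ops.uninitialize(dqm);
		/* HIQ/SDMA MQD memory is freed afterwards, as before */
	}
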
Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 8 ++++++++ 1 file changed, 8 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 01ea038cab6d..f78c1e7aad57 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1348,9 +1348,16 @@ static int start_nocpsch(struct device_queue_manager *dqm) static int stop_nocpsch(struct device_queue_manager *dqm) { + dqm_lock(dqm); + if (!dqm->sched_running) { + dqm_unlock(dqm); + return 0; + } + if (dqm->dev->adev->asic_type == CHIP_HAWAII) pm_uninit(&dqm->packet_mgr, false); dqm->sched_running = false; + dqm_unlock(dqm); return 0; } @@ -2423,6 +2430,7 @@ static void deallocate_hiq_sdma_mqd(struct kfd_node *dev, void device_queue_manager_uninit(struct device_queue_manager *dqm) { + dqm->ops.stop(dqm); dqm->ops.uninitialize(dqm); if (!dqm->dev->kfd->shared_resources.enable_mes) deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd); -- cgit From a805889a15315f7fa78c1c4bb2f1875c7c43f919 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:52:39 -0400 Subject: drm/amdkfd: Update SDMA queue management for GFX9.4.3 This patch updates SDMA queue management for multi XCC in GFX9.4.3. - Allocate/deallocate SDMA queues from the correct SDMA engines based on the partition mode. - Updates the kgd2kfd interface to fetch the correct SDMA register addresses. - It also fixes dumping correct SDMA queue info in debugfs. v2: squash in fix "drm/amdkfd: Fix XGMI SDMA user-mode queue allocation" Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c | 194 ++++++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 8 +- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 59 +++---- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 + 5 files changed, 227 insertions(+), 41 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c index 49d8087e469e..e81bdca53f42 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gc_9_4_3.c @@ -31,6 +31,192 @@ #include "oss/osssys_4_0_sh_mask.h" #include "v9_structs.h" #include "soc15.h" +#include "sdma/sdma_4_4_2_offset.h" +#include "sdma/sdma_4_4_2_sh_mask.h" + +static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd) +{ + return (struct v9_sdma_mqd *)mqd; +} + +static uint32_t get_sdma_rlc_reg_offset(struct amdgpu_device *adev, + unsigned int engine_id, + unsigned int queue_id) +{ + uint32_t sdma_engine_reg_base = SOC15_REG_OFFSET(SDMA0, engine_id, + regSDMA_RLC0_RB_CNTL) - + regSDMA_RLC0_RB_CNTL; + uint32_t retval = sdma_engine_reg_base + + queue_id * (regSDMA_RLC1_RB_CNTL - regSDMA_RLC0_RB_CNTL); + + pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n", engine_id, + queue_id, retval); + return retval; +} + +int kgd_gfx_v9_4_3_hqd_sdma_load(struct amdgpu_device *adev, void *mqd, + uint32_t __user *wptr, struct mm_struct *mm) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + unsigned long end_jiffies; + uint32_t data; + uint64_t data64; + uint64_t __user 
*wptr64 = (uint64_t __user *)wptr; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, + m->sdmax_rlcx_rb_cntl & (~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK)); + + end_jiffies = msecs_to_jiffies(2000) + jiffies; + while (true) { + data = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS); + if (data & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL_OFFSET, + m->sdmax_rlcx_doorbell_offset); + + data = REG_SET_FIELD(m->sdmax_rlcx_doorbell, SDMA_RLC0_DOORBELL, + ENABLE, 1); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, data); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 1); + if (read_user_wptr(mm, wptr64, data64)) { + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR, + lower_32_bits(data64)); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI, + upper_32_bits(data64)); + } else { + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR, + m->sdmax_rlcx_rb_rptr); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_WPTR_HI, + m->sdmax_rlcx_rb_rptr_hi); + } + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_MINOR_PTR_UPDATE, 0); + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE, m->sdmax_rlcx_rb_base); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_BASE_HI, + m->sdmax_rlcx_rb_base_hi); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_LO, + m->sdmax_rlcx_rb_rptr_addr_lo); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_ADDR_HI, + m->sdmax_rlcx_rb_rptr_addr_hi); + + data = REG_SET_FIELD(m->sdmax_rlcx_rb_cntl, SDMA_RLC0_RB_CNTL, + RB_ENABLE, 1); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, data); + + return 0; +} + +int kgd_gfx_v9_4_3_hqd_sdma_dump(struct amdgpu_device *adev, + uint32_t engine_id, uint32_t queue_id, + uint32_t (**dump)[2], uint32_t *n_regs) +{ + uint32_t sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, + engine_id, queue_id); + uint32_t i = 0, reg; +#undef HQD_N_REGS +#define HQD_N_REGS (19+6+7+12) +#define DUMP_REG(addr) do { \ + if (WARN_ON_ONCE(i >= HQD_N_REGS)) \ + break; \ + (*dump)[i][0] = (addr) << 2; \ + (*dump)[i++][1] = RREG32(addr); \ + } while (0) + + *dump = kmalloc_array(HQD_N_REGS * 2, sizeof(uint32_t), GFP_KERNEL); + if (*dump == NULL) + return -ENOMEM; + + for (reg = regSDMA_RLC0_RB_CNTL; reg <= regSDMA_RLC0_DOORBELL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_STATUS; reg <= regSDMA_RLC0_CSA_ADDR_HI; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_IB_SUB_REMAIN; + reg <= regSDMA_RLC0_MINOR_PTR_UPDATE; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + for (reg = regSDMA_RLC0_MIDCMD_DATA0; + reg <= regSDMA_RLC0_MIDCMD_CNTL; reg++) + DUMP_REG(sdma_rlc_reg_offset + reg); + + WARN_ON_ONCE(i != HQD_N_REGS); + *n_regs = i; + + return 0; +} + +bool kgd_gfx_v9_4_3_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t sdma_rlc_rb_cntl; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL); + + 
if (sdma_rlc_rb_cntl & SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK) + return true; + + return false; +} + +int kgd_gfx_v9_4_3_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd, + unsigned int utimeout) +{ + struct v9_sdma_mqd *m; + uint32_t sdma_rlc_reg_offset; + uint32_t temp; + unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies; + + m = get_sdma_mqd(mqd); + sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(adev, m->sdma_engine_id, + m->sdma_queue_id); + + temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL); + temp = temp & ~SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK; + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, temp); + + while (true) { + temp = RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_CONTEXT_STATUS); + if (temp & SDMA_RLC0_CONTEXT_STATUS__IDLE_MASK) + break; + if (time_after(jiffies, end_jiffies)) { + pr_err("SDMA RLC not idle in %s\n", __func__); + return -ETIME; + } + usleep_range(500, 1000); + } + + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_DOORBELL, 0); + WREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL, + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_CNTL) | + SDMA_RLC0_RB_CNTL__RB_ENABLE_MASK); + + m->sdmax_rlcx_rb_rptr = + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR); + m->sdmax_rlcx_rb_rptr_hi = + RREG32(sdma_rlc_reg_offset + regSDMA_RLC0_RB_RPTR_HI); + + return 0; +} static int kgd_gfx_v9_4_3_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid, unsigned int vmid, uint32_t inst) @@ -166,13 +352,13 @@ const struct kfd2kgd_calls gc_9_4_3_kfd2kgd = { .init_interrupts = kgd_gfx_v9_init_interrupts, .hqd_load = kgd_gfx_v9_4_3_hqd_load, .hiq_mqd_load = kgd_gfx_v9_hiq_mqd_load, - .hqd_sdma_load = kgd_arcturus_hqd_sdma_load, + .hqd_sdma_load = kgd_gfx_v9_4_3_hqd_sdma_load, .hqd_dump = kgd_gfx_v9_hqd_dump, - .hqd_sdma_dump = kgd_arcturus_hqd_sdma_dump, + .hqd_sdma_dump = kgd_gfx_v9_4_3_hqd_sdma_dump, .hqd_is_occupied = kgd_gfx_v9_hqd_is_occupied, - .hqd_sdma_is_occupied = kgd_arcturus_hqd_sdma_is_occupied, + .hqd_sdma_is_occupied = kgd_gfx_v9_4_3_hqd_sdma_is_occupied, .hqd_destroy = kgd_gfx_v9_hqd_destroy, - .hqd_sdma_destroy = kgd_arcturus_hqd_sdma_destroy, + .hqd_sdma_destroy = kgd_gfx_v9_4_3_hqd_sdma_destroy, .wave_control_execute = kgd_gfx_v9_wave_control_execute, .get_atc_vmid_pasid_mapping_info = kgd_gfx_v9_get_atc_vmid_pasid_mapping_info, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 37c6dc5c37bf..ec5f85ff34e5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -741,6 +741,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (!node) goto node_alloc_error; + node->node_id = i; node->adev = kfd->adev; node->kfd = kfd; node->kfd2kgd = kfd->kfd2kgd; @@ -1323,15 +1324,16 @@ unsigned int kfd_get_num_sdma_engines(struct kfd_node *node) { /* If XGMI is not supported, all SDMA engines are PCIe */ if (!node->adev->gmc.xgmi.supported) - return node->adev->sdma.num_instances; + return node->adev->sdma.num_instances/(int)node->kfd->num_nodes; - return min(node->adev->sdma.num_instances, 2); + return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2); } unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) { /* After reserved for PCIe, the rest of engines are XGMI */ - return node->adev->sdma.num_instances - kfd_get_num_sdma_engines(node); + return node->adev->sdma.num_instances/(int)node->kfd->num_nodes - + kfd_get_num_sdma_engines(node); } #if defined(CONFIG_DEBUG_FS) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index f78c1e7aad57..69419a53a14e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -124,6 +124,15 @@ static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manag return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; } +static void init_sdma_bitmaps(struct device_queue_manager *dqm) +{ + bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES); + bitmap_set(dqm->sdma_bitmap, 0, get_num_sdma_queues(dqm)); + + bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); + bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm)); +} + void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { @@ -1268,24 +1277,6 @@ static void init_interrupts(struct device_queue_manager *dqm) } } -static void init_sdma_bitmaps(struct device_queue_manager *dqm) -{ - unsigned int num_sdma_queues = - min_t(unsigned int, sizeof(dqm->sdma_bitmap)*8, - get_num_sdma_queues(dqm)); - unsigned int num_xgmi_sdma_queues = - min_t(unsigned int, sizeof(dqm->xgmi_sdma_bitmap)*8, - get_num_xgmi_sdma_queues(dqm)); - - if (num_sdma_queues) - dqm->sdma_bitmap = GENMASK_ULL(num_sdma_queues-1, 0); - if (num_xgmi_sdma_queues) - dqm->xgmi_sdma_bitmap = GENMASK_ULL(num_xgmi_sdma_queues-1, 0); - - dqm->sdma_bitmap &= ~get_reserved_sdma_queues_bitmap(dqm); - pr_info("sdma_bitmap: %llx\n", dqm->sdma_bitmap); -} - static int initialize_nocpsch(struct device_queue_manager *dqm) { int pipe, queue; @@ -1375,46 +1366,49 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, int bit; if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { - if (dqm->sdma_bitmap == 0) { + if (bitmap_empty(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { pr_err("No more SDMA queue to allocate\n"); return -ENOMEM; } if (restore_sdma_id) { /* Re-use existing sdma_id */ - if (!(dqm->sdma_bitmap & (1ULL << *restore_sdma_id))) { + if (!test_bit(*restore_sdma_id, dqm->sdma_bitmap)) { pr_err("SDMA queue already in use\n"); return -EBUSY; } - dqm->sdma_bitmap &= ~(1ULL << *restore_sdma_id); + clear_bit(*restore_sdma_id, dqm->sdma_bitmap); q->sdma_id = *restore_sdma_id; } else { /* Find first available sdma_id */ - bit = __ffs64(dqm->sdma_bitmap); - dqm->sdma_bitmap &= ~(1ULL << bit); + bit = find_first_bit(dqm->sdma_bitmap, + get_num_sdma_queues(dqm)); + clear_bit(bit, dqm->sdma_bitmap); q->sdma_id = bit; } - q->properties.sdma_engine_id = q->sdma_id % - kfd_get_num_sdma_engines(dqm->dev); + q->properties.sdma_engine_id = + dqm->dev->node_id * get_num_all_sdma_engines(dqm) + + q->sdma_id % kfd_get_num_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / kfd_get_num_sdma_engines(dqm->dev); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { - if (dqm->xgmi_sdma_bitmap == 0) { + if (bitmap_empty(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES)) { pr_err("No more XGMI SDMA queue to allocate\n"); return -ENOMEM; } if (restore_sdma_id) { /* Re-use existing sdma_id */ - if (!(dqm->xgmi_sdma_bitmap & (1ULL << *restore_sdma_id))) { + if (!test_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap)) { pr_err("SDMA queue already in use\n"); return -EBUSY; } - dqm->xgmi_sdma_bitmap &= ~(1ULL << *restore_sdma_id); + clear_bit(*restore_sdma_id, dqm->xgmi_sdma_bitmap); q->sdma_id = *restore_sdma_id; } else { - bit = __ffs64(dqm->xgmi_sdma_bitmap); - dqm->xgmi_sdma_bitmap &= ~(1ULL << bit); + bit = find_first_bit(dqm->xgmi_sdma_bitmap, + get_num_xgmi_sdma_queues(dqm)); + 
clear_bit(bit, dqm->xgmi_sdma_bitmap); q->sdma_id = bit; } /* sdma_engine_id is sdma id including @@ -1424,6 +1418,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, * PCIe-optimized ones */ q->properties.sdma_engine_id = + dqm->dev->node_id * get_num_all_sdma_engines(dqm) + kfd_get_num_sdma_engines(dqm->dev) + q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / @@ -1442,11 +1437,11 @@ static void deallocate_sdma_queue(struct device_queue_manager *dqm, if (q->properties.type == KFD_QUEUE_TYPE_SDMA) { if (q->sdma_id >= get_num_sdma_queues(dqm)) return; - dqm->sdma_bitmap |= (1ULL << q->sdma_id); + set_bit(q->sdma_id, dqm->sdma_bitmap); } else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) { if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm)) return; - dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id); + set_bit(q->sdma_id, dqm->xgmi_sdma_bitmap); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index e554a48f3054..b11c474d4067 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -239,8 +239,8 @@ struct device_queue_manager { unsigned int total_queue_count; unsigned int next_pipe_to_allocate; unsigned int *allocated_queues; - uint64_t sdma_bitmap; - uint64_t xgmi_sdma_bitmap; + DECLARE_BITMAP(sdma_bitmap, KFD_MAX_SDMA_QUEUES); + DECLARE_BITMAP(xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); /* the pasid mapping for each kfd vmid */ uint16_t vmid_pasid[VMID_NUM]; uint64_t pipelines_addr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 1337fcdf8958..5cfebcc8b305 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -113,6 +113,8 @@ #define KFD_UNMAP_LATENCY_MS (4000) +#define KFD_MAX_SDMA_QUEUES 128 + /* * 512 = 0x200 * The doorbell index distance between SDMA RLC (2*i) and (2*i+1) in the @@ -260,6 +262,7 @@ struct kfd_vmid_info { struct kfd_dev; struct kfd_node { + unsigned int node_id; struct amdgpu_device *adev; /* Duplicated here along with keeping * a copy in kfd_dev to save a hop */ -- cgit From 643e40d4c06f8c887af1789c7bf8d279e9c8e4cf Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Tue, 31 May 2022 16:25:16 -0400 Subject: drm/amdkfd: Fix SDMA in CPX mode When creating a user-mode SDMA queue, CP FW expects driver to use/set virtual SDMA engine id in MAP_QUEUES packet instead of using the physical SDMA engine id. Each partition node's virtual SDMA number should start from 0. However, when allocating doorbell for the queue, KFD needs to allocate the doorbell from doorbell space corresponding to the physical SDMA engine id, otherwise the hwardware will not see the doorbell press. 
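To make the virtual/physical split concrete (the engine counts below are
hypothetical, purely for illustration): if a GPU were partitioned into four
KFD nodes with two SDMA engines per node, a queue on node 2 with virtual
sdma_engine_id 1 would be mapped with engine id 1 in MAP_QUEUES, while its
doorbell must come from physical engine 2 * 2 + 1 = 5. A hedged sketch of that
translation, mirroring allocate_doorbell(); phys_sdma_engine_id is a made-up
local name, the patch indexes the doorbell table directly:

	/* sdma_engine_id is virtual (0-based per partition); offset it by this
	 * node's position to get the physical engine used for the doorbell.
	 */
	uint32_t phys_sdma_engine_id =
		qpd->dqm->dev->node_id * get_num_all_sdma_engines(qpd->dqm) +
		q->properties.sdma_engine_id;
	uint32_t valid_id = idx_offset[phys_sdma_engine_id] +
		(q->properties.sdma_queue_id & 1) *
		KFD_QUEUE_DOORBELL_MIRROR_OFFSET +
		(q->properties.sdma_queue_id >> 1);
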
Signed-off-by: Mukul Joshi Reviewed-by: Amber Lin Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 69419a53a14e..2b5c4b2dd242 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -363,7 +363,16 @@ static int allocate_doorbell(struct qcm_process_device *qpd, */ uint32_t *idx_offset = dev->kfd->shared_resources.sdma_doorbell_idx; - uint32_t valid_id = idx_offset[q->properties.sdma_engine_id] + + /* + * q->properties.sdma_engine_id corresponds to the virtual + * sdma engine number. However, for doorbell allocation, + * we need the physical sdma engine id in order to get the + * correct doorbell offset. + */ + uint32_t valid_id = idx_offset[qpd->dqm->dev->node_id * + get_num_all_sdma_engines(qpd->dqm) + + q->properties.sdma_engine_id] + (q->properties.sdma_queue_id & 1) * KFD_QUEUE_DOORBELL_MIRROR_OFFSET + (q->properties.sdma_queue_id >> 1); @@ -1388,7 +1397,6 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, } q->properties.sdma_engine_id = - dqm->dev->node_id * get_num_all_sdma_engines(dqm) + q->sdma_id % kfd_get_num_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / kfd_get_num_sdma_engines(dqm->dev); @@ -1418,7 +1426,6 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm, * PCIe-optimized ones */ q->properties.sdma_engine_id = - dqm->dev->node_id * get_num_all_sdma_engines(dqm) + kfd_get_num_sdma_engines(dqm->dev) + q->sdma_id % kfd_get_num_xgmi_sdma_engines(dqm->dev); q->properties.sdma_queue_id = q->sdma_id / @@ -2486,6 +2493,7 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) int pipe, queue; int r = 0, xcc; uint32_t inst; + uint32_t sdma_engine_start; if (!dqm->sched_running) { seq_puts(m, " Device is stopped\n"); @@ -2530,7 +2538,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) } } - for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) { + sdma_engine_start = dqm->dev->node_id * get_num_all_sdma_engines(dqm); + for (pipe = sdma_engine_start; + pipe < (sdma_engine_start + get_num_all_sdma_engines(dqm)); + pipe++) { for (queue = 0; queue < dqm->dev->kfd->device_info.num_sdma_queues_per_engine; queue++) { -- cgit From c4050ff1a43eec08498b1ed876efc6213592dba0 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Thu, 9 Feb 2023 16:30:53 +0530 Subject: drm/amdkfd: Use xcc mask for identifying xcc Instead of start xcc id and number of xcc per node, use the xcc mask which is the mask of logical ids of xccs belonging to a parition. 
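A hedged sketch of the idiom change, using init_interrupts() from this patch as
the example (illustrative only; i is the MEC pipe index as in the existing
code, and the mask value is made up):

	/* Before: a partition was described by a starting XCC instance and a
	 * count, so loops walked a contiguous range:
	 */
	for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++)
		dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i,
						   dqm->dev->start_xcc_id + xcc);

	/* After: the partition is described by a bitmask of logical XCC ids,
	 * e.g. xcc_mask = 0xC for a partition owning XCCs 2 and 3, and loops
	 * visit only the set bits:
	 */
	uint32_t xcc_mask = dqm->dev->xcc_mask;
	int xcc_id;

	for_each_inst(xcc_id, xcc_mask)
		dqm->dev->kfd2kgd->init_interrupts(dqm->dev->adev, i, xcc_id);

Note that for_each_inst() modifies the mask variable it iterates, which is why
a later patch in this series keeps it as the outermost loop when it has to be
nested with the per-pipe loop.
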
Signed-off-by: Lijo Lazar Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 9 +-- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 86 +++++++++++----------- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 71 +++++++++--------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 4 - drivers/gpu/drm/amd/amdkfd/kfd_process.c | 8 +- .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 8 +- 8 files changed, 95 insertions(+), 95 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 647c3313c27e..b5497d2ee984 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -745,15 +745,14 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, node->vm_info.vmid_num_kfd = vmid_num_kfd; node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx); /* TODO : Check if error handling is needed */ - if (node->xcp) + if (node->xcp) { amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX, &node->xcc_mask); - else + ++xcp_idx; + } else { node->xcc_mask = (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1; - - node->num_xcc_per_node = max(1U, kfd->adev->gfx.num_xcc_per_xcp); - node->start_xcc_id = node->num_xcc_per_node * i; + } if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && partition_mode == AMDGPU_CPX_PARTITION_MODE && diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2b5c4b2dd242..493b4b66f180 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -136,16 +136,14 @@ static void init_sdma_bitmaps(struct device_queue_manager *dqm) void program_sh_mem_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - int xcc = 0; + uint32_t xcc_mask = dqm->dev->xcc_mask; + int xcc_id; - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + for_each_inst(xcc_id, xcc_mask) dqm->dev->kfd2kgd->program_sh_mem_settings( - dqm->dev->adev, qpd->vmid, - qpd->sh_mem_config, - qpd->sh_mem_ape1_base, - qpd->sh_mem_ape1_limit, - qpd->sh_mem_bases, - dqm->dev->start_xcc_id + xcc); + dqm->dev->adev, qpd->vmid, qpd->sh_mem_config, + qpd->sh_mem_ape1_base, qpd->sh_mem_ape1_limit, + qpd->sh_mem_bases, xcc_id); } static void kfd_hws_hang(struct device_queue_manager *dqm) @@ -427,14 +425,14 @@ static void deallocate_doorbell(struct qcm_process_device *qpd, static void program_trap_handler_settings(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { - int xcc = 0; + uint32_t xcc_mask = dqm->dev->xcc_mask; + int xcc_id; if (dqm->dev->kfd2kgd->program_trap_handler_settings) - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + for_each_inst(xcc_id, xcc_mask) dqm->dev->kfd2kgd->program_trap_handler_settings( - dqm->dev->adev, qpd->vmid, - qpd->tba_addr, qpd->tma_addr, - dqm->dev->start_xcc_id + xcc); + dqm->dev->adev, qpd->vmid, qpd->tba_addr, + qpd->tma_addr, xcc_id); } static int allocate_vmid(struct device_queue_manager *dqm, @@ -697,7 +695,8 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process struct kfd_process_device *pdd; int first_vmid_to_scan = dev->vm_info.first_vmid_kfd; int last_vmid_to_scan = dev->vm_info.last_vmid_kfd; - int xcc = 0; + uint32_t xcc_mask = dev->xcc_mask; + int xcc_id; reg_sq_cmd.u32All = 0; 
reg_gfx_index.u32All = 0; @@ -742,11 +741,10 @@ static int dbgdev_wave_reset_wavefronts(struct kfd_node *dev, struct kfd_process reg_sq_cmd.bits.cmd = SQ_IND_CMD_CMD_KILL; reg_sq_cmd.bits.vm_id = vmid; - for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++) - dev->kfd2kgd->wave_control_execute(dev->adev, - reg_gfx_index.u32All, - reg_sq_cmd.u32All, - dev->start_xcc_id + xcc); + for_each_inst(xcc_id, xcc_mask) + dev->kfd2kgd->wave_control_execute( + dev->adev, reg_gfx_index.u32All, + reg_sq_cmd.u32All, xcc_id); return 0; } @@ -1258,12 +1256,12 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, unsigned int vmid) { - int xcc = 0, ret; + uint32_t xcc_mask = dqm->dev->xcc_mask; + int xcc_id, ret; - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { + for_each_inst(xcc_id, xcc_mask) { ret = dqm->dev->kfd2kgd->set_pasid_vmid_mapping( - dqm->dev->adev, pasid, vmid, - dqm->dev->start_xcc_id + xcc); + dqm->dev->adev, pasid, vmid, xcc_id); if (ret) break; } @@ -1273,15 +1271,14 @@ set_pasid_vmid_mapping(struct device_queue_manager *dqm, u32 pasid, static void init_interrupts(struct device_queue_manager *dqm) { - unsigned int i, xcc; + uint32_t xcc_mask = dqm->dev->xcc_mask; + unsigned int i, xcc_id; for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { if (is_pipe_enabled(dqm, 0, i)) { - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) + for_each_inst(xcc_id, xcc_mask) dqm->dev->kfd2kgd->init_interrupts( - dqm->dev->adev, i, - dqm->dev->start_xcc_id + - xcc); + dqm->dev->adev, i, xcc_id); } } } @@ -2283,7 +2280,7 @@ static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm) get_num_all_sdma_engines(dqm) * dev->kfd->device_info.num_sdma_queues_per_engine + (dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * - dqm->dev->num_xcc_per_node); + NUM_XCC(dqm->dev->xcc_mask)); retval = amdgpu_amdkfd_alloc_gtt_mem(dev->adev, size, &(mem_obj->gtt_mem), &(mem_obj->gpu_addr), @@ -2489,10 +2486,10 @@ static void seq_reg_dump(struct seq_file *m, int dqm_debugfs_hqds(struct seq_file *m, void *data) { struct device_queue_manager *dqm = data; + uint32_t xcc_mask = dqm->dev->xcc_mask; uint32_t (*dump)[2], n_regs; int pipe, queue; - int r = 0, xcc; - uint32_t inst; + int r = 0, xcc_id; uint32_t sdma_engine_start; if (!dqm->sched_running) { @@ -2500,16 +2497,18 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) return 0; } - for (xcc = 0; xcc < dqm->dev->num_xcc_per_node; xcc++) { - inst = dqm->dev->start_xcc_id + xcc; + for_each_inst(xcc_id, xcc_mask) { r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, - KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, - &dump, &n_regs, inst); + KFD_CIK_HIQ_PIPE, + KFD_CIK_HIQ_QUEUE, &dump, + &n_regs, xcc_id); if (!r) { - seq_printf(m, + seq_printf( + m, " Inst %d, HIQ on MEC %d Pipe %d Queue %d\n", - inst, KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1, - KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm), + xcc_id, + KFD_CIK_HIQ_PIPE / get_pipes_per_mec(dqm) + 1, + KFD_CIK_HIQ_PIPE % get_pipes_per_mec(dqm), KFD_CIK_HIQ_QUEUE); seq_reg_dump(m, dump, n_regs); @@ -2524,13 +2523,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data) dqm->dev->kfd->shared_resources.cp_queue_bitmap)) continue; - r = dqm->dev->kfd2kgd->hqd_dump( - dqm->dev->adev, pipe, queue, &dump, &n_regs, inst); + r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->adev, + pipe, queue, + &dump, &n_regs, + xcc_id); if (r) break; - seq_printf(m, " Inst %d, CP Pipe %d, Queue %d\n", - inst, pipe, queue); + seq_printf(m, + " Inst %d, CP Pipe %d, Queue %d\n", + xcc_id, pipe, queue); seq_reg_dump(m, dump, n_regs); 
kfree(dump); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c index d81125421aaf..863cf060af48 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c @@ -77,7 +77,7 @@ struct kfd_mem_obj *allocate_sdma_mqd(struct kfd_node *dev, dev->dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size; offset += dev->dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size * - dev->num_xcc_per_node; + NUM_XCC(dev->xcc_mask); mqd_mem_obj->gtt_mem = (void *)((uint64_t)dev->dqm->hiq_sdma_mqd.gtt_mem + offset); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index c781314b213c..226132ec3714 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -128,7 +128,7 @@ static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node, retval = amdgpu_amdkfd_alloc_gtt_mem(node->adev, (ALIGN(q->ctl_stack_size, PAGE_SIZE) + ALIGN(sizeof(struct v9_mqd), PAGE_SIZE)) * - node->num_xcc_per_node, + NUM_XCC(node->xcc_mask), &(mqd_mem_obj->gtt_mem), &(mqd_mem_obj->gpu_addr), (void *)&(mqd_mem_obj->cpu_ptr), true); @@ -482,7 +482,7 @@ static void init_mqd_hiq_v9_4_3(struct mqd_manager *mm, void **mqd, memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { kfd_get_hiq_xcc_mqd(mm->dev, &xcc_mqd_mem_obj, xcc); init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); @@ -506,21 +506,21 @@ static int hiq_load_mqd_kiq_v9_4_3(struct mqd_manager *mm, void *mqd, uint32_t pipe_id, uint32_t queue_id, struct queue_properties *p, struct mm_struct *mms) { - int xcc, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; - uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + hiq_mqd_size * xcc; + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + hiq_mqd_size * inst; err = mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, xcc_mqd, pipe_id, queue_id, - p->doorbell_off, - start_inst+xcc); + p->doorbell_off, xcc_id); if (err) { - pr_debug("Failed to load HIQ MQD for XCC: %d\n", xcc); + pr_debug("Failed to load HIQ MQD for XCC: %d\n", inst); break; } + ++inst; } return err; @@ -530,20 +530,21 @@ static int destroy_hiq_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - int xcc = 0, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; - uint32_t start_inst = mm->dev->start_xcc_id; uint64_t hiq_mqd_size = kfd_hiq_mqd_stride(mm->dev); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + hiq_mqd_size * xcc; + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + hiq_mqd_size * inst; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, timeout, pipe_id, - queue_id, start_inst+xcc); + queue_id, xcc_id); if (err) { - pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + pr_debug("Destroy MQD failed for xcc: %d\n", inst); break; } + ++inst; } return err; @@ -573,7 +574,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, uint32_t local_xcc_start = mm->dev->dqm->current_logical_xcc_start++; memset(&xcc_mqd_mem_obj, 0x0, sizeof(struct kfd_mem_obj)); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + 
for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { get_xcc_mqd(mqd_mem_obj, &xcc_mqd_mem_obj, offset*xcc); init_mqd(mm, (void **)&m, &xcc_mqd_mem_obj, &xcc_gart_addr, q); @@ -600,7 +601,7 @@ static void init_mqd_v9_4_3(struct mqd_manager *mm, void **mqd, m->compute_tg_chunk_size = 1; m->compute_current_logic_xcc_id = (local_xcc_start + xcc) % - mm->dev->num_xcc_per_node; + NUM_XCC(mm->dev->xcc_mask); switch (xcc) { case 0: @@ -633,7 +634,7 @@ static void update_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, int xcc = 0; uint64_t size = mm->mqd_stride(mm, q); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { m = get_mqd(mqd + size * xcc); update_mqd(mm, m, q, minfo); @@ -661,24 +662,25 @@ static int destroy_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, enum kfd_preempt_type type, unsigned int timeout, uint32_t pipe_id, uint32_t queue_id) { - int xcc = 0, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; struct v9_mqd *m; uint64_t mqd_offset; - uint32_t start_inst = mm->dev->start_xcc_id; m = get_mqd(mqd); mqd_offset = m->cp_mqd_stride_size; - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + mqd_offset * xcc; + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + mqd_offset * inst; err = mm->dev->kfd2kgd->hqd_destroy(mm->dev->adev, xcc_mqd, type, timeout, pipe_id, - queue_id, start_inst+xcc); + queue_id, xcc_id); if (err) { - pr_debug("Destroy MQD failed for xcc: %d\n", xcc); + pr_debug("Destroy MQD failed for xcc: %d\n", inst); break; } + ++inst; } return err; @@ -690,21 +692,22 @@ static int load_mqd_v9_4_3(struct mqd_manager *mm, void *mqd, { /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 
4 : 0); - int xcc = 0, err; + uint32_t xcc_mask = mm->dev->xcc_mask; + int xcc_id, err, inst = 0; void *xcc_mqd; - uint32_t start_inst = mm->dev->start_xcc_id; uint64_t mqd_stride_size = mm->mqd_stride(mm, p); - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { - xcc_mqd = mqd + mqd_stride_size * xcc; - err = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, xcc_mqd, - pipe_id, queue_id, - (uint32_t __user *)p->write_ptr, - wptr_shift, 0, mms, start_inst+xcc); + for_each_inst(xcc_id, xcc_mask) { + xcc_mqd = mqd + mqd_stride_size * inst; + err = mm->dev->kfd2kgd->hqd_load( + mm->dev->adev, xcc_mqd, pipe_id, queue_id, + (uint32_t __user *)p->write_ptr, wptr_shift, 0, mms, + xcc_id); if (err) { - pr_debug("Load MQD failed for xcc: %d\n", xcc); + pr_debug("Load MQD failed for xcc: %d\n", inst); break; } + ++inst; } return err; @@ -722,7 +725,7 @@ static int get_wave_state_v9_4_3(struct mqd_manager *mm, void *mqd, uint64_t mqd_stride_size = mm->mqd_stride(mm, q); u32 tmp_ctl_stack_used_size = 0, tmp_save_area_used_size = 0; - for (xcc = 0; xcc < mm->dev->num_xcc_per_node; xcc++) { + for (xcc = 0; xcc < NUM_XCC(mm->dev->xcc_mask); xcc++) { xcc_mqd = mqd + mqd_stride_size * xcc; xcc_ctl_stack = (void __user *)((uintptr_t)ctl_stack + q->ctx_save_restore_area_size * xcc); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 559ac5efdc26..02a90fd7f646 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -274,10 +274,6 @@ struct kfd_node { */ struct kfd_vmid_info vm_info; unsigned int id; /* topology stub index */ - unsigned int num_xcc_per_node; - unsigned int start_xcc_id; /* Starting XCC instance - * number for the node - */ uint32_t xcc_mask; /* Instance mask of XCCs present */ struct amdgpu_xcp *xcp; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index a6ff57f11472..7f7d1378a2f8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -2058,6 +2058,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) struct amdgpu_vm *vm = drm_priv_to_vm(pdd->drm_priv); uint64_t tlb_seq = amdgpu_vm_tlb_seq(vm); struct kfd_node *dev = pdd->dev; + uint32_t xcc_mask = dev->xcc_mask; int xcc = 0; /* @@ -2076,10 +2077,9 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) amdgpu_amdkfd_flush_gpu_tlb_vmid(dev->adev, pdd->qpd.vmid); } else { - for (xcc = 0; xcc < dev->num_xcc_per_node; xcc++) - amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->adev, - pdd->process->pasid, type, - dev->start_xcc_id + xcc); + for_each_inst(xcc, xcc_mask) + amdgpu_amdkfd_flush_gpu_tlb_pasid( + dev->adev, pdd->process->pasid, type, xcc); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 2b2ae0c9902b..a3c23d07c7df 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -946,7 +946,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data) seq_printf(m, " Compute queue on device %x\n", q->device->id); mqd_type = KFD_MQD_TYPE_CP; - num_xccs = q->device->num_xcc_per_node; + num_xccs = NUM_XCC(q->device->xcc_mask); break; default: seq_printf(m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index c7072fff778e..d2a42b6b1fa8 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -469,7 +469,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.cpu_cores_count); sysfs_show_32bit_prop(buffer, offs, "simd_count", dev->gpu ? (dev->node_props.simd_count * - dev->gpu->num_xcc_per_node) : 0); + NUM_XCC(dev->gpu->xcc_mask)) : 0); sysfs_show_32bit_prop(buffer, offs, "mem_banks_count", dev->node_props.mem_banks_count); sysfs_show_32bit_prop(buffer, offs, "caches_count", @@ -494,7 +494,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, dev->node_props.wave_front_size); sysfs_show_32bit_prop(buffer, offs, "array_count", dev->gpu ? (dev->node_props.array_count * - dev->gpu->num_xcc_per_node) : 0); + NUM_XCC(dev->gpu->xcc_mask)) : 0); sysfs_show_32bit_prop(buffer, offs, "simd_arrays_per_engine", dev->node_props.simd_arrays_per_engine); sysfs_show_32bit_prop(buffer, offs, "cu_per_simd_array", @@ -558,7 +558,7 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr, sysfs_show_64bit_prop(buffer, offs, "unique_id", dev->gpu->adev->unique_id); sysfs_show_32bit_prop(buffer, offs, "num_xcc", - dev->gpu->num_xcc_per_node); + NUM_XCC(dev->gpu->xcc_mask)); } return sysfs_show_32bit_prop(buffer, offs, "max_engine_clk_ccompute", @@ -1180,7 +1180,7 @@ static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) buf[4] = gpu->adev->pdev->bus->number; buf[5] = lower_32_bits(local_mem_size); buf[6] = upper_32_bits(local_mem_size); - buf[7] = gpu->start_xcc_id | (gpu->num_xcc_per_node << 16); + buf[7] = (ffs(gpu->xcc_mask) - 1) | (NUM_XCC(gpu->xcc_mask) << 16); for (i = 0, hashout = 0; i < 8; i++) hashout ^= hash_32(buf[i], KFD_GPU_ID_HASH_WIDTH); -- cgit From b695c97b580a1949d0dd96aa17b01d4de738eda3 Mon Sep 17 00:00:00 2001 From: Lijo Lazar Date: Mon, 29 May 2023 19:18:54 +0530 Subject: drm/amdkfd: Fix MEC pipe interrupt enablement for_each_inst modifies xcc_mask and therefore the loop doesn't initialize properly interrupts on all pipes. Keep looping through xcc as the outer loop to fix this issue. Fixes: c4050ff1a43e ("drm/amdkfd: Use xcc mask for identifying xcc") Signed-off-by: Lijo Lazar Reviewed-by: Hawking Zhang Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 493b4b66f180..80cddb46657f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1274,11 +1274,12 @@ static void init_interrupts(struct device_queue_manager *dqm) uint32_t xcc_mask = dqm->dev->xcc_mask; unsigned int i, xcc_id; - for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { - if (is_pipe_enabled(dqm, 0, i)) { - for_each_inst(xcc_id, xcc_mask) + for_each_inst(xcc_id, xcc_mask) { + for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++) { + if (is_pipe_enabled(dqm, 0, i)) { dqm->dev->kfd2kgd->init_interrupts( dqm->dev->adev, i, xcc_id); + } } } } -- cgit From 0ab2d7532b05a3e7c06fd3b0c8bd6b46c1dfb508 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 25 Mar 2022 14:55:30 -0400 Subject: drm/amdkfd: prepare per-process debug enable and disable The ROCm debugger will attach to a process to debug by PTRACE and will expect the KFD to prepare a process for the target PID, whether the target PID has opened the KFD device or not. 
This patch is to explicity handle this requirement. Further HW mode setting and runtime coordination requirements will be handled in following patches. In the case where the target process has not opened the KFD device, a new KFD process must be created for the target PID. The debugger as well as the target process for this case will have not acquired any VMs so handle process restoration to correctly account for this. To coordinate with HSA runtime, the debugger must be aware of the target process' runtime enablement status and will copy the runtime status information into the debugged KFD process for later query. On enablement, the debugger will subscribe to a set of exceptions where each exception events will notify the debugger through a pollable FIFO file descriptor that the debugger provides to the KFD to manage. Finally on process termination of either the debugger or the target, debugging must be disabled if it has not been done so. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/Makefile | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 102 ++++++++++++++++++++- drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 80 ++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 32 +++++++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 26 ++++-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 31 ++++++- drivers/gpu/drm/amd/amdkfd/kfd_process.c | 60 ++++++++---- 7 files changed, 304 insertions(+), 30 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_debug.c create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_debug.h (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index e758c2a24cd0..747754428073 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -55,7 +55,8 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_int_process_v9.o \ $(AMDKFD_PATH)/kfd_int_process_v11.o \ $(AMDKFD_PATH)/kfd_smi_events.o \ - $(AMDKFD_PATH)/kfd_crat.o + $(AMDKFD_PATH)/kfd_crat.o \ + $(AMDKFD_PATH)/kfd_debug.o ifneq ($(CONFIG_AMD_IOMMU_V2),) AMDKFD_FILES += $(AMDKFD_PATH)/kfd_iommu.o diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 00e34125987c..ee086a0a46df 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -44,6 +44,7 @@ #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" #include "amdgpu_dma_buf.h" +#include "kfd_debug.h" static long kfd_ioctl(struct file *, unsigned int, unsigned long); static int kfd_open(struct inode *, struct file *); @@ -142,10 +143,15 @@ static int kfd_open(struct inode *inode, struct file *filep) return -EPERM; } - process = kfd_create_process(filep); + process = kfd_create_process(current); if (IS_ERR(process)) return PTR_ERR(process); + if (kfd_process_init_cwsr_apu(process, filep)) { + kfd_unref_process(process); + return -EFAULT; + } + /* filep now owns the reference returned by kfd_create_process */ filep->private_data = process; @@ -2737,6 +2743,10 @@ static int kfd_ioctl_runtime_enable(struct file *filep, struct kfd_process *p, v static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, void *data) { struct kfd_ioctl_dbg_trap_args *args = data; + struct task_struct *thread = NULL; + struct mm_struct *mm = NULL; + struct pid *pid = NULL; + struct kfd_process *target = NULL; int r = 0; if (sched_policy == 
KFD_SCHED_POLICY_NO_HWS) { @@ -2744,9 +2754,81 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v return -EINVAL; } + pid = find_get_pid(args->pid); + if (!pid) { + pr_debug("Cannot find pid info for %i\n", args->pid); + r = -ESRCH; + goto out; + } + + thread = get_pid_task(pid, PIDTYPE_PID); + if (!thread) { + r = -ESRCH; + goto out; + } + + mm = get_task_mm(thread); + if (!mm) { + r = -ESRCH; + goto out; + } + + if (args->op == KFD_IOC_DBG_TRAP_ENABLE) { + bool create_process; + + rcu_read_lock(); + create_process = thread && thread != current && ptrace_parent(thread) == current; + rcu_read_unlock(); + + target = create_process ? kfd_create_process(thread) : + kfd_lookup_process_by_pid(pid); + } else { + target = kfd_lookup_process_by_pid(pid); + } + + if (!target) { + pr_debug("Cannot find process PID %i to debug\n", args->pid); + r = -ESRCH; + goto out; + } + + /* Check if target is still PTRACED. */ + rcu_read_lock(); + if (target != p && args->op != KFD_IOC_DBG_TRAP_DISABLE + && ptrace_parent(target->lead_thread) != current) { + pr_err("PID %i is not PTRACED and cannot be debugged\n", args->pid); + r = -EPERM; + } + rcu_read_unlock(); + + if (r) + goto out; + + mutex_lock(&target->mutex); + + if (args->op != KFD_IOC_DBG_TRAP_ENABLE && !target->debug_trap_enabled) { + pr_err("PID %i not debug enabled for op %i\n", args->pid, args->op); + r = -EINVAL; + goto unlock_out; + } + switch (args->op) { case KFD_IOC_DBG_TRAP_ENABLE: + if (target != p) + target->debugger_process = p; + + r = kfd_dbg_trap_enable(target, + args->enable.dbg_fd, + (void __user *)args->enable.rinfo_ptr, + &args->enable.rinfo_size); + if (!r) + target->exception_enable_mask = args->enable.exception_mask; + + pr_warn("Debug functions limited\n"); + break; case KFD_IOC_DBG_TRAP_DISABLE: + r = kfd_dbg_trap_disable(target); + break; case KFD_IOC_DBG_TRAP_SEND_RUNTIME_EVENT: case KFD_IOC_DBG_TRAP_SET_EXCEPTIONS_ENABLED: case KFD_IOC_DBG_TRAP_SET_WAVE_LAUNCH_OVERRIDE: @@ -2760,7 +2842,7 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v case KFD_IOC_DBG_TRAP_QUERY_EXCEPTION_INFO: case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: - pr_warn("Debugging not supported yet\n"); + pr_warn("Debug op %i not supported yet\n", args->op); r = -EACCES; break; default: @@ -2768,6 +2850,22 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v r = -EINVAL; } +unlock_out: + mutex_unlock(&target->mutex); + +out: + if (thread) + put_task_struct(thread); + + if (mm) + mmput(mm); + + if (pid) + put_pid(pid); + + if (target) + kfd_unref_process(target); + return r; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c new file mode 100644 index 000000000000..898cc1fe3d13 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -0,0 +1,80 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. 
+ * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#include "kfd_debug.h" +#include + +int kfd_dbg_trap_disable(struct kfd_process *target) +{ + if (!target->debug_trap_enabled) + return 0; + + fput(target->dbg_ev_file); + target->dbg_ev_file = NULL; + + if (target->debugger_process) { + atomic_dec(&target->debugger_process->debugged_process_count); + target->debugger_process = NULL; + } + + target->debug_trap_enabled = false; + kfd_unref_process(target); + + return 0; +} + +int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, + void __user *runtime_info, uint32_t *runtime_size) +{ + struct file *f; + uint32_t copy_size; + int r = 0; + + if (target->debug_trap_enabled) + return -EALREADY; + + copy_size = min((size_t)(*runtime_size), sizeof(target->runtime_info)); + + f = fget(fd); + if (!f) { + pr_err("Failed to get file for (%i)\n", fd); + return -EBADF; + } + + target->dbg_ev_file = f; + + /* We already hold the process reference but hold another one for the + * debug session. + */ + kref_get(&target->ref); + target->debug_trap_enabled = true; + + if (target->debugger_process) + atomic_inc(&target->debugger_process->debugged_process_count); + + if (copy_to_user(runtime_info, (void *)&target->runtime_info, copy_size)) + r = -EFAULT; + + *runtime_size = sizeof(target->runtime_info); + + return r; +} diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h new file mode 100644 index 000000000000..a8abfe2a0a14 --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -0,0 +1,32 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. + */ + +#ifndef KFD_DEBUG_EVENTS_H_INCLUDED +#define KFD_DEBUG_EVENTS_H_INCLUDED + +#include "kfd_priv.h" + +int kfd_dbg_trap_disable(struct kfd_process *target); +int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, + void __user *runtime_info, + uint32_t *runtime_info_size); +#endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 80cddb46657f..2baa0781eafc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1006,6 +1006,14 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, goto out; pdd = qpd_to_pdd(qpd); + + /* The debugger creates processes that temporarily have not acquired + * all VMs for all devices and has no VMs itself. + * Skip queue eviction on process eviction. + */ + if (!pdd->drm_priv) + goto out; + pr_debug_ratelimited("Evicting PASID 0x%x queues\n", pdd->process->pasid); @@ -1127,13 +1135,10 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, { struct queue *q; struct kfd_process_device *pdd; - uint64_t pd_base; uint64_t eviction_duration; int retval = 0; pdd = qpd_to_pdd(qpd); - /* Retrieve PD base */ - pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv); dqm_lock(dqm); if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */ @@ -1143,12 +1148,19 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, goto out; } + /* The debugger creates processes that temporarily have not acquired + * all VMs for all devices and has no VMs itself. + * Skip queue restore on process restore. + */ + if (!pdd->drm_priv) + goto vm_not_acquired; + pr_debug_ratelimited("Restoring PASID 0x%x queues\n", pdd->process->pasid); /* Update PD Base in QPD */ - qpd->page_table_base = pd_base; - pr_debug("Updated PD address to 0x%llx\n", pd_base); + qpd->page_table_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->drm_priv); + pr_debug("Updated PD address to 0x%llx\n", qpd->page_table_base); /* activate all active queues on the qpd */ list_for_each_entry(q, &qpd->queues_list, list) { @@ -1171,9 +1183,11 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); - qpd->evicted = 0; + eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp; atomic64_add(eviction_duration, &pdd->evict_duration_counter); +vm_not_acquired: + qpd->evicted = 0; out: dqm_unlock(dqm); return retval; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 3bd222e8f6c3..1b272f879b4c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -920,11 +920,33 @@ struct kfd_process { */ unsigned long last_restore_timestamp; + /* Indicates device process is debug attached with reserved vmid. */ + bool debug_trap_enabled; + + /* per-process-per device debug event fd file */ + struct file *dbg_ev_file; + + /* If the process is a kfd debugger, we need to know so we can clean + * up at exit time. 
If a process enables debugging on itself, it does + * its own clean-up, so we don't set the flag here. We track this by + * counting the number of processes this process is debugging. + */ + atomic_t debugged_process_count; + + /* If the process is a debugged, this is the debugger process */ + struct kfd_process *debugger_process; + /* Kobj for our procfs */ struct kobject *kobj; struct kobject *kobj_queues; struct attribute attr_pasid; + /* Keep track cwsr init */ + bool has_cwsr; + + /* Exception code enable mask and status */ + uint64_t exception_enable_mask; + /* shared virtual memory registered by this process */ struct svm_range_list svms; @@ -933,6 +955,10 @@ struct kfd_process { atomic_t poison; /* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */ bool queues_paused; + + /* Tracks runtime enable status */ + struct kfd_runtime_info runtime_info; + }; #define KFD_PROCESS_TABLE_SIZE 5 /* bits: 32 entries */ @@ -963,7 +989,7 @@ bool kfd_dev_is_large_bar(struct kfd_node *dev); int kfd_process_create_wq(void); void kfd_process_destroy_wq(void); void kfd_cleanup_processes(void); -struct kfd_process *kfd_create_process(struct file *filep); +struct kfd_process *kfd_create_process(struct task_struct *thread); struct kfd_process *kfd_get_process(const struct task_struct *task); struct kfd_process *kfd_lookup_process_by_pasid(u32 pasid); struct kfd_process *kfd_lookup_process_by_mm(const struct mm_struct *mm); @@ -1108,6 +1134,9 @@ void kfd_process_set_trap_handler(struct qcm_process_device *qpd, uint64_t tba_addr, uint64_t tma_addr); +/* CWSR initialization */ +int kfd_process_init_cwsr_apu(struct kfd_process *process, struct file *filep); + /* CRIU */ /* * Need to increment KFD_CRIU_PRIV_VERSION each time a change is made to any of the CRIU private diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 7f7d1378a2f8..ef67f5e37c6f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -44,6 +44,7 @@ struct mm_struct; #include "kfd_iommu.h" #include "kfd_svm.h" #include "kfd_smi_events.h" +#include "kfd_debug.h" /* * List of struct kfd_process (field kfd_process). @@ -69,7 +70,6 @@ static struct kfd_process *find_process(const struct task_struct *thread, bool ref); static void kfd_process_ref_release(struct kref *ref); static struct kfd_process *create_process(const struct task_struct *thread); -static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep); static void evict_process_worker(struct work_struct *work); static void restore_process_worker(struct work_struct *work); @@ -798,18 +798,19 @@ static void kfd_process_device_destroy_ib_mem(struct kfd_process_device *pdd) kfd_process_free_gpuvm(qpd->ib_mem, pdd, &qpd->ib_kaddr); } -struct kfd_process *kfd_create_process(struct file *filep) +struct kfd_process *kfd_create_process(struct task_struct *thread) { struct kfd_process *process; - struct task_struct *thread = current; int ret; - if (!thread->mm) + if (!(thread->mm && mmget_not_zero(thread->mm))) return ERR_PTR(-EINVAL); /* Only the pthreads threading model is supported. 
*/ - if (thread->group_leader->mm != thread->mm) + if (thread->group_leader->mm != thread->mm) { + mmput(thread->mm); return ERR_PTR(-EINVAL); + } /* * take kfd processes mutex before starting of process creation @@ -833,10 +834,6 @@ struct kfd_process *kfd_create_process(struct file *filep) if (IS_ERR(process)) goto out; - ret = kfd_process_init_cwsr_apu(process, filep); - if (ret) - goto out_destroy; - if (!procfs.kobj) goto out; @@ -870,16 +867,9 @@ out: if (!IS_ERR(process)) kref_get(&process->ref); mutex_unlock(&kfd_processes_mutex); + mmput(thread->mm); return process; - -out_destroy: - hash_del_rcu(&process->kfd_processes); - mutex_unlock(&kfd_processes_mutex); - synchronize_srcu(&kfd_processes_srcu); - /* kfd_process_free_notifier will trigger the cleanup */ - mmu_notifier_put(&process->mmu_notifier); - return ERR_PTR(ret); } struct kfd_process *kfd_get_process(const struct task_struct *thread) @@ -1180,6 +1170,25 @@ static void kfd_process_notifier_release_internal(struct kfd_process *p) /* Indicate to other users that MM is no longer valid */ p->mm = NULL; + kfd_dbg_trap_disable(p); + + if (atomic_read(&p->debugged_process_count) > 0) { + struct kfd_process *target; + unsigned int temp; + int idx = srcu_read_lock(&kfd_processes_srcu); + + hash_for_each_rcu(kfd_processes_table, temp, target, kfd_processes) { + if (target->debugger_process && target->debugger_process == p) { + mutex_lock_nested(&target->mutex, 1); + kfd_dbg_trap_disable(target); + mutex_unlock(&target->mutex); + if (atomic_read(&p->debugged_process_count) == 0) + break; + } + } + + srcu_read_unlock(&kfd_processes_srcu, idx); + } mmu_notifier_put(&p->mmu_notifier); } @@ -1259,11 +1268,14 @@ void kfd_cleanup_processes(void) mmu_notifier_synchronize(); } -static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) +int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) { unsigned long offset; int i; + if (p->has_cwsr) + return 0; + for (i = 0; i < p->n_pdds; i++) { struct kfd_node *dev = p->pdds[i]->dev; struct qcm_process_device *qpd = &p->pdds[i]->qpd; @@ -1292,6 +1304,8 @@ static int kfd_process_init_cwsr_apu(struct kfd_process *p, struct file *filep) qpd->tba_addr, qpd->tma_addr, qpd->cwsr_kaddr); } + p->has_cwsr = true; + return 0; } @@ -1434,6 +1448,10 @@ static struct kfd_process *create_process(const struct task_struct *thread) if (err) goto err_event_init; process->is_32bit_user_mode = in_compat_syscall(); + process->debug_trap_enabled = false; + process->debugger_process = NULL; + process->exception_enable_mask = 0; + atomic_set(&process->debugged_process_count, 0); process->pasid = kfd_pasid_alloc(); if (process->pasid == 0) { @@ -1967,8 +1985,10 @@ static void restore_process_worker(struct work_struct *work) */ p->last_restore_timestamp = get_jiffies_64(); - ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, - &p->ef); + /* VMs may not have been acquired yet during debugging. 
*/ + if (p->kgd_process_info) + ret = amdgpu_amdkfd_gpuvm_restore_process_bos(p->kgd_process_info, + &p->ef); if (ret) { pr_debug("Failed to restore BOs of pasid 0x%x, retry after %d ms\n", p->pasid, PROCESS_BACK_OFF_TIME_MS); -- cgit From 7cee6a6824a0429a6255abe91b5af01b9a01cd03 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 23 Mar 2023 17:17:20 -0400 Subject: drm/amdgpu: add configurable grace period for unmap queues The HWS schedule allows a grace period for wave completion prior to preemption for better performance by avoiding CWSR on waves that can potentially complete quickly. The debugger, on the other hand, will want to inspect wave status immediately after it actively triggers preemption (a suspend function to be provided). To minimize latency between preemption and debugger wave inspection, allow immediate preemption by setting the grace period to 0. Note that setting the preepmtion grace period to 0 will result in an infinite grace period being set due to a CP FW bug so set it to 1 for now. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c | 2 + .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c | 43 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h | 6 ++ .../gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c | 2 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 43 ++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h | 8 ++- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 63 +++++++++++++++------ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3 + drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 32 +++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 39 +++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c | 2 + drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 65 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 ++ 14 files changed, 295 insertions(+), 20 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c index a6f98141c29c..b811a0985050 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_aldebaran.c @@ -82,5 +82,7 @@ const struct kfd2kgd_calls aldebaran_kfd2kgd = { .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .enable_debug_trap = kgd_aldebaran_enable_debug_trap, .disable_debug_trap = kgd_aldebaran_disable_debug_trap, + .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c index d2918e5c0dea..a62bd0068515 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_arcturus.c @@ -410,6 +410,8 @@ const struct kfd2kgd_calls arcturus_kfd2kgd = { kgd_gfx_v9_set_vm_context_page_table_base, .enable_debug_trap = kgd_arcturus_enable_debug_trap, .disable_debug_trap = kgd_arcturus_disable_debug_trap, + .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = 
kgd_gfx_v9_program_trap_handler_settings }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c index 240f5006e278..98006c7021dd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.c @@ -803,6 +803,47 @@ uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, return 0; } +/* kgd_gfx_v10_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values + * The values read are: + * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. + * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads. + * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads. + * gws_wait_time -- Wait Count for Global Wave Syncs. + * que_sleep_wait_time -- Wait Count for Dequeue Retry. + * sch_wave_wait_time -- Wait Count for Scheduling Wave Message. + * sem_rearm_wait_time -- Wait Count for Semaphore re-arm. + * deq_retry_wait_time -- Wait Count for Global Wave Syncs. + */ +void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, + uint32_t *wait_times) + +{ + *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); +} + +void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data) +{ + *reg_data = wait_times; + + /* + * The CP cannont handle a 0 grace period input and will result in + * an infinite grace period being set so set to 1 to prevent this. + */ + if (grace_period == 0) + grace_period = 1; + + *reg_data = REG_SET_FIELD(*reg_data, + CP_IQ_WAIT_TIME2, + SCH_WAVE, + grace_period); + + *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2); +} + static void program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst) @@ -848,5 +889,7 @@ const struct kfd2kgd_calls gfx_v10_kfd2kgd = { .set_vm_context_page_table_base = set_vm_context_page_table_base, .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, .disable_debug_trap = kgd_gfx_v10_disable_debug_trap, + .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .program_trap_handler_settings = program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h index 251d61fbde07..1e993a213646 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10.h @@ -26,3 +26,9 @@ uint32_t kgd_gfx_v10_enable_debug_trap(struct amdgpu_device *adev, uint32_t kgd_gfx_v10_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid); +void kgd_gfx_v10_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v10_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c index 8b293f3dcbd2..387bdf4823c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v10_3.c @@ -672,6 +672,8 @@ const struct kfd2kgd_calls gfx_v10_3_kfd2kgd = { .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info_v10_3, .set_vm_context_page_table_base = set_vm_context_page_table_base_v10_3, 
.program_trap_handler_settings = program_trap_handler_settings_v10_3, + .get_iq_wait_times = kgd_gfx_v10_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v10_build_grace_period_packet_info, .enable_debug_trap = kgd_gfx_v10_enable_debug_trap, .disable_debug_trap = kgd_gfx_v10_disable_debug_trap }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c index 8d7d04704b00..829ee720cc44 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c @@ -739,6 +739,24 @@ uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, return 0; } +/* kgd_gfx_v9_get_iq_wait_times: Returns the mmCP_IQ_WAIT_TIME1/2 values + * The values read are: + * ib_offload_wait_time -- Wait Count for Indirect Buffer Offloads. + * atomic_offload_wait_time -- Wait Count for L2 and GDS Atomics Offloads. + * wrm_offload_wait_time -- Wait Count for WAIT_REG_MEM Offloads. + * gws_wait_time -- Wait Count for Global Wave Syncs. + * que_sleep_wait_time -- Wait Count for Dequeue Retry. + * sch_wave_wait_time -- Wait Count for Scheduling Wave Message. + * sem_rearm_wait_time -- Wait Count for Semaphore re-arm. + * deq_retry_wait_time -- Wait Count for Global Wave Syncs. + */ +void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, + uint32_t *wait_times) + +{ + *wait_times = RREG32(SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2)); +} + void kgd_gfx_v9_set_vm_context_page_table_base(struct amdgpu_device *adev, uint32_t vmid, uint64_t page_table_base) { @@ -926,6 +944,29 @@ void kgd_gfx_v9_get_cu_occupancy(struct amdgpu_device *adev, int pasid, adev->gfx.cu_info.max_waves_per_simd; } +void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data) +{ + *reg_data = wait_times; + + /* + * The CP cannont handle a 0 grace period input and will result in + * an infinite grace period being set so set to 1 to prevent this. + */ + if (grace_period == 0) + grace_period = 1; + + *reg_data = REG_SET_FIELD(*reg_data, + CP_IQ_WAIT_TIME2, + SCH_WAVE, + grace_period); + + *reg_offset = SOC15_REG_OFFSET(GC, 0, mmCP_IQ_WAIT_TIME2); +} + void kgd_gfx_v9_program_trap_handler_settings(struct amdgpu_device *adev, uint32_t vmid, uint64_t tba_addr, uint64_t tma_addr, uint32_t inst) { @@ -969,6 +1010,8 @@ const struct kfd2kgd_calls gfx_v9_kfd2kgd = { .set_vm_context_page_table_base = kgd_gfx_v9_set_vm_context_page_table_base, .enable_debug_trap = kgd_gfx_v9_enable_debug_trap, .disable_debug_trap = kgd_gfx_v9_disable_debug_trap, + .get_iq_wait_times = kgd_gfx_v9_get_iq_wait_times, + .build_grace_period_packet_info = kgd_gfx_v9_build_grace_period_packet_info, .get_cu_occupancy = kgd_gfx_v9_get_cu_occupancy, .program_trap_handler_settings = kgd_gfx_v9_program_trap_handler_settings, }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h index 9588ff055393..fed5b7f18b1a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.h @@ -20,8 +20,6 @@ * OTHER DEALINGS IN THE SOFTWARE. 
*/ - - void kgd_gfx_v9_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid, uint32_t sh_mem_config, uint32_t sh_mem_ape1_base, uint32_t sh_mem_ape1_limit, @@ -73,3 +71,9 @@ uint32_t kgd_gfx_v9_enable_debug_trap(struct amdgpu_device *adev, uint32_t kgd_gfx_v9_disable_debug_trap(struct amdgpu_device *adev, bool keep_trap_enabled, uint32_t vmid); +void kgd_gfx_v9_get_iq_wait_times(struct amdgpu_device *adev, uint32_t *wait_times); +void kgd_gfx_v9_build_grace_period_packet_info(struct amdgpu_device *adev, + uint32_t wait_times, + uint32_t grace_period, + uint32_t *reg_offset, + uint32_t *reg_data); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 2baa0781eafc..0b88a64e61fe 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -46,10 +46,13 @@ static int set_pasid_vmid_mapping(struct device_queue_manager *dqm, static int execute_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param); + uint32_t filter_param, + uint32_t grace_period); static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset); + uint32_t filter_param, + uint32_t grace_period, + bool reset); static int map_queues_cpsch(struct device_queue_manager *dqm); @@ -866,7 +869,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q, if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) { if (!dqm->dev->kfd->shared_resources.enable_mes) retval = unmap_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else if (prev_active) retval = remove_queue_mes(dqm, q, &pdd->qpd); @@ -1042,7 +1045,8 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm, retval = execute_queues_cpsch(dqm, qpd->is_debug ? 
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES : - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); out: dqm_unlock(dqm); @@ -1182,8 +1186,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm, } if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); - + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); eviction_duration = get_jiffies_64() - pdd->last_evict_timestamp; atomic64_add(eviction_duration, &pdd->evict_duration_counter); vm_not_acquired: @@ -1525,6 +1528,9 @@ static int initialize_cpsch(struct device_queue_manager *dqm) init_sdma_bitmaps(dqm); + if (dqm->dev->kfd2kgd->get_iq_wait_times) + dqm->dev->kfd2kgd->get_iq_wait_times(dqm->dev->adev, + &dqm->wait_times); return 0; } @@ -1563,8 +1569,9 @@ static int start_cpsch(struct device_queue_manager *dqm) dqm->is_hws_hang = false; dqm->is_resetting = false; dqm->sched_running = true; + if (!dqm->dev->kfd->shared_resources.enable_mes) - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); dqm_unlock(dqm); return 0; @@ -1589,7 +1596,7 @@ static int stop_cpsch(struct device_queue_manager *dqm) if (!dqm->is_hws_hang) { if (!dqm->dev->kfd->shared_resources.enable_mes) - unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, false); + unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD, false); else remove_all_queues_mes(dqm); } @@ -1631,7 +1638,8 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm, list_add(&kq->list, &qpd->priv_queue_list); increment_queue_count(dqm, qpd, kq->queue); qpd->is_debug = true; - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); dqm_unlock(dqm); return 0; @@ -1645,7 +1653,8 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm, list_del(&kq->list); decrement_queue_count(dqm, qpd, kq->queue); qpd->is_debug = false; - execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); /* * Unconditionally decrement this counter, regardless of the queue's * type. 
@@ -1722,7 +1731,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, if (!dqm->dev->kfd->shared_resources.enable_mes) retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, USE_DEFAULT_GRACE_PERIOD); else retval = add_queue_mes(dqm, q, qpd); if (retval) @@ -1811,7 +1820,9 @@ static int map_queues_cpsch(struct device_queue_manager *dqm) /* dqm->lock mutex has to be locked before calling this function */ static int unmap_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param, bool reset) + uint32_t filter_param, + uint32_t grace_period, + bool reset) { int retval = 0; struct mqd_manager *mqd_mgr; @@ -1823,6 +1834,12 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, if (!dqm->active_runlist) return retval; + if (grace_period != USE_DEFAULT_GRACE_PERIOD) { + retval = pm_update_grace_period(&dqm->packet_mgr, grace_period); + if (retval) + return retval; + } + retval = pm_send_unmap_queue(&dqm->packet_mgr, filter, filter_param, reset); if (retval) return retval; @@ -1855,6 +1872,13 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm, return -ETIME; } + /* We need to reset the grace period value for this device */ + if (grace_period != USE_DEFAULT_GRACE_PERIOD) { + if (pm_update_grace_period(&dqm->packet_mgr, + USE_DEFAULT_GRACE_PERIOD)) + pr_err("Failed to reset grace period\n"); + } + pm_release_ib(&dqm->packet_mgr); dqm->active_runlist = false; @@ -1870,7 +1894,7 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm, dqm_lock(dqm); retval = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_BY_PASID, - pasid, true); + pasid, USE_DEFAULT_GRACE_PERIOD, true); dqm_unlock(dqm); return retval; @@ -1879,13 +1903,14 @@ static int reset_queues_cpsch(struct device_queue_manager *dqm, /* dqm->lock mutex has to be locked before calling this function */ static int execute_queues_cpsch(struct device_queue_manager *dqm, enum kfd_unmap_queues_filter filter, - uint32_t filter_param) + uint32_t filter_param, + uint32_t grace_period) { int retval; if (dqm->is_hws_hang) return -EIO; - retval = unmap_queues_cpsch(dqm, filter, filter_param, false); + retval = unmap_queues_cpsch(dqm, filter, filter_param, grace_period, false); if (retval) return retval; @@ -1943,7 +1968,8 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, if (!dqm->dev->kfd->shared_resources.enable_mes) { decrement_queue_count(dqm, qpd, q); retval = execute_queues_cpsch(dqm, - KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0); + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD); if (retval == -ETIME) qpd->reset_wavefronts = true; } else { @@ -2228,7 +2254,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm, } if (!dqm->dev->kfd->shared_resources.enable_mes) - retval = execute_queues_cpsch(dqm, filter, 0); + retval = execute_queues_cpsch(dqm, filter, 0, USE_DEFAULT_GRACE_PERIOD); if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) { pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev); @@ -2589,7 +2615,8 @@ int dqm_debugfs_hang_hws(struct device_queue_manager *dqm) return r; } dqm->active_runlist = true; - r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0); + r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, + 0, USE_DEFAULT_GRACE_PERIOD); dqm_unlock(dqm); return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h 
b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index cd4383bb207f..d4dd3b4acbf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -37,6 +37,7 @@ #define KFD_MES_PROCESS_QUANTUM 100000 #define KFD_MES_GANG_QUANTUM 10000 +#define USE_DEFAULT_GRACE_PERIOD 0xffffffff struct device_process_node { struct qcm_process_device *qpd; @@ -259,6 +260,8 @@ struct device_queue_manager { /* used for GFX 9.4.3 only */ uint32_t current_logical_xcc_start; + + uint32_t wait_times; }; void device_queue_manager_init_cik( diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c index 2f54172e9175..401096c103b2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c @@ -370,6 +370,38 @@ out: return retval; } +int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period) +{ + int retval = 0; + uint32_t *buffer, size; + + size = pm->pmf->set_grace_period_size; + + mutex_lock(&pm->lock); + + if (size) { + kq_acquire_packet_buffer(pm->priv_queue, + size / sizeof(uint32_t), + (unsigned int **)&buffer); + + if (!buffer) { + pr_err("Failed to allocate buffer on kernel queue\n"); + retval = -ENOMEM; + goto out; + } + + retval = pm->pmf->set_grace_period(pm, buffer, grace_period); + if (!retval) + kq_submit_packet(pm->priv_queue); + else + kq_rollback_packet(pm->priv_queue); + } + +out: + mutex_unlock(&pm->lock); + return retval; +} + int pm_send_unmap_queue(struct packet_manager *pm, enum kfd_unmap_queues_filter filter, uint32_t filter_param, bool reset) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 44cf3a5f6fdb..1fda6dcf84b1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -262,6 +262,41 @@ static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer, return 0; } +static int pm_set_grace_period_v9(struct packet_manager *pm, + uint32_t *buffer, + uint32_t grace_period) +{ + struct pm4_mec_write_data_mmio *packet; + uint32_t reg_offset = 0; + uint32_t reg_data = 0; + + pm->dqm->dev->kfd2kgd->build_grace_period_packet_info( + pm->dqm->dev->adev, + pm->dqm->wait_times, + grace_period, + ®_offset, + ®_data); + + if (grace_period == USE_DEFAULT_GRACE_PERIOD) + reg_data = pm->dqm->wait_times; + + packet = (struct pm4_mec_write_data_mmio *)buffer; + memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio)); + + packet->header.u32All = pm_build_pm4_header(IT_WRITE_DATA, + sizeof(struct pm4_mec_write_data_mmio)); + + packet->bitfields2.dst_sel = dst_sel___write_data__mem_mapped_register; + packet->bitfields2.addr_incr = + addr_incr___write_data__do_not_increment_address; + + packet->bitfields3.dst_mmreg_addr = reg_offset; + + packet->data = reg_data; + + return 0; +} + static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer, enum kfd_unmap_queues_filter filter, uint32_t filter_param, bool reset) @@ -345,6 +380,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .set_resources = pm_set_resources_v9, .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, + .set_grace_period = pm_set_grace_period_v9, .query_status = pm_query_status_v9, .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process), @@ -352,6 +388,7 @@ const struct packet_manager_funcs kfd_v9_pm_funcs = { .set_resources_size = sizeof(struct 
pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio), .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = 0, }; @@ -362,6 +399,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = { .set_resources = pm_set_resources_v9, .map_queues = pm_map_queues_v9, .unmap_queues = pm_unmap_queues_v9, + .set_grace_period = pm_set_grace_period_v9, .query_status = pm_query_status_v9, .release_mem = NULL, .map_process_size = sizeof(struct pm4_mes_map_process_aldebaran), @@ -369,6 +407,7 @@ const struct packet_manager_funcs kfd_aldebaran_pm_funcs = { .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio), .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = 0, }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c index faf4772ed317..c1199d06d131 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_vi.c @@ -303,6 +303,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = { .set_resources = pm_set_resources_vi, .map_queues = pm_map_queues_vi, .unmap_queues = pm_unmap_queues_vi, + .set_grace_period = NULL, .query_status = pm_query_status_vi, .release_mem = pm_release_mem_vi, .map_process_size = sizeof(struct pm4_mes_map_process), @@ -310,6 +311,7 @@ const struct packet_manager_funcs kfd_vi_pm_funcs = { .set_resources_size = sizeof(struct pm4_mes_set_resources), .map_queues_size = sizeof(struct pm4_mes_map_queues), .unmap_queues_size = sizeof(struct pm4_mes_unmap_queues), + .set_grace_period_size = 0, .query_status_size = sizeof(struct pm4_mes_query_status), .release_mem_size = sizeof(struct pm4_mec_release_mem) }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index 2ad708c64012..206f1960857f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -584,6 +584,71 @@ struct pm4_mec_release_mem { #endif +#ifndef PM4_MEC_WRITE_DATA_DEFINED +#define PM4_MEC_WRITE_DATA_DEFINED + +enum WRITE_DATA_dst_sel_enum { + dst_sel___write_data__mem_mapped_register = 0, + dst_sel___write_data__tc_l2 = 2, + dst_sel___write_data__gds = 3, + dst_sel___write_data__memory = 5, + dst_sel___write_data__memory_mapped_adc_persistent_state = 6, +}; + +enum WRITE_DATA_addr_incr_enum { + addr_incr___write_data__increment_address = 0, + addr_incr___write_data__do_not_increment_address = 1 +}; + +enum WRITE_DATA_wr_confirm_enum { + wr_confirm___write_data__do_not_wait_for_write_confirmation = 0, + wr_confirm___write_data__wait_for_write_confirmation = 1 +}; + +enum WRITE_DATA_cache_policy_enum { + cache_policy___write_data__lru = 0, + cache_policy___write_data__stream = 1 +}; + + +struct pm4_mec_write_data_mmio { + union { + union PM4_MES_TYPE_3_HEADER header; /*header */ + unsigned int ordinal1; + }; + + union { + struct { + unsigned int reserved1:8; + unsigned int dst_sel:4; + unsigned int reserved2:4; + unsigned int addr_incr:1; + unsigned int reserved3:2; + unsigned int resume_vf:1; + unsigned int wr_confirm:1; + unsigned int reserved4:4; + unsigned int cache_policy:2; + unsigned int reserved5:5; + 
} bitfields2; + unsigned int ordinal2; + }; + + union { + struct { + unsigned int dst_mmreg_addr:18; + unsigned int reserved6:14; + } bitfields3; + unsigned int ordinal3; + }; + + uint32_t reserved7; + + uint32_t data; + +}; + +#endif + enum { CACHE_FLUSH_AND_INV_TS_EVENT = 0x00000014 }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 1b272f879b4c..4c912b7735b5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1350,6 +1350,8 @@ struct packet_manager_funcs { int (*unmap_queues)(struct packet_manager *pm, uint32_t *buffer, enum kfd_unmap_queues_filter mode, uint32_t filter_param, bool reset); + int (*set_grace_period)(struct packet_manager *pm, uint32_t *buffer, + uint32_t grace_period); int (*query_status)(struct packet_manager *pm, uint32_t *buffer, uint64_t fence_address, uint64_t fence_value); int (*release_mem)(uint64_t gpu_addr, uint32_t *buffer); @@ -1360,6 +1362,7 @@ struct packet_manager_funcs { int set_resources_size; int map_queues_size; int unmap_queues_size; + int set_grace_period_size; int query_status_size; int release_mem_size; }; @@ -1382,6 +1385,8 @@ int pm_send_unmap_queue(struct packet_manager *pm, void pm_release_ib(struct packet_manager *pm); +int pm_update_grace_period(struct packet_manager *pm, uint32_t grace_period); + /* Following PM funcs can be shared among VI and AI */ unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size); -- cgit From 97ae3c8cce96f3bebf883d0812cef5d3fdbe3e64 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Mon, 4 Apr 2022 12:27:43 -0400 Subject: drm/amdkfd: prepare map process for single process debug devices Older HW only supports debugging on a single process because the SPI debug mode setting registers are device global. The HWS has supplied a single pinned VMID (0xf) for MAP_PROCESS for debug purposes. To pin the VMID, the KFD will remove the VMID from the HWS dynamic VMID allocation via SET_RESOUCES so that a debugged process will never migrate away from its pinned VMID. The KFD is responsible for reserving and releasing this pinned VMID accordingly whenever the debugger attaches and detaches respectively. 
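In rough terms, the reservation sequence added to the device queue manager below is: drain the runlist, carve the last KFD VMID out of the bitmap handed to the HWS via SET_RESOURCES, then remap. The following is a condensed sketch of that flow with locking and error paths trimmed; the helper name is illustrative only, the real implementation is reserve_debug_trap_vmid() in the diff:

	static int reserve_debug_vmid_sketch(struct device_queue_manager *dqm)
	{
		int r;

		/* Drain the HWS runlist so the VMID pool change is atomic. */
		r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0,
				       USE_DEFAULT_GRACE_PERIOD, false);
		if (r)
			return r;

		/* Remove the last KFD VMID from the dynamic allocation pool
		 * and remember it as the pinned trap-debug VMID.
		 */
		dqm->dev->kfd->shared_resources.compute_vmid_bitmap &=
			~(1 << dqm->dev->vm_info.last_vmid_kfd);
		dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd;

		/* Hand the shrunk pool to the HWS (SET_RESOURCES) and remap. */
		r = set_sched_resources(dqm);
		if (r)
			return r;

		return map_queues_cpsch(dqm);
	}

Releasing the VMID on detach is the mirror image: OR the bit back into the bitmap, clear trap_debug_vmid, and refresh the scheduler resources.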
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 93 ++++++++++++++++++++++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 5 ++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 9 +++ drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h | 5 +- 4 files changed, 111 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0b88a64e61fe..cfe5bd59070e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -1525,6 +1525,7 @@ static int initialize_cpsch(struct device_queue_manager *dqm) dqm->gws_queue_count = 0; dqm->active_runlist = false; INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception); + dqm->trap_debug_vmid = 0; init_sdma_bitmaps(dqm); @@ -2501,6 +2502,98 @@ static void kfd_process_hw_exception(struct work_struct *work) amdgpu_amdkfd_gpu_reset(dqm->dev->adev); } +int reserve_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + int r; + int updated_vmid_mask; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + dqm_lock(dqm); + + if (dqm->trap_debug_vmid != 0) { + pr_err("Trap debug id already reserved\n"); + r = -EBUSY; + goto out_unlock; + } + + r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD, false); + if (r) + goto out_unlock; + + updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; + updated_vmid_mask &= ~(1 << dqm->dev->vm_info.last_vmid_kfd); + + dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; + dqm->trap_debug_vmid = dqm->dev->vm_info.last_vmid_kfd; + r = set_sched_resources(dqm); + if (r) + goto out_unlock; + + r = map_queues_cpsch(dqm); + if (r) + goto out_unlock; + + pr_debug("Reserved VMID for trap debug: %i\n", dqm->trap_debug_vmid); + +out_unlock: + dqm_unlock(dqm); + return r; +} + +/* + * Releases vmid for the trap debugger + */ +int release_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd) +{ + int r; + int updated_vmid_mask; + uint32_t trap_debug_vmid; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + dqm_lock(dqm); + trap_debug_vmid = dqm->trap_debug_vmid; + if (dqm->trap_debug_vmid == 0) { + pr_err("Trap debug id is not reserved\n"); + r = -EINVAL; + goto out_unlock; + } + + r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, + USE_DEFAULT_GRACE_PERIOD, false); + if (r) + goto out_unlock; + + updated_vmid_mask = dqm->dev->kfd->shared_resources.compute_vmid_bitmap; + updated_vmid_mask |= (1 << dqm->dev->vm_info.last_vmid_kfd); + + dqm->dev->kfd->shared_resources.compute_vmid_bitmap = updated_vmid_mask; + dqm->trap_debug_vmid = 0; + r = set_sched_resources(dqm); + if (r) + goto out_unlock; + + r = map_queues_cpsch(dqm); + if (r) + goto out_unlock; + + pr_debug("Released VMID for trap debug: %i\n", trap_debug_vmid); + +out_unlock: + dqm_unlock(dqm); + return r; +} + #if defined(CONFIG_DEBUG_FS) static void seq_reg_dump(struct seq_file *m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index 
d4dd3b4acbf0..bf7aa3f84182 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -250,6 +250,7 @@ struct device_queue_manager { struct kfd_mem_obj *fence_mem; bool active_runlist; int sched_policy; + uint32_t trap_debug_vmid; /* hw exception */ bool is_hws_hang; @@ -285,6 +286,10 @@ unsigned int get_queues_per_pipe(struct device_queue_manager *dqm); unsigned int get_pipes_per_mec(struct device_queue_manager *dqm); unsigned int get_num_sdma_queues(struct device_queue_manager *dqm); unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm); +int reserve_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); +int release_debug_trap_vmid(struct device_queue_manager *dqm, + struct qcm_process_device *qpd); static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 1fda6dcf84b1..0fe73dbd28af 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -34,6 +34,9 @@ static int pm_map_process_v9(struct packet_manager *pm, { struct pm4_mes_map_process *packet; uint64_t vm_page_table_base_addr = qpd->page_table_base; + struct kfd_node *kfd = pm->dqm->dev; + struct kfd_process_device *pdd = + container_of(qpd, struct kfd_process_device, qpd); packet = (struct pm4_mes_map_process *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_process)); @@ -49,6 +52,12 @@ static int pm_map_process_v9(struct packet_manager *pm, packet->bitfields14.sdma_enable = 1; packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled && + pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) { + packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid; + packet->bitfields2.new_debug = 1; + } + packet->sh_mem_config = qpd->sh_mem_config; packet->sh_mem_bases = qpd->sh_mem_bases; if (qpd->tba_addr) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h index 206f1960857f..8b6b2bd5c148 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_pm4_headers_ai.h @@ -146,7 +146,10 @@ struct pm4_mes_map_process { union { struct { uint32_t pasid:16; - uint32_t reserved1:8; + uint32_t reserved1:2; + uint32_t debug_vmid:4; + uint32_t new_debug:1; + uint32_t reserved2:1; uint32_t diq_enable:1; uint32_t process_quantum:7; } bitfields2; -- cgit From 0de4ec9a03537bd2b189b5afbf83acd6b72b0258 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Mon, 4 Apr 2022 13:38:11 -0400 Subject: drm/amdgpu: prepare map process for multi-process debug devices Unlike single process debug devices, multi-process debug devices allow debug mode setting per-VMID (non-device-global). Because the HWS manages PASID-VMID mapping, the new MAP_PROCESS API allows the KFD to forward the required SPI debug register write requests. To request a new debug mode setting change, the KFD must be able to preempt all queues then remap all queues with these new setting requests for MAP_PROCESS to take effect. Note that by default, trap enablement in non-debug mode must be disabled for performance reasons for multi-process debug devices due to setup overhead in FW. 
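Concretely, a debug mode change on these devices is applied by draining the runlist and re-issuing MAP_PROCESS so the new SPI debug register values ride along. Below is a condensed sketch of the refresh path; the composed helpers actually added in the diff are debug_lock_and_unmap(), debug_map_and_unlock() and debug_refresh_runlist(), and the wrapper name here is illustrative only:

	static int debug_refresh_sketch(struct device_queue_manager *dqm)
	{
		int r;

		/* Pinned-VMID devices program debug state globally instead. */
		if (!kfd_dbg_is_per_vmid_supported(dqm->dev))
			return 0;

		dqm_lock(dqm);
		/* Grace period 0: the debugger wants preemption immediately. */
		r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES,
				       0, 0, false);
		if (!r)
			/* Remapping re-sends MAP_PROCESS with the new settings. */
			r = map_queues_cpsch(dqm);
		dqm_unlock(dqm);

		return r;
	}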
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 5 +++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 51 ++++++++++++++++++++++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c | 14 ++++++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 9 ++++ drivers/gpu/drm/amd/amdkfd/kfd_process.c | 5 +++ 6 files changed, 87 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index a8abfe2a0a14..db6d72e7930f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -29,4 +29,9 @@ int kfd_dbg_trap_disable(struct kfd_process *target); int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, void __user *runtime_info, uint32_t *runtime_info_size); +static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) +{ + return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2); +} + #endif diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index cfe5bd59070e..495c9238254e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -36,6 +36,7 @@ #include "kfd_kernel_queue.h" #include "amdgpu_amdkfd.h" #include "mes_api_def.h" +#include "kfd_debug.h" /* Size of the per-pipe EOP queue */ #define CIK_HPD_EOP_BYTES_LOG2 11 @@ -2594,6 +2595,56 @@ out_unlock: return r; } +int debug_lock_and_unmap(struct device_queue_manager *dqm) +{ + int r; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) + return 0; + + dqm_lock(dqm); + + r = unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0, 0, false); + if (r) + dqm_unlock(dqm); + + return r; +} + +int debug_map_and_unlock(struct device_queue_manager *dqm) +{ + int r; + + if (dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) { + pr_err("Unsupported on sched_policy: %i\n", dqm->sched_policy); + return -EINVAL; + } + + if (!kfd_dbg_is_per_vmid_supported(dqm->dev)) + return 0; + + r = map_queues_cpsch(dqm); + + dqm_unlock(dqm); + + return r; +} + +int debug_refresh_runlist(struct device_queue_manager *dqm) +{ + int r = debug_lock_and_unmap(dqm); + + if (r) + return r; + + return debug_map_and_unlock(dqm); +} + #if defined(CONFIG_DEBUG_FS) static void seq_reg_dump(struct seq_file *m, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index bf7aa3f84182..bb75d93712eb 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -290,6 +290,9 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); int release_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); +int debug_lock_and_unmap(struct device_queue_manager *dqm); +int debug_map_and_unlock(struct device_queue_manager *dqm); +int debug_refresh_runlist(struct device_queue_manager *dqm); static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c index 0fe73dbd28af..29a2d0499b67 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c @@ -88,6 +88,10 @@ static int pm_map_process_aldebaran(struct packet_manager *pm, { struct pm4_mes_map_process_aldebaran *packet; uint64_t vm_page_table_base_addr = qpd->page_table_base; + struct kfd_dev *kfd = pm->dqm->dev->kfd; + struct kfd_process_device *pdd = + container_of(qpd, struct kfd_process_device, qpd); + int i; packet = (struct pm4_mes_map_process_aldebaran *)buffer; memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran)); @@ -102,6 +106,16 @@ static int pm_map_process_aldebaran(struct packet_manager *pm, packet->bitfields14.num_oac = qpd->num_oac; packet->bitfields14.sdma_enable = 1; packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count; + packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override | + pdd->spi_dbg_launch_mode; + + if (pdd->process->debug_trap_enabled) { + for (i = 0; i < kfd->device_info.num_of_watch_points; i++) + packet->tcp_watch_cntl[i] = pdd->watch_points[i]; + + packet->bitfields2.single_memops = + !!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP); + } packet->sh_mem_config = qpd->sh_mem_config; packet->sh_mem_bases = qpd->sh_mem_bases; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 4c912b7735b5..8fca7175daab 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -816,6 +816,12 @@ struct kfd_process_device { uint64_t faults; uint64_t page_in; uint64_t page_out; + + /* Tracks debug per-vmid request settings */ + uint32_t spi_dbg_override; + uint32_t spi_dbg_launch_mode; + uint32_t watch_points[4]; + /* * If this process has been checkpointed before, then the user * application will use the original gpu_id on the @@ -952,6 +958,9 @@ struct kfd_process { bool xnack_enabled; + /* Tracks debug per-vmid request for debug flags */ + bool dbg_flags; + atomic_t poison; /* Queues are in paused stated because we are in the process of doing a CRIU checkpoint */ bool queues_paused; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index d75dac92775c..725d936b2cc7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -1612,6 +1612,11 @@ struct kfd_process_device *kfd_create_process_device_data(struct kfd_node *dev, } p->pdds[p->n_pdds++] = pdd; + if (kfd_dbg_is_per_vmid_supported(pdd->dev)) + pdd->spi_dbg_override = pdd->dev->kfd2kgd->disable_debug_trap( + pdd->dev->adev, + false, + 0); /* Init idr used for memory handle translation */ idr_init(&pdd->alloc_idr); -- cgit From 69a8c3ae2dea84a6d571e4c1aad306f630f3ccfd Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 1 Sep 2022 11:27:15 -0400 Subject: drm/amdkfd: apply trap workaround for gfx11 Due to a HW bug, waves in only half the shader arrays can enter trap. When starting a debug session, relocate all waves to the first shader array of each shader engine and mask off the 2nd shader array as unavailable. When ending a debug session, re-enable the 2nd shader array per shader engine. User CU masking per queue cannot be guaranteed to remain functional if requested during debugging (e.g. user cu mask requests only 2nd shader array as an available resource leading to zero HW resources available) nor can runtime be alerted of any of these changes during execution. Make user CU masking and debugging mutual exclusive with respect to availability. 
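For reference, the workaround reduces to forcing the per-SE static thread management masks in the gfx11 MQD while a debug session is active, as the MQD manager changes below do. A minimal illustration, assuming (as the patch does) that the low half of each per-SE CU mask covers the first shader array; the helper name is illustrative only:

	/* 0xffff keeps only the first shader array of each SE while
	 * debugging; 0xffffffff restores both shader arrays afterwards.
	 */
	static inline uint32_t dbg_wa_se_mask(bool debug_wa_enabled)
	{
		return debug_wa_enabled ? 0xffff : 0xffffffff;
	}

Because the debugger owns these masks for the lifetime of the session, a user CU mask request on such a queue is rejected with -EBUSY, as the pqm_update_mqd() hunk below shows.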
If the debugger tries to attach to a process with a user cu masked queue, return the runtime status as enabled but busy. If the debugger tries to attach and fails to reallocate queue waves to the first shader array of each shader engine, return the runtime status as enabled but with an error. In addition, like any other mutli-process debug supported devices, disable trap temporary setup per-process to avoid performance impact from setup overhead. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 2 + drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 7 +-- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 2 - drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 57 ++++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 3 +- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 7 +++ drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 42 +++++++++++----- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +- .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 9 +++- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 7 ++- 14 files changed, 122 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index d20df0cf0d88..b5f5eed2b5ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -219,6 +219,8 @@ struct mes_add_queue_input { uint32_t gws_size; uint64_t tba_addr; uint64_t tma_addr; + uint32_t trap_en; + uint32_t skip_process_ctx_clear; uint32_t is_kfd_process; uint32_t is_aql_queue; uint32_t queue_size; diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index 861910a6662d..c4e3cb8d44de 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -202,17 +202,14 @@ static int mes_v11_0_add_hw_queue(struct amdgpu_mes *mes, mes_add_queue_pkt.gws_size = input->gws_size; mes_add_queue_pkt.trap_handler_addr = input->tba_addr; mes_add_queue_pkt.tma_addr = input->tma_addr; + mes_add_queue_pkt.trap_en = input->trap_en; + mes_add_queue_pkt.skip_process_ctx_clear = input->skip_process_ctx_clear; mes_add_queue_pkt.is_kfd_process = input->is_kfd_process; /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; mes_add_queue_pkt.gds_size = input->queue_size; - if (!(((adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 4) && - (adev->ip_versions[GC_HWIP][0] >= IP_VERSION(11, 0, 0)) && - (adev->ip_versions[GC_HWIP][0] <= IP_VERSION(11, 0, 3)))) - mes_add_queue_pkt.trap_en = 1; - /* For KFD, gds_size is re-used for queue size (needed in MES for AQL queues) */ mes_add_queue_pkt.is_aql_queue = input->is_aql_queue; mes_add_queue_pkt.gds_size = input->queue_size; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 826a99acb6fb..d4df424e4514 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -537,8 +537,6 @@ static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p, goto out; } - minfo.update_flag = UPDATE_FLAG_CU_MASK; - mutex_lock(&p->mutex); retval = pqm_update_mqd(&p->pqm, args->queue_id, 
&minfo); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 73b07b5f17f1..5e2ee2d1acc4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -24,6 +24,57 @@ #include "kfd_device_queue_manager.h" #include +static int kfd_dbg_set_queue_workaround(struct queue *q, bool enable) +{ + struct mqd_update_info minfo = {0}; + int err; + + if (!q) + return 0; + + if (KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || + KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0)) + return 0; + + if (enable && q->properties.is_user_cu_masked) + return -EBUSY; + + minfo.update_flag = enable ? UPDATE_FLAG_DBG_WA_ENABLE : UPDATE_FLAG_DBG_WA_DISABLE; + + q->properties.is_dbg_wa = enable; + err = q->device->dqm->ops.update_queue(q->device->dqm, q, &minfo); + if (err) + q->properties.is_dbg_wa = false; + + return err; +} + +static int kfd_dbg_set_workaround(struct kfd_process *target, bool enable) +{ + struct process_queue_manager *pqm = &target->pqm; + struct process_queue_node *pqn; + int r = 0; + + list_for_each_entry(pqn, &pqm->queues, process_queue_list) { + r = kfd_dbg_set_queue_workaround(pqn->q, enable); + if (enable && r) + goto unwind; + } + + return 0; + +unwind: + list_for_each_entry(pqn, &pqm->queues, process_queue_list) + kfd_dbg_set_queue_workaround(pqn->q, false); + + if (enable) + target->runtime_info.runtime_state = r == -EBUSY ? + DEBUG_RUNTIME_STATE_ENABLED_BUSY : + DEBUG_RUNTIME_STATE_ENABLED_ERROR; + + return r; +} + static int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) { uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode; @@ -77,6 +128,8 @@ static void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int else kfd_dbg_set_mes_debug_mode(pdd); } + + kfd_dbg_set_workaround(target, false); } int kfd_dbg_trap_disable(struct kfd_process *target) @@ -111,6 +164,10 @@ static int kfd_dbg_trap_activate(struct kfd_process *target) { int i, r = 0; + r = kfd_dbg_set_workaround(target, true); + if (r) + return r; + for (i = 0; i < target->n_pdds; i++) { struct kfd_process_device *pdd = target->pdds[i]; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index 17481f824647..3e56225f6ef6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -31,7 +31,8 @@ int kfd_dbg_trap_enable(struct kfd_process *target, uint32_t fd, uint32_t *runtime_info_size); static inline bool kfd_dbg_is_per_vmid_supported(struct kfd_node *dev) { - return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2); + return KFD_GC_VERSION(dev) == IP_VERSION(9, 4, 2) || + KFD_GC_VERSION(dev) >= IP_VERSION(11, 0, 0); } /* diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 495c9238254e..44d87943e40a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -226,6 +226,10 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, queue_input.paging = false; queue_input.tba_addr = qpd->tba_addr; queue_input.tma_addr = qpd->tma_addr; + queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || + KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0) || + q->properties.is_dbg_wa; + queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled; queue_type = convert_to_mes_queue_type(q->properties.type); if (queue_type < 0) { @@ -1716,6 
+1720,9 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q, * updates the is_evicted flag but is a no-op otherwise. */ q->properties.is_evicted = !!qpd->evicted; + q->properties.is_dbg_wa = qpd->pqm->process->debug_trap_enabled && + KFD_GC_VERSION(q->device) >= IP_VERSION(11, 0, 0) && + KFD_GC_VERSION(q->device) < IP_VERSION(12, 0, 0); if (qd) mqd_mgr->restore_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj, &q->gart_mqd_addr, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c index eb11940bec34..65c9f01a1f86 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c @@ -48,8 +48,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, struct cik_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index eaaa4f4ddaaa..a0ac4f2fe6b5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -48,8 +48,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, struct v10_compute_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 3a48bbc589fe..9a9b4e853516 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -46,15 +46,33 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, { struct v11_compute_mqd *m; uint32_t se_mask[KFD_MAX_NUM_SE] = {0}; + bool has_wa_flag = minfo && (minfo->update_flag & (UPDATE_FLAG_DBG_WA_ENABLE | + UPDATE_FLAG_DBG_WA_DISABLE)); - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !(has_wa_flag || minfo->cu_mask.ptr)) return; + m = get_mqd(mqd); + + if (has_wa_flag) { + uint32_t wa_mask = minfo->update_flag == UPDATE_FLAG_DBG_WA_ENABLE ? + 0xffff : 0xffffffff; + + m->compute_static_thread_mgmt_se0 = wa_mask; + m->compute_static_thread_mgmt_se1 = wa_mask; + m->compute_static_thread_mgmt_se2 = wa_mask; + m->compute_static_thread_mgmt_se3 = wa_mask; + m->compute_static_thread_mgmt_se4 = wa_mask; + m->compute_static_thread_mgmt_se5 = wa_mask; + m->compute_static_thread_mgmt_se6 = wa_mask; + m->compute_static_thread_mgmt_se7 = wa_mask; + + return; + } + mqd_symmetrically_map_cu_mask(mm, minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask); - m = get_mqd(mqd); m->compute_static_thread_mgmt_se0 = se_mask[0]; m->compute_static_thread_mgmt_se1 = se_mask[1]; m->compute_static_thread_mgmt_se2 = se_mask[2]; @@ -109,6 +127,7 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, uint64_t addr; struct v11_compute_mqd *m; int size; + uint32_t wa_mask = q->is_dbg_wa ? 
0xffff : 0xffffffff; m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr; addr = mqd_mem_obj->gpu_addr; @@ -122,14 +141,15 @@ static void init_mqd(struct mqd_manager *mm, void **mqd, m->header = 0xC0310800; m->compute_pipelinestat_enable = 1; - m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF; - m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF; + + m->compute_static_thread_mgmt_se0 = wa_mask; + m->compute_static_thread_mgmt_se1 = wa_mask; + m->compute_static_thread_mgmt_se2 = wa_mask; + m->compute_static_thread_mgmt_se3 = wa_mask; + m->compute_static_thread_mgmt_se4 = wa_mask; + m->compute_static_thread_mgmt_se5 = wa_mask; + m->compute_static_thread_mgmt_se6 = wa_mask; + m->compute_static_thread_mgmt_se7 = wa_mask; m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | 0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index b7c95158d4a0..5b87c244e909 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -65,8 +65,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, struct v9_mqd *m; uint32_t se_mask[KFD_MAX_NUM_SE] = {0}; - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c index fe69492b1bb3..d1e962da51dd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c @@ -51,8 +51,7 @@ static void update_cu_mask(struct mqd_manager *mm, void *mqd, struct vi_mqd *m; uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ - if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || - !minfo->cu_mask.ptr) + if (!minfo || !minfo->cu_mask.ptr) return; mqd_symmetrically_map_cu_mask(mm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 8fca7175daab..f0a45d184c8f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -513,6 +513,8 @@ struct queue_properties { bool is_active; bool is_gws; uint32_t pm4_target_xcc; + bool is_dbg_wa; + bool is_user_cu_masked; /* Not relevant for user mode queues in cp scheduling */ unsigned int vmid; /* Relevant only for sdma queues*/ @@ -535,7 +537,8 @@ struct queue_properties { !(q).is_evicted) enum mqd_update_flag { - UPDATE_FLAG_CU_MASK = 0, + UPDATE_FLAG_DBG_WA_ENABLE = 1, + UPDATE_FLAG_DBG_WA_DISABLE = 2, }; struct mqd_update_info { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index b100933340d2..43d432b5c5bc 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -506,8 +506,12 @@ int pqm_update_mqd(struct process_queue_manager *pqm, return -EFAULT; } + /* CUs are masked for debugger requirements so deny user mask */ + if (pqn->q->properties.is_dbg_wa && minfo && minfo->cu_mask.ptr) + return -EBUSY; + /* ASICs that have WGPs must enforce pairwise 
enabled mask checks. */ - if (minfo && minfo->update_flag == UPDATE_FLAG_CU_MASK && minfo->cu_mask.ptr && + if (minfo && minfo->cu_mask.ptr && KFD_GC_VERSION(pqn->q->device) >= IP_VERSION(10, 0, 0)) { int i; @@ -526,6 +530,9 @@ int pqm_update_mqd(struct process_queue_manager *pqm, if (retval != 0) return retval; + if (minfo && minfo->cu_mask.ptr) + pqn->q->properties.is_user_cu_masked = true; + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index 3def25b2bdbb..faa7939f35bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1863,10 +1863,13 @@ static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *de { bool firmware_supported = true; + /* + * FIXME: GFX11 FW currently not sufficient to deal with CWSR WA. + * Updated FW with API changes coming soon. + */ if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) && KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) { - firmware_supported = - (dev->gpu->adev->mes.sched_version & AMDGPU_MES_VERSION_MASK) >= 9; + firmware_supported = false; goto out; } -- cgit From a70a93fa568b4f05aba548dadb673703eccf5480 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 5 May 2022 16:15:37 -0400 Subject: drm/amdkfd: add debug suspend and resume process queues operation In order to inspect waves from the saved context at any point during a debug session, the debugger must be able to preempt queues to trigger context save by suspending them. On queue suspend, the KFD will copy the context save header information so that the debugger can correctly crawl the appropriate size of the saved context. The debugger must then also be allowed to resume suspended queues. A queue that is newly created cannot be suspended because queue ids are recycled after destruction so the debugger needs to know that this has occurred. Query functions will be later added that will clear a given queue of its new queue status. A queue cannot be destroyed while it is suspended to preserve its saved context during debugger inspection. Have queue destruction block while a queue is suspended and unblocked when it is resumed. Likewise, if a queue is about to be destroyed, it cannot be suspended. Return the number of queues successfully suspended or resumed along with a per queue status array where the upper bits per queue status show that the request was invalid (new/destroyed queue suspend request, missing queue) or an error occurred (HWS in a fatal state so it can't suspend or resume queues). 
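[Editor's note, not part of the patch: a minimal user-space sketch of how a debugger might decode the per-queue status array described above. The mask semantics mirror the KFD_DBG_QUEUE_INVALID_MASK and KFD_DBG_QUEUE_ERROR_MASK bits used in the kernel changes below; the helper name, mask parameters and reporting format are assumptions made only for this example.]

#include <stdint.h>
#include <stdio.h>

/* queue_ids is the array written back by the suspend operation; ret is its
 * return value (number of queues successfully suspended, or a negative errno).
 * invalid_mask/error_mask stand in for the uapi status bits.
 */
void report_suspend_status(const uint32_t *queue_ids, uint32_t num_queues,
                           int ret, uint32_t invalid_mask, uint32_t error_mask)
{
        uint32_t i;

        if (ret < 0) {
                fprintf(stderr, "suspend request failed: %d\n", ret);
                return;
        }

        printf("%d of %u queues suspended\n", ret, num_queues);
        for (i = 0; i < num_queues; i++) {
                /* Clearing both status bits recovers the original queue ID. */
                uint32_t qid = queue_ids[i] & ~(invalid_mask | error_mask);

                if (queue_ids[i] & error_mask)
                        printf("queue %u: HWS error, not suspended\n", qid);
                else if (queue_ids[i] & invalid_mask)
                        printf("queue %u: invalid request (new, destroyed or missing)\n", qid);
                else
                        printf("queue %u: suspended\n", qid);
        }
}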
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 11 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 7 + .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 447 ++++++++++++++++++++- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 10 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 10 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 15 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 14 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +- .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 1 + 11 files changed, 512 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 98cd52bb005f..b4fcad0e62f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -772,6 +772,11 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev) return adev->have_atomics_support; } +void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev) +{ + amdgpu_device_flush_hdp(adev, NULL); +} + void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset) { amdgpu_umc_poison_handler(adev, reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index dd740e64e6e1..2d0406bff84e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -322,6 +322,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, uint64_t *mmap_offset); int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, struct dma_buf **dmabuf); +void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev); int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a6570b124b2b..1fae97df7a1e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -410,6 +410,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, pr_debug("Write ptr address == 0x%016llX\n", args->write_pointer_address); + kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0); return 0; err_create_queue: @@ -2996,7 +2997,17 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v args->launch_mode.launch_mode); break; case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: + r = suspend_queues(target, + args->suspend_queues.num_queues, + args->suspend_queues.grace_period, + args->suspend_queues.exception_mask, + (uint32_t *)args->suspend_queues.queue_array_ptr); + + break; case KFD_IOC_DBG_TRAP_RESUME_QUEUES: + r = resume_queues(target, args->resume_queues.num_queues, + (uint32_t *)args->resume_queues.queue_array_ptr); + break; case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: case KFD_IOC_DBG_TRAP_SET_FLAGS: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 53c3418562d4..f4d3dfb35cb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -339,6 +339,13 @@ void kfd_dbg_trap_deactivate(struct 
kfd_process *target, bool unwind, int unwind } kfd_dbg_set_workaround(target, false); + + if (!unwind) { + int resume_count = resume_queues(target, 0, NULL); + + if (resume_count) + pr_debug("Resumed %d queues\n", resume_count); + } } static void kfd_dbg_clean_exception_status(struct kfd_process *target) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 44d87943e40a..bc9e81293165 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -952,6 +952,92 @@ out_unlock: return retval; } +/* suspend_single_queue does not lock the dqm like the + * evict_process_queues_cpsch or evict_process_queues_nocpsch. You should + * lock the dqm before calling, and unlock after calling. + * + * The reason we don't lock the dqm is because this function may be + * called on multiple queues in a loop, so rather than locking/unlocking + * multiple times, we will just keep the dqm locked for all of the calls. + */ +static int suspend_single_queue(struct device_queue_manager *dqm, + struct kfd_process_device *pdd, + struct queue *q) +{ + bool is_new; + + if (q->properties.is_suspended) + return 0; + + pr_debug("Suspending PASID %u queue [%i]\n", + pdd->process->pasid, + q->properties.queue_id); + + is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW); + + if (is_new || q->properties.is_being_destroyed) { + pr_debug("Suspend: skip %s queue id %i\n", + is_new ? "new" : "destroyed", + q->properties.queue_id); + return -EBUSY; + } + + q->properties.is_suspended = true; + if (q->properties.is_active) { + if (dqm->dev->kfd->shared_resources.enable_mes) { + int r = remove_queue_mes(dqm, q, &pdd->qpd); + + if (r) + return r; + } + + decrement_queue_count(dqm, &pdd->qpd, q); + q->properties.is_active = false; + } + + return 0; +} + +/* resume_single_queue does not lock the dqm like the functions + * restore_process_queues_cpsch or restore_process_queues_nocpsch. You should + * lock the dqm before calling, and unlock after calling. + * + * The reason we don't lock the dqm is because this function may be + * called on multiple queues in a loop, so rather than locking/unlocking + * multiple times, we will just keep the dqm locked for all of the calls. 
+ */ +static int resume_single_queue(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + struct queue *q) +{ + struct kfd_process_device *pdd; + + if (!q->properties.is_suspended) + return 0; + + pdd = qpd_to_pdd(qpd); + + pr_debug("Restoring from suspend PASID %u queue [%i]\n", + pdd->process->pasid, + q->properties.queue_id); + + q->properties.is_suspended = false; + + if (QUEUE_IS_ACTIVE(q->properties)) { + if (dqm->dev->kfd->shared_resources.enable_mes) { + int r = add_queue_mes(dqm, q, &pdd->qpd); + + if (r) + return r; + } + + q->properties.is_active = true; + increment_queue_count(dqm, qpd, q); + } + + return 0; +} + static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { @@ -1926,6 +2012,31 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, return map_queues_cpsch(dqm); } +static int wait_on_destroy_queue(struct device_queue_manager *dqm, + struct queue *q) +{ + struct kfd_process_device *pdd = kfd_get_process_device_data(q->device, + q->process); + int ret = 0; + + if (pdd->qpd.is_debug) + return ret; + + q->properties.is_being_destroyed = true; + + if (pdd->process->debug_trap_enabled && q->properties.is_suspended) { + dqm_unlock(dqm); + mutex_unlock(&q->process->mutex); + ret = wait_event_interruptible(dqm->destroy_wait, + !q->properties.is_suspended); + + mutex_lock(&q->process->mutex); + dqm_lock(dqm); + } + + return ret; +} + static int destroy_queue_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) @@ -1945,11 +2056,16 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, q->properties.queue_id); } - retval = 0; - /* remove queue from list to prevent rescheduling after preemption */ dqm_lock(dqm); + retval = wait_on_destroy_queue(dqm, q); + + if (retval) { + dqm_unlock(dqm); + return retval; + } + if (qpd->is_debug) { /* * error, currently we do not allow to destroy a queue @@ -1996,7 +2112,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, dqm_unlock(dqm); - /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */ + /* + * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid + * circular locking + */ + kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE), + qpd->pqm->process, q->device, + -1, false, NULL, 0); + mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); return retval; @@ -2461,8 +2584,10 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) goto out_free; } - if (!dqm->ops.initialize(dqm)) + if (!dqm->ops.initialize(dqm)) { + init_waitqueue_head(&dqm->destroy_wait); return dqm; + } out_free: kfree(dqm); @@ -2602,6 +2727,320 @@ out_unlock: return r; } +#define QUEUE_NOT_FOUND -1 +/* invalidate queue operation in array */ +static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids) +{ + int i; + + for (i = 0; i < num_queues; i++) + queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK; +} + +/* find queue index in array */ +static int q_array_get_index(unsigned int queue_id, + uint32_t num_queues, + uint32_t *queue_ids) +{ + int i; + + for (i = 0; i < num_queues; i++) + if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK)) + return i; + + return QUEUE_NOT_FOUND; +} + +struct copy_context_work_handler_workarea { + struct work_struct copy_context_work; + struct kfd_process *p; +}; + +static void copy_context_work_handler (struct work_struct *work) +{ + struct copy_context_work_handler_workarea *workarea; + struct mqd_manager *mqd_mgr; + struct queue *q; 
+ struct mm_struct *mm; + struct kfd_process *p; + uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size; + int i; + + workarea = container_of(work, + struct copy_context_work_handler_workarea, + copy_context_work); + + p = workarea->p; + mm = get_task_mm(p->lead_thread); + + if (!mm) + return; + + kthread_use_mm(mm); + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + + list_for_each_entry(q, &qpd->queues_list, list) { + mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; + + /* We ignore the return value from get_wave_state + * because + * i) right now, it always returns 0, and + * ii) if we hit an error, we would continue to the + * next queue anyway. + */ + mqd_mgr->get_wave_state(mqd_mgr, + q->mqd, + &q->properties, + (void __user *) q->properties.ctx_save_restore_area_address, + &tmp_ctl_stack_used_size, + &tmp_save_area_used_size); + } + } + kthread_unuse_mm(mm); + mmput(mm); +} + +static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array) +{ + size_t array_size = num_queues * sizeof(uint32_t); + uint32_t *queue_ids = NULL; + + if (!usr_queue_id_array) + return NULL; + + queue_ids = kzalloc(array_size, GFP_KERNEL); + if (!queue_ids) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(queue_ids, usr_queue_id_array, array_size)) + return ERR_PTR(-EFAULT); + + return queue_ids; +} + +int resume_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t *usr_queue_id_array) +{ + uint32_t *queue_ids = NULL; + int total_resumed = 0; + int i; + + if (usr_queue_id_array) { + queue_ids = get_queue_ids(num_queues, usr_queue_id_array); + + if (IS_ERR(queue_ids)) + return PTR_ERR(queue_ids); + + /* mask all queues as invalid. 
unmask per successful request */ + q_array_invalidate(num_queues, queue_ids); + } + + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + struct queue *q; + int r, per_device_resumed = 0; + + dqm_lock(dqm); + + /* unmask queues that resume or already resumed as valid */ + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = QUEUE_NOT_FOUND; + + if (queue_ids) + q_idx = q_array_get_index( + q->properties.queue_id, + num_queues, + queue_ids); + + if (!queue_ids || q_idx != QUEUE_NOT_FOUND) { + int err = resume_single_queue(dqm, &pdd->qpd, q); + + if (queue_ids) { + if (!err) { + queue_ids[q_idx] &= + ~KFD_DBG_QUEUE_INVALID_MASK; + } else { + queue_ids[q_idx] |= + KFD_DBG_QUEUE_ERROR_MASK; + break; + } + } + + if (dqm->dev->kfd->shared_resources.enable_mes) { + wake_up_all(&dqm->destroy_wait); + if (!err) + total_resumed++; + } else { + per_device_resumed++; + } + } + } + + if (!per_device_resumed) { + dqm_unlock(dqm); + continue; + } + + r = execute_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, + 0, + USE_DEFAULT_GRACE_PERIOD); + if (r) { + pr_err("Failed to resume process queues\n"); + if (queue_ids) { + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index( + q->properties.queue_id, + num_queues, + queue_ids); + + /* mask queue as error on resume fail */ + if (q_idx != QUEUE_NOT_FOUND) + queue_ids[q_idx] |= + KFD_DBG_QUEUE_ERROR_MASK; + } + } + } else { + wake_up_all(&dqm->destroy_wait); + total_resumed += per_device_resumed; + } + + dqm_unlock(dqm); + } + + if (queue_ids) { + if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, + num_queues * sizeof(uint32_t))) + pr_err("copy_to_user failed on queue resume\n"); + + kfree(queue_ids); + } + + return total_resumed; +} + +int suspend_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t grace_period, + uint64_t exception_clear_mask, + uint32_t *usr_queue_id_array) +{ + uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array); + int total_suspended = 0; + int i; + + if (IS_ERR(queue_ids)) + return PTR_ERR(queue_ids); + + /* mask all queues as invalid. 
umask on successful request */ + q_array_invalidate(num_queues, queue_ids); + + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + struct queue *q; + int r, per_device_suspended = 0; + + mutex_lock(&p->event_mutex); + dqm_lock(dqm); + + /* unmask queues that suspend or already suspended */ + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index(q->properties.queue_id, + num_queues, + queue_ids); + + if (q_idx != QUEUE_NOT_FOUND) { + int err = suspend_single_queue(dqm, pdd, q); + bool is_mes = dqm->dev->kfd->shared_resources.enable_mes; + + if (!err) { + queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK; + if (exception_clear_mask && is_mes) + q->properties.exception_status &= + ~exception_clear_mask; + + if (is_mes) + total_suspended++; + else + per_device_suspended++; + } else if (err != -EBUSY) { + r = err; + queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; + break; + } + } + } + + if (!per_device_suspended) { + dqm_unlock(dqm); + mutex_unlock(&p->event_mutex); + if (total_suspended) + amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev); + continue; + } + + r = execute_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, + grace_period); + + if (r) + pr_err("Failed to suspend process queues.\n"); + else + total_suspended += per_device_suspended; + + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index(q->properties.queue_id, + num_queues, queue_ids); + + if (q_idx == QUEUE_NOT_FOUND) + continue; + + /* mask queue as error on suspend fail */ + if (r) + queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK; + else if (exception_clear_mask) + q->properties.exception_status &= + ~exception_clear_mask; + } + + dqm_unlock(dqm); + mutex_unlock(&p->event_mutex); + amdgpu_device_flush_hdp(dqm->dev->adev, NULL); + } + + if (total_suspended) { + struct copy_context_work_handler_workarea copy_context_worker; + + INIT_WORK_ONSTACK( + ©_context_worker.copy_context_work, + copy_context_work_handler); + + copy_context_worker.p = p; + + schedule_work(©_context_worker.copy_context_work); + + + flush_work(©_context_worker.copy_context_work); + destroy_work_on_stack(©_context_worker.copy_context_work); + } + + if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, + num_queues * sizeof(uint32_t))) + pr_err("copy_to_user failed on queue suspend\n"); + + kfree(queue_ids); + + return total_suspended; +} + int debug_lock_and_unmap(struct device_queue_manager *dqm) { int r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index bb75d93712eb..d4e6dbffe8c2 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -263,6 +263,8 @@ struct device_queue_manager { uint32_t current_logical_xcc_start; uint32_t wait_times; + + wait_queue_head_t destroy_wait; }; void device_queue_manager_init_cik( @@ -290,6 +292,14 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); int release_debug_trap_vmid(struct device_queue_manager *dqm, struct qcm_process_device *qpd); +int suspend_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t grace_period, + uint64_t exception_clear_mask, + uint32_t *usr_queue_id_array); +int resume_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t *usr_queue_id_array); int debug_lock_and_unmap(struct 
device_queue_manager *dqm); int debug_map_and_unlock(struct device_queue_manager *dqm); int debug_refresh_runlist(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c index a0ac4f2fe6b5..94c0fc2e57b7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c @@ -237,6 +237,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, u32 *save_area_used_size) { struct v10_compute_mqd *m; + struct kfd_context_save_area_header header; m = get_mqd(mqd); @@ -255,6 +256,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, * accessible to user mode */ + header.wave_state.control_stack_size = *ctl_stack_used_size; + header.wave_state.wave_state_size = *save_area_used_size; + + header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset; + header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset; + + if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state))) + return -EFAULT; + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c index 9a9b4e853516..31fec5e70d13 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c @@ -291,7 +291,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, u32 *save_area_used_size) { struct v11_compute_mqd *m; - /*struct mqd_user_context_save_area_header header;*/ + struct kfd_context_save_area_header header; m = get_mqd(mqd); @@ -309,16 +309,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, * it's part of the context save area that is already * accessible to user mode */ -/* - header.control_stack_size = *ctl_stack_used_size; - header.wave_state_size = *save_area_used_size; + header.wave_state.control_stack_size = *ctl_stack_used_size; + header.wave_state.wave_state_size = *save_area_used_size; - header.wave_state_offset = m->cp_hqd_wg_state_offset; - header.control_stack_offset = m->cp_hqd_cntl_stack_offset; + header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset; + header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset; - if (copy_to_user(ctl_stack, &header, sizeof(header))) + if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state))) return -EFAULT; -*/ + return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c index 5b87c244e909..601bb9f68048 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c @@ -311,6 +311,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, u32 *save_area_used_size) { struct v9_mqd *m; + struct kfd_context_save_area_header header; /* Control stack is located one page after MQD. 
*/ void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE); @@ -322,7 +323,18 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd, *save_area_used_size = m->cp_hqd_wg_state_offset - m->cp_hqd_cntl_stack_size; - if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size)) + header.wave_state.control_stack_size = *ctl_stack_used_size; + header.wave_state.wave_state_size = *save_area_used_size; + + header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset; + header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset; + + if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state))) + return -EFAULT; + + if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset, + mqd_ctl_stack + m->cp_hqd_cntl_stack_offset, + *ctl_stack_used_size)) return -EFAULT; return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index cd2d56e5cdf0..05da43bf233a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -510,6 +510,8 @@ struct queue_properties { uint32_t doorbell_off; bool is_interop; bool is_evicted; + bool is_suspended; + bool is_being_destroyed; bool is_active; bool is_gws; uint32_t pm4_target_xcc; @@ -535,7 +537,8 @@ struct queue_properties { #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 && \ (q).queue_address != 0 && \ (q).queue_percent > 0 && \ - !(q).is_evicted) + !(q).is_evicted && \ + !(q).is_suspended) enum mqd_update_flag { UPDATE_FLAG_DBG_WA_ENABLE = 1, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 70852a200d8f..01ccab607a69 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -187,6 +187,7 @@ static int init_user_queue(struct process_queue_manager *pqm, /* Doorbell initialized in user space*/ q_properties->doorbell_ptr = NULL; + q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW); /* let DQM handle it*/ q_properties->vmid = 0; -- cgit From b17bd5dbf64677682a3bca249c64521d5eabcb38 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 10 May 2022 11:15:29 -0400 Subject: drm/amdkfd: add debug queue snapshot operation Allow the debugger to get a snapshot of a specified number of queues containing various queue property information that is copied to the debugger. Since the debugger doesn't know how many queues exist at any given time, allow the debugger to pass the requested number of snapshots as 0 to get the actual number of potential snapshots to use for a subsequent snapshot request for actual information. To prevent future ABI breakage, pass in the requested entry_size. The KFD will return its own entry_size in case the debugger still wants to log the information in a core dump on sizing failure. Also allow the debugger to clear exceptions when doing a snapshot.
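[Editor's note, not part of the patch: a sketch of the two-step sizing handshake described above, seen from the debugger side. The query callback stands in for the actual KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT request and is an assumption of this example.]

#include <stdint.h>
#include <stdlib.h>

/* Stand-in for the real snapshot request: updates *num_queues and *entry_size
 * with the values the KFD writes back, returns 0 on success.
 */
typedef int (*queue_snapshot_fn)(void *buf, uint32_t *num_queues,
                                 uint32_t *entry_size);

void *snapshot_all_queues(queue_snapshot_fn query,
                          uint32_t *num_queues, uint32_t *entry_size)
{
        void *buf;

        /* Pass 1: request 0 entries just to learn how many queues exist. */
        *num_queues = 0;
        if (query(NULL, num_queues, entry_size))
                return NULL;

        /* Pass 2: allocate using the debugger's requested entry_size. The KFD
         * copies at most its own entry size per queue and reports that size
         * back, so the buffer can still be parsed (e.g. for a core dump) if
         * the two sides disagree.
         */
        buf = calloc(*num_queues, *entry_size);
        if (!buf)
                return NULL;

        if (query(buf, num_queues, entry_size)) {
                free(buf);
                return NULL;
        }
        return buf;
}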
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 6 ++++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 36 +++++++++++++++++++ .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 3 ++ drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +++ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 40 ++++++++++++++++++++++ 5 files changed, 90 insertions(+) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index b7ee79b5220a..24066756e478 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -3053,6 +3053,12 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v &args->query_exception_info.info_size); break; case KFD_IOC_DBG_TRAP_GET_QUEUE_SNAPSHOT: + r = pqm_get_queue_snapshot(&target->pqm, + args->queue_snapshot.exception_mask, + (void __user *)args->queue_snapshot.snapshot_buf_ptr, + &args->queue_snapshot.num_queues, + &args->queue_snapshot.entry_size); + break; case KFD_IOC_DBG_TRAP_GET_DEVICE_SNAPSHOT: pr_warn("Debug op %i not supported yet\n", args->op); r = -EACCES; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index bc9e81293165..0c1be91a87c6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -3041,6 +3041,42 @@ int suspend_queues(struct kfd_process *p, return total_suspended; } +static uint32_t set_queue_type_for_user(struct queue_properties *q_props) +{ + switch (q_props->type) { + case KFD_QUEUE_TYPE_COMPUTE: + return q_props->format == KFD_QUEUE_FORMAT_PM4 + ? 
KFD_IOC_QUEUE_TYPE_COMPUTE + : KFD_IOC_QUEUE_TYPE_COMPUTE_AQL; + case KFD_QUEUE_TYPE_SDMA: + return KFD_IOC_QUEUE_TYPE_SDMA; + case KFD_QUEUE_TYPE_SDMA_XGMI: + return KFD_IOC_QUEUE_TYPE_SDMA_XGMI; + default: + WARN_ONCE(true, "queue type not recognized!"); + return 0xffffffff; + }; +} + +void set_queue_snapshot_entry(struct queue *q, + uint64_t exception_clear_mask, + struct kfd_queue_snapshot_entry *qss_entry) +{ + qss_entry->ring_base_address = q->properties.queue_address; + qss_entry->write_pointer_address = (uint64_t)q->properties.write_ptr; + qss_entry->read_pointer_address = (uint64_t)q->properties.read_ptr; + qss_entry->ctx_save_restore_address = + q->properties.ctx_save_restore_area_address; + qss_entry->ctx_save_restore_area_size = + q->properties.ctx_save_restore_area_size; + qss_entry->exception_status = q->properties.exception_status; + qss_entry->queue_id = q->properties.queue_id; + qss_entry->gpu_id = q->device->id; + qss_entry->ring_size = (uint32_t)q->properties.queue_size; + qss_entry->queue_type = set_queue_type_for_user(&q->properties); + q->properties.exception_status &= ~exception_clear_mask; +} + int debug_lock_and_unmap(struct device_queue_manager *dqm) { int r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h index d4e6dbffe8c2..7dd4b177219d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h @@ -300,6 +300,9 @@ int suspend_queues(struct kfd_process *p, int resume_queues(struct kfd_process *p, uint32_t num_queues, uint32_t *usr_queue_id_array); +void set_queue_snapshot_entry(struct queue *q, + uint64_t exception_clear_mask, + struct kfd_queue_snapshot_entry *qss_entry); int debug_lock_and_unmap(struct device_queue_manager *dqm); int debug_map_and_unlock(struct device_queue_manager *dqm); int debug_refresh_runlist(struct device_queue_manager *dqm); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 8ec87bc8ba82..023b17e0116b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1355,6 +1355,11 @@ int pqm_get_wave_state(struct process_queue_manager *pqm, void __user *ctl_stack, u32 *ctl_stack_used_size, u32 *save_area_used_size); +int pqm_get_queue_snapshot(struct process_queue_manager *pqm, + uint64_t exception_clear_mask, + void __user *buf, + int *num_qss_entries, + uint32_t *entry_size); int amdkfd_fence_wait_timeout(uint64_t *fence_addr, uint64_t fence_value, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 01ccab607a69..9ad1a2186a24 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -585,6 +585,46 @@ int pqm_get_wave_state(struct process_queue_manager *pqm, save_area_used_size); } +int pqm_get_queue_snapshot(struct process_queue_manager *pqm, + uint64_t exception_clear_mask, + void __user *buf, + int *num_qss_entries, + uint32_t *entry_size) +{ + struct process_queue_node *pqn; + struct kfd_queue_snapshot_entry src; + uint32_t tmp_entry_size = *entry_size, tmp_qss_entries = *num_qss_entries; + int r = 0; + + *num_qss_entries = 0; + if (!(*entry_size)) + return -EINVAL; + + *entry_size = min_t(size_t, *entry_size, sizeof(struct kfd_queue_snapshot_entry)); + mutex_lock(&pqm->process->event_mutex); + + memset(&src, 0, sizeof(src)); + + list_for_each_entry(pqn, 
&pqm->queues, process_queue_list) { + if (!pqn->q) + continue; + + if (*num_qss_entries < tmp_qss_entries) { + set_queue_snapshot_entry(pqn->q, exception_clear_mask, &src); + + if (copy_to_user(buf, &src, *entry_size)) { + r = -EFAULT; + break; + } + buf += tmp_entry_size; + } + *num_qss_entries += 1; + } + + mutex_unlock(&pqm->process->event_mutex); + return r; +} + static int get_queue_data_sizes(struct kfd_process_device *pdd, struct queue *q, uint32_t *mqd_size, -- cgit From 597364adc0fcf71617b3adbe647b6eec76e27554 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Wed, 31 May 2023 11:22:03 -0400 Subject: drm/amdkfd: Fix reserved SDMA queues handling This patch fixes a regression caused by a bad merge where the handling of reserved SDMA queues was accidentally removed. With the fix, the reserved SDMA queues are again correctly marked as unavailable for allocation. Fixes: a805889a1531 ("drm/amdkfd: Update SDMA queue management for GFX9.4.3") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 13 ++++++------- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 10 +++++----- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 2 +- 3 files changed, 12 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 9fc9d32cb579..9d4abfd8b55e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -106,20 +106,19 @@ static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd) kfd->device_info.num_sdma_queues_per_engine = 8; } + bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES); + switch (sdma_version) { case IP_VERSION(6, 0, 0): + case IP_VERSION(6, 0, 1): case IP_VERSION(6, 0, 2): case IP_VERSION(6, 0, 3): /* Reserve 1 for paging and 1 for gfx */ kfd->device_info.num_reserved_sdma_queues_per_engine = 2; /* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */ - kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL; - break; - case IP_VERSION(6, 0, 1): - /* Reserve 1 for paging and 1 for gfx */ - kfd->device_info.num_reserved_sdma_queues_per_engine = 2; - /* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... 
*/ - kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL; + bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0, + kfd->adev->sdma.num_instances * + kfd->device_info.num_reserved_sdma_queues_per_engine); break; default: break; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 0c1be91a87c6..498ad7d4e7d9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -123,11 +123,6 @@ unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm) dqm->dev->kfd->device_info.num_sdma_queues_per_engine; } -static inline uint64_t get_reserved_sdma_queues_bitmap(struct device_queue_manager *dqm) -{ - return dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap; -} - static void init_sdma_bitmaps(struct device_queue_manager *dqm) { bitmap_zero(dqm->sdma_bitmap, KFD_MAX_SDMA_QUEUES); @@ -135,6 +130,11 @@ static void init_sdma_bitmaps(struct device_queue_manager *dqm) bitmap_zero(dqm->xgmi_sdma_bitmap, KFD_MAX_SDMA_QUEUES); bitmap_set(dqm->xgmi_sdma_bitmap, 0, get_num_xgmi_sdma_queues(dqm)); + + /* Mask out the reserved queues */ + bitmap_andnot(dqm->sdma_bitmap, dqm->sdma_bitmap, + dqm->dev->kfd->device_info.reserved_sdma_queues_bitmap, + KFD_MAX_SDMA_QUEUES); } void program_sh_mem_settings(struct device_queue_manager *dqm, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 023b17e0116b..7364a5d77c6e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -239,7 +239,7 @@ struct kfd_device_info { uint32_t no_atomic_fw_version; unsigned int num_sdma_queues_per_engine; unsigned int num_reserved_sdma_queues_per_engine; - uint64_t reserved_sdma_queues_bitmap; + DECLARE_BITMAP(reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES); }; unsigned int kfd_get_num_sdma_engines(struct kfd_node *kdev); -- cgit From 09d49e14ea6fd125a21f89b80f888c09be32a174 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Tue, 23 May 2023 11:57:27 -0400 Subject: drm/amdkfd: fix and enable debugging for gfx11 There are a couple of fixes required to enable gfx11 debugging. First, ADD_QUEUE.trap_en is an inappropriate place to toggle a per-process register, so move it to SET_SHADER_DEBUGGER.trap_en. When ADD_QUEUE.skip_process_ctx_clear is set, MES will prioritize the SET_SHADER_DEBUGGER.trap_en setting. Second, to preserve correct save/restore of privileged wave states in coordination with the trap enablement setting, resume suspended waves early in the disable call.
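[Editor's note, not part of the patch: a small self-contained sketch of the firmware gating this change relies on (see the kfd_topology_set_dbg_firmware_support() hunk below), where GFX11 debug support is derived from adev->mes.sched_version. The mask and shift values here are assumptions modelled on amdgpu_mes.h, not something this patch defines.]

#include <stdbool.h>
#include <stdint.h>

/* Assumed field layout of mes.sched_version (cf. AMDGPU_MES_VERSION_MASK and
 * AMDGPU_MES_API_VERSION_MASK/SHIFT in amdgpu_mes.h).
 */
#define EXAMPLE_MES_VERSION_MASK      0x00000fffu
#define EXAMPLE_MES_API_VERSION_MASK  0x00fff000u
#define EXAMPLE_MES_API_VERSION_SHIFT 12

bool gfx11_debug_fw_supported(uint32_t sched_version)
{
        uint32_t mes_rev = sched_version & EXAMPLE_MES_VERSION_MASK;
        uint32_t mes_api_rev = (sched_version & EXAMPLE_MES_API_VERSION_MASK) >>
                               EXAMPLE_MES_API_VERSION_SHIFT;

        /* SET_SHADER_DEBUGGER.trap_en needs MES API rev 14+, and the
         * scheduler firmware itself needs rev 64+.
         */
        return mes_api_rev >= 14 && mes_rev >= 64;
}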
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c | 7 ++++++- drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h | 4 +++- drivers/gpu/drm/amd/amdgpu/mes_v11_0.c | 1 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 14 ++++++-------- drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 3 +-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 12 +++++++----- drivers/gpu/drm/amd/include/mes_v11_api_def.h | 1 + 7 files changed, 25 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c index 20cc3fffe921..e9091ebfe230 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.c @@ -928,7 +928,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, const uint32_t *tcp_watch_cntl, - uint32_t flags) + uint32_t flags, + bool trap_en) { struct mes_misc_op_input op_input = {0}; int r; @@ -945,6 +946,10 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, memcpy(op_input.set_shader_debugger.tcp_watch_cntl, tcp_watch_cntl, sizeof(op_input.set_shader_debugger.tcp_watch_cntl)); + if (((adev->mes.sched_version & AMDGPU_MES_API_VERSION_MASK) >> + AMDGPU_MES_API_VERSION_SHIFT) >= 14) + op_input.set_shader_debugger.trap_en = trap_en; + amdgpu_mes_lock(&adev->mes); r = adev->mes.funcs->misc_op(&adev->mes, &op_input); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h index b5f5eed2b5ef..2d6ac30b7135 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mes.h @@ -294,6 +294,7 @@ struct mes_misc_op_input { } flags; uint32_t spi_gdbg_per_vmid_cntl; uint32_t tcp_watch_cntl[4]; + uint32_t trap_en; } set_shader_debugger; }; }; @@ -361,7 +362,8 @@ int amdgpu_mes_set_shader_debugger(struct amdgpu_device *adev, uint64_t process_context_addr, uint32_t spi_gdbg_per_vmid_cntl, const uint32_t *tcp_watch_cntl, - uint32_t flags); + uint32_t flags, + bool trap_en); int amdgpu_mes_add_ring(struct amdgpu_device *adev, int gang_id, int queue_type, int idx, diff --git a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c index c4e3cb8d44de..1bdaa00c0b46 100644 --- a/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c +++ b/drivers/gpu/drm/amd/amdgpu/mes_v11_0.c @@ -347,6 +347,7 @@ static int mes_v11_0_misc_op(struct amdgpu_mes *mes, memcpy(misc_pkt.set_shader_debugger.tcp_watch_cntl, input->set_shader_debugger.tcp_watch_cntl, sizeof(misc_pkt.set_shader_debugger.tcp_watch_cntl)); + misc_pkt.set_shader_debugger.trap_en = input->set_shader_debugger.trap_en; break; default: DRM_ERROR("unsupported misc op (%d) \n", input->op); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 125274445f43..cd34e7aaead4 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -349,12 +349,13 @@ int kfd_dbg_set_mes_debug_mode(struct kfd_process_device *pdd) { uint32_t spi_dbg_cntl = pdd->spi_dbg_override | pdd->spi_dbg_launch_mode; uint32_t flags = pdd->process->dbg_flags; + bool sq_trap_en = !!spi_dbg_cntl; if (!kfd_dbg_is_per_vmid_supported(pdd->dev)) return 0; return amdgpu_mes_set_shader_debugger(pdd->dev->adev, pdd->proc_ctx_gpu_addr, spi_dbg_cntl, - pdd->watch_points, flags); + pdd->watch_points, flags, sq_trap_en); } #define 
KFD_DEBUGGER_INVALID_WATCH_POINT_ID -1 @@ -557,6 +558,10 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind if (!unwind) { uint32_t flags = 0; + int resume_count = resume_queues(target, 0, NULL); + + if (resume_count) + pr_debug("Resumed %d queues\n", resume_count); cancel_work_sync(&target->debug_event_workarea); kfd_dbg_clear_process_address_watch(target); @@ -598,13 +603,6 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind } kfd_dbg_set_workaround(target, false); - - if (!unwind) { - int resume_count = resume_queues(target, 0, NULL); - - if (resume_count) - pr_debug("Resumed %d queues\n", resume_count); - } } static void kfd_dbg_clean_exception_status(struct kfd_process *target) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 498ad7d4e7d9..d6b15493fffd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -227,8 +227,7 @@ static int add_queue_mes(struct device_queue_manager *dqm, struct queue *q, queue_input.tba_addr = qpd->tba_addr; queue_input.tma_addr = qpd->tma_addr; queue_input.trap_en = KFD_GC_VERSION(q->device) < IP_VERSION(11, 0, 0) || - KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0) || - q->properties.is_dbg_wa; + KFD_GC_VERSION(q->device) >= IP_VERSION(12, 0, 0); queue_input.skip_process_ctx_clear = qpd->pqm->process->debug_trap_enabled; queue_type = convert_to_mes_queue_type(q->properties.type); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index faa7939f35bd..90b86a6ac7bd 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1863,13 +1863,15 @@ static void kfd_topology_set_dbg_firmware_support(struct kfd_topology_device *de { bool firmware_supported = true; - /* - * FIXME: GFX11 FW currently not sufficient to deal with CWSR WA. - * Updated FW with API changes coming soon. - */ if (KFD_GC_VERSION(dev->gpu) >= IP_VERSION(11, 0, 0) && KFD_GC_VERSION(dev->gpu) < IP_VERSION(12, 0, 0)) { - firmware_supported = false; + uint32_t mes_api_rev = (dev->gpu->adev->mes.sched_version & + AMDGPU_MES_API_VERSION_MASK) >> + AMDGPU_MES_API_VERSION_SHIFT; + uint32_t mes_rev = dev->gpu->adev->mes.sched_version & + AMDGPU_MES_VERSION_MASK; + + firmware_supported = (mes_api_rev >= 14) && (mes_rev >= 64); goto out; } diff --git a/drivers/gpu/drm/amd/include/mes_v11_api_def.h b/drivers/gpu/drm/amd/include/mes_v11_api_def.h index f3c15f18ddb5..0997e999416a 100644 --- a/drivers/gpu/drm/amd/include/mes_v11_api_def.h +++ b/drivers/gpu/drm/amd/include/mes_v11_api_def.h @@ -575,6 +575,7 @@ struct SET_SHADER_DEBUGGER { } flags; uint32_t spi_gdbg_per_vmid_cntl; uint32_t tcp_watch_cntl[4]; /* TCP_WATCHx_CNTL */ + uint32_t trap_en; }; union MESAPI__MISC { -- cgit