From f87f686482c6d2d4465245356854710b01f312c1 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 9 May 2022 22:22:20 -0400 Subject: drm/amdgpu: Add XCC inst to PASID TLB flushing Add XCC instance to select the correct KIQ ring when flushing TLBs on a multi-XCC setup. Signed-off-by: Mukul Joshi Tested-by: Amber Lin Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 01ba3589b60a..df07e212c21e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -160,7 +160,8 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev); int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev, uint16_t vmid); int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev, - uint16_t pasid, enum TLB_FLUSH_TYPE flush_type); + uint16_t pasid, enum TLB_FLUSH_TYPE flush_type, + uint32_t inst); bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid); -- cgit From 0c7315e7d5ef9b36ca4db32ffeb34a187cbaf231 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Fri, 10 Jun 2022 09:41:29 -0400 Subject: drm/amdkfd: Add device repartition support GFX9.4.3 will support dynamic repartitioning of the GPU through sysfs. Add device repartitioning support in KFD to repartition GPU from one mode to other. v2: squash in fix ("drm/amdkfd: Fix warning kgd2kfd_unlock_kfd defined but not used") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 10 ++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 13 +++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c | 22 +++++++++++++++++++++- drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c | 5 +---- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 21 +++++++++++++++++++++ 5 files changed, 66 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 9d19c7ceda3f..bbbfe9ec4adf 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -773,3 +773,13 @@ bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev) else return false; } + +int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev) +{ + return kgd2kfd_check_and_lock_kfd(); +} + +void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev) +{ + kgd2kfd_unlock_kfd(); +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index df07e212c21e..d1d643a050a1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -151,6 +151,8 @@ void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev, void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev); void amdgpu_amdkfd_device_init(struct amdgpu_device *adev); void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev); +int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev); +void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev); int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev, enum kgd_engine_type engine, uint32_t vmid, uint64_t gpu_addr, @@ -373,6 +375,8 @@ int kgd2kfd_post_reset(struct kfd_dev *kfd); void kgd2kfd_interrupt(struct kfd_dev *kfd, const 
void *ih_ring_entry); void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd); void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask); +int kgd2kfd_check_and_lock_kfd(void); +void kgd2kfd_unlock_kfd(void); #else static inline int kgd2kfd_init(void) { @@ -438,5 +442,14 @@ static inline void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask) { } + +static inline int kgd2kfd_check_and_lock_kfd(void) +{ + return 0; +} + +static inline void kgd2kfd_unlock_kfd(void) +{ +} #endif #endif /* AMDGPU_AMDKFD_H_INCLUDED */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index 069b259f384c..69bac5b801ce 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1233,10 +1233,30 @@ static ssize_t amdgpu_gfx_set_compute_partition(struct device *dev, return -EINVAL; } + if (!adev->kfd.init_complete) + return -EPERM; + mutex_lock(&adev->gfx.partition_mutex); - ret = adev->gfx.funcs->switch_partition_mode(adev, mode); + if (mode == adev->gfx.funcs->query_partition_mode(adev)) + goto out; + + ret = amdgpu_amdkfd_check_and_lock_kfd(adev); + if (ret) + goto out; + + amdgpu_amdkfd_device_fini_sw(adev); + + adev->gfx.funcs->switch_partition_mode(adev, mode); + + amdgpu_amdkfd_device_probe(adev); + amdgpu_amdkfd_device_init(adev); + /* If KFD init failed, return failure */ + if (!adev->kfd.init_complete) + ret = -EIO; + amdgpu_amdkfd_unlock_kfd(adev); +out: mutex_unlock(&adev->gfx.partition_mutex); if (ret) diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c index c776fc5884de..47d8ac64e877 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_4_3.c @@ -675,7 +675,7 @@ static void gfx_v9_4_3_select_me_pipe_q(struct amdgpu_device *adev, static enum amdgpu_gfx_partition gfx_v9_4_3_query_compute_partition(struct amdgpu_device *adev) { - enum amdgpu_gfx_partition mode = AMDGPU_UNKNOWN_COMPUTE_PARTITION_MODE; + enum amdgpu_gfx_partition mode = adev->gfx.partition_mode; if (adev->nbio.funcs->get_compute_partition_mode) mode = adev->nbio.funcs->get_compute_partition_mode(adev); @@ -689,9 +689,6 @@ static int gfx_v9_4_3_switch_compute_partition(struct amdgpu_device *adev, u32 tmp = 0; int num_xcc_per_partition, i, num_xcc; - if (mode == adev->gfx.partition_mode) - return mode; - num_xcc = NUM_XCC(adev->gfx.xcc_mask); switch (mode) { case AMDGPU_SPX_PARTITION_MODE: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index eb2b44fddf74..293787290e36 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -1356,6 +1356,27 @@ unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node) kfd_get_num_sdma_engines(node); } +int kgd2kfd_check_and_lock_kfd(void) +{ + mutex_lock(&kfd_processes_mutex); + if (!hash_empty(kfd_processes_table) || kfd_is_locked()) { + mutex_unlock(&kfd_processes_mutex); + return -EBUSY; + } + + ++kfd_locked; + mutex_unlock(&kfd_processes_mutex); + + return 0; +} + +void kgd2kfd_unlock_kfd(void) +{ + mutex_lock(&kfd_processes_mutex); + --kfd_locked; + mutex_unlock(&kfd_processes_mutex); +} + #if defined(CONFIG_DEBUG_FS) /* This function will send a package to HIQ to hang the HWS -- cgit From 610dab118ff5013d46069c828b58d576e0907b66 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Fri, 31 Mar 2023 11:13:40 -0400 Subject: drm/amdkfd: Move pgmap to amdgpu_kfd_dev structure VRAM pgmap resource is 
allocated every time when switching compute partitions because kfd_dev is re-initialized by post_partition_switch, As a result, it causes memory region resource leaking and system memory usage accounting unbalanced. pgmap resource should be allocated and registered only once when loading driver and freed when unloading driver, move it from kfd_dev to amdgpu_kfd_dev. Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4 ++++ drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 ++++---- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 --- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 6 +++--- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 4 ++-- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2 +- 6 files changed, 14 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index d1d643a050a1..e4e1dbba060a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -30,6 +30,7 @@ #include #include #include +#include #include #include #include "amdgpu_sync.h" @@ -101,6 +102,9 @@ struct amdgpu_kfd_dev { uint64_t vram_used_aligned; bool init_complete; struct work_struct reset_work; + + /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ + struct dev_pagemap pgmap; }; enum kgd_engine_type { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 42e599912e52..199d32c7c289 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -206,7 +206,7 @@ svm_migrate_copy_done(struct amdgpu_device *adev, struct dma_fence *mfence) unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr) { - return (addr + adev->kfd.dev->pgmap.range.start) >> PAGE_SHIFT; + return (addr + adev->kfd.pgmap.range.start) >> PAGE_SHIFT; } static void @@ -236,7 +236,7 @@ svm_migrate_addr(struct amdgpu_device *adev, struct page *page) unsigned long addr; addr = page_to_pfn(page) << PAGE_SHIFT; - return (addr - adev->kfd.dev->pgmap.range.start); + return (addr - adev->kfd.pgmap.range.start); } static struct page * @@ -990,14 +990,14 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = { int svm_migrate_init(struct amdgpu_device *adev) { - struct kfd_dev *kfddev = adev->kfd.dev; + struct amdgpu_kfd_dev *kfddev = &adev->kfd; struct dev_pagemap *pgmap; struct resource *res = NULL; unsigned long size; void *r; /* Page migration works on Vega10 or newer */ - if (!KFD_IS_SOC15(kfddev)) + if (!KFD_IS_SOC15(kfddev->dev)) return -EINVAL; pgmap = &kfddev->pgmap; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 02a90fd7f646..214d950f948e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -378,9 +378,6 @@ struct kfd_dev { int noretry; - /* HMM page migration MEMORY_DEVICE_PRIVATE mapping */ - struct dev_pagemap pgmap; - struct kfd_node *nodes[MAX_KFD_NODES]; unsigned int num_nodes; }; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index cf354f9e4285..2b2129dd1e4a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -174,7 +174,7 @@ svm_range_dma_map_dev(struct amdgpu_device *adev, struct svm_range *prange, addr[i] = (hmm_pfns[i] << PAGE_SHIFT) + bo_adev->vm_manager.vram_base_offset - - 
bo_adev->kfd.dev->pgmap.range.start; + bo_adev->kfd.pgmap.range.start; addr[i] |= SVM_RANGE_VRAM_DOMAIN; pr_debug_ratelimited("vram address: 0x%llx\n", addr[i]); continue; @@ -2827,7 +2827,7 @@ svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, bool migration = false; int r = 0; - if (!KFD_IS_SVM_API_SUPPORTED(adev->kfd.dev)) { + if (!KFD_IS_SVM_API_SUPPORTED(adev)) { pr_debug("device does not support SVM\n"); return -EFAULT; } @@ -3112,7 +3112,7 @@ int svm_range_list_init(struct kfd_process *p) spin_lock_init(&svms->deferred_list_lock); for (i = 0; i < p->n_pdds; i++) - if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->kfd)) + if (KFD_IS_SVM_API_SUPPORTED(p->pdds[i]->dev->adev)) bitmap_set(svms->bitmap_supported, i, 1); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 7515ddade3ae..021def496f5a 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -200,8 +200,8 @@ void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_s /* SVM API and HMM page migration work together, device memory type * is initialized to not 0 when page migration register device memory. */ -#define KFD_IS_SVM_API_SUPPORTED(dev) ((dev)->pgmap.type != 0 ||\ - (dev)->adev->gmc.is_app_apu) +#define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\ + (adev)->gmc.is_app_apu) void svm_range_bo_unref_async(struct svm_range_bo *svm_bo); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index d2a42b6b1fa8..6d6243b978e1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -2021,7 +2021,7 @@ int kfd_topology_add_device(struct kfd_node *gpu) dev->node_props.capability |= (dev->gpu->adev->ras_enabled != 0) ? HSA_CAP_RASEVENTNOTIFY : 0; - if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev->kfd.dev)) + if (KFD_IS_SVM_API_SUPPORTED(dev->gpu->adev)) dev->node_props.capability |= HSA_CAP_SVMAPI_SUPPORTED; kfd_debug_print_topology(); -- cgit From 4c6ce75fdd628c43aea11448ed41b52119dae42b Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 26 Jan 2023 18:11:29 -0500 Subject: drm/amdkfd: Show KFD node memory partition info Show KFD node memory partition id and size, add helper function KFD_XCP_MEMORY_SIZE to get kfd node memory size, will be used later to support memory accounting per partition. 
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 5 +++++ drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 ++++++- 2 files changed, 11 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index e4e1dbba060a..324cb566ca2f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -330,6 +330,11 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag); +#define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\ + (n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\ + (n)->adev->xcp_mgr->num_xcp_per_mem_partition :\ + (n)->adev->gmc.real_vram_size) + #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index b5497d2ee984..db5b53fcdf11 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -724,7 +724,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd_cwsr_init(kfd); - /* TODO: Needs to be updated for memory partitioning */ svm_migrate_init(kfd->adev); amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); @@ -754,6 +753,12 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, (1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1; } + if (node->xcp) { + dev_info(kfd_device, "KFD node %d partition %d size %lldM\n", + node->node_id, node->xcp->mem_id, + KFD_XCP_MEMORY_SIZE(node) >> 20); + } + if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && partition_mode == AMDGPU_CPX_PARTITION_MODE && kfd->num_nodes != 1) { -- cgit From 3ebfd221c1a83e5f0edadb87d173d8fd93d1d125 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Wed, 8 Mar 2023 11:57:00 -0500 Subject: drm/amdkfd: Store xcp partition id to amdgpu bo For memory accounting per compute partition and export drm amdgpu bo and then import to KFD, we need the xcp id to account the memory usage or find the KFD node of the original amdgpu bo to create the KFD bo on the correct adev KFD node. Set xcp_id_plus1 of amdgpu_bo_param to create bo and store xcp_id to amddgpu bo. Add helper macro to get the mem_id from adev and xcp_id. 
v2: squash in fix ("drm/amdgpu: Fix BO creation failure on GFX 9.4.3 dGPU") Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 4 ++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 11 ++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 4 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h | 2 +- drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 15 ++++++++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 12 ++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 6 +++++- drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c | 5 +++-- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 4 ++-- 10 files changed, 42 insertions(+), 23 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 324cb566ca2f..05c54776951b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -330,6 +330,10 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag); +#define KFD_XCP_MEM_ID(adev, xcp_id) \ + ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ + (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1) + #define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\ (n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\ (n)->adev->xcp_mgr->num_xcp_per_mem_partition :\ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index c234dc0db799..8724a0be31b8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -1634,6 +1634,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( uint64_t *offset, uint32_t flags, bool criu_resume) { struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv); + struct amdgpu_fpriv *fpriv = container_of(avm, struct amdgpu_fpriv, vm); enum ttm_bo_type bo_type = ttm_bo_type_device; struct sg_table *sg = NULL; uint64_t user_addr = 0; @@ -1641,7 +1642,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct drm_gem_object *gobj = NULL; u32 domain, alloc_domain; uint64_t aligned_size; - int8_t mem_id = -1; + int8_t xcp_id = -1; u64 alloc_flags; int ret; @@ -1660,7 +1661,7 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( alloc_flags |= (flags & KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC) ? AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED : 0; } - mem_id = avm->mem_id; + xcp_id = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id; } else if (flags & KFD_IOC_ALLOC_MEM_FLAGS_GTT) { domain = alloc_domain = AMDGPU_GEM_DOMAIN_GTT; alloc_flags = 0; @@ -1718,12 +1719,12 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( goto err_reserve_limit; } - pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s mem_id %d\n", + pr_debug("\tcreate BO VA 0x%llx size 0x%llx domain %s xcp_id %d\n", va, (*mem)->aql_queue ? size << 1 : size, - domain_string(alloc_domain), mem_id); + domain_string(alloc_domain), xcp_id); ret = amdgpu_gem_object_create(adev, aligned_size, 1, alloc_domain, alloc_flags, - bo_type, NULL, &gobj, mem_id + 1); + bo_type, NULL, &gobj, xcp_id + 1); if (ret) { pr_debug("Failed to create BO on domain %s. 
ret %d\n", domain_string(alloc_domain), ret); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c index 33ebee18b80d..7e8839cc6f58 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c @@ -98,7 +98,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj, int8_t mem_id_plus1) + struct drm_gem_object **obj, int8_t xcp_id_plus1) { struct amdgpu_bo *bo; struct amdgpu_bo_user *ubo; @@ -116,7 +116,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, bp.flags = flags; bp.domain = initial_domain; bp.bo_ptr_size = sizeof(struct amdgpu_bo); - bp.mem_id_plus1 = mem_id_plus1; + bp.xcp_id_plus1 = xcp_id_plus1; r = amdgpu_bo_create_user(adev, &bp, &ubo); if (r) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h index 646c4fcc8e40..f30264782ba2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.h @@ -43,7 +43,7 @@ int amdgpu_gem_object_create(struct amdgpu_device *adev, unsigned long size, int alignment, u32 initial_domain, u64 flags, enum ttm_bo_type type, struct dma_resv *resv, - struct drm_gem_object **obj, int8_t mem_id_plus1); + struct drm_gem_object **obj, int8_t xcp_id_plus1); int amdgpu_mode_dumb_create(struct drm_file *file_priv, struct drm_device *dev, struct drm_mode_create_dumb *args); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index b2d11c4f39b0..42c02f48c3a8 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -131,14 +131,15 @@ void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain) if (domain & AMDGPU_GEM_DOMAIN_VRAM) { unsigned int visible_pfn = adev->gmc.visible_vram_size >> PAGE_SHIFT; + int8_t mem_id = KFD_XCP_MEM_ID(adev, abo->xcp_id); - if (adev->gmc.mem_partitions && abo->mem_id >= 0) { - places[c].fpfn = adev->gmc.mem_partitions[abo->mem_id].range.fpfn; + if (adev->gmc.mem_partitions && mem_id >= 0) { + places[c].fpfn = adev->gmc.mem_partitions[mem_id].range.fpfn; /* * memory partition range lpfn is inclusive start + size - 1 * TTM place lpfn is exclusive start + size */ - places[c].lpfn = adev->gmc.mem_partitions[abo->mem_id].range.lpfn + 1; + places[c].lpfn = adev->gmc.mem_partitions[mem_id].range.lpfn + 1; } else { places[c].fpfn = 0; places[c].lpfn = 0; @@ -583,8 +584,12 @@ int amdgpu_bo_create(struct amdgpu_device *adev, bo->flags = bp->flags; - /* bo->mem_id -1 means any partition */ - bo->mem_id = bp->mem_id_plus1 - 1; + if (adev->gmc.mem_partitions) + /* For GPUs with spatial partitioning, bo->xcp_id=-1 means any partition */ + bo->xcp_id = bp->xcp_id_plus1 - 1; + else + /* For GPUs without spatial partitioning */ + bo->xcp_id = 0; if (!amdgpu_bo_support_uswc(bo->flags)) bo->flags &= ~AMDGPU_GEM_CREATE_CPU_GTT_USWC; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h index eb24a66ccee5..05496b97ef93 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h @@ -56,8 +56,8 @@ struct amdgpu_bo_param { bool no_wait_gpu; struct dma_resv *resv; void (*destroy)(struct ttm_buffer_object *bo); - /* memory partition number plus 1, 0 means any partition */ - int8_t mem_id_plus1; + /* xcp partition number plus 1, 0 
means any partition */ + int8_t xcp_id_plus1; }; /* bo virtual addresses in a vm */ @@ -111,8 +111,12 @@ struct amdgpu_bo { #endif struct kgd_mem *kfd_bo; - /* memory partition number, -1 means any partition */ - int8_t mem_id; + /* + * For GPUs with spatial partitioning, xcp partition number, -1 means + * any partition. For other ASICs without spatial partition, always 0 + * for memory accounting. + */ + int8_t xcp_id; }; struct amdgpu_bo_user { diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 129c593cb2bd..23101c82519a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1051,6 +1051,7 @@ static void amdgpu_ttm_backend_destroy(struct ttm_device *bdev, static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { + struct amdgpu_device *adev = amdgpu_ttm_adev(bo->bdev); struct amdgpu_bo *abo = ttm_to_amdgpu_bo(bo); struct amdgpu_ttm_tt *gtt; enum ttm_caching caching; @@ -1060,7 +1061,10 @@ static struct ttm_tt *amdgpu_ttm_tt_create(struct ttm_buffer_object *bo, return NULL; } gtt->gobj = &bo->base; - gtt->pool_id = abo->mem_id; + if (adev->gmc.mem_partitions && abo->xcp_id >= 0) + gtt->pool_id = KFD_XCP_MEM_ID(adev, abo->xcp_id); + else + gtt->pool_id = abo->xcp_id; if (abo->flags & AMDGPU_GEM_CREATE_CPU_GTT_USWC) caching = ttm_write_combined; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 62fc7e8d326e..cc3b1b596e56 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -502,6 +502,7 @@ exit: int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, int level, bool immediate, struct amdgpu_bo_vm **vmbo) { + struct amdgpu_fpriv *fpriv = container_of(vm, struct amdgpu_fpriv, vm); struct amdgpu_bo_param bp; struct amdgpu_bo *bo; struct dma_resv *resv; @@ -534,7 +535,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.no_wait_gpu = immediate; - bp.mem_id_plus1 = vm->mem_id + 1; + bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 0 : fpriv->xcp_id + 1; if (vm->root.bo) bp.resv = vm->root.bo->tbo.base.resv; @@ -560,7 +561,7 @@ int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, bp.type = ttm_bo_type_kernel; bp.resv = bo->tbo.base.resv; bp.bo_ptr_size = sizeof(struct amdgpu_bo); - bp.mem_id_plus1 = vm->mem_id + 1; + bp.xcp_id_plus1 = fpriv->xcp_id == ~0 ? 
0 : fpriv->xcp_id + 1; r = amdgpu_bo_create(adev, &bp, &(*vmbo)->shadow); diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 263d17a8b433..7ea80bdf8e1e 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1248,7 +1248,7 @@ static void gmc_v9_0_get_coherence_flags(struct amdgpu_device *adev, is_local = (!is_vram && (adev->flags & AMD_IS_APU) && num_possible_nodes() <= 1) || (is_vram && adev == bo_adev && - bo->mem_id == vm->mem_id); + KFD_XCP_MEM_ID(adev, bo->xcp_id) == vm->mem_id); snoop = true; if (uncached) { mtype = MTYPE_UC; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index a700d9ccd054..45959892bc0f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -556,7 +556,7 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, bp.type = ttm_bo_type_device; bp.resv = NULL; if (node->xcp) - bp.mem_id_plus1 = node->xcp->mem_id + 1; + bp.xcp_id_plus1 = node->xcp->id + 1; r = amdgpu_bo_create_user(node->adev, &bp, &ubo); if (r) { @@ -567,7 +567,7 @@ svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, pr_debug("alloc bo at offset 0x%lx size 0x%lx on partition %d\n", bo->tbo.resource->start << PAGE_SHIFT, bp.size, - bp.mem_id_plus1 - 1); + bp.xcp_id_plus1 - 1); r = amdgpu_bo_reserve(bo, true); if (r) { -- cgit From 2fa9ff25de08e598af051c76b216d2f073b2ee89 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Thu, 9 Mar 2023 19:30:02 -0500 Subject: drm/amdgpu: KFD graphics interop support compute partition kfd_ioctl_get_dmabuf use the amdgpu bo xcp_id to get the gpu_id of the KFD node from the exported dmabuf_adev, and then create kfd bo on the correct adev and KFD node when importing the amdgpu bo to KFD. Remove function kfd_device_by_adev, it is not needed as it is the same result as dmabuf_adev->kfd.dev->nodes[0]->id. 
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 4 +++- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 14 ++++++-------- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 1 - drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 18 ------------------ 5 files changed, 10 insertions(+), 29 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index bbbfe9ec4adf..00edb13d2124 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -498,7 +498,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, struct amdgpu_device **dmabuf_adev, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, - uint32_t *flags) + uint32_t *flags, int8_t *xcp_id) { struct dma_buf *dma_buf; struct drm_gem_object *obj; @@ -542,6 +542,8 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, if (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) *flags |= KFD_IOC_ALLOC_MEM_FLAGS_PUBLIC; } + if (xcp_id) + *xcp_id = bo->xcp_id; out_put: dma_buf_put(dma_buf); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 05c54776951b..4e6221bccffe 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -241,7 +241,7 @@ int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd, struct amdgpu_device **dmabuf_adev, uint64_t *bo_size, void *metadata_buffer, size_t buffer_size, uint32_t *metadata_size, - uint32_t *flags); + uint32_t *flags, int8_t *xcp_id); uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst, struct amdgpu_device *src); int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index f85ac4dbc673..fcad90d53c9b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1499,6 +1499,7 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, struct amdgpu_device *dmabuf_adev; void *metadata_buffer = NULL; uint32_t flags; + int8_t xcp_id; unsigned int i; int r; @@ -1519,17 +1520,14 @@ static int kfd_ioctl_get_dmabuf_info(struct file *filep, r = amdgpu_amdkfd_get_dmabuf_info(dev->adev, args->dmabuf_fd, &dmabuf_adev, &args->size, metadata_buffer, args->metadata_size, - &args->metadata_size, &flags); + &args->metadata_size, &flags, &xcp_id); if (r) goto exit; - /* Reverse-lookup gpu_id from kgd pointer */ - dev = kfd_device_by_adev(dmabuf_adev); - if (!dev) { - r = -EINVAL; - goto exit; - } - args->gpu_id = dev->id; + if (xcp_id >= 0) + args->gpu_id = dmabuf_adev->kfd.dev->nodes[xcp_id]->id; + else + args->gpu_id = dmabuf_adev->kfd.dev->nodes[0]->id; args->flags = flags; /* Copy metadata buffer to user mode */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 214d950f948e..44f4d5509db6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -1068,7 +1068,6 @@ struct kfd_topology_device *kfd_topology_device_by_proximity_domain_no_lock( struct kfd_topology_device *kfd_topology_device_by_id(uint32_t gpu_id); struct kfd_node *kfd_device_by_id(uint32_t gpu_id); struct kfd_node 
*kfd_device_by_pci_dev(const struct pci_dev *pdev); -struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev); static inline bool kfd_irq_is_from_node(struct kfd_node *node, uint32_t node_id, uint32_t vmid) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index a8e25aecf839..dbb6159344b3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -125,24 +125,6 @@ struct kfd_node *kfd_device_by_pci_dev(const struct pci_dev *pdev) return device; } -struct kfd_node *kfd_device_by_adev(const struct amdgpu_device *adev) -{ - struct kfd_topology_device *top_dev; - struct kfd_node *device = NULL; - - down_read(&topology_lock); - - list_for_each_entry(top_dev, &topology_device_list, list) - if (top_dev->gpu && top_dev->gpu->adev == adev) { - device = top_dev->gpu; - break; - } - - up_read(&topology_lock); - - return device; -} - /* Called with write topology_lock acquired */ static void kfd_release_topology_device(struct kfd_topology_device *dev) { -- cgit From 315e29eca57f85107cc6f687c2d510aa532fb3f0 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 20 Mar 2023 11:21:38 -0400 Subject: drm/amdkfd: Move local_mem_info to kfd_node We need to track memory usage on a per partition basis. To do that, store the local memory information in KFD node instead of kfd device. v2: squash in fix ("amdkfd: Use mem_id to access mem_partition info") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 17 +++++++++++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12 +++++++----- drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 7 ++++--- drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 7 +++++-- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 7 ++++--- 7 files changed, 36 insertions(+), 19 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 00edb13d2124..85df73f2c85e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -428,14 +428,23 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, } void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, - struct kfd_local_mem_info *mem_info) + struct kfd_local_mem_info *mem_info, + uint8_t xcp_id) { memset(mem_info, 0, sizeof(*mem_info)); - mem_info->local_mem_size_public = adev->gmc.visible_vram_size; - mem_info->local_mem_size_private = adev->gmc.real_vram_size - + if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size) + mem_info->local_mem_size_public = + KFD_XCP_MEMORY_SIZE(adev, xcp_id); + else + mem_info->local_mem_size_private = + KFD_XCP_MEMORY_SIZE(adev, xcp_id); + } else { + mem_info->local_mem_size_public = adev->gmc.visible_vram_size; + mem_info->local_mem_size_private = adev->gmc.real_vram_size - adev->gmc.visible_vram_size; - + } mem_info->vram_width = adev->gmc.vram_width; pr_debug("Address base: %pap public 0x%llx private 0x%llx\n", diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 4e6221bccffe..4bf6f5659568 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -231,7 +231,8 @@ int 
amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem); uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, enum kgd_engine_type type); void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, - struct kfd_local_mem_info *mem_info); + struct kfd_local_mem_info *mem_info, + uint8_t xcp_id); uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev); uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev); @@ -334,10 +335,11 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1) -#define KFD_XCP_MEMORY_SIZE(n) ((n)->adev->gmc.num_mem_partitions ?\ - (n)->adev->gmc.mem_partitions[(n)->xcp->mem_id].size /\ - (n)->adev->xcp_mgr->num_xcp_per_mem_partition :\ - (n)->adev->gmc.real_vram_size) +#define KFD_XCP_MEMORY_SIZE(adev, xcp_id)\ + ((adev)->gmc.num_mem_partitions && (xcp_id) >= 0 ?\ + (adev)->gmc.mem_partitions[KFD_XCP_MEM_ID((adev), (xcp_id))].size /\ + (adev)->xcp_mgr->num_xcp_per_mem_partition :\ + (adev)->gmc.real_vram_size) #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index fcad90d53c9b..1ae867482bc7 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1023,11 +1023,12 @@ bool kfd_dev_is_large_bar(struct kfd_node *dev) if (dev->kfd->use_iommu_v2) return false; - if (dev->kfd->local_mem_info.local_mem_size_private == 0 && - dev->kfd->local_mem_info.local_mem_size_public > 0) + if (dev->local_mem_info.local_mem_size_private == 0 && + dev->local_mem_info.local_mem_size_public > 0) return true; - if (dev->kfd->local_mem_info.local_mem_size_public == 0 && dev->kfd->adev->gmc.is_app_apu) { + if (dev->local_mem_info.local_mem_size_public == 0 && + dev->kfd->adev->gmc.is_app_apu) { pr_debug("APP APU, Consider like a large bar system\n"); return true; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c index 1aaf933f9f48..950af6820153 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c @@ -2191,7 +2191,7 @@ static int kfd_create_vcrat_image_gpu(void *pcrat_image, * report the total FB size (public+private) as a single * private heap. 
*/ - local_mem_info = kdev->kfd->local_mem_info; + local_mem_info = kdev->local_mem_info; sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr + sub_type_hdr->length); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index db5b53fcdf11..d41da964d2f5 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -726,7 +726,6 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, svm_migrate_init(kfd->adev); - amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info); dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n", kfd->num_nodes); @@ -756,7 +755,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, if (node->xcp) { dev_info(kfd_device, "KFD node %d partition %d size %lldM\n", node->node_id, node->xcp->mem_id, - KFD_XCP_MEMORY_SIZE(node) >> 20); + KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20); } if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) && @@ -783,6 +782,10 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, } node->max_proc_per_quantum = max_proc_per_quantum; atomic_set(&node->sram_ecc_flag, 0); + + amdgpu_amdkfd_get_local_mem_info(kfd->adev, + &node->local_mem_info, node->xcp->id); + /* Initialize the KFD node */ if (kfd_init_node(node)) { dev_err(kfd_device, "Error initializing KFD node\n"); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index 44f4d5509db6..3bd222e8f6c3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -313,6 +313,8 @@ struct kfd_node { unsigned int compute_vmid_bitmap; + struct kfd_local_mem_info local_mem_info; + struct kfd_dev *kfd; }; @@ -335,7 +337,6 @@ struct kfd_dev { */ struct kgd2kfd_shared_resources shared_resources; - struct kfd_local_mem_info local_mem_info; const struct kfd2kgd_calls *kfd2kgd; struct mutex doorbell_mutex; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index dbb6159344b3..e0bacf017a40 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1152,8 +1152,8 @@ static uint32_t kfd_generate_gpu_id(struct kfd_node *gpu) if (!gpu) return 0; - local_mem_size = gpu->kfd->local_mem_info.local_mem_size_private + - gpu->kfd->local_mem_info.local_mem_size_public; + local_mem_size = gpu->local_mem_info.local_mem_size_private + + gpu->local_mem_info.local_mem_size_public; buf[0] = gpu->adev->pdev->devfn; buf[1] = gpu->adev->pdev->subsystem_vendor | (gpu->adev->pdev->subsystem_device << 16); @@ -1234,7 +1234,8 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) * for APUs - If CRAT from ACPI reports more than one bank, then * all the banks will report the same mem_clk_max information */ - amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info); + amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info, + dev->gpu->xcp->id); list_for_each_entry(mem, &dev->mem_props, list) mem->mem_clk_max = local_mem_info.mem_clk_max; -- cgit From 1c77527a69d5ca19cb276e2728992d922b687f35 Mon Sep 17 00:00:00 2001 From: Mukul Joshi Date: Mon, 20 Mar 2023 11:22:30 -0400 Subject: drm/amdkfd: Fix memory reporting on GFX 9.4.3 This patch fixes memory reporting on the GFX 9.4.3 APU and dGPU by reporting available memory on a per partition basis. If its an APU, available and used memory calculations take into account system and TTM memory. 
v2: squash in fix ("drm/amdkfd: Fix array out of bound warning") squash in fix ("drm/amdgpu: Update memory reporting for GFX9.4.3") Signed-off-by: Mukul Joshi Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 12 ++-- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 81 ++++++++++++++++++------ drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h | 5 ++ drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 3 +- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 14 ++-- 5 files changed, 84 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 4bf6f5659568..948d362adabb 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -35,6 +35,7 @@ #include #include "amdgpu_sync.h" #include "amdgpu_vm.h" +#include "amdgpu_xcp.h" extern uint64_t amdgpu_amdkfd_total_mem_size; @@ -98,8 +99,8 @@ struct amdgpu_amdkfd_fence { struct amdgpu_kfd_dev { struct kfd_dev *dev; - int64_t vram_used; - uint64_t vram_used_aligned; + int64_t vram_used[MAX_XCP]; + uint64_t vram_used_aligned[MAX_XCP]; bool init_complete; struct work_struct reset_work; @@ -287,7 +288,8 @@ int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev, void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev, void *drm_priv); uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv); -size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev); +size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, + uint8_t xcp_id); int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( struct amdgpu_device *adev, uint64_t va, uint64_t size, void *drm_priv, struct kgd_mem **mem, @@ -327,9 +329,9 @@ void amdgpu_amdkfd_block_mmu_notifications(void *p); int amdgpu_amdkfd_criu_resume(void *p); bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev); int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag); + uint64_t size, u32 alloc_flag, int8_t xcp_id); void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag); + uint64_t size, u32 alloc_flag, int8_t xcp_id); #define KFD_XCP_MEM_ID(adev, xcp_id) \ ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c index 8724a0be31b8..cc37f04651e2 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c @@ -157,12 +157,13 @@ void amdgpu_amdkfd_reserve_system_mem(uint64_t size) * Return: returns -ENOMEM in case of error, ZERO otherwise */ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag) + uint64_t size, u32 alloc_flag, int8_t xcp_id) { uint64_t reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); size_t system_mem_needed, ttm_mem_needed, vram_needed; int ret = 0; + uint64_t vram_size = 0; system_mem_needed = 0; ttm_mem_needed = 0; @@ -177,6 +178,17 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, * 2M BO chunk. 
*/ vram_needed = size; + /* + * For GFX 9.4.3, get the VRAM size from XCP structs + */ + if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id)) + return -EINVAL; + + vram_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id); + if (adev->gmc.is_app_apu) { + system_mem_needed = size; + ttm_mem_needed = size; + } } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { system_mem_needed = size; } else if (!(alloc_flag & @@ -196,8 +208,8 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, kfd_mem_limit.max_system_mem_limit && !no_system_mem_limit) || (kfd_mem_limit.ttm_mem_used + ttm_mem_needed > kfd_mem_limit.max_ttm_mem_limit) || - (adev && adev->kfd.vram_used + vram_needed > - adev->gmc.real_vram_size - reserved_for_pt)) { + (adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] + vram_needed > + vram_size - reserved_for_pt)) { ret = -ENOMEM; goto release; } @@ -207,9 +219,11 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, */ WARN_ONCE(vram_needed && !adev, "adev reference can't be null when vram is used"); - if (adev) { - adev->kfd.vram_used += vram_needed; - adev->kfd.vram_used_aligned += ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); + if (adev && xcp_id >= 0) { + adev->kfd.vram_used[xcp_id] += vram_needed; + adev->kfd.vram_used_aligned[xcp_id] += adev->gmc.is_app_apu ? + vram_needed : + ALIGN(vram_needed, VRAM_AVAILABLITY_ALIGN); } kfd_mem_limit.system_mem_used += system_mem_needed; kfd_mem_limit.ttm_mem_used += ttm_mem_needed; @@ -220,7 +234,7 @@ release: } void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, - uint64_t size, u32 alloc_flag) + uint64_t size, u32 alloc_flag, int8_t xcp_id) { spin_lock(&kfd_mem_limit.mem_limit_lock); @@ -230,9 +244,19 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_VRAM) { WARN_ONCE(!adev, "adev reference can't be null when alloc mem flags vram is set"); + if (WARN_ONCE(xcp_id < 0, "invalid XCP ID %d", xcp_id)) + goto release; + if (adev) { - adev->kfd.vram_used -= size; - adev->kfd.vram_used_aligned -= ALIGN(size, VRAM_AVAILABLITY_ALIGN); + adev->kfd.vram_used[xcp_id] -= size; + if (adev->gmc.is_app_apu) { + adev->kfd.vram_used_aligned[xcp_id] -= size; + kfd_mem_limit.system_mem_used -= size; + kfd_mem_limit.ttm_mem_used -= size; + } else { + adev->kfd.vram_used_aligned[xcp_id] -= + ALIGN(size, VRAM_AVAILABLITY_ALIGN); + } } } else if (alloc_flag & KFD_IOC_ALLOC_MEM_FLAGS_USERPTR) { kfd_mem_limit.system_mem_used -= size; @@ -242,8 +266,8 @@ void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, pr_err("%s: Invalid BO type %#x\n", __func__, alloc_flag); goto release; } - WARN_ONCE(adev && adev->kfd.vram_used < 0, - "KFD VRAM memory accounting unbalanced"); + WARN_ONCE(adev && xcp_id >= 0 && adev->kfd.vram_used[xcp_id] < 0, + "KFD VRAM memory accounting unbalanced for xcp: %d", xcp_id); WARN_ONCE(kfd_mem_limit.ttm_mem_used < 0, "KFD TTM memory accounting unbalanced"); WARN_ONCE(kfd_mem_limit.system_mem_used < 0, @@ -259,7 +283,8 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) u32 alloc_flags = bo->kfd_bo->alloc_flags; u64 size = amdgpu_bo_size(bo); - amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags); + amdgpu_amdkfd_unreserve_mem_limit(adev, size, alloc_flags, + bo->xcp_id); kfree(bo->kfd_bo); } @@ -1609,23 +1634,42 @@ out_unlock: return ret; } -size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev) +size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev, + uint8_t xcp_id) { uint64_t 
reserved_for_pt = ESTIMATE_PT_SIZE(amdgpu_amdkfd_total_mem_size); ssize_t available; + uint64_t vram_available, system_mem_available, ttm_mem_available; spin_lock(&kfd_mem_limit.mem_limit_lock); - available = adev->gmc.real_vram_size - - adev->kfd.vram_used_aligned + vram_available = KFD_XCP_MEMORY_SIZE(adev, xcp_id) + - adev->kfd.vram_used_aligned[xcp_id] - atomic64_read(&adev->vram_pin_size) - reserved_for_pt; + + if (adev->gmc.is_app_apu) { + system_mem_available = no_system_mem_limit ? + kfd_mem_limit.max_system_mem_limit : + kfd_mem_limit.max_system_mem_limit - + kfd_mem_limit.system_mem_used; + + ttm_mem_available = kfd_mem_limit.max_ttm_mem_limit - + kfd_mem_limit.ttm_mem_used; + + available = min3(system_mem_available, ttm_mem_available, + vram_available); + available = ALIGN_DOWN(available, PAGE_SIZE); + } else { + available = ALIGN_DOWN(vram_available, VRAM_AVAILABLITY_ALIGN); + } + spin_unlock(&kfd_mem_limit.mem_limit_lock); if (available < 0) available = 0; - return ALIGN_DOWN(available, VRAM_AVAILABLITY_ALIGN); + return available; } int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( @@ -1713,7 +1757,8 @@ int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu( amdgpu_sync_create(&(*mem)->sync); - ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags); + ret = amdgpu_amdkfd_reserve_mem_limit(adev, aligned_size, flags, + xcp_id); if (ret) { pr_debug("Insufficient memory\n"); goto err_reserve_limit; @@ -1781,7 +1826,7 @@ err_node_allow: /* Don't unreserve system mem limit twice */ goto err_reserve_limit; err_bo_create: - amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags); + amdgpu_amdkfd_unreserve_mem_limit(adev, aligned_size, flags, xcp_id); err_reserve_limit: mutex_destroy(&(*mem)->lock); if (gobj) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h index 68b63b970ce8..9c5912b9d8bd 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_xcp.h @@ -24,8 +24,11 @@ #ifndef AMDGPU_XCP_H #define AMDGPU_XCP_H +#include #include +#include "amdgpu_ctx.h" + #define MAX_XCP 8 #define AMDGPU_XCP_MODE_NONE -1 @@ -34,6 +37,8 @@ #define AMDGPU_XCP_FL_NONE 0 #define AMDGPU_XCP_FL_LOCKED (1 << 0) +struct amdgpu_fpriv; + enum AMDGPU_XCP_IP_BLOCK { AMDGPU_XCP_GFXHUB, AMDGPU_XCP_GFX, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index 1ae867482bc7..a9efff94390b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -1044,7 +1044,8 @@ static int kfd_ioctl_get_available_memory(struct file *filep, if (!pdd) return -EINVAL; - args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev); + args->available = amdgpu_amdkfd_get_available_memory(pdd->dev->adev, + pdd->dev->node_id); kfd_unlock_pdd(pdd); return 0; } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 45959892bc0f..c1ab70faf36e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -280,7 +280,7 @@ static void svm_range_free(struct svm_range *prange, bool update_mem_usage) if (update_mem_usage && !p->xnack_enabled) { pr_debug("unreserve prange 0x%p size: 0x%llx\n", prange, size); amdgpu_amdkfd_unreserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); } mutex_destroy(&prange->lock); mutex_destroy(&prange->migrate_mutex); @@ -313,7 +313,7 @@ svm_range *svm_range_new(struct svm_range_list *svms, uint64_t start, p = 
container_of(svms, struct kfd_process, svms); if (!p->xnack_enabled && update_mem_usage && amdgpu_amdkfd_reserve_mem_limit(NULL, size << PAGE_SHIFT, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR)) { + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0)) { pr_info("SVM mapping failed, exceeds resident system memory limit\n"); kfree(prange); return NULL; @@ -3037,10 +3037,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled) size = (pchild->last - pchild->start + 1) << PAGE_SHIFT; if (xnack_enabled) { amdgpu_amdkfd_unreserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); } else { r = amdgpu_amdkfd_reserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); if (r) goto out_unlock; reserved_size += size; @@ -3050,10 +3050,10 @@ svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled) size = (prange->last - prange->start + 1) << PAGE_SHIFT; if (xnack_enabled) { amdgpu_amdkfd_unreserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); } else { r = amdgpu_amdkfd_reserve_mem_limit(NULL, size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); if (r) goto out_unlock; reserved_size += size; @@ -3066,7 +3066,7 @@ out_unlock: if (r) amdgpu_amdkfd_unreserve_mem_limit(NULL, reserved_size, - KFD_IOC_ALLOC_MEM_FLAGS_USERPTR); + KFD_IOC_ALLOC_MEM_FLAGS_USERPTR, 0); else /* Change xnack mode must be inside svms lock, to avoid race with * svm_range_deferred_list_work unreserve memory in parallel. -- cgit From 84b4dd3f84de424a68e1fda0d483530ddaa92b45 Mon Sep 17 00:00:00 2001 From: Philip Yang Date: Fri, 31 Mar 2023 11:18:12 -0400 Subject: drm/amdkfd: Refactor migrate init to support partition switch Rename smv_migrate_init to a better name kgd2kfd_init_zone_device because it setup zone devive pgmap for page migration and keep it in kfd_migrate.c to access static functions svm_migrate_pgmap_ops. Call it only once in amdgpu_device_ip_init after adev ip blocks are initialized, but before amdgpu_amdkfd_device_init initialize kfd nodes which enable SVM support based on pgmap. svm_range_set_max_pages is called by kgd2kfd_device_init everytime after switching compute partition mode. 
Signed-off-by: Philip Yang Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 11 +++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 4 +++- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 3 ++- drivers/gpu/drm/amd/amdkfd/kfd_migrate.c | 8 +++----- drivers/gpu/drm/amd/amdkfd/kfd_migrate.h | 9 --------- drivers/gpu/drm/amd/amdkfd/kfd_svm.h | 4 ++++ 6 files changed, 23 insertions(+), 16 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 948d362adabb..48d12dbff968 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -372,6 +372,17 @@ void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo) { } #endif + +#if IS_ENABLED(CONFIG_HSA_AMD_SVM) +int kgd2kfd_init_zone_device(struct amdgpu_device *adev); +#else +static inline +int kgd2kfd_init_zone_device(struct amdgpu_device *adev) +{ + return 0; +} +#endif + /* KGD2KFD callbacks */ int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger); int kgd2kfd_resume_mm(struct mm_struct *mm); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index 02ee79b7b56d..f0666230b2ed 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -2633,8 +2633,10 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev) goto init_failed; /* Don't init kfd if whole hive need to be reset during init */ - if (!adev->gmc.xgmi.pending_reset) + if (!adev->gmc.xgmi.pending_reset) { + kgd2kfd_init_zone_device(adev); amdgpu_amdkfd_device_init(adev); + } amdgpu_fru_get_product_info(adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 882ff86bba08..bf32e547182c 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -32,6 +32,7 @@ #include "kfd_iommu.h" #include "amdgpu_amdkfd.h" #include "kfd_smi_events.h" +#include "kfd_svm.h" #include "kfd_migrate.h" #include "amdgpu.h" #include "amdgpu_xcp.h" @@ -791,7 +792,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, kfd->nodes[i] = node; } - svm_migrate_init(kfd->adev); + svm_range_set_max_pages(kfd->adev); if (kfd_resume_iommu(kfd)) goto kfd_resume_iommu_error; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 2512bf681112..35cf6558cf1b 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -988,7 +988,7 @@ static const struct dev_pagemap_ops svm_migrate_pgmap_ops = { /* Each VRAM page uses sizeof(struct page) on system memory */ #define SVM_HMM_PAGE_STRUCT_SIZE(size) ((size)/PAGE_SIZE * sizeof(struct page)) -int svm_migrate_init(struct amdgpu_device *adev) +int kgd2kfd_init_zone_device(struct amdgpu_device *adev) { struct amdgpu_kfd_dev *kfddev = &adev->kfd; struct dev_pagemap *pgmap; @@ -996,12 +996,10 @@ int svm_migrate_init(struct amdgpu_device *adev) unsigned long size; void *r; - /* Page migration works on Vega10 or newer */ - if (!KFD_IS_SOC15(kfddev->dev)) + /* Page migration works on gfx9 or newer */ + if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(9, 0, 1)) return -EINVAL; - svm_range_set_max_pages(adev); - if (adev->gmc.is_app_apu) return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h index a5d7e6d22264..487f26368164 100644 --- 
a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.h @@ -47,15 +47,6 @@ int svm_migrate_vram_to_ram(struct svm_range *prange, struct mm_struct *mm, unsigned long svm_migrate_addr_to_pfn(struct amdgpu_device *adev, unsigned long addr); -int svm_migrate_init(struct amdgpu_device *adev); - -#else - -static inline int svm_migrate_init(struct amdgpu_device *adev) -{ - return 0; -} - #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ #endif /* KFD_MIGRATE_H_ */ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h index 021def496f5a..762679835e31 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.h @@ -265,6 +265,10 @@ static inline int kfd_criu_resume_svm(struct kfd_process *p) return 0; } +static inline void svm_range_set_max_pages(struct amdgpu_device *adev) +{ +} + #define KFD_IS_SVM_API_SUPPORTED(dev) false #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ -- cgit From 9a3ce1a7a9e5372d8c275bf3fbef4456c8407145 Mon Sep 17 00:00:00 2001 From: Hawking Zhang Date: Fri, 12 May 2023 13:22:57 +0800 Subject: drm/amdgpu: Do not access members of xcp w/o check (v2) Not all the asic needs xcp. ensure check xcp availabity before accessing its member. v2: add missing change in kfd_topology.c Signed-off-by: Hawking Zhang Reviewed-by: Le Ma Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 8 ++++---- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_device.c | 2 +- drivers/gpu/drm/amd/amdkfd/kfd_topology.c | 2 +- 4 files changed, 7 insertions(+), 7 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 85df73f2c85e..739eb7c0d133 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -429,17 +429,17 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, struct kfd_local_mem_info *mem_info, - uint8_t xcp_id) + struct amdgpu_xcp *xcp) { memset(mem_info, 0, sizeof(*mem_info)); - if (adev->ip_versions[GC_HWIP][0] == IP_VERSION(9, 4, 3)) { + if (xcp) { if (adev->gmc.real_vram_size == adev->gmc.visible_vram_size) mem_info->local_mem_size_public = - KFD_XCP_MEMORY_SIZE(adev, xcp_id); + KFD_XCP_MEMORY_SIZE(adev, xcp->id); else mem_info->local_mem_size_private = - KFD_XCP_MEMORY_SIZE(adev, xcp_id); + KFD_XCP_MEMORY_SIZE(adev, xcp->id); } else { mem_info->local_mem_size_public = adev->gmc.visible_vram_size; mem_info->local_mem_size_private = adev->gmc.real_vram_size - diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 48d12dbff968..be43d71ba7ef 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -233,7 +233,7 @@ uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev, enum kgd_engine_type type); void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev, struct kfd_local_mem_info *mem_info, - uint8_t xcp_id); + struct amdgpu_xcp *xcp); uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev); uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev); diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index 2cfef3f9456f..986543a000bf 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -784,7 +784,7 @@ bool kgd2kfd_device_init(struct kfd_dev *kfd, atomic_set(&node->sram_ecc_flag, 0); amdgpu_amdkfd_get_local_mem_info(kfd->adev, - &node->local_mem_info, node->xcp->id); + &node->local_mem_info, node->xcp); /* Initialize the KFD node */ if (kfd_init_node(node)) { diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c index e0bacf017a40..8302d8967158 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_topology.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_topology.c @@ -1235,7 +1235,7 @@ static void kfd_fill_mem_clk_max_info(struct kfd_topology_device *dev) * all the banks will report the same mem_clk_max information */ amdgpu_amdkfd_get_local_mem_info(dev->gpu->adev, &local_mem_info, - dev->gpu->xcp->id); + dev->gpu->xcp); list_for_each_entry(mem, &dev->mem_props, list) mem->mem_clk_max = local_mem_info.mem_clk_max; -- cgit From 45b3a914d40e63d2c9e3a3e02fb2014be975b9b0 Mon Sep 17 00:00:00 2001 From: Alex Deucher Date: Tue, 16 May 2023 17:16:30 -0400 Subject: drm/amdgpu/gmc9: fix 64 bit division in partition code Rework logic or use do_div() to avoid problems on 32 bit. v2: add a missing case for XCP macro v3: fix out of bounds array access v4: fix xcp handling harder Acked-by: Guchun Chen (v1) Reviewed-by: Mukul Joshi (v3) Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 15 +++++++++++++++ drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 9 ++++----- drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 5 ++++- drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 11 ++++++----- drivers/gpu/drm/amd/amdkfd/kfd_svm.c | 8 ++++++-- 5 files changed, 35 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 739eb7c0d133..5de92c9ab18f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -794,3 +794,18 @@ void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev) { kgd2kfd_unlock_kfd(); } + + +u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id) +{ + u64 tmp; + s8 mem_id = KFD_XCP_MEM_ID(adev, xcp_id); + + if (adev->gmc.num_mem_partitions && xcp_id >= 0 && mem_id >= 0) { + tmp = adev->gmc.mem_partitions[mem_id].size; + do_div(tmp, adev->xcp_mgr->num_xcp_per_mem_partition); + return tmp; + } else { + return adev->gmc.real_vram_size; + } +} diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index be43d71ba7ef..94cc456761e5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -333,15 +333,14 @@ int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev, void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev, uint64_t size, u32 alloc_flag, int8_t xcp_id); +u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id); + #define KFD_XCP_MEM_ID(adev, xcp_id) \ ((adev)->xcp_mgr && (xcp_id) >= 0 ?\ (adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1) -#define KFD_XCP_MEMORY_SIZE(adev, xcp_id)\ - ((adev)->gmc.num_mem_partitions && (xcp_id) >= 0 ?\ - (adev)->gmc.mem_partitions[KFD_XCP_MEM_ID((adev), (xcp_id))].size /\ - (adev)->xcp_mgr->num_xcp_per_mem_partition :\ - (adev)->gmc.real_vram_size) +#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id)) + #if IS_ENABLED(CONFIG_HSA_AMD) void amdgpu_amdkfd_gpuvm_init_mem_limits(void); diff --git 
a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 23101c82519a..902773ce41b5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -814,11 +814,14 @@ static void amdgpu_ttm_gart_bind_gfx9_mqd(struct amdgpu_device *adev, struct amdgpu_ttm_tt *gtt = (void *)ttm; uint64_t total_pages = ttm->num_pages; int num_xcc = max(1U, adev->gfx.num_xcc_per_xcp); - uint64_t page_idx, pages_per_xcc = total_pages / num_xcc; + uint64_t page_idx, pages_per_xcc; int i; uint64_t ctrl_flags = (flags & ~AMDGPU_PTE_MTYPE_VG10_MASK) | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_NC); + pages_per_xcc = total_pages; + do_div(pages_per_xcc, num_xcc); + for (i = 0, page_idx = 0; i < num_xcc; i++, page_idx += pages_per_xcc) { /* MQD page: use default flags */ amdgpu_gart_bind(adev, diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c index 7ea80bdf8e1e..f70e666cecf2 100644 --- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c @@ -1914,9 +1914,10 @@ gmc_v9_0_init_acpi_mem_ranges(struct amdgpu_device *adev, adev->gmc.num_mem_partitions = num_ranges; /* If there is only partition, don't use entire size */ - if (adev->gmc.num_mem_partitions == 1) - mem_ranges[0].size = - (mem_ranges[0].size * (mem_groups - 1) / mem_groups); + if (adev->gmc.num_mem_partitions == 1) { + mem_ranges[0].size = mem_ranges[0].size * (mem_groups - 1); + do_div(mem_ranges[0].size, mem_groups); + } } static void @@ -1948,8 +1949,8 @@ gmc_v9_0_init_sw_mem_ranges(struct amdgpu_device *adev, break; } - size = (adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT) / - adev->gmc.num_mem_partitions; + size = adev->gmc.real_vram_size >> AMDGPU_GPU_PAGE_SHIFT; + size /= adev->gmc.num_mem_partitions; for (i = 0; i < adev->gmc.num_mem_partitions; ++i) { mem_ranges[i].range.fpfn = start_addr; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c index 206851c9e642..b0f0d31bf3e6 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_svm.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_svm.c @@ -1939,10 +1939,14 @@ void svm_range_set_max_pages(struct amdgpu_device *adev) uint64_t max_pages; uint64_t pages, _pages; uint64_t min_pages = 0; - int i; + int i, id; for (i = 0; i < adev->kfd.dev->num_nodes; i++) { - pages = KFD_XCP_MEMORY_SIZE(adev, adev->kfd.dev->nodes[i]->xcp->id) >> 17; + if (adev->kfd.dev->nodes[i]->xcp) + id = adev->kfd.dev->nodes[i]->xcp->id; + else + id = -1; + pages = KFD_XCP_MEMORY_SIZE(adev, id) >> 17; pages = clamp(pages, 1ULL << 9, 1ULL << 18); pages = rounddown_pow_of_two(pages); min_pages = min_not_zero(min_pages, pages); -- cgit From 12fb1ad70d65edc3405884792d044fa79df7244f Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Fri, 22 Apr 2022 12:26:18 -0400 Subject: drm/amdkfd: update process interrupt handling for debug events The debugger must be notified of any debugger-subscribed exception that comes from hardware interrupts. If a debugger session exits, any exceptions it subscribed to may still have interrupts in the interrupt ring buffer or KGD/KFD pipeline. To prevent a new session from inheriting stale interrupts, when a new queue is created, open an interrupt drain and allow the IH ring to drain from a timestamped checkpoint. Then inject a custom IV so that once the custom IV is picked up by the KFD, it's safe to close the drain and proceed with queue creation. The drain must also be done on debug disable, as SW interrupts may still be processed. 
Drain at this time and clear all the exception statuses. The debugger may also not be attached or subscribed to certain exceptions, so forward them directly to the runtime. GFX10 also requires its own IV processing, hence the creation of kfd_int_process_v10.c. This is because the IVs from SQ interrupts are packed into a new contiguous format unlike GFX9. To make this clear, a separate interrupt handling code file was created. Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 16 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 2 + drivers/gpu/drm/amd/amdkfd/Makefile | 1 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 84 +++++ drivers/gpu/drm/amd/amdkfd/kfd_debug.h | 6 + drivers/gpu/drm/amd/amdkfd/kfd_device.c | 4 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c | 405 +++++++++++++++++++++ drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c | 26 +- drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 98 ++++- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 12 + drivers/gpu/drm/amd/amdkfd/kfd_process.c | 47 +++ .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 4 + 12 files changed, 686 insertions(+), 19 deletions(-) create mode 100644 drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 66f80b9ab0c5..98cd52bb005f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -777,6 +777,22 @@ void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bo amdgpu_umc_poison_handler(adev, reset); } +int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, + uint32_t *payload) +{ + int ret; + + /* Device or IH ring is not ready so bail. */ + ret = amdgpu_ih_wait_on_checkpoint_process_ts(adev, &adev->irq.ih); + if (ret) + return ret; + + /* Send payload to fence KFD interrupts */ + amdgpu_amdkfd_interrupt(adev, payload); + + return 0; +} + bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev) { if (adev->gfx.ras && adev->gfx.ras->query_utcl2_poison_status) diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index 94cc456761e5..dd740e64e6e1 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -250,6 +250,8 @@ int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst, struct amdgpu_device *src, bool is_min); int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min); +int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev, + uint32_t *payload); /* Read user wptr from a specified user address space with page fault * disabled. 
The memory must be pinned and mapped to the hardware when diff --git a/drivers/gpu/drm/amd/amdkfd/Makefile b/drivers/gpu/drm/amd/amdkfd/Makefile index 747754428073..2ec8f27c5366 100644 --- a/drivers/gpu/drm/amd/amdkfd/Makefile +++ b/drivers/gpu/drm/amd/amdkfd/Makefile @@ -53,6 +53,7 @@ AMDKFD_FILES := $(AMDKFD_PATH)/kfd_module.o \ $(AMDKFD_PATH)/kfd_events.o \ $(AMDKFD_PATH)/cik_event_interrupt.o \ $(AMDKFD_PATH)/kfd_int_process_v9.o \ + $(AMDKFD_PATH)/kfd_int_process_v10.o \ $(AMDKFD_PATH)/kfd_int_process_v11.o \ $(AMDKFD_PATH)/kfd_smi_events.o \ $(AMDKFD_PATH)/kfd_crat.o \ diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 17e8e9edccbf..68b657398d41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -125,6 +125,64 @@ bool kfd_dbg_ev_raise(uint64_t event_mask, return is_subscribed; } +/* set pending event queue entry from ring entry */ +bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev, + unsigned int pasid, + uint32_t doorbell_id, + uint64_t trap_mask, + void *exception_data, + size_t exception_data_size) +{ + struct kfd_process *p; + bool signaled_to_debugger_or_runtime = false; + + p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return false; + + if (!kfd_dbg_ev_raise(trap_mask, p, dev, doorbell_id, true, + exception_data, exception_data_size)) { + struct process_queue_manager *pqm; + struct process_queue_node *pqn; + + if (!!(trap_mask & KFD_EC_MASK_QUEUE) && + p->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) { + mutex_lock(&p->mutex); + + pqm = &p->pqm; + list_for_each_entry(pqn, &pqm->queues, + process_queue_list) { + + if (!(pqn->q && pqn->q->device == dev && + pqn->q->doorbell_id == doorbell_id)) + continue; + + kfd_send_exception_to_runtime(p, pqn->q->properties.queue_id, + trap_mask); + + signaled_to_debugger_or_runtime = true; + + break; + } + + mutex_unlock(&p->mutex); + } else if (trap_mask & KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION)) { + kfd_dqm_evict_pasid(dev->dqm, p->pasid); + kfd_signal_vm_fault_event(dev, p->pasid, NULL, + exception_data); + + signaled_to_debugger_or_runtime = true; + } + } else { + signaled_to_debugger_or_runtime = true; + } + + kfd_unref_process(p); + + return signaled_to_debugger_or_runtime; +} + int kfd_dbg_send_exception_to_runtime(struct kfd_process *p, unsigned int dev_id, unsigned int queue_id, @@ -281,6 +339,31 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind kfd_dbg_set_workaround(target, false); } +static void kfd_dbg_clean_exception_status(struct kfd_process *target) +{ + struct process_queue_manager *pqm; + struct process_queue_node *pqn; + int i; + + for (i = 0; i < target->n_pdds; i++) { + struct kfd_process_device *pdd = target->pdds[i]; + + kfd_process_drain_interrupts(pdd); + + pdd->exception_status = 0; + } + + pqm = &target->pqm; + list_for_each_entry(pqn, &pqm->queues, process_queue_list) { + if (!pqn->q) + continue; + + pqn->q->properties.exception_status = 0; + } + + target->exception_status = 0; +} + int kfd_dbg_trap_disable(struct kfd_process *target) { if (!target->debug_trap_enabled) @@ -304,6 +387,7 @@ int kfd_dbg_trap_disable(struct kfd_process *target) } target->debug_trap_enabled = false; + kfd_dbg_clean_exception_status(target); kfd_unref_process(target); return 0; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h index fca928564948..5153ccbd7fd1 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.h +++ 
b/drivers/gpu/drm/amd/amdkfd/kfd_debug.h @@ -27,6 +27,12 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, bool unwind, int unwind_count); int kfd_dbg_trap_activate(struct kfd_process *target); +bool kfd_set_dbg_ev_from_interrupt(struct kfd_node *dev, + unsigned int pasid, + uint32_t doorbell_id, + uint64_t trap_mask, + void *exception_data, + size_t exception_data_size); bool kfd_dbg_ev_raise(uint64_t event_mask, struct kfd_process *process, struct kfd_node *dev, unsigned int source_id, bool use_worker, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c index f0ed6e6416c3..2c36bb578633 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c @@ -140,6 +140,8 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(9, 4, 1): /* ARCTURUS */ case IP_VERSION(9, 4, 2): /* ALDEBARAN */ case IP_VERSION(9, 4, 3): /* GC 9.4.3 */ + kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; + break; case IP_VERSION(10, 3, 1): /* VANGOGH */ case IP_VERSION(10, 3, 3): /* YELLOW_CARP */ case IP_VERSION(10, 3, 6): /* GC 10.3.6 */ @@ -153,7 +155,7 @@ static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd) case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */ case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */ case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */ - kfd->device_info.event_interrupt_class = &event_interrupt_class_v9; + kfd->device_info.event_interrupt_class = &event_interrupt_class_v10; break; case IP_VERSION(11, 0, 0): case IP_VERSION(11, 0, 1): diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c new file mode 100644 index 000000000000..c7991e07b6be --- /dev/null +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v10.c @@ -0,0 +1,405 @@ +/* + * Copyright 2023 Advanced Micro Devices, Inc. + * + * Permission is hereby granted, free of charge, to any person obtaining a + * copy of this software and associated documentation files (the "Software"), + * to deal in the Software without restriction, including without limitation + * the rights to use, copy, modify, merge, publish, distribute, sublicense, + * and/or sell copies of the Software, and to permit persons to whom the + * Software is furnished to do so, subject to the following conditions: + * + * The above copyright notice and this permission notice shall be included in + * all copies or substantial portions of the Software. + * + * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL + * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR + * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR + * OTHER DEALINGS IN THE SOFTWARE. 
+ */ + +#include "kfd_events.h" +#include "kfd_debug.h" +#include "soc15_int.h" +#include "kfd_device_queue_manager.h" + +/* + * GFX10 SQ Interrupts + * + * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit + * packet to the Interrupt Handler: + * Auto - Generated by the SQG (various cmd overflows, timestamps etc) + * Wave - Generated by S_SENDMSG through a shader program + * Error - HW generated errors (Illegal instructions, Memviols, EDC etc) + * + * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus + * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such: + * + * - context_id1[7:6] + * Encoding type (0 = Auto, 1 = Wave, 2 = Error) + * + * - context_id0[24] + * PRIV bit indicates that Wave S_SEND or error occurred within trap + * + * - context_id0[22:0] + * 23-bit data with the following layout per encoding type: + * Auto - only context_id0[8:0] is used, which reports various interrupts + * generated by SQG. The rest is 0. + * Wave - user data sent from m0 via S_SENDMSG + * Error - Error type (context_id0[22:19]), Error Details (rest of bits) + * + * The other context_id bits show coordinates (SE/SH/CU/SIMD/WGP) for wave + * S_SENDMSG and Errors. These are 0 for Auto. + */ + +enum SQ_INTERRUPT_WORD_ENCODING { + SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0, + SQ_INTERRUPT_WORD_ENCODING_INST, + SQ_INTERRUPT_WORD_ENCODING_ERROR, +}; + +enum SQ_INTERRUPT_ERROR_TYPE { + SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0, + SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST, + SQ_INTERRUPT_ERROR_TYPE_MEMVIOL, + SQ_INTERRUPT_ERROR_TYPE_EDC_FED, +}; + +/* SQ_INTERRUPT_WORD_AUTO_CTXID */ +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE__SHIFT 0 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT__SHIFT 1 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL__SHIFT 2 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL__SHIFT 3 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR__SHIFT 7 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID__SHIFT 4 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING__SHIFT 6 + +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_MASK 0x00000001 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__WLT_MASK 0x00000002 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF0_FULL_MASK 0x00000004 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_BUF1_FULL_MASK 0x00000008 +#define SQ_INTERRUPT_WORD_AUTO_CTXID0__THREAD_TRACE_UTC_ERROR_MASK 0x00000080 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__SE_ID_MASK 0x030 +#define SQ_INTERRUPT_WORD_AUTO_CTXID1__ENCODING_MASK 0x0c0 + +/* SQ_INTERRUPT_WORD_WAVE_CTXID */ +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA__SHIFT 0 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID__SHIFT 23 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV__SHIFT 24 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID__SHIFT 25 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID__SHIFT 30 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID__SHIFT 0 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID__SHIFT 4 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING__SHIFT 6 + +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__DATA_MASK 0x000007fffff +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SA_ID_MASK 0x0000800000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK 0x00001000000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__WAVE_ID_MASK 0x0003e000000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID0__SIMD_ID_MASK 0x000c0000000 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__WGP_ID_MASK 0x00f +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__SE_ID_MASK 0x030 +#define SQ_INTERRUPT_WORD_WAVE_CTXID1__ENCODING_MASK 0x0c0 + +#define 
KFD_CTXID0__ERR_TYPE_MASK 0x780000 +#define KFD_CTXID0__ERR_TYPE__SHIFT 19 + +/* GFX10 SQ interrupt ENC type bit (context_id1[7:6]) for wave s_sendmsg */ +#define KFD_CONTEXT_ID1_ENC_TYPE_WAVE_MASK 0x40 +/* GFX10 SQ interrupt PRIV bit (context_id0[24]) for s_sendmsg inside trap */ +#define KFD_CONTEXT_ID0_PRIV_MASK 0x1000000 +/* + * The debugger will send user data(m0) with PRIV=1 to indicate it requires + * notification from the KFD with the following queue id (DOORBELL_ID) and + * trap code (TRAP_CODE). + */ +#define KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK 0x0003ff +#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT 10 +#define KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK 0x07fc00 +#define KFD_DEBUG_DOORBELL_ID(ctxid0) ((ctxid0) & \ + KFD_CONTEXT_ID0_DEBUG_DOORBELL_MASK) +#define KFD_DEBUG_TRAP_CODE(ctxid0) (((ctxid0) & \ + KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_MASK) \ + >> KFD_CONTEXT_ID0_DEBUG_TRAP_CODE_SHIFT) +#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00 +#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10 +#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \ + KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \ + >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT) + +static void event_interrupt_poison_consumption(struct kfd_node *dev, + uint16_t pasid, uint16_t client_id) +{ + int old_poison, ret = -EINVAL; + struct kfd_process *p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return; + + /* all queues of a process will be unmapped in one time */ + old_poison = atomic_cmpxchg(&p->poison, 0, 1); + kfd_unref_process(p); + if (old_poison) + return; + + switch (client_id) { + case SOC15_IH_CLIENTID_SE0SH: + case SOC15_IH_CLIENTID_SE1SH: + case SOC15_IH_CLIENTID_SE2SH: + case SOC15_IH_CLIENTID_SE3SH: + case SOC15_IH_CLIENTID_UTCL2: + ret = kfd_dqm_evict_pasid(dev->dqm, pasid); + break; + case SOC15_IH_CLIENTID_SDMA0: + case SOC15_IH_CLIENTID_SDMA1: + case SOC15_IH_CLIENTID_SDMA2: + case SOC15_IH_CLIENTID_SDMA3: + case SOC15_IH_CLIENTID_SDMA4: + break; + default: + break; + } + + kfd_signal_poison_consumed_event(dev, pasid); + + /* resetting queue passes, do page retirement without gpu reset + * resetting queue fails, fallback to gpu reset solution + */ + if (!ret) { + dev_warn(dev->adev->dev, + "RAS poison consumption, unmap queue flow succeeded: client id %d\n", + client_id); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, false); + } else { + dev_warn(dev->adev->dev, + "RAS poison consumption, fall back to gpu reset flow: client id %d\n", + client_id); + amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, true); + } +} + +static bool event_interrupt_isr_v10(struct kfd_node *dev, + const uint32_t *ih_ring_entry, + uint32_t *patched_ihre, + bool *patched_flag) +{ + uint16_t source_id, client_id, pasid, vmid; + const uint32_t *data = ih_ring_entry; + + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + + /* Only handle interrupts from KFD VMIDs */ + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + if (!KFD_IRQ_IS_FENCE(client_id, source_id) && + (vmid < dev->vm_info.first_vmid_kfd || + vmid > dev->vm_info.last_vmid_kfd)) + return false; + + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + + /* Only handle clients we care about */ + if (client_id != SOC15_IH_CLIENTID_GRBM_CP && + client_id != SOC15_IH_CLIENTID_SDMA0 && + client_id != SOC15_IH_CLIENTID_SDMA1 && + client_id != SOC15_IH_CLIENTID_SDMA2 && + client_id != SOC15_IH_CLIENTID_SDMA3 && + client_id != SOC15_IH_CLIENTID_SDMA4 && + client_id != SOC15_IH_CLIENTID_SDMA5 && + client_id != 
SOC15_IH_CLIENTID_SDMA6 && + client_id != SOC15_IH_CLIENTID_SDMA7 && + client_id != SOC15_IH_CLIENTID_VMC && + client_id != SOC15_IH_CLIENTID_VMC1 && + client_id != SOC15_IH_CLIENTID_UTCL2 && + client_id != SOC15_IH_CLIENTID_SE0SH && + client_id != SOC15_IH_CLIENTID_SE1SH && + client_id != SOC15_IH_CLIENTID_SE2SH && + client_id != SOC15_IH_CLIENTID_SE3SH) + return false; + + pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n", + client_id, source_id, vmid, pasid); + pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n", + data[0], data[1], data[2], data[3], + data[4], data[5], data[6], data[7]); + + /* If there is no valid PASID, it's likely a bug */ + if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt")) + return 0; + + /* Interrupt types we care about: various signals and faults. + * They will be forwarded to a work queue (see below). + */ + return source_id == SOC15_INTSRC_CP_END_OF_PIPE || + source_id == SOC15_INTSRC_SDMA_TRAP || + source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || + source_id == SOC15_INTSRC_CP_BAD_OPCODE || + client_id == SOC15_IH_CLIENTID_VMC || + client_id == SOC15_IH_CLIENTID_VMC1 || + client_id == SOC15_IH_CLIENTID_UTCL2 || + KFD_IRQ_IS_FENCE(client_id, source_id); +} + +static void event_interrupt_wq_v10(struct kfd_node *dev, + const uint32_t *ih_ring_entry) +{ + uint16_t source_id, client_id, pasid, vmid; + uint32_t context_id0, context_id1; + uint32_t encoding, sq_intr_err_type; + + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); + vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); + context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry); + context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry); + + if (client_id == SOC15_IH_CLIENTID_GRBM_CP || + client_id == SOC15_IH_CLIENTID_SE0SH || + client_id == SOC15_IH_CLIENTID_SE1SH || + client_id == SOC15_IH_CLIENTID_SE2SH || + client_id == SOC15_IH_CLIENTID_SE3SH) { + if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) + kfd_signal_event_interrupt(pasid, context_id0, 32); + else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) { + encoding = REG_GET_FIELD(context_id1, + SQ_INTERRUPT_WORD_WAVE_CTXID1, ENCODING); + switch (encoding) { + case SQ_INTERRUPT_WORD_ENCODING_AUTO: + pr_debug( + "sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf0_full %d, ttrac_buf1_full %d, ttrace_utc_err %d\n", + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_AUTO_CTXID1, + SE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + WLT), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_BUF0_FULL), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_BUF1_FULL), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID0, + THREAD_TRACE_UTC_ERROR)); + break; + case SQ_INTERRUPT_WORD_ENCODING_INST: + pr_debug("sq_intr: inst, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d\n", + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + SE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + DATA), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SA_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + PRIV), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + WAVE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SIMD_ID), + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, 
+ WGP_ID)); + if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID0__PRIV_MASK) { + if (kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(context_id0), + KFD_DEBUG_TRAP_CODE(context_id0), + NULL, 0)) + return; + } + break; + case SQ_INTERRUPT_WORD_ENCODING_ERROR: + sq_intr_err_type = REG_GET_FIELD(context_id0, KFD_CTXID0, + ERR_TYPE); + pr_warn("sq_intr: error, se %d, data 0x%x, sa %d, priv %d, wave_id %d, simd_id %d, wgp_id %d, err_type %d\n", + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + SE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + DATA), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SA_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + PRIV), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + WAVE_ID), + REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID0, + SIMD_ID), + REG_GET_FIELD(context_id1, SQ_INTERRUPT_WORD_WAVE_CTXID1, + WGP_ID), + sq_intr_err_type); + if (sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST && + sq_intr_err_type != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) { + event_interrupt_poison_consumption(dev, pasid, source_id); + return; + } + break; + default: + break; + } + kfd_signal_event_interrupt(pasid, context_id0 & 0x7fffff, 23); + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(context_id0), + KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), + NULL, + 0); + } + } else if (client_id == SOC15_IH_CLIENTID_SDMA0 || + client_id == SOC15_IH_CLIENTID_SDMA1 || + client_id == SOC15_IH_CLIENTID_SDMA2 || + client_id == SOC15_IH_CLIENTID_SDMA3 || + (client_id == SOC15_IH_CLIENTID_SDMA3_Sienna_Cichlid && + KFD_GC_VERSION(dev) == IP_VERSION(10, 3, 0)) || + client_id == SOC15_IH_CLIENTID_SDMA4 || + client_id == SOC15_IH_CLIENTID_SDMA5 || + client_id == SOC15_IH_CLIENTID_SDMA6 || + client_id == SOC15_IH_CLIENTID_SDMA7) { + if (source_id == SOC15_INTSRC_SDMA_TRAP) { + kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28); + } else if (source_id == SOC15_INTSRC_SDMA_ECC) { + event_interrupt_poison_consumption(dev, pasid, source_id); + return; + } + } else if (client_id == SOC15_IH_CLIENTID_VMC || + client_id == SOC15_IH_CLIENTID_VMC1 || + client_id == SOC15_IH_CLIENTID_UTCL2) { + struct kfd_vm_fault_info info = {0}; + uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + struct kfd_hsa_memory_exception_data exception_data; + + if (client_id == SOC15_IH_CLIENTID_UTCL2 && + amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { + event_interrupt_poison_consumption(dev, pasid, client_id); + return; + } + + info.vmid = vmid; + info.mc_id = client_id; + info.page_addr = ih_ring_entry[4] | + (uint64_t)(ih_ring_entry[5] & 0xf) << 32; + info.prot_valid = ring_id & 0x08; + info.prot_read = ring_id & 0x10; + info.prot_write = ring_id & 0x20; + + memset(&exception_data, 0, sizeof(exception_data)); + exception_data.gpu_id = dev->id; + exception_data.va = (info.page_addr) << PAGE_SHIFT; + exception_data.failure.NotPresent = info.prot_valid ? 1 : 0; + exception_data.failure.NoExecute = info.prot_exec ? 1 : 0; + exception_data.failure.ReadOnly = info.prot_write ? 
1 : 0; + exception_data.failure.imprecise = 0; + + kfd_set_dbg_ev_from_interrupt(dev, + pasid, + -1, + KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION), + &exception_data, + sizeof(exception_data)); + } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { + kfd_process_close_interrupt_drain(pasid); + } +} + +const struct kfd_event_interrupt_class event_interrupt_class_v10 = { + .interrupt_isr = event_interrupt_isr_v10, + .interrupt_wq = event_interrupt_wq_v10, +}; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c index c2166bf964ef..f933bd231fb9 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v11.c @@ -26,6 +26,7 @@ #include "kfd_device_queue_manager.h" #include "ivsrcid/vmc/irqsrcs_vmc_1_0.h" #include "kfd_smi_events.h" +#include "kfd_debug.h" /* * GFX11 SQ Interrupts @@ -238,7 +239,7 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev, client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); /* Only handle interrupts from KFD VMIDs */ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); - if (/*!KFD_IRQ_IS_FENCE(client_id, source_id) &&*/ + if (!KFD_IRQ_IS_FENCE(client_id, source_id) && (vmid < dev->vm_info.first_vmid_kfd || vmid > dev->vm_info.last_vmid_kfd)) return false; @@ -267,7 +268,7 @@ static bool event_interrupt_isr_v11(struct kfd_node *dev, source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || source_id == SOC15_INTSRC_CP_BAD_OPCODE || source_id == SOC21_INTSRC_SDMA_TRAP || - /* KFD_IRQ_IS_FENCE(client_id, source_id) || */ + KFD_IRQ_IS_FENCE(client_id, source_id) || (((client_id == SOC21_IH_CLIENTID_VMC) || ((client_id == SOC21_IH_CLIENTID_GFX) && (source_id == UTCL2_1_0__SRCID__FAULT))) && @@ -279,7 +280,7 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, { uint16_t source_id, client_id, ring_id, pasid, vmid; uint32_t context_id0, context_id1; - uint8_t sq_int_enc, sq_int_errtype; + uint8_t sq_int_enc, sq_int_priv, sq_int_errtype; struct kfd_vm_fault_info info = {0}; struct kfd_hsa_memory_exception_data exception_data; @@ -312,9 +313,9 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, exception_data.failure.ReadOnly = info.prot_write ? 
1 : 0; exception_data.failure.imprecise = 0; - /*kfd_set_dbg_ev_from_interrupt(dev, pasid, -1, + kfd_set_dbg_ev_from_interrupt(dev, pasid, -1, KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION), - &exception_data, sizeof(exception_data));*/ + &exception_data, sizeof(exception_data)); kfd_smi_event_update_vmfault(dev, pasid); /* GRBM, SDMA, SE, PMM */ @@ -324,11 +325,11 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, /* CP */ if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) kfd_signal_event_interrupt(pasid, context_id0, 32); - /*else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) + else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) kfd_set_dbg_ev_from_interrupt(dev, pasid, KFD_CTXID0_DOORBELL_ID(context_id0), KFD_EC_MASK(KFD_CTXID0_CP_BAD_OP_ECODE(context_id0)), - NULL, 0);*/ + NULL, 0); /* SDMA */ else if (source_id == SOC21_INTSRC_SDMA_TRAP) @@ -348,6 +349,13 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, break; case SQ_INTERRUPT_WORD_ENCODING_INST: print_sq_intr_info_inst(context_id0, context_id1); + sq_int_priv = REG_GET_FIELD(context_id0, + SQ_INTERRUPT_WORD_WAVE_CTXID0, PRIV); + if (sq_int_priv && (kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_CTXID0_DOORBELL_ID(context_id0), + KFD_CTXID0_TRAP_CODE(context_id0), + NULL, 0))) + return; break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: print_sq_intr_info_error(context_id0, context_id1); @@ -366,8 +374,8 @@ static void event_interrupt_wq_v11(struct kfd_node *dev, kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24); } - /*} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { - kfd_process_close_interrupt_drain(pasid);*/ + } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { + kfd_process_close_interrupt_drain(pasid); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c index 8cf58be80f4e..d5c9f30552e3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c @@ -23,10 +23,40 @@ #include "kfd_priv.h" #include "kfd_events.h" +#include "kfd_debug.h" #include "soc15_int.h" #include "kfd_device_queue_manager.h" #include "kfd_smi_events.h" +/* + * GFX9 SQ Interrupts + * + * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit + * packet to the Interrupt Handler: + * Auto - Generated by the SQG (various cmd overflows, timestamps etc) + * Wave - Generated by S_SENDMSG through a shader program + * Error - HW generated errors (Illegal instructions, Memviols, EDC etc) + * + * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus + * 4-bits for VMID (SOC15_VMID_FROM_IH_ENTRY) as such: + * + * - context_id0[27:26] + * Encoding type (0 = Auto, 1 = Wave, 2 = Error) + * + * - context_id0[13] + * PRIV bit indicates that Wave S_SEND or error occurred within trap + * + * - {context_id1[7:0],context_id0[31:28],context_id0[11:0]} + * 24-bit data with the following layout per encoding type: + * Auto - only context_id0[8:0] is used, which reports various interrupts + * generated by SQG. The rest is 0. + * Wave - user data sent from m0 via S_SENDMSG + * Error - Error type (context_id1[7:4]), Error Details (rest of bits) + * + * The other context_id bits show coordinates (SE/SH/CU/SIMD/WAVE) for wave + * S_SENDMSG and Errors. These are 0 for Auto. 
+ */ + enum SQ_INTERRUPT_WORD_ENCODING { SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0, SQ_INTERRUPT_WORD_ENCODING_INST, @@ -84,12 +114,32 @@ enum SQ_INTERRUPT_ERROR_TYPE { #define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000 #define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000 +/* GFX9 SQ interrupt 24-bit data from context_id<0,1> */ #define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \ ((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000)) #define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000 #define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20 +/* + * The debugger will send user data(m0) with PRIV=1 to indicate it requires + * notification from the KFD with the following queue id (DOORBELL_ID) and + * trap code (TRAP_CODE). + */ +#define KFD_INT_DATA_DEBUG_DOORBELL_MASK 0x0003ff +#define KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT 10 +#define KFD_INT_DATA_DEBUG_TRAP_CODE_MASK 0x07fc00 +#define KFD_DEBUG_DOORBELL_ID(sq_int_data) ((sq_int_data) & \ + KFD_INT_DATA_DEBUG_DOORBELL_MASK) +#define KFD_DEBUG_TRAP_CODE(sq_int_data) (((sq_int_data) & \ + KFD_INT_DATA_DEBUG_TRAP_CODE_MASK) \ + >> KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT) +#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00 +#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10 +#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \ + KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \ + >> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT) + static void event_interrupt_poison_consumption_v9(struct kfd_node *dev, uint16_t pasid, uint16_t client_id) { @@ -168,14 +218,16 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, uint16_t source_id, client_id, pasid, vmid; const uint32_t *data = ih_ring_entry; + source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); + client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); + /* Only handle interrupts from KFD VMIDs */ vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry); - if (vmid < dev->vm_info.first_vmid_kfd || - vmid > dev->vm_info.last_vmid_kfd) + if (!KFD_IRQ_IS_FENCE(client_id, source_id) && + (vmid < dev->vm_info.first_vmid_kfd || + vmid > dev->vm_info.last_vmid_kfd)) return false; - source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry); - client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry); pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry); /* Only handle clients we care about */ @@ -194,7 +246,8 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, client_id != SOC15_IH_CLIENTID_SE0SH && client_id != SOC15_IH_CLIENTID_SE1SH && client_id != SOC15_IH_CLIENTID_SE2SH && - client_id != SOC15_IH_CLIENTID_SE3SH) + client_id != SOC15_IH_CLIENTID_SE3SH && + !KFD_IRQ_IS_FENCE(client_id, source_id)) return false; /* This is a known issue for gfx9. 
Under non HWS, pasid is not set @@ -247,6 +300,7 @@ static bool event_interrupt_isr_v9(struct kfd_node *dev, source_id == SOC15_INTSRC_SDMA_ECC || source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG || source_id == SOC15_INTSRC_CP_BAD_OPCODE || + KFD_IRQ_IS_FENCE(client_id, source_id) || ((client_id == SOC15_IH_CLIENTID_VMC || client_id == SOC15_IH_CLIENTID_VMC1 || client_id == SOC15_IH_CLIENTID_UTCL2) && @@ -302,6 +356,13 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID), REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID), sq_int_data); + if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) { + if (kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(sq_int_data), + KFD_DEBUG_TRAP_CODE(sq_int_data), + NULL, 0)) + return; + } break; case SQ_INTERRUPT_WORD_ENCODING_ERROR: sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE); @@ -324,8 +385,12 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, break; } kfd_signal_event_interrupt(pasid, context_id0 & 0xffffff, 24); - } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) - kfd_signal_hw_exception_event(pasid); + } else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE) { + kfd_set_dbg_ev_from_interrupt(dev, pasid, + KFD_DEBUG_DOORBELL_ID(context_id0), + KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)), + NULL, 0); + } } else if (client_id == SOC15_IH_CLIENTID_SDMA0 || client_id == SOC15_IH_CLIENTID_SDMA1 || client_id == SOC15_IH_CLIENTID_SDMA2 || @@ -345,6 +410,7 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, client_id == SOC15_IH_CLIENTID_UTCL2) { struct kfd_vm_fault_info info = {0}; uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry); + struct kfd_hsa_memory_exception_data exception_data; if (client_id == SOC15_IH_CLIENTID_UTCL2 && amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) { @@ -360,9 +426,23 @@ static void event_interrupt_wq_v9(struct kfd_node *dev, info.prot_read = ring_id & 0x10; info.prot_write = ring_id & 0x20; + memset(&exception_data, 0, sizeof(exception_data)); + exception_data.gpu_id = dev->id; + exception_data.va = (info.page_addr) << PAGE_SHIFT; + exception_data.failure.NotPresent = info.prot_valid ? 1 : 0; + exception_data.failure.NoExecute = info.prot_exec ? 1 : 0; + exception_data.failure.ReadOnly = info.prot_write ? 
1 : 0; + exception_data.failure.imprecise = 0; + + kfd_set_dbg_ev_from_interrupt(dev, + pasid, + -1, + KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION), + &exception_data, + sizeof(exception_data)); kfd_smi_event_update_vmfault(dev, pasid); - kfd_dqm_evict_pasid(dev->dqm, pasid); - kfd_signal_vm_fault_event(dev, pasid, &info, NULL); + } else if (KFD_IRQ_IS_FENCE(client_id, source_id)) { + kfd_process_close_interrupt_drain(pasid); } } diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h index a02fb939614a..cd2d56e5cdf0 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h +++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h @@ -963,6 +963,10 @@ struct kfd_process { uint64_t exception_enable_mask; uint64_t exception_status; + /* Used to drain stale interrupts */ + wait_queue_head_t wait_irq_drain; + bool irq_drain_is_open; + /* shared virtual memory registered by this process */ struct svm_range_list svms; @@ -1144,12 +1148,19 @@ int kfd_numa_node_to_apic_id(int numa_node_id); void kfd_double_confirm_iommu_support(struct kfd_dev *gpu); /* Interrupts */ +#define KFD_IRQ_FENCE_CLIENTID 0xff +#define KFD_IRQ_FENCE_SOURCEID 0xff +#define KFD_IRQ_IS_FENCE(client, source) \ + ((client) == KFD_IRQ_FENCE_CLIENTID && \ + (source) == KFD_IRQ_FENCE_SOURCEID) int kfd_interrupt_init(struct kfd_node *dev); void kfd_interrupt_exit(struct kfd_node *dev); bool enqueue_ih_ring_entry(struct kfd_node *kfd, const void *ih_ring_entry); bool interrupt_is_wanted(struct kfd_node *dev, const uint32_t *ih_ring_entry, uint32_t *patched_ihre, bool *flag); +int kfd_process_drain_interrupts(struct kfd_process_device *pdd); +void kfd_process_close_interrupt_drain(unsigned int pasid); /* amdkfd Apertures */ int kfd_init_apertures(struct kfd_process *process); @@ -1421,6 +1432,7 @@ uint64_t kfd_get_number_elems(struct kfd_dev *kfd); /* Events */ extern const struct kfd_event_interrupt_class event_interrupt_class_cik; extern const struct kfd_event_interrupt_class event_interrupt_class_v9; +extern const struct kfd_event_interrupt_class event_interrupt_class_v10; extern const struct kfd_event_interrupt_class event_interrupt_class_v11; extern const struct kfd_device_global_init_class device_global_init_class_cik; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c index 3b7f219c9d06..3d3611705d41 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c @@ -862,6 +862,8 @@ struct kfd_process *kfd_create_process(struct task_struct *thread) kfd_procfs_add_sysfs_stats(process); kfd_procfs_add_sysfs_files(process); kfd_procfs_add_sysfs_counters(process); + + init_waitqueue_head(&process->wait_irq_drain); } out: if (!IS_ERR(process)) @@ -2136,6 +2138,51 @@ void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type) } } +/* assumes caller holds process lock. */ +int kfd_process_drain_interrupts(struct kfd_process_device *pdd) +{ + uint32_t irq_drain_fence[8]; + int r = 0; + + if (!KFD_IS_SOC15(pdd->dev)) + return 0; + + pdd->process->irq_drain_is_open = true; + + memset(irq_drain_fence, 0, sizeof(irq_drain_fence)); + irq_drain_fence[0] = (KFD_IRQ_FENCE_SOURCEID << 8) | + KFD_IRQ_FENCE_CLIENTID; + irq_drain_fence[3] = pdd->process->pasid; + + /* ensure stale irqs scheduled KFD interrupts and send drain fence. 
*/ + if (amdgpu_amdkfd_send_close_event_drain_irq(pdd->dev->adev, + irq_drain_fence)) { + pdd->process->irq_drain_is_open = false; + return 0; + } + + r = wait_event_interruptible(pdd->process->wait_irq_drain, + !READ_ONCE(pdd->process->irq_drain_is_open)); + if (r) + pdd->process->irq_drain_is_open = false; + + return r; +} + +void kfd_process_close_interrupt_drain(unsigned int pasid) +{ + struct kfd_process *p; + + p = kfd_lookup_process_by_pasid(pasid); + + if (!p) + return; + + WRITE_ONCE(p->irq_drain_is_open, false); + wake_up_all(&p->wait_irq_drain); + kfd_unref_process(p); +} + struct send_exception_work_handler_workarea { struct work_struct work; struct kfd_process *p; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c index 43d432b5c5bc..70852a200d8f 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c @@ -337,6 +337,10 @@ int pqm_create_queue(struct process_queue_manager *pqm, kq->queue->properties.queue_id = *qid; pqn->kq = kq; pqn->q = NULL; + retval = kfd_process_drain_interrupts(pdd); + if (retval) + break; + retval = dev->dqm->ops.create_kernel_queue(dev->dqm, kq, &pdd->qpd); break; -- cgit From a70a93fa568b4f05aba548dadb673703eccf5480 Mon Sep 17 00:00:00 2001 From: Jonathan Kim Date: Thu, 5 May 2022 16:15:37 -0400 Subject: drm/amdkfd: add debug suspend and resume process queues operation In order to inspect waves from the saved context at any point during a debug session, the debugger must be able to preempt queues to trigger context save by suspending them. On queue suspend, the KFD will copy the context save header information so that the debugger can correctly crawl the appropriate size of the saved context. The debugger must then also be allowed to resume suspended queues. A queue that is newly created cannot be suspended because queue ids are recycled after destruction so the debugger needs to know that this has occurred. Query functions will be later added that will clear a given queue of its new queue status. A queue cannot be destroyed while it is suspended to preserve its saved context during debugger inspection. Have queue destruction block while a queue is suspended and unblocked when it is resumed. Likewise, if a queue is about to be destroyed, it cannot be suspended. Return the number of queues successfully suspended or resumed along with a per queue status array where the upper bits per queue status show that the request was invalid (new/destroyed queue suspend request, missing queue) or an error occurred (HWS in a fatal state so it can't suspend or resume queues). 
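For illustration only (this sketch is not part of the patch set): a userspace debugger consuming the KFD_IOC_DBG_TRAP_SUSPEND_QUEUES ioctl could decode the returned per-queue status array as shown below. The mask names mirror the KFD_DBG_QUEUE_INVALID_MASK and KFD_DBG_QUEUE_ERROR_MASK used by suspend_queues()/resume_queues() in this patch; the concrete bit positions are an assumption of the sketch, not taken from the diff.

#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions; the patch only shows the symbolic mask names. */
#define KFD_DBG_QUEUE_ERROR_MASK	(1u << 30)
#define KFD_DBG_QUEUE_INVALID_MASK	(1u << 31)

/* Decode the status array written back by a suspend request. 'suspended'
 * is the ioctl return value: the count of queues actually suspended.
 */
static void report_queue_status(const uint32_t *queue_array,
				uint32_t num_queues, int suspended)
{
	printf("%d queue(s) suspended\n", suspended);
	for (uint32_t i = 0; i < num_queues; i++) {
		/* The lower bits still carry the queue id the caller passed. */
		uint32_t qid = queue_array[i] &
			~(KFD_DBG_QUEUE_ERROR_MASK | KFD_DBG_QUEUE_INVALID_MASK);

		if (queue_array[i] & KFD_DBG_QUEUE_ERROR_MASK)
			printf("queue %u: HWS error, request failed\n", qid);
		else if (queue_array[i] & KFD_DBG_QUEUE_INVALID_MASK)
			printf("queue %u: invalid request (new, destroyed or missing queue)\n", qid);
		else
			printf("queue %u: request succeeded\n", qid);
	}
}

The same decoding applies to resume requests, since resume_queues() marks entries with the identical invalid/error masks.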
Signed-off-by: Jonathan Kim Reviewed-by: Felix Kuehling Signed-off-by: Alex Deucher --- drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 5 + drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 1 + drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 11 + drivers/gpu/drm/amd/amdkfd/kfd_debug.c | 7 + .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 447 ++++++++++++++++++++- .../gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 10 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c | 10 + drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c | 15 +- drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 14 +- drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 5 +- .../gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 1 + 11 files changed, 512 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h') diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c index 98cd52bb005f..b4fcad0e62f7 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c @@ -772,6 +772,11 @@ bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev) return adev->have_atomics_support; } +void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev) +{ + amdgpu_device_flush_hdp(adev, NULL); +} + void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, bool reset) { amdgpu_umc_poison_handler(adev, reset); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h index dd740e64e6e1..2d0406bff84e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h @@ -322,6 +322,7 @@ int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev, uint64_t *mmap_offset); int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem, struct dma_buf **dmabuf); +void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev); int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev, struct tile_config *config); void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev, diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c index a6570b124b2b..1fae97df7a1e 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c @@ -410,6 +410,7 @@ static int kfd_ioctl_create_queue(struct file *filep, struct kfd_process *p, pr_debug("Write ptr address == 0x%016llX\n", args->write_pointer_address); + kfd_dbg_ev_raise(KFD_EC_MASK(EC_QUEUE_NEW), p, dev, queue_id, false, NULL, 0); return 0; err_create_queue: @@ -2996,7 +2997,17 @@ static int kfd_ioctl_set_debug_trap(struct file *filep, struct kfd_process *p, v args->launch_mode.launch_mode); break; case KFD_IOC_DBG_TRAP_SUSPEND_QUEUES: + r = suspend_queues(target, + args->suspend_queues.num_queues, + args->suspend_queues.grace_period, + args->suspend_queues.exception_mask, + (uint32_t *)args->suspend_queues.queue_array_ptr); + + break; case KFD_IOC_DBG_TRAP_RESUME_QUEUES: + r = resume_queues(target, args->resume_queues.num_queues, + (uint32_t *)args->resume_queues.queue_array_ptr); + break; case KFD_IOC_DBG_TRAP_SET_NODE_ADDRESS_WATCH: case KFD_IOC_DBG_TRAP_CLEAR_NODE_ADDRESS_WATCH: case KFD_IOC_DBG_TRAP_SET_FLAGS: diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c index 53c3418562d4..f4d3dfb35cb3 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_debug.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_debug.c @@ -339,6 +339,13 @@ void kfd_dbg_trap_deactivate(struct kfd_process *target, 
bool unwind, int unwind } kfd_dbg_set_workaround(target, false); + + if (!unwind) { + int resume_count = resume_queues(target, 0, NULL); + + if (resume_count) + pr_debug("Resumed %d queues\n", resume_count); + } } static void kfd_dbg_clean_exception_status(struct kfd_process *target) diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c index 44d87943e40a..bc9e81293165 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c @@ -952,6 +952,92 @@ out_unlock: return retval; } +/* suspend_single_queue does not lock the dqm like the + * evict_process_queues_cpsch or evict_process_queues_nocpsch. You should + * lock the dqm before calling, and unlock after calling. + * + * The reason we don't lock the dqm is because this function may be + * called on multiple queues in a loop, so rather than locking/unlocking + * multiple times, we will just keep the dqm locked for all of the calls. + */ +static int suspend_single_queue(struct device_queue_manager *dqm, + struct kfd_process_device *pdd, + struct queue *q) +{ + bool is_new; + + if (q->properties.is_suspended) + return 0; + + pr_debug("Suspending PASID %u queue [%i]\n", + pdd->process->pasid, + q->properties.queue_id); + + is_new = q->properties.exception_status & KFD_EC_MASK(EC_QUEUE_NEW); + + if (is_new || q->properties.is_being_destroyed) { + pr_debug("Suspend: skip %s queue id %i\n", + is_new ? "new" : "destroyed", + q->properties.queue_id); + return -EBUSY; + } + + q->properties.is_suspended = true; + if (q->properties.is_active) { + if (dqm->dev->kfd->shared_resources.enable_mes) { + int r = remove_queue_mes(dqm, q, &pdd->qpd); + + if (r) + return r; + } + + decrement_queue_count(dqm, &pdd->qpd, q); + q->properties.is_active = false; + } + + return 0; +} + +/* resume_single_queue does not lock the dqm like the functions + * restore_process_queues_cpsch or restore_process_queues_nocpsch. You should + * lock the dqm before calling, and unlock after calling. + * + * The reason we don't lock the dqm is because this function may be + * called on multiple queues in a loop, so rather than locking/unlocking + * multiple times, we will just keep the dqm locked for all of the calls. 
+ */ +static int resume_single_queue(struct device_queue_manager *dqm, + struct qcm_process_device *qpd, + struct queue *q) +{ + struct kfd_process_device *pdd; + + if (!q->properties.is_suspended) + return 0; + + pdd = qpd_to_pdd(qpd); + + pr_debug("Restoring from suspend PASID %u queue [%i]\n", + pdd->process->pasid, + q->properties.queue_id); + + q->properties.is_suspended = false; + + if (QUEUE_IS_ACTIVE(q->properties)) { + if (dqm->dev->kfd->shared_resources.enable_mes) { + int r = add_queue_mes(dqm, q, &pdd->qpd); + + if (r) + return r; + } + + q->properties.is_active = true; + increment_queue_count(dqm, qpd, q); + } + + return 0; +} + static int evict_process_queues_nocpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd) { @@ -1926,6 +2012,31 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm, return map_queues_cpsch(dqm); } +static int wait_on_destroy_queue(struct device_queue_manager *dqm, + struct queue *q) +{ + struct kfd_process_device *pdd = kfd_get_process_device_data(q->device, + q->process); + int ret = 0; + + if (pdd->qpd.is_debug) + return ret; + + q->properties.is_being_destroyed = true; + + if (pdd->process->debug_trap_enabled && q->properties.is_suspended) { + dqm_unlock(dqm); + mutex_unlock(&q->process->mutex); + ret = wait_event_interruptible(dqm->destroy_wait, + !q->properties.is_suspended); + + mutex_lock(&q->process->mutex); + dqm_lock(dqm); + } + + return ret; +} + static int destroy_queue_cpsch(struct device_queue_manager *dqm, struct qcm_process_device *qpd, struct queue *q) @@ -1945,11 +2056,16 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, q->properties.queue_id); } - retval = 0; - /* remove queue from list to prevent rescheduling after preemption */ dqm_lock(dqm); + retval = wait_on_destroy_queue(dqm, q); + + if (retval) { + dqm_unlock(dqm); + return retval; + } + if (qpd->is_debug) { /* * error, currently we do not allow to destroy a queue @@ -1996,7 +2112,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm, dqm_unlock(dqm); - /* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */ + /* + * Do free_mqd and raise delete event after dqm_unlock(dqm) to avoid + * circular locking + */ + kfd_dbg_ev_raise(KFD_EC_MASK(EC_DEVICE_QUEUE_DELETE), + qpd->pqm->process, q->device, + -1, false, NULL, 0); + mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj); return retval; @@ -2461,8 +2584,10 @@ struct device_queue_manager *device_queue_manager_init(struct kfd_node *dev) goto out_free; } - if (!dqm->ops.initialize(dqm)) + if (!dqm->ops.initialize(dqm)) { + init_waitqueue_head(&dqm->destroy_wait); return dqm; + } out_free: kfree(dqm); @@ -2602,6 +2727,320 @@ out_unlock: return r; } +#define QUEUE_NOT_FOUND -1 +/* invalidate queue operation in array */ +static void q_array_invalidate(uint32_t num_queues, uint32_t *queue_ids) +{ + int i; + + for (i = 0; i < num_queues; i++) + queue_ids[i] |= KFD_DBG_QUEUE_INVALID_MASK; +} + +/* find queue index in array */ +static int q_array_get_index(unsigned int queue_id, + uint32_t num_queues, + uint32_t *queue_ids) +{ + int i; + + for (i = 0; i < num_queues; i++) + if (queue_id == (queue_ids[i] & ~KFD_DBG_QUEUE_INVALID_MASK)) + return i; + + return QUEUE_NOT_FOUND; +} + +struct copy_context_work_handler_workarea { + struct work_struct copy_context_work; + struct kfd_process *p; +}; + +static void copy_context_work_handler (struct work_struct *work) +{ + struct copy_context_work_handler_workarea *workarea; + struct mqd_manager *mqd_mgr; + struct queue *q; 
+ struct mm_struct *mm; + struct kfd_process *p; + uint32_t tmp_ctl_stack_used_size, tmp_save_area_used_size; + int i; + + workarea = container_of(work, + struct copy_context_work_handler_workarea, + copy_context_work); + + p = workarea->p; + mm = get_task_mm(p->lead_thread); + + if (!mm) + return; + + kthread_use_mm(mm); + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + + list_for_each_entry(q, &qpd->queues_list, list) { + mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP]; + + /* We ignore the return value from get_wave_state + * because + * i) right now, it always returns 0, and + * ii) if we hit an error, we would continue to the + * next queue anyway. + */ + mqd_mgr->get_wave_state(mqd_mgr, + q->mqd, + &q->properties, + (void __user *) q->properties.ctx_save_restore_area_address, + &tmp_ctl_stack_used_size, + &tmp_save_area_used_size); + } + } + kthread_unuse_mm(mm); + mmput(mm); +} + +static uint32_t *get_queue_ids(uint32_t num_queues, uint32_t *usr_queue_id_array) +{ + size_t array_size = num_queues * sizeof(uint32_t); + uint32_t *queue_ids = NULL; + + if (!usr_queue_id_array) + return NULL; + + queue_ids = kzalloc(array_size, GFP_KERNEL); + if (!queue_ids) + return ERR_PTR(-ENOMEM); + + if (copy_from_user(queue_ids, usr_queue_id_array, array_size)) + return ERR_PTR(-EFAULT); + + return queue_ids; +} + +int resume_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t *usr_queue_id_array) +{ + uint32_t *queue_ids = NULL; + int total_resumed = 0; + int i; + + if (usr_queue_id_array) { + queue_ids = get_queue_ids(num_queues, usr_queue_id_array); + + if (IS_ERR(queue_ids)) + return PTR_ERR(queue_ids); + + /* mask all queues as invalid. 
unmask per successful request */ + q_array_invalidate(num_queues, queue_ids); + } + + for (i = 0; i < p->n_pdds; i++) { + struct kfd_process_device *pdd = p->pdds[i]; + struct device_queue_manager *dqm = pdd->dev->dqm; + struct qcm_process_device *qpd = &pdd->qpd; + struct queue *q; + int r, per_device_resumed = 0; + + dqm_lock(dqm); + + /* unmask queues that resume or already resumed as valid */ + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = QUEUE_NOT_FOUND; + + if (queue_ids) + q_idx = q_array_get_index( + q->properties.queue_id, + num_queues, + queue_ids); + + if (!queue_ids || q_idx != QUEUE_NOT_FOUND) { + int err = resume_single_queue(dqm, &pdd->qpd, q); + + if (queue_ids) { + if (!err) { + queue_ids[q_idx] &= + ~KFD_DBG_QUEUE_INVALID_MASK; + } else { + queue_ids[q_idx] |= + KFD_DBG_QUEUE_ERROR_MASK; + break; + } + } + + if (dqm->dev->kfd->shared_resources.enable_mes) { + wake_up_all(&dqm->destroy_wait); + if (!err) + total_resumed++; + } else { + per_device_resumed++; + } + } + } + + if (!per_device_resumed) { + dqm_unlock(dqm); + continue; + } + + r = execute_queues_cpsch(dqm, + KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, + 0, + USE_DEFAULT_GRACE_PERIOD); + if (r) { + pr_err("Failed to resume process queues\n"); + if (queue_ids) { + list_for_each_entry(q, &qpd->queues_list, list) { + int q_idx = q_array_get_index( + q->properties.queue_id, + num_queues, + queue_ids); + + /* mask queue as error on resume fail */ + if (q_idx != QUEUE_NOT_FOUND) + queue_ids[q_idx] |= + KFD_DBG_QUEUE_ERROR_MASK; + } + } + } else { + wake_up_all(&dqm->destroy_wait); + total_resumed += per_device_resumed; + } + + dqm_unlock(dqm); + } + + if (queue_ids) { + if (copy_to_user((void __user *)usr_queue_id_array, queue_ids, + num_queues * sizeof(uint32_t))) + pr_err("copy_to_user failed on queue resume\n"); + + kfree(queue_ids); + } + + return total_resumed; +} + +int suspend_queues(struct kfd_process *p, + uint32_t num_queues, + uint32_t grace_period, + uint64_t exception_clear_mask, + uint32_t *usr_queue_id_array) +{ + uint32_t *queue_ids = get_queue_ids(num_queues, usr_queue_id_array); + int total_suspended = 0; + int i; + + if (IS_ERR(queue_ids)) + return PTR_ERR(queue_ids); + + /* mask all queues as invalid. 
unmask on successful request */
+	q_array_invalidate(num_queues, queue_ids);
+
+	for (i = 0; i < p->n_pdds; i++) {
+		struct kfd_process_device *pdd = p->pdds[i];
+		struct device_queue_manager *dqm = pdd->dev->dqm;
+		struct qcm_process_device *qpd = &pdd->qpd;
+		struct queue *q;
+		int r, per_device_suspended = 0;
+
+		mutex_lock(&p->event_mutex);
+		dqm_lock(dqm);
+
+		/* unmask queues that suspend or already suspended */
+		list_for_each_entry(q, &qpd->queues_list, list) {
+			int q_idx = q_array_get_index(q->properties.queue_id,
+							num_queues,
+							queue_ids);
+
+			if (q_idx != QUEUE_NOT_FOUND) {
+				int err = suspend_single_queue(dqm, pdd, q);
+				bool is_mes = dqm->dev->kfd->shared_resources.enable_mes;
+
+				if (!err) {
+					queue_ids[q_idx] &= ~KFD_DBG_QUEUE_INVALID_MASK;
+					if (exception_clear_mask && is_mes)
+						q->properties.exception_status &=
+							~exception_clear_mask;
+
+					if (is_mes)
+						total_suspended++;
+					else
+						per_device_suspended++;
+				} else if (err != -EBUSY) {
+					r = err;
+					queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
+					break;
+				}
+			}
+		}
+
+		if (!per_device_suspended) {
+			dqm_unlock(dqm);
+			mutex_unlock(&p->event_mutex);
+			if (total_suspended)
+				amdgpu_amdkfd_debug_mem_fence(dqm->dev->adev);
+			continue;
+		}
+
+		r = execute_queues_cpsch(dqm,
+			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0,
+			grace_period);
+
+		if (r)
+			pr_err("Failed to suspend process queues.\n");
+		else
+			total_suspended += per_device_suspended;
+
+		list_for_each_entry(q, &qpd->queues_list, list) {
+			int q_idx = q_array_get_index(q->properties.queue_id,
+						num_queues, queue_ids);
+
+			if (q_idx == QUEUE_NOT_FOUND)
+				continue;
+
+			/* mask queue as error on suspend fail */
+			if (r)
+				queue_ids[q_idx] |= KFD_DBG_QUEUE_ERROR_MASK;
+			else if (exception_clear_mask)
+				q->properties.exception_status &=
+					~exception_clear_mask;
+		}
+
+		dqm_unlock(dqm);
+		mutex_unlock(&p->event_mutex);
+		amdgpu_device_flush_hdp(dqm->dev->adev, NULL);
+	}
+
+	if (total_suspended) {
+		struct copy_context_work_handler_workarea copy_context_worker;
+
+		INIT_WORK_ONSTACK(
+				&copy_context_worker.copy_context_work,
+				copy_context_work_handler);
+
+		copy_context_worker.p = p;
+
+		schedule_work(&copy_context_worker.copy_context_work);
+
+		flush_work(&copy_context_worker.copy_context_work);
+		destroy_work_on_stack(&copy_context_worker.copy_context_work);
+	}
+
+	if (copy_to_user((void __user *)usr_queue_id_array, queue_ids,
+			num_queues * sizeof(uint32_t)))
+		pr_err("copy_to_user failed on queue suspend\n");
+
+	kfree(queue_ids);
+
+	return total_suspended;
+}
+
 int debug_lock_and_unmap(struct device_queue_manager *dqm)
 {
 	int r;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index bb75d93712eb..d4e6dbffe8c2 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -263,6 +263,8 @@ struct device_queue_manager {
 	uint32_t		current_logical_xcc_start;
 
 	uint32_t		wait_times;
+
+	wait_queue_head_t	destroy_wait;
 };
 
 void device_queue_manager_init_cik(
@@ -290,6 +292,14 @@ int reserve_debug_trap_vmid(struct device_queue_manager *dqm,
 			struct qcm_process_device *qpd);
 int release_debug_trap_vmid(struct device_queue_manager *dqm,
 			struct qcm_process_device *qpd);
+int suspend_queues(struct kfd_process *p,
+			uint32_t num_queues,
+			uint32_t grace_period,
+			uint64_t exception_clear_mask,
+			uint32_t *usr_queue_id_array);
+int resume_queues(struct kfd_process *p,
+			uint32_t num_queues,
+			uint32_t *usr_queue_id_array);
 int debug_lock_and_unmap(struct device_queue_manager *dqm);
 int debug_map_and_unlock(struct device_queue_manager *dqm);
 int debug_refresh_runlist(struct device_queue_manager *dqm);
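The suspend/resume helpers above report per-queue status to user space in-band: every entry of the caller's queue-ID array is first tagged with KFD_DBG_QUEUE_INVALID_MASK, the tag is cleared for each queue whose request was honored (or already in the requested state), and KFD_DBG_QUEUE_ERROR_MASK is set on failure. A minimal user-side sketch of decoding that array follows; the mask bit positions below are illustrative placeholders, since the real definitions live in the KFD UAPI header, which this series does not show, and the ioctl plumbing is omitted.

/*
 * Hypothetical user-side decoding of the queue-ID array written back by
 * suspend_queues()/resume_queues().  Mask names mirror the kernel's; the
 * bit positions here are assumed for illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define DBG_QUEUE_INVALID_MASK	0x80000000u	/* assumed value */
#define DBG_QUEUE_ERROR_MASK	0x40000000u	/* assumed value */

static void report_queue_status(const uint32_t *ids, uint32_t n)
{
	for (uint32_t i = 0; i < n; i++) {
		uint32_t qid = ids[i] & ~(DBG_QUEUE_INVALID_MASK |
					  DBG_QUEUE_ERROR_MASK);

		if (ids[i] & DBG_QUEUE_ERROR_MASK)
			printf("queue %u: request failed\n", qid);
		else if (ids[i] & DBG_QUEUE_INVALID_MASK)
			printf("queue %u: not found or untouched\n", qid);
		else
			printf("queue %u: request honored\n", qid);
	}
}

int main(void)
{
	uint32_t ids[] = { 3, 5 | DBG_QUEUE_ERROR_MASK,
			   7 | DBG_QUEUE_INVALID_MASK };

	report_queue_status(ids, 3);
	return 0;
}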
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
index a0ac4f2fe6b5..94c0fc2e57b7 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v10.c
@@ -237,6 +237,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 			  u32 *save_area_used_size)
 {
 	struct v10_compute_mqd *m;
+	struct kfd_context_save_area_header header;
 
 	m = get_mqd(mqd);
 
@@ -255,6 +256,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 	 * accessible to user mode
 	 */
 
+	header.wave_state.control_stack_size = *ctl_stack_used_size;
+	header.wave_state.wave_state_size = *save_area_used_size;
+
+	header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
+	header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+
+	if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
+		return -EFAULT;
+
 	return 0;
 }
 
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
index 9a9b4e853516..31fec5e70d13 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v11.c
@@ -291,7 +291,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 			  u32 *save_area_used_size)
 {
 	struct v11_compute_mqd *m;
-	/*struct mqd_user_context_save_area_header header;*/
+	struct kfd_context_save_area_header header;
 
 	m = get_mqd(mqd);
 
@@ -309,16 +309,15 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 	 * it's part of the context save area that is already
 	 * accessible to user mode
 	 */
-/*
-	header.control_stack_size = *ctl_stack_used_size;
-	header.wave_state_size = *save_area_used_size;
+	header.wave_state.control_stack_size = *ctl_stack_used_size;
+	header.wave_state.wave_state_size = *save_area_used_size;
 
-	header.wave_state_offset = m->cp_hqd_wg_state_offset;
-	header.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+	header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
+	header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
 
-	if (copy_to_user(ctl_stack, &header, sizeof(header)))
+	if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
 		return -EFAULT;
-*/
+
 	return 0;
 }
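All three get_wave_state() implementations now prepend the same wave_state header to the data placed in the user's context save area. The series uses struct kfd_context_save_area_header without showing its definition; judging from the field accesses alone, the portion covered by sizeof(header.wave_state) would look roughly like the sketch below. The layout, field order, and types are inferred, not taken from this series; the authoritative definition lives in the KFD headers.

/*
 * Inferred from the get_wave_state() accesses above; the real header may
 * contain additional fields after the wave_state sub-struct.
 */
#include <stdint.h>

struct kfd_context_save_area_header {
	struct {
		uint32_t control_stack_offset;	/* m->cp_hqd_cntl_stack_offset */
		uint32_t control_stack_size;	/* *ctl_stack_used_size */
		uint32_t wave_state_offset;	/* m->cp_hqd_wg_state_offset */
		uint32_t wave_state_size;	/* *save_area_used_size */
	} wave_state;
	/* further debugger-related fields may follow in the real header */
};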
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 5b87c244e909..601bb9f68048 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -311,6 +311,7 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 			  u32 *save_area_used_size)
 {
 	struct v9_mqd *m;
+	struct kfd_context_save_area_header header;
 
 	/* Control stack is located one page after MQD. */
 	void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);
 
@@ -322,7 +323,18 @@ static int get_wave_state(struct mqd_manager *mm, void *mqd,
 	*save_area_used_size = m->cp_hqd_wg_state_offset -
 		m->cp_hqd_cntl_stack_size;
 
-	if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
+	header.wave_state.control_stack_size = *ctl_stack_used_size;
+	header.wave_state.wave_state_size = *save_area_used_size;
+
+	header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
+	header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;
+
+	if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
+		return -EFAULT;
+
+	if (copy_to_user(ctl_stack + m->cp_hqd_cntl_stack_offset,
+			mqd_ctl_stack + m->cp_hqd_cntl_stack_offset,
+			*ctl_stack_used_size))
 		return -EFAULT;
 
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index cd2d56e5cdf0..05da43bf233a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -510,6 +510,8 @@ struct queue_properties {
 	uint32_t doorbell_off;
 	bool is_interop;
 	bool is_evicted;
+	bool is_suspended;
+	bool is_being_destroyed;
 	bool is_active;
 	bool is_gws;
 	uint32_t pm4_target_xcc;
@@ -535,7 +537,8 @@ struct queue_properties {
 #define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
 			    (q).queue_address != 0 &&	\
 			    (q).queue_percent > 0 &&	\
-			    !(q).is_evicted)
+			    !(q).is_evicted &&		\
+			    !(q).is_suspended)
 
 enum mqd_update_flag {
 	UPDATE_FLAG_DBG_WA_ENABLE = 1,
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index 70852a200d8f..01ccab607a69 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -187,6 +187,7 @@ static int init_user_queue(struct process_queue_manager *pqm,
 
 	/* Doorbell initialized in user space*/
 	q_properties->doorbell_ptr = NULL;
+	q_properties->exception_status = KFD_EC_MASK(EC_QUEUE_NEW);
 
 	/* let DQM handle it*/
 	q_properties->vmid = 0;
-- cgit
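The queue_properties change in kfd_priv.h is what lets the suspend path reuse the existing eviction machinery: once !(q).is_suspended is part of QUEUE_IS_ACTIVE(), a suspended queue simply stops qualifying as active and is unmapped on the next runlist update, then re-added by resume_single_queue() when the flag clears. The stand-alone sketch below replays the predicate with a stripped-down struct to make that state interaction explicit; the struct here is a test stand-in, not the kernel type.

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

/* Stand-in for just the fields QUEUE_IS_ACTIVE() tests. */
struct props {
	uint64_t queue_size;
	uint64_t queue_address;
	int      queue_percent;
	bool     is_evicted;
	bool     is_suspended;
};

#define QUEUE_IS_ACTIVE(q) ((q).queue_size > 0 &&	\
			    (q).queue_address != 0 &&	\
			    (q).queue_percent > 0 &&	\
			    !(q).is_evicted &&		\
			    !(q).is_suspended)

int main(void)
{
	struct props q = { 4096, 0x1000, 100, false, false };

	assert(QUEUE_IS_ACTIVE(q));	/* runnable queue */
	q.is_suspended = true;
	assert(!QUEUE_IS_ACTIVE(q));	/* debugger suspended it */
	q.is_suspended = false;
	q.is_evicted = true;
	assert(!QUEUE_IS_ACTIVE(q));	/* eviction still wins */
	return 0;
}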