Diffstat (limited to 'drivers/gpu/drm/amd/amdkfd/kfd_process.c')
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c  400
1 file changed, 269 insertions(+), 131 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index d97e330a5022..8a2c6fc438c0 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -42,6 +42,7 @@ struct mm_struct;
#include "kfd_device_queue_manager.h"
#include "kfd_dbgmgr.h"
#include "kfd_iommu.h"
+#include "kfd_svm.h"
/*
* List of struct kfd_process (field kfd_process).
@@ -108,8 +109,6 @@ static void kfd_sdma_activity_worker(struct work_struct *work)
workarea = container_of(work, struct kfd_sdma_activity_handler_workarea,
sdma_activity_work);
- if (!workarea)
- return;
pdd = workarea->pdd;
if (!pdd)
@@ -250,7 +249,7 @@ cleanup:
}
/**
- * @kfd_get_cu_occupancy() - Collect number of waves in-flight on this device
+ * @kfd_get_cu_occupancy - Collect number of waves in-flight on this device
* by current process. Translates acquired wave count into number of compute units
* that are occupied.
*
@@ -417,6 +416,29 @@ static ssize_t kfd_procfs_stats_show(struct kobject *kobj,
return 0;
}
+static ssize_t kfd_sysfs_counters_show(struct kobject *kobj,
+ struct attribute *attr, char *buf)
+{
+ struct kfd_process_device *pdd;
+
+ if (!strcmp(attr->name, "faults")) {
+ pdd = container_of(attr, struct kfd_process_device,
+ attr_faults);
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->faults));
+ }
+ if (!strcmp(attr->name, "page_in")) {
+ pdd = container_of(attr, struct kfd_process_device,
+ attr_page_in);
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_in));
+ }
+ if (!strcmp(attr->name, "page_out")) {
+ pdd = container_of(attr, struct kfd_process_device,
+ attr_page_out);
+ return sysfs_emit(buf, "%llu\n", READ_ONCE(pdd->page_out));
+ }
+ return 0;
+}
+
static struct attribute attr_queue_size = {
.name = "size",
.mode = KFD_SYSFS_FILE_MODE
@@ -452,13 +474,18 @@ static const struct sysfs_ops procfs_stats_ops = {
.show = kfd_procfs_stats_show,
};
-static struct attribute *procfs_stats_attrs[] = {
- NULL
-};
-
static struct kobj_type procfs_stats_type = {
.sysfs_ops = &procfs_stats_ops,
- .default_attrs = procfs_stats_attrs,
+ .release = kfd_procfs_kobj_release,
+};
+
+static const struct sysfs_ops sysfs_counters_ops = {
+ .show = kfd_sysfs_counters_show,
+};
+
+static struct kobj_type sysfs_counters_type = {
+ .sysfs_ops = &sysfs_counters_ops,
+ .release = kfd_procfs_kobj_release,
};
int kfd_procfs_add_queue(struct queue *q)
@@ -485,34 +512,31 @@ int kfd_procfs_add_queue(struct queue *q)
return 0;
}
-static int kfd_sysfs_create_file(struct kfd_process *p, struct attribute *attr,
+static void kfd_sysfs_create_file(struct kobject *kobj, struct attribute *attr,
char *name)
{
- int ret = 0;
+ int ret;
- if (!p || !attr || !name)
- return -EINVAL;
+ if (!kobj || !attr || !name)
+ return;
attr->name = name;
attr->mode = KFD_SYSFS_FILE_MODE;
sysfs_attr_init(attr);
- ret = sysfs_create_file(p->kobj, attr);
-
- return ret;
+ ret = sysfs_create_file(kobj, attr);
+ if (ret)
+ pr_warn("Create sysfs %s/%s failed %d", kobj->name, name, ret);
}
-static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
+static void kfd_procfs_add_sysfs_stats(struct kfd_process *p)
{
- int ret = 0;
+ int ret;
int i;
char stats_dir_filename[MAX_SYSFS_FILENAME_LEN];
- if (!p)
- return -EINVAL;
-
- if (!p->kobj)
- return -EFAULT;
+ if (!p || !p->kobj)
+ return;
/*
* Create sysfs files for each GPU:
@@ -522,63 +546,87 @@ static int kfd_procfs_add_sysfs_stats(struct kfd_process *p)
*/
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *pdd = p->pdds[i];
- struct kobject *kobj_stats;
snprintf(stats_dir_filename, MAX_SYSFS_FILENAME_LEN,
"stats_%u", pdd->dev->id);
- kobj_stats = kfd_alloc_struct(kobj_stats);
- if (!kobj_stats)
- return -ENOMEM;
+ pdd->kobj_stats = kfd_alloc_struct(pdd->kobj_stats);
+ if (!pdd->kobj_stats)
+ return;
- ret = kobject_init_and_add(kobj_stats,
- &procfs_stats_type,
- p->kobj,
- stats_dir_filename);
+ ret = kobject_init_and_add(pdd->kobj_stats,
+ &procfs_stats_type,
+ p->kobj,
+ stats_dir_filename);
if (ret) {
pr_warn("Creating KFD proc/stats_%s folder failed",
- stats_dir_filename);
- kobject_put(kobj_stats);
- goto err;
+ stats_dir_filename);
+ kobject_put(pdd->kobj_stats);
+ pdd->kobj_stats = NULL;
+ return;
}
- pdd->kobj_stats = kobj_stats;
- pdd->attr_evict.name = "evicted_ms";
- pdd->attr_evict.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&pdd->attr_evict);
- ret = sysfs_create_file(kobj_stats, &pdd->attr_evict);
- if (ret)
- pr_warn("Creating eviction stats for gpuid %d failed",
- (int)pdd->dev->id);
-
+ kfd_sysfs_create_file(pdd->kobj_stats, &pdd->attr_evict,
+ "evicted_ms");
/* Add sysfs file to report compute unit occupancy */
- if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL) {
- pdd->attr_cu_occupancy.name = "cu_occupancy";
- pdd->attr_cu_occupancy.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&pdd->attr_cu_occupancy);
- ret = sysfs_create_file(kobj_stats,
- &pdd->attr_cu_occupancy);
- if (ret)
- pr_warn("Creating %s failed for gpuid: %d",
- pdd->attr_cu_occupancy.name,
- (int)pdd->dev->id);
- }
+ if (pdd->dev->kfd2kgd->get_cu_occupancy)
+ kfd_sysfs_create_file(pdd->kobj_stats,
+ &pdd->attr_cu_occupancy,
+ "cu_occupancy");
}
-err:
- return ret;
}
-
-static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
+static void kfd_procfs_add_sysfs_counters(struct kfd_process *p)
{
int ret = 0;
int i;
+ char counters_dir_filename[MAX_SYSFS_FILENAME_LEN];
- if (!p)
- return -EINVAL;
+ if (!p || !p->kobj)
+ return;
- if (!p->kobj)
- return -EFAULT;
+ /*
+ * Create sysfs files for each GPU which supports SVM
+ * - proc/<pid>/counters_<gpuid>/
+ * - proc/<pid>/counters_<gpuid>/faults
+ * - proc/<pid>/counters_<gpuid>/page_in
+ * - proc/<pid>/counters_<gpuid>/page_out
+ */
+ for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
+ struct kfd_process_device *pdd = p->pdds[i];
+ struct kobject *kobj_counters;
+
+ snprintf(counters_dir_filename, MAX_SYSFS_FILENAME_LEN,
+ "counters_%u", pdd->dev->id);
+ kobj_counters = kfd_alloc_struct(kobj_counters);
+ if (!kobj_counters)
+ return;
+
+ ret = kobject_init_and_add(kobj_counters, &sysfs_counters_type,
+ p->kobj, counters_dir_filename);
+ if (ret) {
+ pr_warn("Creating KFD proc/%s folder failed",
+ counters_dir_filename);
+ kobject_put(kobj_counters);
+ return;
+ }
+
+ pdd->kobj_counters = kobj_counters;
+ kfd_sysfs_create_file(kobj_counters, &pdd->attr_faults,
+ "faults");
+ kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_in,
+ "page_in");
+ kfd_sysfs_create_file(kobj_counters, &pdd->attr_page_out,
+ "page_out");
+ }
+}
+
+static void kfd_procfs_add_sysfs_files(struct kfd_process *p)
+{
+ int i;
+
+ if (!p || !p->kobj)
+ return;
/*
* Create sysfs files for each GPU:
@@ -590,20 +638,14 @@ static int kfd_procfs_add_sysfs_files(struct kfd_process *p)
snprintf(pdd->vram_filename, MAX_SYSFS_FILENAME_LEN, "vram_%u",
pdd->dev->id);
- ret = kfd_sysfs_create_file(p, &pdd->attr_vram, pdd->vram_filename);
- if (ret)
- pr_warn("Creating vram usage for gpu id %d failed",
- (int)pdd->dev->id);
+ kfd_sysfs_create_file(p->kobj, &pdd->attr_vram,
+ pdd->vram_filename);
snprintf(pdd->sdma_filename, MAX_SYSFS_FILENAME_LEN, "sdma_%u",
pdd->dev->id);
- ret = kfd_sysfs_create_file(p, &pdd->attr_sdma, pdd->sdma_filename);
- if (ret)
- pr_warn("Creating sdma usage for gpu id %d failed",
- (int)pdd->dev->id);
+ kfd_sysfs_create_file(p->kobj, &pdd->attr_sdma,
+ pdd->sdma_filename);
}
-
- return ret;
}
void kfd_procfs_del_queue(struct queue *q)
@@ -647,8 +689,9 @@ static void kfd_process_free_gpuvm(struct kgd_mem *mem,
{
struct kfd_dev *dev = pdd->dev;
- amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->vm);
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, NULL);
+ amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(dev->kgd, mem, pdd->drm_priv);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(dev->kgd, mem, pdd->drm_priv,
+ NULL);
}
/* kfd_process_alloc_gpuvm - Allocate GPU VM for the KFD process
@@ -667,11 +710,11 @@ static int kfd_process_alloc_gpuvm(struct kfd_process_device *pdd,
int err;
err = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(kdev->kgd, gpu_va, size,
- pdd->vm, &mem, NULL, flags);
+ pdd->drm_priv, &mem, NULL, flags);
if (err)
goto err_alloc_mem;
- err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->vm);
+ err = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(kdev->kgd, mem, pdd->drm_priv);
if (err)
goto err_map_mem;
@@ -712,7 +755,8 @@ sync_memory_failed:
return err;
err_map_mem:
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, NULL);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(kdev->kgd, mem, pdd->drm_priv,
+ NULL);
err_alloc_mem:
*kptr = NULL;
return err;
@@ -798,28 +842,17 @@ struct kfd_process *kfd_create_process(struct file *filep)
goto out;
}
- process->attr_pasid.name = "pasid";
- process->attr_pasid.mode = KFD_SYSFS_FILE_MODE;
- sysfs_attr_init(&process->attr_pasid);
- ret = sysfs_create_file(process->kobj, &process->attr_pasid);
- if (ret)
- pr_warn("Creating pasid for pid %d failed",
- (int)process->lead_thread->pid);
+ kfd_sysfs_create_file(process->kobj, &process->attr_pasid,
+ "pasid");
process->kobj_queues = kobject_create_and_add("queues",
process->kobj);
if (!process->kobj_queues)
pr_warn("Creating KFD proc/queues folder failed");
- ret = kfd_procfs_add_sysfs_stats(process);
- if (ret)
- pr_warn("Creating sysfs stats dir for pid %d failed",
- (int)process->lead_thread->pid);
-
- ret = kfd_procfs_add_sysfs_files(process);
- if (ret)
- pr_warn("Creating sysfs usage file for pid %d failed",
- (int)process->lead_thread->pid);
+ kfd_procfs_add_sysfs_stats(process);
+ kfd_procfs_add_sysfs_files(process);
+ kfd_procfs_add_sysfs_counters(process);
}
out:
if (!IS_ERR(process))
@@ -901,13 +934,14 @@ static void kfd_process_device_free_bos(struct kfd_process_device *pdd)
for (i = 0; i < p->n_pdds; i++) {
struct kfd_process_device *peer_pdd = p->pdds[i];
- if (!peer_pdd->vm)
+ if (!peer_pdd->drm_priv)
continue;
amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
- peer_pdd->dev->kgd, mem, peer_pdd->vm);
+ peer_pdd->dev->kgd, mem, peer_pdd->drm_priv);
}
- amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem, NULL);
+ amdgpu_amdkfd_gpuvm_free_memory_of_gpu(pdd->dev->kgd, mem,
+ pdd->drm_priv, NULL);
kfd_process_device_remove_obj_handle(pdd, id);
}
}
@@ -932,7 +966,7 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
if (pdd->drm_file) {
amdgpu_amdkfd_gpuvm_release_process_vm(
- pdd->dev->kgd, pdd->vm);
+ pdd->dev->kgd, pdd->drm_priv);
fput(pdd->drm_file);
}
@@ -961,6 +995,50 @@ static void kfd_process_destroy_pdds(struct kfd_process *p)
p->n_pdds = 0;
}
+static void kfd_process_remove_sysfs(struct kfd_process *p)
+{
+ struct kfd_process_device *pdd;
+ int i;
+
+ if (!p->kobj)
+ return;
+
+ sysfs_remove_file(p->kobj, &p->attr_pasid);
+ kobject_del(p->kobj_queues);
+ kobject_put(p->kobj_queues);
+ p->kobj_queues = NULL;
+
+ for (i = 0; i < p->n_pdds; i++) {
+ pdd = p->pdds[i];
+
+ sysfs_remove_file(p->kobj, &pdd->attr_vram);
+ sysfs_remove_file(p->kobj, &pdd->attr_sdma);
+
+ sysfs_remove_file(pdd->kobj_stats, &pdd->attr_evict);
+ if (pdd->dev->kfd2kgd->get_cu_occupancy)
+ sysfs_remove_file(pdd->kobj_stats,
+ &pdd->attr_cu_occupancy);
+ kobject_del(pdd->kobj_stats);
+ kobject_put(pdd->kobj_stats);
+ pdd->kobj_stats = NULL;
+ }
+
+ for_each_set_bit(i, p->svms.bitmap_supported, p->n_pdds) {
+ pdd = p->pdds[i];
+
+ sysfs_remove_file(pdd->kobj_counters, &pdd->attr_faults);
+ sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_in);
+ sysfs_remove_file(pdd->kobj_counters, &pdd->attr_page_out);
+ kobject_del(pdd->kobj_counters);
+ kobject_put(pdd->kobj_counters);
+ pdd->kobj_counters = NULL;
+ }
+
+ kobject_del(p->kobj);
+ kobject_put(p->kobj);
+ p->kobj = NULL;
+}
+
/* No process locking is needed in this function, because the process
* is not findable any more. We must assume that no other thread is
* using it any more, otherwise we couldn't safely free the process
@@ -970,36 +1048,11 @@ static void kfd_process_wq_release(struct work_struct *work)
{
struct kfd_process *p = container_of(work, struct kfd_process,
release_work);
- int i;
-
- /* Remove the procfs files */
- if (p->kobj) {
- sysfs_remove_file(p->kobj, &p->attr_pasid);
- kobject_del(p->kobj_queues);
- kobject_put(p->kobj_queues);
- p->kobj_queues = NULL;
-
- for (i = 0; i < p->n_pdds; i++) {
- struct kfd_process_device *pdd = p->pdds[i];
-
- sysfs_remove_file(p->kobj, &pdd->attr_vram);
- sysfs_remove_file(p->kobj, &pdd->attr_sdma);
- sysfs_remove_file(p->kobj, &pdd->attr_evict);
- if (pdd->dev->kfd2kgd->get_cu_occupancy != NULL)
- sysfs_remove_file(p->kobj, &pdd->attr_cu_occupancy);
- kobject_del(pdd->kobj_stats);
- kobject_put(pdd->kobj_stats);
- pdd->kobj_stats = NULL;
- }
-
- kobject_del(p->kobj);
- kobject_put(p->kobj);
- p->kobj = NULL;
- }
-
+ kfd_process_remove_sysfs(p);
kfd_iommu_unbind_process(p);
kfd_process_free_outstanding_kfd_bos(p);
+ svm_range_list_fini(p);
kfd_process_destroy_pdds(p);
dma_fence_put(p->ef);
@@ -1058,6 +1111,7 @@ static void kfd_process_notifier_release(struct mmu_notifier *mn,
cancel_delayed_work_sync(&p->eviction_work);
cancel_delayed_work_sync(&p->restore_work);
+ cancel_delayed_work_sync(&p->svms.restore_work);
mutex_lock(&p->mutex);
@@ -1186,6 +1240,56 @@ void kfd_process_set_trap_handler(struct qcm_process_device *qpd,
}
}
+bool kfd_process_xnack_mode(struct kfd_process *p, bool supported)
+{
+ int i;
+
+ /* On most GFXv9 GPUs, the retry mode in the SQ must match the
+ * boot time retry setting. Mixing processes with different
+ * XNACK/retry settings can hang the GPU.
+ *
+ * Different GPUs can have different noretry settings depending
+ * on HW bugs or limitations. We need to find at least one
+ * XNACK mode for this process that's compatible with all GPUs.
+ * Fortunately GPUs with retry enabled (noretry=0) can run code
+ * built for XNACK-off. On GFXv9 it may perform slower.
+ *
+ * Therefore applications built for XNACK-off can always be
+ * supported and will be our fallback if any GPU does not
+ * support retry.
+ */
+ for (i = 0; i < p->n_pdds; i++) {
+ struct kfd_dev *dev = p->pdds[i]->dev;
+
+ /* Only consider GFXv9 and higher GPUs. Older GPUs don't
+ * support the SVM APIs and don't need to be considered
+ * for the XNACK mode selection.
+ */
+ if (dev->device_info->asic_family < CHIP_VEGA10)
+ continue;
+ /* Aldebaran can always support XNACK because it can support
+ * per-process XNACK mode selection. But let the dev->noretry
+ * setting still influence the default XNACK mode.
+ */
+ if (supported &&
+ dev->device_info->asic_family == CHIP_ALDEBARAN)
+ continue;
+
+ /* GFXv10 and later GPUs do not support shader preemption
+ * during page faults. This can lead to poor QoS for queue
+ * management and memory-manager-related preemptions or
+ * even deadlocks.
+ */
+ if (dev->device_info->asic_family >= CHIP_NAVI10)
+ return false;
+
+ if (dev->noretry)
+ return false;
+ }
+
+ return true;
+}
+
/*
* On return the kfd_process is fully operational and will be freed when the
* mm is released
@@ -1224,6 +1328,13 @@ static struct kfd_process *create_process(const struct task_struct *thread)
if (err != 0)
goto err_init_apertures;
+ /* Check XNACK support after PDDs are created in kfd_init_apertures */
+ process->xnack_enabled = kfd_process_xnack_mode(process, false);
+
+ err = svm_range_list_init(process);
+ if (err)
+ goto err_init_svm_range_list;
+
/* alloc_notifier needs to find the process in the hash table */
hash_add_rcu(kfd_processes_table, &process->kfd_processes,
(uintptr_t)process->mm);
@@ -1246,6 +1357,8 @@ static struct kfd_process *create_process(const struct task_struct *thread)
err_register_notifier:
hash_del_rcu(&process->kfd_processes);
+ svm_range_list_fini(process);
+err_init_svm_range_list:
kfd_process_free_outstanding_kfd_bos(process);
kfd_process_destroy_pdds(process);
err_init_apertures:
@@ -1375,7 +1488,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
if (!drm_file)
return -EINVAL;
- if (pdd->vm)
+ if (pdd->drm_priv)
return -EBUSY;
p = pdd->process;
@@ -1383,13 +1496,12 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
ret = amdgpu_amdkfd_gpuvm_acquire_process_vm(
dev->kgd, drm_file, p->pasid,
- &pdd->vm, &p->kgd_process_info, &p->ef);
+ &p->kgd_process_info, &p->ef);
if (ret) {
pr_err("Failed to create process VM object\n");
return ret;
}
-
- amdgpu_vm_set_task_info(pdd->vm);
+ pdd->drm_priv = drm_file->private_data;
ret = kfd_process_device_reserve_ib_mem(pdd);
if (ret)
@@ -1405,7 +1517,7 @@ int kfd_process_device_init_vm(struct kfd_process_device *pdd,
err_init_cwsr:
err_reserve_ib_mem:
kfd_process_device_free_bos(pdd);
- pdd->vm = NULL;
+ pdd->drm_priv = NULL;
return ret;
}
@@ -1429,7 +1541,7 @@ struct kfd_process_device *kfd_bind_process_to_device(struct kfd_dev *dev,
return ERR_PTR(-ENOMEM);
}
- if (!pdd->vm)
+ if (!pdd->drm_priv)
return ERR_PTR(-ENODEV);
/*
@@ -1600,6 +1712,32 @@ int kfd_process_restore_queues(struct kfd_process *p)
return ret;
}
+int kfd_process_gpuidx_from_gpuid(struct kfd_process *p, uint32_t gpu_id)
+{
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++)
+ if (p->pdds[i] && gpu_id == p->pdds[i]->dev->id)
+ return i;
+ return -EINVAL;
+}
+
+int
+kfd_process_gpuid_from_kgd(struct kfd_process *p, struct amdgpu_device *adev,
+ uint32_t *gpuid, uint32_t *gpuidx)
+{
+ struct kgd_dev *kgd = (struct kgd_dev *)adev;
+ int i;
+
+ for (i = 0; i < p->n_pdds; i++)
+ if (p->pdds[i] && p->pdds[i]->dev->kgd == kgd) {
+ *gpuid = p->pdds[i]->dev->id;
+ *gpuidx = i;
+ return 0;
+ }
+ return -EINVAL;
+}
+
static void evict_process_worker(struct work_struct *work)
{
int ret;
@@ -1748,7 +1886,7 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
}
-void kfd_flush_tlb(struct kfd_process_device *pdd)
+void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
{
struct kfd_dev *dev = pdd->dev;
@@ -1761,7 +1899,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd)
pdd->qpd.vmid);
} else {
amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
- pdd->process->pasid);
+ pdd->process->pasid, type);
}
}
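
For context, a minimal userspace sketch of how the per-GPU SVM counters added by this change (faults, page_in, page_out) could be read once the patch is applied. The /sys/class/kfd/kfd/proc/<pid>/counters_<gpuid>/ path and the read_kfd_counter helper are assumptions for illustration only, not part of the patch.

/*
 * Illustrative only: read one per-process, per-GPU SVM counter.
 * Assumed sysfs layout: /sys/class/kfd/kfd/proc/<pid>/counters_<gpuid>/<name>
 */
#include <stdio.h>
#include <stdlib.h>

static long long read_kfd_counter(int pid, unsigned int gpuid, const char *name)
{
	char path[256];
	long long val = -1;
	FILE *f;

	snprintf(path, sizeof(path),
		 "/sys/class/kfd/kfd/proc/%d/counters_%u/%s", pid, gpuid, name);
	f = fopen(path, "r");
	if (!f)
		return -1;
	if (fscanf(f, "%lld", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(int argc, char **argv)
{
	if (argc != 4) {
		fprintf(stderr, "usage: %s <pid> <gpuid> <faults|page_in|page_out>\n",
			argv[0]);
		return 1;
	}
	printf("%lld\n", read_kfd_counter(atoi(argv[1]),
			 (unsigned int)strtoul(argv[2], NULL, 0), argv[3]));
	return 0;
}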