
drm/amdkfd: Add flush-type parameter to kfd_flush_tlb
author Eric Huang <jinhuieric.huang@amd.com>
Tue, 1 Jun 2021 22:19:42 +0000 (18:19 -0400)
committer Alex Deucher <alexander.deucher@amd.com>
Fri, 4 Jun 2021 16:40:00 +0000 (12:40 -0400)
This provides more TLB flush type options for different usage scenarios.

Signed-off-by: Eric Huang <jinhuieric.huang@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_priv.h
drivers/gpu/drm/amd/amdkfd/kfd_process.c

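For context, a minimal sketch of the interface after this change. The enum is assumed to match the TLB_FLUSH_TYPE already accepted by amdgpu_amdkfd_flush_gpu_tlb_pasid() (see the kfd_process.c hunk below); the values other than TLB_FLUSH_LEGACY and the example_unmap_done() caller are illustrative assumptions, not part of this patch.

/* Sketch only: assumed shape of the flush-type enum consumed by
 * amdgpu_amdkfd_flush_gpu_tlb_pasid(). TLB_FLUSH_LEGACY is the value
 * every call site converted in this patch passes.
 */
enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT,
};

/* New signature introduced by this patch. */
void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);

/* Hypothetical future call site opting into a heavy-weight invalidation. */
static void example_unmap_done(struct kfd_process_device *pdd)
{
	kfd_flush_tlb(pdd, TLB_FLUSH_HEAVYWEIGHT);
}
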
drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index 059c3f1..e864224 100644
@@ -1475,7 +1475,7 @@ static int kfd_ioctl_map_memory_to_gpu(struct file *filep,
                peer_pdd = kfd_get_process_device_data(peer, p);
                if (WARN_ON_ONCE(!peer_pdd))
                        continue;
-               kfd_flush_tlb(peer_pdd);
+               kfd_flush_tlb(peer_pdd, TLB_FLUSH_LEGACY);
        }
 
        kfree(devices_arr);

drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 5914e38..4006c8f 100644
@@ -248,7 +248,7 @@ static int allocate_vmid(struct device_queue_manager *dqm,
                        qpd->vmid,
                        qpd->page_table_base);
        /* invalidate the VM context after pasid and vmid mapping is set up */
-       kfd_flush_tlb(qpd_to_pdd(qpd));
+       kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
 
        if (dqm->dev->kfd2kgd->set_scratch_backing_va)
                dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
@@ -284,7 +284,7 @@ static void deallocate_vmid(struct device_queue_manager *dqm,
                if (flush_texture_cache_nocpsch(q->device, qpd))
                        pr_err("Failed to flush TC\n");
 
-       kfd_flush_tlb(qpd_to_pdd(qpd));
+       kfd_flush_tlb(qpd_to_pdd(qpd), TLB_FLUSH_LEGACY);
 
        /* Release the vmid mapping */
        set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
@@ -760,7 +760,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
                                dqm->dev->kgd,
                                qpd->vmid,
                                qpd->page_table_base);
-               kfd_flush_tlb(pdd);
+               kfd_flush_tlb(pdd, TLB_FLUSH_LEGACY);
        }
 
        /* Take a safe reference to the mm_struct, which may otherwise

drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index daa9d47..329684e 100644
@@ -1146,7 +1146,7 @@ void kfd_signal_reset_event(struct kfd_dev *dev);
 
 void kfd_signal_poison_consumed_event(struct kfd_dev *dev, u32 pasid);
 
-void kfd_flush_tlb(struct kfd_process_device *pdd);
+void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type);
 
 int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
 

drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 2f8d352..1a99771 100644
@@ -1838,7 +1838,7 @@ int kfd_reserved_mem_mmap(struct kfd_dev *dev, struct kfd_process *process,
                               KFD_CWSR_TBA_TMA_SIZE, vma->vm_page_prot);
 }
 
-void kfd_flush_tlb(struct kfd_process_device *pdd)
+void kfd_flush_tlb(struct kfd_process_device *pdd, enum TLB_FLUSH_TYPE type)
 {
        struct kfd_dev *dev = pdd->dev;
 
@@ -1851,7 +1851,7 @@ void kfd_flush_tlb(struct kfd_process_device *pdd)
                                                        pdd->qpd.vmid);
        } else {
                amdgpu_amdkfd_flush_gpu_tlb_pasid(dev->kgd,
-                                       pdd->process->pasid, TLB_FLUSH_LEGACY);
+                                       pdd->process->pasid, type);
        }
 }
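
Design note: every call site converted above passes TLB_FLUSH_LEGACY, which is the value the old implementation hardcoded in the amdgpu_amdkfd_flush_gpu_tlb_pasid() path, so the change is behaviour-neutral on its own; the new parameter only takes effect once a caller requests a different flush type.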