OSDN Git Service

drm/amdkfd: add owner ref param to get hmm pages
author Alex Sierra <alex.sierra@amd.com>
Thu, 6 May 2021 17:23:07 +0000 (12:23 -0500)
committer Alex Deucher <alexander.deucher@amd.com>
Thu, 1 Jul 2021 04:05:41 +0000 (00:05 -0400)
The parameter is used as the dev_private_owner to decide whether device
pages in the range need to be migrated back to system memory, based on
whether or not they belong to the same memory domain.
In this case, the owner reference could come from the same memory domain
when devices are connected to the same hive.

Signed-off-by: Alex Sierra <alex.sierra@amd.com>
Reviewed-by: Felix Kuehling <Felix.Kuehling@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
drivers/gpu/drm/amd/amdgpu/amdgpu_mn.h
drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
drivers/gpu/drm/amd/amdkfd/kfd_svm.c

index d6c54c7..4b153da 100644 (file)
@@ -160,7 +160,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
                               struct mm_struct *mm, struct page **pages,
                               uint64_t start, uint64_t npages,
                               struct hmm_range **phmm_range, bool readonly,
-                              bool mmap_locked)
+                              bool mmap_locked, void *owner)
 {
        struct hmm_range *hmm_range;
        unsigned long timeout;
@@ -185,6 +185,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
        hmm_range->hmm_pfns = pfns;
        hmm_range->start = start;
        hmm_range->end = start + npages * PAGE_SIZE;
+       hmm_range->dev_private_owner = owner;
 
        /* Assuming 512MB takes maxmium 1 second to fault page address */
        timeout = max(npages >> 17, 1ULL) * HMM_RANGE_DEFAULT_TIMEOUT;
index 7f7d37a..14a3c18 100644 (file)
@@ -34,7 +34,7 @@ int amdgpu_hmm_range_get_pages(struct mmu_interval_notifier *notifier,
                               struct mm_struct *mm, struct page **pages,
                               uint64_t start, uint64_t npages,
                               struct hmm_range **phmm_range, bool readonly,
-                              bool mmap_locked);
+                              bool mmap_locked, void *owner);
 int amdgpu_hmm_range_get_pages_done(struct hmm_range *hmm_range);
 
 #if defined(CONFIG_HMM_MIRROR)
index a2d1ab1..2e9ad6e 100644 (file)
@@ -692,7 +692,7 @@ int amdgpu_ttm_tt_get_user_pages(struct amdgpu_bo *bo, struct page **pages)
        readonly = amdgpu_ttm_tt_is_readonly(ttm);
        r = amdgpu_hmm_range_get_pages(&bo->notifier, mm, pages, start,
                                       ttm->num_pages, &gtt->range, readonly,
-                                      false);
+                                      false, NULL);
 out_putmm:
        mmput(mm);
 
index 34abf64..e64427c 100644 (file)
@@ -1416,7 +1416,7 @@ static int svm_range_validate_and_map(struct mm_struct *mm,
                r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
                                               prange->start << PAGE_SHIFT,
                                               prange->npages, &hmm_range,
-                                              false, true);
+                                              false, true, NULL);
                if (r) {
                        pr_debug("failed %d to get svm range pages\n", r);
                        goto unreserve_out;
@@ -2728,7 +2728,7 @@ void svm_range_prefault(struct svm_range *prange, struct mm_struct *mm)
        r = amdgpu_hmm_range_get_pages(&prange->notifier, mm, NULL,
                                       prange->start << PAGE_SHIFT,
                                       prange->npages, &hmm_range,
-                                      false, true);
+                                      false, true, NULL);
        if (!r) {
                amdgpu_hmm_range_get_pages_done(hmm_range);
                prange->validated_once = true;