
drm/nouveau/mmu: store mapped flag separately from memory pointer
author     Ben Skeggs <bskeggs@redhat.com>
           Sat, 7 Jul 2018 08:29:20 +0000 (18:29 +1000)
committer  Ben Skeggs <bskeggs@redhat.com>
           Tue, 19 Feb 2019 23:00:00 +0000 (09:00 +1000)
This will be used to support a privileged client providing PTEs directly,
without a memory object to use as a reference.

Signed-off-by: Ben Skeggs <bskeggs@redhat.com>
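
In short: before this patch, "this region contains valid PTEs" was inferred from vma->memory being non-NULL. A privileged client writing PTEs directly has no nvkm_memory object backing the mapping, so the two states must be tracked independently. A minimal sketch of the resulting semantics (the helper names below are illustrative, not part of the patch):

/* Illustrative only: with the new flag, "mapped" and "backed by a
 * memory object" are independent properties of an nvkm_vma. */
static bool vma_has_valid_ptes(const struct nvkm_vma *vma)
{
	return vma->mapped;		/* previously: vma->memory != NULL */
}

static bool vma_has_backing_object(const struct nvkm_vma *vma)
{
	return vma->memory != NULL;	/* still tracked for tags/refcounts */
}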
drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c

diff --git a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
index 215a672..da00faf 100644
--- a/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
+++ b/drivers/gpu/drm/nouveau/include/nvkm/subdev/mmu.h
@@ -17,6 +17,7 @@ struct nvkm_vma {
        bool part:1; /* Region was split from an allocated region by map(). */
        bool user:1; /* Region user-allocated. */
        bool busy:1; /* Region busy (for temporarily preventing user access). */
+       bool mapped:1; /* Region contains valid pages. */
        struct nvkm_memory *memory; /* Memory currently mapped into VMA. */
        struct nvkm_tags *tags; /* Compression tag reference. */
 };
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
index 5274ab0..69b61e7 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/mmu/vmm.c
@@ -763,6 +763,7 @@ nvkm_vma_tail(struct nvkm_vma *vma, u64 tail)
        new->part = vma->part;
        new->user = vma->user;
        new->busy = vma->busy;
+       new->mapped = vma->mapped;
        list_add(&new->head, &vma->head);
        return new;
 }
@@ -1112,10 +1113,11 @@ nvkm_vmm_unmap_region(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 
        nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
        nvkm_memory_unref(&vma->memory);
+       vma->mapped = false;
 
-       if (!vma->part || ((prev = node(vma, prev)), prev->memory))
+       if (!vma->part || ((prev = node(vma, prev)), prev->mapped))
                prev = NULL;
-       if (!next->part || next->memory)
+       if (!next->part || next->mapped)
                next = NULL;
        nvkm_vmm_node_merge(vmm, prev, vma, next, vma->size);
 }
@@ -1274,6 +1276,7 @@ nvkm_vmm_map_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma,
        nvkm_memory_tags_put(vma->memory, vmm->mmu->subdev.device, &vma->tags);
        nvkm_memory_unref(&vma->memory);
        vma->memory = nvkm_memory_ref(map->memory);
+       vma->mapped = true;
        vma->tags = map->tags;
        return 0;
 }
@@ -1319,14 +1322,16 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
 
        if (vma->mapref || !vma->sparse) {
                do {
-                       const bool map = next->memory != NULL;
+                       const bool mem = next->memory != NULL;
+                       const bool map = next->mapped;
                        const u8  refd = next->refd;
                        const u64 addr = next->addr;
                        u64 size = next->size;
 
                        /* Merge regions that are in the same state. */
                        while ((next = node(next, next)) && next->part &&
-                              (next->memory != NULL) == map &&
+                              (next->mapped == map) &&
+                              (next->memory != NULL) == mem &&
                               (next->refd == refd))
                                size += next->size;
 
@@ -1351,7 +1356,7 @@ nvkm_vmm_put_locked(struct nvkm_vmm *vmm, struct nvkm_vma *vma)
         */
        next = vma;
        do {
-               if (next->memory)
+               if (next->mapped)
                        nvkm_vmm_unmap_region(vmm, next);
        } while ((next = node(vma, next)) && next->part);
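
Taken together, the put path above now coalesces adjacent sub-regions only when they agree on all three pieces of state, rather than on the memory pointer alone. A hedged restatement of that merge condition (the predicate function is illustrative and does not appear in the source; the field names come from the diff):

/* Illustrative only: adjacent sub-regions walked by nvkm_vmm_put_locked()
 * may be merged iff their state matches exactly. */
static bool can_merge(const struct nvkm_vma *a, const struct nvkm_vma *b)
{
	return b->part &&			/* part of the same allocation */
	       a->mapped == b->mapped &&	/* both mapped, or both not */
	       (a->memory != NULL) == (b->memory != NULL) &&
	       a->refd == b->refd;		/* same page-table reference index */
}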