
mm/mmap: leave adjust_next as virtual address instead of page frame number
author	Wei Yang <richard.weiyang@linux.alibaba.com>
Tue, 13 Oct 2020 23:53:57 +0000 (16:53 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
Wed, 14 Oct 2020 01:38:31 +0000 (18:38 -0700)
Instead of converting adjust_next back and forth between bytes and a number
of pages, just store the virtual address offset in adjust_next.

Also, this patch fixes one typo in the comment of vma_adjust_trans_huge().
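
A minimal userspace sketch of the unit change (not kernel code; PAGE_SHIFT,
the addresses and main() below are illustrative assumptions, with 4 KiB
pages assumed): the old scheme kept adjust_next as a page count and shifted
it back to bytes at every use, while the new scheme keeps the byte offset
and only shifts once, when updating vm_pgoff.

/* Sketch only: the same boundary shift expressed in page units (old) and
 * byte units (new). All values are made up; 4 KiB pages assumed. */
#include <assert.h>

#define PAGE_SHIFT 12UL

int main(void)
{
	unsigned long next_vm_start = 0x7f0000002000UL; /* hypothetical */
	unsigned long end           = 0x7f0000005000UL; /* hypothetical */

	/* Old: adjust_next held a number of pages. */
	long adjust_next_pages = (end - next_vm_start) >> PAGE_SHIFT;

	/* New: adjust_next holds the virtual address delta in bytes. */
	long adjust_next_bytes = end - next_vm_start;

	/* Both forms move vm_start by the same amount ... */
	assert((adjust_next_pages << PAGE_SHIFT) == adjust_next_bytes);
	/* ... and vm_pgoff now needs the shift at its single use site. */
	assert(adjust_next_pages == (adjust_next_bytes >> PAGE_SHIFT));

	return 0;
}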

[vbabka@suse.cz: changelog tweak]

Signed-off-by: Wei Yang <richard.weiyang@linux.alibaba.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Link: http://lkml.kernel.org/r/20200828081031.11306-1-richard.weiyang@linux.alibaba.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
mm/huge_memory.c
mm/mmap.c

index ec0f0cc..65c289c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2306,13 +2306,13 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 
        /*
         * If we're also updating the vma->vm_next->vm_start, if the new
-        * vm_next->vm_start isn't page aligned and it could previously
+        * vm_next->vm_start isn't hpage aligned and it could previously
         * contain an hugepage: check if we need to split an huge pmd.
         */
        if (adjust_next > 0) {
                struct vm_area_struct *next = vma->vm_next;
                unsigned long nstart = next->vm_start;
-               nstart += adjust_next << PAGE_SHIFT;
+               nstart += adjust_next;
                if (nstart & ~HPAGE_PMD_MASK &&
                    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
                    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
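
As a worked illustration of the check above (a standalone sketch, not kernel
code: HPAGE_PMD_SIZE, the addresses and the adjust_next value below are
assumptions, with 2 MiB PMD huge pages assumed), a split is requested only
when the shifted start is not hpage aligned and the huge page containing it
lies entirely inside next:

/* Sketch of the test above with made-up values; 2 MiB PMD huge pages assumed. */
#include <stdio.h>

#define HPAGE_PMD_SIZE (2UL << 20)
#define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

int main(void)
{
	unsigned long vm_start    = 0x7f0000000000UL; /* hypothetical next->vm_start */
	unsigned long vm_end      = 0x7f0000400000UL; /* hypothetical next->vm_end */
	unsigned long adjust_next = 0x1000UL;          /* byte delta, as after this patch */

	unsigned long nstart = vm_start + adjust_next;

	/* Split the huge pmd only if the new start is not hpage aligned and
	 * the containing huge page sits entirely inside the old next VMA. */
	int need_split = (nstart & ~HPAGE_PMD_MASK) != 0 &&
			 (nstart & HPAGE_PMD_MASK) >= vm_start &&
			 (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vm_end;

	printf("need_split = %d\n", need_split);
	return 0;
}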
index 0f3ca52..57de816 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -758,7 +758,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                         * vma expands, overlapping part of the next:
                         * mprotect case 5 shifting the boundary up.
                         */
-                       adjust_next = (end - next->vm_start) >> PAGE_SHIFT;
+                       adjust_next = (end - next->vm_start);
                        exporter = next;
                        importer = vma;
                        VM_WARN_ON(expand != importer);
@@ -768,7 +768,7 @@ int __vma_adjust(struct vm_area_struct *vma, unsigned long start,
                         * split_vma inserting another: so it must be
                         * mprotect case 4 shifting the boundary down.
                         */
-                       adjust_next = -((vma->vm_end - end) >> PAGE_SHIFT);
+                       adjust_next = -(vma->vm_end - end);
                        exporter = vma;
                        importer = next;
                        VM_WARN_ON(expand != importer);
@@ -840,8 +840,8 @@ again:
        }
        vma->vm_pgoff = pgoff;
        if (adjust_next) {
-               next->vm_start += adjust_next << PAGE_SHIFT;
-               next->vm_pgoff += adjust_next;
+               next->vm_start += adjust_next;
+               next->vm_pgoff += adjust_next >> PAGE_SHIFT;
        }
 
        if (root) {