diff --git a/mm/gup.c b/mm/gup.c
index deafa2c..2cd3b31 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -58,6 +58,16 @@ static int follow_pfn_pte(struct vm_area_struct *vma, unsigned long address,
        return -EEXIST;
 }
 
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+       return pte_write(pte) ||
+               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 static struct page *follow_page_pte(struct vm_area_struct *vma,
                unsigned long address, pmd_t *pmd, unsigned int flags)
 {
@@ -92,7 +102,7 @@ retry:
        }
        if ((flags & FOLL_NUMA) && pte_protnone(pte))
                goto no_page;
-       if ((flags & FOLL_WRITE) && !pte_write(pte)) {
+       if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags)) {
                pte_unmap_unlock(ptep, ptl);
                return NULL;
        }
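
The two hunks above are the heart of the change: a forced write through an
unwritable pte is only honoured once that pte has been through a COW cycle
and is dirty, tracked by the new FOLL_COW flag. This is the upstream fix for
the "Dirty COW" race (CVE-2016-5195). A standalone userspace toy model of the
predicate, with illustrative flag values rather than the kernel's, shows why
both conditions matter:

    #include <stdbool.h>
    #include <stdio.h>

    #define FOLL_WRITE 0x01
    #define FOLL_FORCE 0x02
    #define FOLL_COW   0x04   /* values are illustrative only */

    struct pte { bool write; bool dirty; };

    /* userspace mirror of can_follow_write_pte() above */
    static bool can_follow_write(struct pte p, unsigned int flags)
    {
        return p.write ||
               ((flags & FOLL_FORCE) && (flags & FOLL_COW) && p.dirty);
    }

    int main(void)
    {
        unsigned int f = FOLL_WRITE | FOLL_FORCE | FOLL_COW;

        /* pte produced by our own COW fault: dirty, forced write ok */
        printf("%d\n", can_follow_write((struct pte){ false, true }, f));

        /* clean pte faulted in behind our back (the race the old
         * "clear FOLL_WRITE" logic lost): rejected, GUP must retry */
        printf("%d\n", can_follow_write((struct pte){ false, false }, f));
        return 0;
    }
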
@@ -302,11 +312,6 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
        /* mlock all present pages, but do not fault in new pages */
        if ((*flags & (FOLL_POPULATE | FOLL_MLOCK)) == FOLL_MLOCK)
                return -ENOENT;
-       /* For mm_populate(), just skip the stack guard page. */
-       if ((*flags & FOLL_POPULATE) &&
-                       (stack_guard_page_start(vma, address) ||
-                        stack_guard_page_end(vma, address + PAGE_SIZE)))
-               return -ENOENT;
        if (*flags & FOLL_WRITE)
                fault_flags |= FAULT_FLAG_WRITE;
        if (nonblocking)
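
The deleted guard-page special case is consistent with this tree also taking
the upstream rework that replaced the magic page at the bottom of the stack
with an enforced gap (stack_guard_gap) checked in the fault path; with that
in place, stack_guard_page_start()/stack_guard_page_end() no longer exist
and mm_populate() has nothing to skip.
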
@@ -352,7 +357,7 @@ static int faultin_page(struct task_struct *tsk, struct vm_area_struct *vma,
         * reCOWed by userspace write).
         */
        if ((ret & VM_FAULT_WRITE) && !(vma->vm_flags & VM_WRITE))
-               *flags &= ~FOLL_WRITE;
+               *flags |= FOLL_COW;
        return 0;
 }
 
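faultin_page() previously reacted to a successful forced COW by clearing
FOLL_WRITE, so the follow-up lookup would accept a read-only pte; a racing
madvise(MADV_DONTNEED) could drop the private copy in between, the retried
(now read-only) fault would map the shared page-cache page, and the forced
write landed in the original file. Setting FOLL_COW instead keeps the write
intent and defers the decision to the dirty check above. The path is
reachable from plain userspace, e.g. by writing through /proc/self/mem into
a read-only private mapping (minimal sketch; target.txt is a placeholder and
error handling is elided):

    #include <fcntl.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("target.txt", O_RDONLY);
        char *map = mmap(NULL, 4096, PROT_READ, MAP_PRIVATE, fd, 0);
        int mem = open("/proc/self/mem", O_RDWR);

        /* The kernel services this with FOLL_WRITE | FOLL_FORCE:
         * faultin_page() breaks COW, sets FOLL_COW, and the retried
         * follow_page_pte() sees a dirty private copy. The file on
         * disk is untouched; only our private mapping changes. */
        pwrite(mem, "patched", 7, (off_t)(uintptr_t)map);
        printf("%.7s\n", map);
        return 0;
    }
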
@@ -363,6 +368,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
        if (vm_flags & (VM_IO | VM_PFNMAP))
                return -EFAULT;
 
+       if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+               return -EFAULT;
+
        if (gup_flags & FOLL_WRITE) {
                if (!(vm_flags & VM_WRITE)) {
                        if (!(gup_flags & FOLL_FORCE))
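
The new FOLL_ANON check lets a caller insist that GUP only ever touches
anonymous memory: any file-backed VMA in the range now fails with -EFAULT up
front (upstream this guards things like the /proc/<pid>/cmdline and environ
readers). A hypothetical in-kernel caller, as a non-standalone fragment with
tsk, mm and addr assumed:

    struct page *page;
    long got = get_user_pages(tsk, mm, addr & PAGE_MASK, 1,
                              FOLL_ANON, &page, NULL);
    if (got == 1) {
        /* page came from an anonymous VMA; a non-NULL pages array made
         * __get_user_pages_locked() add FOLL_GET, so drop the ref */
        put_page(page);
    }
    /* got < 0: -EFAULT if any VMA in the range was file-backed */
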
@@ -622,7 +630,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
                                                struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long nr_pages,
-                                               int write, int force,
                                                struct page **pages,
                                                struct vm_area_struct **vmas,
                                                int *locked, bool notify_drop,
@@ -640,10 +647,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 
        if (pages)
                flags |= FOLL_GET;
-       if (write)
-               flags |= FOLL_WRITE;
-       if (force)
-               flags |= FOLL_FORCE;
 
        pages_done = 0;
        lock_dropped = false;
@@ -737,11 +740,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
  */
 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
-                          int write, int force, struct page **pages,
+                          unsigned int gup_flags, struct page **pages,
                           int *locked)
 {
-       return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-                                      pages, NULL, locked, true, FOLL_TOUCH);
+       return __get_user_pages_locked(tsk, mm, start, nr_pages,
+                                      pages, NULL, locked, true,
+                                      gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
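From here on the patch is the API cleanup: every exported GUP entry point
drops the write/force int pair in favour of an explicit gup_flags argument.
For a caller the migration looks like this (illustrative in-kernel fragment;
tsk, mm, start, nr, pages and locked assumed):

    /* old: get_user_pages_locked(tsk, mm, start, nr, 1, 0,
     *                            pages, &locked);          */
    long got = get_user_pages_locked(tsk, mm, start, nr,
                                     FOLL_WRITE, pages, &locked);

    /* a forced write is now an explicit, greppable flag: */
    got = get_user_pages_locked(tsk, mm, start, nr,
                                FOLL_WRITE | FOLL_FORCE, pages, &locked);
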
@@ -757,14 +761,14 @@ EXPORT_SYMBOL(get_user_pages_locked);
  */
 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                                               unsigned long start, unsigned long nr_pages,
-                                              int write, int force, struct page **pages,
-                                              unsigned int gup_flags)
+                                              struct page **pages, unsigned int gup_flags)
 {
        long ret;
        int locked = 1;
+
        down_read(&mm->mmap_sem);
-       ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-                                     pages, NULL, &locked, false, gup_flags);
+       ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
+                                     &locked, false, gup_flags);
        if (locked)
                up_read(&mm->mmap_sem);
        return ret;
@@ -790,10 +794,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
  */
 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                             unsigned long start, unsigned long nr_pages,
-                            int write, int force, struct page **pages)
+                            struct page **pages, unsigned int gup_flags)
 {
-       return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
-                                        force, pages, FOLL_TOUCH);
+       return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+                                        pages, gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
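
Note that pages and gup_flags also trade places relative to the old
write/force pair, so a stale caller still passing (write, force, pages)
hands an int where a struct page ** is expected and fails to compile
cleanly, rather than silently feeding an int into the flags. Illustrative
call with the new signature:

    long got = get_user_pages_unlocked(current, current->mm, start, nr,
                                       pages, FOLL_WRITE);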
 
@@ -853,11 +857,13 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
  */
 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, unsigned long nr_pages, int write,
-               int force, struct page **pages, struct vm_area_struct **vmas)
+               unsigned long start, unsigned long nr_pages,
+               unsigned int gup_flags, struct page **pages,
+               struct vm_area_struct **vmas)
 {
-       return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-                                      pages, vmas, NULL, false, FOLL_TOUCH);
+       return __get_user_pages_locked(tsk, mm, start, nr_pages,
+                                      pages, vmas, NULL, false,
+                                      gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -935,8 +941,6 @@ int __mm_populate(unsigned long start, unsigned long len, int ignore_errors)
        int locked = 0;
        long ret = 0;
 
-       VM_BUG_ON(start & ~PAGE_MASK);
-       VM_BUG_ON(len != PAGE_ALIGN(len));
        end = start + len;
 
        for (nstart = start; nstart < end; nstart = nend) {
@@ -1408,7 +1412,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                pages += nr;
 
                ret = get_user_pages_unlocked(current, mm, start,
-                                             nr_pages - nr, write, 0, pages);
+                                             nr_pages - nr, pages,
+                                             write ? FOLL_WRITE : 0);
 
                /* Have to be a bit careful with return values */
                if (nr > 0) {