diff --git a/mm/gup.c b/mm/gup.c
index 018144c..2cd3b31 100644
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -368,6 +368,9 @@ static int check_vma_flags(struct vm_area_struct *vma, unsigned long gup_flags)
        if (vm_flags & (VM_IO | VM_PFNMAP))
                return -EFAULT;
 
+       if (gup_flags & FOLL_ANON && !vma_is_anonymous(vma))
+               return -EFAULT;
+
        if (gup_flags & FOLL_WRITE) {
                if (!(vm_flags & VM_WRITE)) {
                        if (!(gup_flags & FOLL_FORCE))
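
The hunk above adds a FOLL_ANON check to check_vma_flags(): a caller that sets FOLL_ANON in gup_flags now gets -EFAULT for any VMA that is not anonymous, so file-backed and device mappings cannot be pinned through such a call. A minimal, hypothetical caller using the gup_flags signature introduced further down in this diff (the names tsk, mm and addr are illustrative, not part of this commit) might look like:

	struct page *page;
	long ret;

	/* Pin one writable anonymous page at addr; a non-anonymous VMA fails with -EFAULT. */
	ret = get_user_pages(tsk, mm, addr, 1, FOLL_WRITE | FOLL_ANON, &page, NULL);
	if (ret == 1)
		put_page(page);	/* drop the reference taken via FOLL_GET */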
@@ -627,7 +630,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
                                                struct mm_struct *mm,
                                                unsigned long start,
                                                unsigned long nr_pages,
-                                               int write, int force,
                                                struct page **pages,
                                                struct vm_area_struct **vmas,
                                                int *locked, bool notify_drop,
@@ -645,10 +647,6 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
 
        if (pages)
                flags |= FOLL_GET;
-       if (write)
-               flags |= FOLL_WRITE;
-       if (force)
-               flags |= FOLL_FORCE;
 
        pages_done = 0;
        lock_dropped = false;
@@ -742,11 +740,12 @@ static __always_inline long __get_user_pages_locked(struct task_struct *tsk,
  */
 long get_user_pages_locked(struct task_struct *tsk, struct mm_struct *mm,
                           unsigned long start, unsigned long nr_pages,
-                          int write, int force, struct page **pages,
+                          unsigned int gup_flags, struct page **pages,
                           int *locked)
 {
-       return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-                                      pages, NULL, locked, true, FOLL_TOUCH);
+       return __get_user_pages_locked(tsk, mm, start, nr_pages,
+                                      pages, NULL, locked, true,
+                                      gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_locked);
 
@@ -762,14 +761,14 @@ EXPORT_SYMBOL(get_user_pages_locked);
  */
 __always_inline long __get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                                               unsigned long start, unsigned long nr_pages,
-                                              int write, int force, struct page **pages,
-                                              unsigned int gup_flags)
+                                              struct page **pages, unsigned int gup_flags)
 {
        long ret;
        int locked = 1;
+
        down_read(&mm->mmap_sem);
-       ret = __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-                                     pages, NULL, &locked, false, gup_flags);
+       ret = __get_user_pages_locked(tsk, mm, start, nr_pages, pages, NULL,
+                                     &locked, false, gup_flags);
        if (locked)
                up_read(&mm->mmap_sem);
        return ret;
@@ -795,10 +794,10 @@ EXPORT_SYMBOL(__get_user_pages_unlocked);
  */
 long get_user_pages_unlocked(struct task_struct *tsk, struct mm_struct *mm,
                             unsigned long start, unsigned long nr_pages,
-                            int write, int force, struct page **pages)
+                            struct page **pages, unsigned int gup_flags)
 {
-       return __get_user_pages_unlocked(tsk, mm, start, nr_pages, write,
-                                        force, pages, FOLL_TOUCH);
+       return __get_user_pages_unlocked(tsk, mm, start, nr_pages,
+                                        pages, gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages_unlocked);
 
@@ -858,11 +857,13 @@ EXPORT_SYMBOL(get_user_pages_unlocked);
  * FAULT_FLAG_ALLOW_RETRY to handle_mm_fault.
  */
 long get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
-               unsigned long start, unsigned long nr_pages, int write,
-               int force, struct page **pages, struct vm_area_struct **vmas)
+               unsigned long start, unsigned long nr_pages,
+               unsigned int gup_flags, struct page **pages,
+               struct vm_area_struct **vmas)
 {
-       return __get_user_pages_locked(tsk, mm, start, nr_pages, write, force,
-                                      pages, vmas, NULL, false, FOLL_TOUCH);
+       return __get_user_pages_locked(tsk, mm, start, nr_pages,
+                                      pages, vmas, NULL, false,
+                                      gup_flags | FOLL_TOUCH);
 }
 EXPORT_SYMBOL(get_user_pages);
 
@@ -1411,7 +1412,8 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write,
                pages += nr;
 
                ret = get_user_pages_unlocked(current, mm, start,
-                                             nr_pages - nr, write, 0, pages);
+                                             nr_pages - nr, pages,
+                                             write ? FOLL_WRITE : 0);
 
                /* Have to be a bit careful with return values */
                if (nr > 0) {
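
Taken together, the later hunks drop the "int write, int force" pair from get_user_pages(), get_user_pages_locked(), get_user_pages_unlocked() and __get_user_pages_unlocked() in favour of a single "unsigned int gup_flags"; the translation that __get_user_pages_locked() used to do internally (write -> FOLL_WRITE, force -> FOLL_FORCE) becomes the caller's responsibility, as the get_user_pages_fast() hunk shows with "write ? FOLL_WRITE : 0". A hedged sketch of how a typical caller would be converted (illustrative only, not taken from this diff):

	/* Before this change: write = 1, force = 0 */
	ret = get_user_pages(tsk, mm, start, nr_pages, 1, 0, pages, vmas);

	/* After this change: the same request expressed through gup_flags */
	ret = get_user_pages(tsk, mm, start, nr_pages, FOLL_WRITE, pages, vmas);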