filemap: pass vm_fault to the mmap ra helpers
author     Josef Bacik <josef@toxicpanda.com>
           Wed, 13 Mar 2019 18:44:18 +0000 (11:44 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Thu, 14 Mar 2019 21:36:20 +0000 (14:36 -0700)
All of the arguments to these functions come from the vmf.

Cut down on the number of arguments passed by simply passing in the vmf
to these two helpers.

Link: http://lkml.kernel.org/r/20181211173801.29535-3-josef@toxicpanda.com
Signed-off-by: Josef Bacik <josef@toxicpanda.com>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Jan Kara <jack@suse.cz>
Cc: Dave Chinner <david@fromorbit.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Rik van Riel <riel@redhat.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: "Kirill A. Shutemov" <kirill@shutemov.name>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
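
For orientation: every argument the old helpers took is derivable from the
single struct vm_fault pointer. A minimal sketch (illustrative only, not part
of the patch; the helper name derive_from_vmf is hypothetical) showing the
derivations, using just the fields visible in the diff below:

	/* Illustrative only: how each former parameter falls out of vmf. */
	static void derive_from_vmf(struct vm_fault *vmf)
	{
		struct vm_area_struct *vma = vmf->vma;	/* old 'vma' arg    */
		struct file *file = vma->vm_file;	/* old 'file' arg   */
		struct file_ra_state *ra = &file->f_ra;	/* old 'ra' arg     */
		pgoff_t offset = vmf->pgoff;		/* old 'offset' arg */

		(void)ra;				/* silence unused warnings */
		(void)offset;
	}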
mm/filemap.c

index a3b4021..ec6566f 100644
@@ -2420,20 +2420,20 @@ static int page_cache_read(struct file *file, pgoff_t offset, gfp_t gfp_mask)
  * Synchronous readahead happens when we don't even find
  * a page in the page cache at all.
  */
-static void do_sync_mmap_readahead(struct vm_area_struct *vma,
-                                  struct file_ra_state *ra,
-                                  struct file *file,
-                                  pgoff_t offset)
+static void do_sync_mmap_readahead(struct vm_fault *vmf)
 {
+       struct file *file = vmf->vma->vm_file;
+       struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
+       pgoff_t offset = vmf->pgoff;
 
        /* If we don't want any read-ahead, don't bother */
-       if (vma->vm_flags & VM_RAND_READ)
+       if (vmf->vma->vm_flags & VM_RAND_READ)
                return;
        if (!ra->ra_pages)
                return;
 
-       if (vma->vm_flags & VM_SEQ_READ) {
+       if (vmf->vma->vm_flags & VM_SEQ_READ) {
                page_cache_sync_readahead(mapping, ra, file, offset,
                                          ra->ra_pages);
                return;
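
For context, the VM_RAND_READ and VM_SEQ_READ flags tested above are normally
set from userspace with madvise(2). A small stand-alone usage sketch (the file
path is arbitrary):

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/mman.h>
	#include <sys/stat.h>
	#include <unistd.h>

	int main(void)
	{
		int fd = open("/etc/hostname", O_RDONLY);
		struct stat st;
		char *p;

		if (fd < 0 || fstat(fd, &st) < 0 || st.st_size == 0)
			return 1;

		p = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* MADV_SEQUENTIAL sets VM_SEQ_READ on the VMA, steering
		 * faults into the page_cache_sync_readahead() path above;
		 * MADV_RANDOM would set VM_RAND_READ and suppress it. */
		madvise(p, st.st_size, MADV_SEQUENTIAL);

		/* Touching the mapping triggers filemap_fault(). */
		printf("first byte: %c\n", p[0]);

		munmap(p, st.st_size);
		close(fd);
		return 0;
	}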
@@ -2463,16 +2463,16 @@ static void do_sync_mmap_readahead(struct vm_area_struct *vma,
  * Asynchronous readahead happens when we find the page and PG_readahead,
  * so we want to possibly extend the readahead further..
  */
-static void do_async_mmap_readahead(struct vm_area_struct *vma,
-                                   struct file_ra_state *ra,
-                                   struct file *file,
-                                   struct page *page,
-                                   pgoff_t offset)
+static void do_async_mmap_readahead(struct vm_fault *vmf,
+                                   struct page *page)
 {
+       struct file *file = vmf->vma->vm_file;
+       struct file_ra_state *ra = &file->f_ra;
        struct address_space *mapping = file->f_mapping;
+       pgoff_t offset = vmf->pgoff;
 
        /* If we don't want any read-ahead, don't bother */
-       if (vma->vm_flags & VM_RAND_READ)
+       if (vmf->vma->vm_flags & VM_RAND_READ)
                return;
        if (ra->mmap_miss > 0)
                ra->mmap_miss--;
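
The hunk elides the tail of this helper. In mainline kernels of this era it
ends by extending the window via page_cache_async_readahead() when the
faulted page carries PG_readahead; an approximate sketch of that elided tail:

	/* Approximate elided tail: if a previous readahead pass marked
	 * this page PG_readahead, kick off more asynchronous readahead. */
	if (PageReadahead(page))
		page_cache_async_readahead(mapping, ra, file,
					   page, offset, ra->ra_pages);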
@@ -2531,10 +2531,10 @@ vm_fault_t filemap_fault(struct vm_fault *vmf)
                 * We found the page, so try async readahead before
                 * waiting for the lock.
                 */
-               do_async_mmap_readahead(vmf->vma, ra, file, page, offset);
+               do_async_mmap_readahead(vmf, page);
        } else if (!page) {
                /* No page in the page cache at all */
-               do_sync_mmap_readahead(vmf->vma, ra, file, offset);
+               do_sync_mmap_readahead(vmf);
                count_vm_event(PGMAJFAULT);
                count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
                ret = VM_FAULT_MAJOR;
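
After the patch, the readahead dispatch in filemap_fault() reduces to the
shape below; an illustrative condensation of the hunk above (guard conditions
and surrounding logic elided, not verbatim kernel code):

	if (page) {
		/* Page already cached: try async readahead before
		 * waiting for the page lock. */
		do_async_mmap_readahead(vmf, page);
	} else {
		/* Cache miss: synchronous readahead, accounted as a
		 * major fault. */
		do_sync_mmap_readahead(vmf);
		count_vm_event(PGMAJFAULT);
		count_memcg_event_mm(vmf->vma->vm_mm, PGMAJFAULT);
		ret = VM_FAULT_MAJOR;
	}

With the parameter lists collapsed, both helpers derive file, ra and offset
the same way from the vmf, so call sites no longer need to unpack it.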