afs: Make afs_readpages() fetch data in bulk
author    David Howells <dhowells@redhat.com>  Thu, 5 Jan 2017 10:38:35 +0000 (10:38 +0000)
committer David Howells <dhowells@redhat.com>  Fri, 6 Jan 2017 16:54:41 +0000 (16:54 +0000)

Make afs_readpages() use afs_vnode_fetch_data()'s new ability to take a
list of pages and do a bulk fetch.

Signed-off-by: David Howells <dhowells@redhat.com>
fs/afs/file.c
fs/afs/volume.c
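
For orientation before the diffs: the bulk-fetch interface the description refers to is carried by struct afs_read (defined in fs/afs/internal.h, which this patch does not touch). The sketch below shows the request roughly as the new afs_readpages_one() uses it; only the fields the hunks actually touch are certain, and the types and field order here are assumptions.

	/* Sketch only: struct afs_read as assumed from the fields this patch
	 * touches (pos, len, usage, index, nr_pages, page_done, pages[]);
	 * the real definition lives in fs/afs/internal.h.
	 */
	struct afs_read {
		loff_t		pos;		/* byte offset of pages[0] in the file */
		loff_t		len;		/* total number of bytes to fetch */
		atomic_t	usage;		/* refcount dropped by afs_put_read() */
		unsigned int	index;		/* page currently being filled */
		unsigned int	nr_pages;	/* pages attached to the request */
		void (*page_done)(struct afs_call *, struct afs_read *);
		struct page	*pages[];	/* flexible array of target pages */
	};

afs_readpages_one() below allocates one such request per contiguous run of pages, fills pages[], and hands it to afs_vnode_fetch_data(); the page_done hook then publishes each page as the reply streams in.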

diff --git a/fs/afs/file.c b/fs/afs/file.c
index 6c262ce..82897a7 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -16,6 +16,7 @@
 #include <linux/pagemap.h>
 #include <linux/writeback.h>
 #include <linux/gfp.h>
+#include <linux/task_io_accounting_ops.h>
 #include "internal.h"
 
 static int afs_readpage(struct file *file, struct page *page);
@@ -262,6 +263,129 @@ static int afs_readpage(struct file *file, struct page *page)
 }
 
 /*
+ * Make pages available as they're filled.
+ */
+static void afs_readpages_page_done(struct afs_call *call, struct afs_read *req)
+{
+       struct afs_vnode *vnode = call->reply;
+       struct page *page = req->pages[req->index];
+
+       req->pages[req->index] = NULL;
+       SetPageUptodate(page);
+
+       /* send the page to the cache */
+#ifdef CONFIG_AFS_FSCACHE
+       if (PageFsCache(page) &&
+           fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
+               fscache_uncache_page(vnode->cache, page);
+               BUG_ON(PageFsCache(page));
+       }
+#endif
+       unlock_page(page);
+       put_page(page);
+}
+
+/*
+ * Read a contiguous set of pages.
+ */
+static int afs_readpages_one(struct file *file, struct address_space *mapping,
+                            struct list_head *pages)
+{
+       struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+       struct afs_read *req;
+       struct list_head *p;
+       struct page *first, *page;
+       struct key *key = file->private_data;
+       pgoff_t index;
+       int ret, n, i;
+
+       /* Count the number of contiguous pages at the front of the list.  Note
+        * that the list goes prev-wards rather than next-wards.
+        */
+       first = list_entry(pages->prev, struct page, lru);
+       index = first->index + 1;
+       n = 1;
+       for (p = first->lru.prev; p != pages; p = p->prev) {
+               page = list_entry(p, struct page, lru);
+               if (page->index != index)
+                       break;
+               index++;
+               n++;
+       }
+
+       req = kzalloc(sizeof(struct afs_read) + sizeof(struct page *) * n,
+                     GFP_NOFS);
+       if (!req)
+               return -ENOMEM;
+
+       atomic_set(&req->usage, 1);
+       req->page_done = afs_readpages_page_done;
+       req->pos = first->index;
+       req->pos <<= PAGE_SHIFT;
+
+       /* Transfer the pages to the request.  We add them in until one fails
+        * to add to the LRU and then we stop (as that'll make a hole in the
+        * contiguous run).
+        *
+        * Note that it's possible for the file size to change whilst we're
+        * doing this, but we rely on the server returning less than we asked
+        * for if the file shrank.  We also rely on this to deal with a partial
+        * page at the end of the file.
+        */
+       do {
+               page = list_entry(pages->prev, struct page, lru);
+               list_del(&page->lru);
+               index = page->index;
+               if (add_to_page_cache_lru(page, mapping, index,
+                                         readahead_gfp_mask(mapping))) {
+#ifdef CONFIG_AFS_FSCACHE
+                       fscache_uncache_page(vnode->cache, page);
+#endif
+                       put_page(page);
+                       break;
+               }
+
+               req->pages[req->nr_pages++] = page;
+               req->len += PAGE_SIZE;
+       } while (req->nr_pages < n);
+
+       if (req->nr_pages == 0) {
+               kfree(req);
+               return 0;
+       }
+
+       ret = afs_vnode_fetch_data(vnode, key, req);
+       if (ret < 0)
+               goto error;
+
+       task_io_account_read(PAGE_SIZE * req->nr_pages);
+       afs_put_read(req);
+       return 0;
+
+error:
+       if (ret == -ENOENT) {
+               _debug("got NOENT from server"
+                      " - marking file deleted and stale");
+               set_bit(AFS_VNODE_DELETED, &vnode->flags);
+               ret = -ESTALE;
+       }
+
+       for (i = 0; i < req->nr_pages; i++) {
+               page = req->pages[i];
+               if (page) {
+#ifdef CONFIG_AFS_FSCACHE
+                       fscache_uncache_page(vnode->cache, page);
+#endif
+                       SetPageError(page);
+                       unlock_page(page);
+               }
+       }
+
+       afs_put_read(req);
+       return ret;
+}
+
+/*
  * read a set of pages
  */
 static int afs_readpages(struct file *file, struct address_space *mapping,
@@ -314,8 +438,11 @@ static int afs_readpages(struct file *file, struct address_space *mapping,
                return ret;
        }
 
-       /* load the missing pages from the network */
-       ret = read_cache_pages(mapping, pages, afs_page_filler, key);
+       while (!list_empty(pages)) {
+               ret = afs_readpages_one(file, mapping, pages);
+               if (ret < 0)
+                       break;
+       }
 
        _leave(" = %d [netting]", ret);
        return ret;
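
The page_done hook added above is driven from the FetchData reply delivery code in fs/afs/fsclient.c, which this patch does not show. Purely as a hedged illustration of the assumed contract: the delivery side fills req->pages[req->index], fires the hook, and then advances to the next page, along these lines.

	/* Illustrative sketch, not part of this patch: assumed shape of the
	 * reply delivery loop that drives the per-page completion hook.
	 */
	while (req->index < req->nr_pages) {
		/* ... copy the next page's worth of FetchData reply into
		 * req->pages[req->index], zero-filling any shortfall ... */
		if (req->page_done)
			req->page_done(call, req);	/* marks uptodate, unlocks, puts */
		req->index++;
	}
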
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index d142a24..546f9d0 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -106,6 +106,7 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
        volume->cell            = params->cell;
        volume->vid             = vlocation->vldb.vid[params->type];
 
+       volume->bdi.ra_pages    = VM_MAX_READAHEAD*1024/PAGE_SIZE; 
        ret = bdi_setup_and_register(&volume->bdi, "afs");
        if (ret)
                goto error_bdi;
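
The fs/afs/volume.c hunk gives the per-volume backing device an explicit readahead window so that ->readpages() is handed multi-page batches worth bulk-fetching rather than single pages. Assuming the conventional VM_MAX_READAHEAD of 128 (KiB) and a 4 KiB PAGE_SIZE - both values are assumptions about the build, not shown in this diff - the window works out as follows.

	/* Assumed values: VM_MAX_READAHEAD = 128 (KiB), PAGE_SIZE = 4096 */
	ra_pages = 128 * 1024 / 4096;	/* = 32 pages, i.e. 128 KiB of readahead */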