Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/viro/vfs
diff --git a/mm/filemap.c b/mm/filemap.c
index f851e36..1283fc8 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
  *    ->tree_lock              (page_remove_rmap->set_page_dirty)
  *    bdi.wb->list_lock                (page_remove_rmap->set_page_dirty)
  *    ->inode->i_lock          (page_remove_rmap->set_page_dirty)
+ *    ->memcg->move_lock       (page_remove_rmap->mem_cgroup_begin_page_stat)
  *    bdi.wb->list_lock                (zap_pte_range->set_page_dirty)
  *    ->inode->i_lock          (zap_pte_range->set_page_dirty)
  *    ->private_lock           (zap_pte_range->__set_page_dirty_buffers)
@@ -174,9 +175,11 @@ static void page_cache_tree_delete(struct address_space *mapping,
 /*
  * Delete a page from the page cache and free it. Caller has to make
  * sure the page is locked and that nobody else uses it - or that usage
- * is safe.  The caller must hold the mapping's tree_lock.
+ * is safe.  The caller must hold the mapping's tree_lock and
+ * mem_cgroup_begin_page_stat().
  */
-void __delete_from_page_cache(struct page *page, void *shadow)
+void __delete_from_page_cache(struct page *page, void *shadow,
+                             struct mem_cgroup *memcg)
 {
        struct address_space *mapping = page->mapping;
 
@@ -196,7 +199,9 @@ void __delete_from_page_cache(struct page *page, void *shadow)
        page->mapping = NULL;
        /* Leave page->index set: truncation lookup relies upon it */
 
-       __dec_zone_page_state(page, NR_FILE_PAGES);
+       /* hugetlb pages do not participate in page cache accounting. */
+       if (!PageHuge(page))
+               __dec_zone_page_state(page, NR_FILE_PAGES);
        if (PageSwapBacked(page))
                __dec_zone_page_state(page, NR_SHMEM);
        BUG_ON(page_mapped(page));
@@ -210,7 +215,8 @@ void __delete_from_page_cache(struct page *page, void *shadow)
         * anyway will be cleared before returning page into buddy allocator.
         */
        if (WARN_ON_ONCE(PageDirty(page)))
-               account_page_cleaned(page, mapping);
+               account_page_cleaned(page, mapping, memcg,
+                                    inode_to_wb(mapping->host));
 }
 
 /**
@@ -224,14 +230,20 @@ void __delete_from_page_cache(struct page *page, void *shadow)
 void delete_from_page_cache(struct page *page)
 {
        struct address_space *mapping = page->mapping;
+       struct mem_cgroup *memcg;
+       unsigned long flags;
+
        void (*freepage)(struct page *);
 
        BUG_ON(!PageLocked(page));
 
        freepage = mapping->a_ops->freepage;
-       spin_lock_irq(&mapping->tree_lock);
-       __delete_from_page_cache(page, NULL);
-       spin_unlock_irq(&mapping->tree_lock);
+
+       memcg = mem_cgroup_begin_page_stat(page);
+       spin_lock_irqsave(&mapping->tree_lock, flags);
+       __delete_from_page_cache(page, NULL, memcg);
+       spin_unlock_irqrestore(&mapping->tree_lock, flags);
+       mem_cgroup_end_page_stat(memcg);
 
        if (freepage)
                freepage(page);
@@ -281,7 +293,9 @@ int __filemap_fdatawrite_range(struct address_space *mapping, loff_t start,
        if (!mapping_cap_writeback_dirty(mapping))
                return 0;
 
+       wbc_attach_fdatawrite_inode(&wbc, mapping->host);
        ret = do_writepages(mapping, &wbc);
+       wbc_detach_inode(&wbc);
        return ret;
 }
 
@@ -470,6 +484,8 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
        if (!error) {
                struct address_space *mapping = old->mapping;
                void (*freepage)(struct page *);
+               struct mem_cgroup *memcg;
+               unsigned long flags;
 
                pgoff_t offset = old->index;
                freepage = mapping->a_ops->freepage;
@@ -478,15 +494,22 @@ int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask)
                new->mapping = mapping;
                new->index = offset;
 
-               spin_lock_irq(&mapping->tree_lock);
-               __delete_from_page_cache(old, NULL);
+               memcg = mem_cgroup_begin_page_stat(old);
+               spin_lock_irqsave(&mapping->tree_lock, flags);
+               __delete_from_page_cache(old, NULL, memcg);
                error = radix_tree_insert(&mapping->page_tree, offset, new);
                BUG_ON(error);
                mapping->nrpages++;
-               __inc_zone_page_state(new, NR_FILE_PAGES);
+
+               /*
+                * hugetlb pages do not participate in page cache accounting.
+                */
+               if (!PageHuge(new))
+                       __inc_zone_page_state(new, NR_FILE_PAGES);
                if (PageSwapBacked(new))
                        __inc_zone_page_state(new, NR_SHMEM);
-               spin_unlock_irq(&mapping->tree_lock);
+               spin_unlock_irqrestore(&mapping->tree_lock, flags);
+               mem_cgroup_end_page_stat(memcg);
                mem_cgroup_migrate(old, new, true);
                radix_tree_preload_end();
                if (freepage)
@@ -575,7 +598,10 @@ static int __add_to_page_cache_locked(struct page *page,
        radix_tree_preload_end();
        if (unlikely(error))
                goto err_insert;
-       __inc_zone_page_state(page, NR_FILE_PAGES);
+
+       /* hugetlb pages do not participate in page cache accounting. */
+       if (!huge)
+               __inc_zone_page_state(page, NR_FILE_PAGES);
        spin_unlock_irq(&mapping->tree_lock);
        if (!huge)
                mem_cgroup_commit_charge(page, memcg, false);
@@ -1654,8 +1680,8 @@ no_cached_page:
                        error = -ENOMEM;
                        goto out;
                }
-               error = add_to_page_cache_lru(page, mapping,
-                                               index, GFP_KERNEL);
+               error = add_to_page_cache_lru(page, mapping, index,
+                                       GFP_KERNEL & mapping_gfp_mask(mapping));
                if (error) {
                        page_cache_release(page);
                        if (error == -EEXIST) {
@@ -1756,7 +1782,8 @@ static int page_cache_read(struct file *file, pgoff_t offset)
                if (!page)
                        return -ENOMEM;
 
-               ret = add_to_page_cache_lru(page, mapping, offset, GFP_KERNEL);
+               ret = add_to_page_cache_lru(page, mapping, offset,
+                               GFP_KERNEL & mapping_gfp_mask(mapping));
                if (ret == 0)
                        ret = mapping->a_ops->readpage(file, page);
                else if (ret == -EEXIST)
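
Taken together, the delete_from_page_cache() and replace_page_cache_page() hunks above establish a single caller-side ordering around __delete_from_page_cache(): the memcg page-state lock is acquired before, and released after, mapping->tree_lock, with both held across the deletion. The following is a condensed, non-authoritative sketch of that pattern only; example_remove_page() is a hypothetical helper name, while the kernel-internal calls are exactly those visible in the diff.

/*
 * Condensed sketch of the deletion locking pattern introduced above;
 * not a drop-in kernel function. example_remove_page() is hypothetical.
 */
#include <linux/pagemap.h>
#include <linux/memcontrol.h>
#include <linux/spinlock.h>

static void example_remove_page(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct mem_cgroup *memcg;
	unsigned long flags;

	/* memcg page-state lock is taken outside mapping->tree_lock ... */
	memcg = mem_cgroup_begin_page_stat(page);
	spin_lock_irqsave(&mapping->tree_lock, flags);

	/* ... so __delete_from_page_cache() can update memcg dirty state */
	__delete_from_page_cache(page, NULL, memcg);

	spin_unlock_irqrestore(&mapping->tree_lock, flags);
	mem_cgroup_end_page_stat(memcg);
}

This mirrors what delete_from_page_cache() does in the hunk above; replace_page_cache_page() follows the same order around its radix tree update.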