
mm/swap: implement workingset detection for anonymous LRU
author Joonsoo Kim <iamjoonsoo.kim@lge.com>
Wed, 12 Aug 2020 01:30:50 +0000 (18:30 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Wed, 12 Aug 2020 17:57:56 +0000 (10:57 -0700)
This patch implements workingset detection for the anonymous LRU.  All of
the infrastructure was implemented by the previous patches, so this patch
just activates workingset detection by installing/retrieving the shadow
entry and adding the refault calculation.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hugh Dickins <hughd@google.com>
Cc: Matthew Wilcox <willy@infradead.org>
Cc: Mel Gorman <mgorman@techsingularity.net>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Minchan Kim <minchan@kernel.org>
Link: http://lkml.kernel.org/r/1595490560-15117-6-git-send-email-iamjoonsoo.kim@lge.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
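In outline, the round trip that the diff below activates is: on eviction, pack
the page's eviction information into a shadow entry and leave it in the swap
cache slot; on refault, pull the shadow back out and hand it to
workingset_refault() to compute the refault distance.  A minimal userspace
sketch of that round trip follows; evict_page() and refault_page() are
hypothetical stand-ins for the real kernel paths (__remove_mapping() and
do_swap_page()), and the clock is simplified to a bare eviction counter.

#include <stdio.h>

#define SLOTS 16

static unsigned long eviction_clock;	/* advances on every eviction */
static unsigned long cache[SLOTS];	/* 0 = empty, low bit set = shadow */

/* Eviction: replace the page with a shadow entry recording "when". */
static void evict_page(unsigned int slot)
{
	cache[slot] = (++eviction_clock << 1) | 1;
}

/* Refault: a surviving shadow yields the refault distance, i.e. how
 * many evictions happened since this page was pushed out. */
static void refault_page(unsigned int slot)
{
	if (cache[slot] & 1) {
		unsigned long evicted = cache[slot] >> 1;

		printf("slot %u: refault distance %lu\n",
		       slot, eviction_clock - evicted);
	} else {
		printf("slot %u: no shadow, cold fault\n", slot);
	}
	cache[slot] = 0;	/* the page is resident again */
}

int main(void)
{
	evict_page(3);
	evict_page(7);
	evict_page(9);
	refault_page(3);	/* two evictions later: distance 2 */
	refault_page(5);	/* never evicted: cold fault */
	return 0;
}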
include/linux/swap.h
mm/memory.c
mm/swap_state.c
mm/vmscan.c
mm/workingset.c

diff --git a/include/linux/swap.h b/include/linux/swap.h
index 8a4c592..6610469 100644
@@ -414,6 +414,7 @@ extern struct address_space *swapper_spaces[];
 extern unsigned long total_swapcache_pages(void);
 extern void show_swap_cache_info(void);
 extern int add_to_swap(struct page *page);
+extern void *get_shadow_from_swap_cache(swp_entry_t entry);
 extern int add_to_swap_cache(struct page *page, swp_entry_t entry,
                        gfp_t gfp, void **shadowp);
 extern void __delete_from_swap_cache(struct page *page,
@@ -573,6 +574,11 @@ static inline int add_to_swap(struct page *page)
        return 0;
 }
 
+static inline void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+       return NULL;
+}
+
 static inline int add_to_swap_cache(struct page *page, swp_entry_t entry,
                                        gfp_t gfp_mask, void **shadowp)
 {
diff --git a/mm/memory.c b/mm/memory.c
index 6fe8b5b..de311fc 100644
@@ -3098,6 +3098,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
        int locked;
        int exclusive = 0;
        vm_fault_t ret = 0;
+       void *shadow = NULL;
 
        if (!pte_unmap_same(vma->vm_mm, vmf->pmd, vmf->pte, vmf->orig_pte))
                goto out;
@@ -3149,13 +3150,9 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
                                        goto out_page;
                                }
 
-                               /*
-                                * XXX: Move to lru_cache_add() when it
-                                * supports new vs putback
-                                */
-                               spin_lock_irq(&page_pgdat(page)->lru_lock);
-                               lru_note_cost_page(page);
-                               spin_unlock_irq(&page_pgdat(page)->lru_lock);
+                               shadow = get_shadow_from_swap_cache(entry);
+                               if (shadow)
+                                       workingset_refault(page, shadow);
 
                                lru_cache_add(page);
                                swap_readpage(page, true);
diff --git a/mm/swap_state.c b/mm/swap_state.c
index a29b33c..b73aabd 100644
@@ -106,6 +106,20 @@ void show_swap_cache_info(void)
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
 }
 
+void *get_shadow_from_swap_cache(swp_entry_t entry)
+{
+       struct address_space *address_space = swap_address_space(entry);
+       pgoff_t idx = swp_offset(entry);
+       struct page *page;
+
+       page = find_get_entry(address_space, idx);
+       if (xa_is_value(page))
+               return page;
+       if (page)
+               put_page(page);
+       return NULL;
+}
+
 /*
  * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
  * but sets SwapCache flag and private instead of mapping and index.
@@ -406,6 +420,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 {
        struct swap_info_struct *si;
        struct page *page;
+       void *shadow = NULL;
 
        *new_page_allocated = false;
 
@@ -474,7 +489,7 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
        __SetPageSwapBacked(page);
 
        /* May fail (-ENOMEM) if XArray node allocation failed. */
-       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, NULL)) {
+       if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }
@@ -484,10 +499,8 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                goto fail_unlock;
        }
 
-       /* XXX: Move to lru_cache_add() when it supports new vs putback */
-       spin_lock_irq(&page_pgdat(page)->lru_lock);
-       lru_note_cost_page(page);
-       spin_unlock_irq(&page_pgdat(page)->lru_lock);
+       if (shadow)
+               workingset_refault(page, shadow);
 
        /* Caller will initiate read into locked page */
        SetPageWorkingset(page);
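The get_shadow_from_swap_cache() helper added above works because shadows are
stored as XArray value entries: small integers tagged in the low bit of the
slot, which real page pointers (being at least word-aligned) never have set.
The snippet below re-implements the relevant linux/xarray.h tagging helpers in
userspace purely for illustration; the int stand-in for struct page is an
assumption of the sketch.

#include <assert.h>
#include <stdio.h>

/* Userspace mirrors of the linux/xarray.h value-entry helpers. */
static void *xa_mk_value(unsigned long v)
{
	return (void *)((v << 1) | 1);
}

static int xa_is_value(const void *entry)
{
	return (unsigned long)entry & 1;
}

static unsigned long xa_to_value(const void *entry)
{
	return (unsigned long)entry >> 1;
}

int main(void)
{
	int page;			/* stand-in for a struct page */
	void *shadow = xa_mk_value(42);	/* packed eviction information */

	assert(xa_is_value(shadow));	/* a shadow: return it */
	assert(!xa_is_value(&page));	/* a real page: put_page() and bail */
	printf("shadow carries %lu\n", xa_to_value(shadow));
	return 0;
}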
diff --git a/mm/vmscan.c b/mm/vmscan.c
index e84c4dd..66d73fe 100644
@@ -854,6 +854,7 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
 {
        unsigned long flags;
        int refcount;
+       void *shadow = NULL;
 
        BUG_ON(!PageLocked(page));
        BUG_ON(mapping != page_mapping(page));
@@ -896,13 +897,13 @@ static int __remove_mapping(struct address_space *mapping, struct page *page,
        if (PageSwapCache(page)) {
                swp_entry_t swap = { .val = page_private(page) };
                mem_cgroup_swapout(page, swap);
-               __delete_from_swap_cache(page, swap, NULL);
+               if (reclaimed && !mapping_exiting(mapping))
+                       shadow = workingset_eviction(page, target_memcg);
+               __delete_from_swap_cache(page, swap, shadow);
                xa_unlock_irqrestore(&mapping->i_pages, flags);
                put_swap_page(page, swap);
-               workingset_eviction(page, target_memcg);
        } else {
                void (*freepage)(struct page *);
-               void *shadow = NULL;
 
                freepage = mapping->a_ops->freepage;
                /*
diff --git a/mm/workingset.c b/mm/workingset.c
index 941bbaa..8cbe4e3 100644
@@ -353,15 +353,22 @@ void workingset_refault(struct page *page, void *shadow)
        /*
         * Compare the distance to the existing workingset size. We
         * don't activate pages that couldn't stay resident even if
-        * all the memory was available to the page cache. Whether
-        * cache can compete with anon or not depends on having swap.
+        * all the memory was available to the workingset. Whether
+        * workingset competition needs to consider anon or not depends
+        * on having swap.
         */
        workingset_size = lruvec_page_state(eviction_lruvec, NR_ACTIVE_FILE);
-       if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
+       if (!file) {
                workingset_size += lruvec_page_state(eviction_lruvec,
-                                                    NR_INACTIVE_ANON);
+                                                    NR_INACTIVE_FILE);
+       }
+       if (mem_cgroup_get_nr_swap_pages(memcg) > 0) {
                workingset_size += lruvec_page_state(eviction_lruvec,
                                                     NR_ACTIVE_ANON);
+               if (file) {
+                       workingset_size += lruvec_page_state(eviction_lruvec,
+                                                    NR_INACTIVE_ANON);
+               }
        }
        if (refault_distance > workingset_size)
                goto out;
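The reshuffled workingset_size calculation above compares the refault distance
against every list the refaulting page would have to displace: always the
active file list, the other type's inactive list, and the anon lists only when
swap is available.  The inactive list of the page's own type is never counted,
since the refault distance is already measured against it.  A standalone
sketch, with the lruvec counters replaced by plain parameters:

#include <stdbool.h>
#include <stdio.h>

static unsigned long workingset_size(bool file, bool have_swap,
				     unsigned long active_file,
				     unsigned long inactive_file,
				     unsigned long active_anon,
				     unsigned long inactive_anon)
{
	unsigned long size = active_file;

	/* An anon refault also competes with the inactive file list. */
	if (!file)
		size += inactive_file;
	/* Anon pages only compete when they can be swapped out at all. */
	if (have_swap) {
		size += active_anon;
		if (file)
			size += inactive_anon;
	}
	return size;
}

int main(void)
{
	/* A file refault with swap on must beat active file + all anon. */
	printf("file refault, swap on:  %lu\n",
	       workingset_size(true, true, 100, 50, 80, 40));	/* 220 */
	/* An anon refault while swap is exhausted competes with the
	 * file lists only. */
	printf("anon refault, swap off: %lu\n",
	       workingset_size(false, false, 100, 50, 80, 40));	/* 150 */
	return 0;
}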