OSDN Git Service

mm: Convert [un]account_slab_page() to struct slab
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date: Mon, 4 Oct 2021 13:45:52 +0000 (14:45 +0100)
Committer: Vlastimil Babka <vbabka@suse.cz>
Date: Thu, 6 Jan 2022 11:25:40 +0000 (12:25 +0100)
Convert the parameter of these functions to struct slab instead of
struct page and drop _page from the names. For now their callers just
convert page to slab.

[ vbabka@suse.cz: replace existing functions instead of calling them ]

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Reviewed-by: Roman Gushchin <guro@fb.com>
mm/slab.c
mm/slab.h
mm/slub.c

index 381875e..7f14780 100644 (file)
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -1380,7 +1380,7 @@ static struct page *kmem_getpages(struct kmem_cache *cachep, gfp_t flags,
                return NULL;
        }
 
-       account_slab_page(page, cachep->gfporder, cachep, flags);
+       account_slab(page_slab(page), cachep->gfporder, cachep, flags);
        __SetPageSlab(page);
        /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
        if (sk_memalloc_socks() && page_is_pfmemalloc(page))
@@ -1405,7 +1405,7 @@ static void kmem_freepages(struct kmem_cache *cachep, struct page *page)
 
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += 1 << order;
-       unaccount_slab_page(page, order, cachep);
+       unaccount_slab(page_slab(page), order, cachep);
        __free_pages(page, order);
 }
 
index 0e67a8c..dd3f72f 100644 (file)
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -583,24 +583,23 @@ static inline struct kmem_cache *virt_to_cache(const void *obj)
        return page->slab_cache;
 }
 
-static __always_inline void account_slab_page(struct page *page, int order,
-                                             struct kmem_cache *s,
-                                             gfp_t gfp)
+static __always_inline void account_slab(struct slab *slab, int order,
+                                        struct kmem_cache *s, gfp_t gfp)
 {
        if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
-               memcg_alloc_page_obj_cgroups(page, s, gfp, true);
+               memcg_alloc_page_obj_cgroups(slab_page(slab), s, gfp, true);
 
-       mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+       mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                            PAGE_SIZE << order);
 }
 
-static __always_inline void unaccount_slab_page(struct page *page, int order,
-                                               struct kmem_cache *s)
+static __always_inline void unaccount_slab(struct slab *slab, int order,
+                                          struct kmem_cache *s)
 {
        if (memcg_kmem_enabled())
-               memcg_free_page_obj_cgroups(page);
+               memcg_free_page_obj_cgroups(slab_page(slab));
 
-       mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
+       mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
                            -(PAGE_SIZE << order));
 }
 
index a211d96..c94fb4b 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1943,7 +1943,7 @@ static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 
        page->objects = oo_objects(oo);
 
-       account_slab_page(page, oo_order(oo), s, flags);
+       account_slab(page_slab(page), oo_order(oo), s, flags);
 
        page->slab_cache = s;
        __SetPageSlab(page);
@@ -2014,7 +2014,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
        page->slab_cache = NULL;
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
-       unaccount_slab_page(page, order, s);
+       unaccount_slab(page_slab(page), order, s);
        __free_pages(page, order);
 }