OSDN Git Service

mm: switch s_mem and slab_cache in struct page
author: Matthew Wilcox <mawilcox@microsoft.com>
Fri, 8 Jun 2018 00:08:26 +0000 (17:08 -0700)
committer: Linus Torvalds <torvalds@linux-foundation.org>
Fri, 8 Jun 2018 00:34:37 +0000 (17:34 -0700)
This will allow us to store slub's counters in the same bits as slab's
s_mem.  slub now needs to set page->mapping to NULL as it frees the page,
just like slab does.

Link: http://lkml.kernel.org/r/20180518194519.3820-5-willy@infradead.org
Signed-off-by: Matthew Wilcox <mawilcox@microsoft.com>
Acked-by: Christoph Lameter <cl@linux.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Jérôme Glisse <jglisse@redhat.com>
Cc: "Kirill A . Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Lai Jiangshan <jiangshanlai@gmail.com>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Andrey Ryabinin <aryabinin@virtuozzo.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
include/linux/mm_types.h
mm/slub.c

index 3a554fd..6f153f2 100644 (file)
@@ -83,7 +83,7 @@ struct page {
                /* See page-flags.h for the definition of PAGE_MAPPING_FLAGS */
                struct address_space *mapping;
 
-               void *s_mem;                    /* slab first object */
+               struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
                atomic_t compound_mapcount;     /* first tail page */
                /* page_deferred_list().next     -- second tail page */
        };
@@ -194,7 +194,7 @@ struct page {
                spinlock_t ptl;
 #endif
 #endif
-               struct kmem_cache *slab_cache;  /* SL[AU]B: Pointer to slab */
+               void *s_mem;                    /* slab first object */
        };
 
 #ifdef CONFIG_MEMCG
index 48f7587..0170ea8 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1695,6 +1695,7 @@ static void __free_slab(struct kmem_cache *s, struct page *page)
        __ClearPageSlab(page);
 
        page_mapcount_reset(page);
+       page->mapping = NULL;
        if (current->reclaim_state)
                current->reclaim_state->reclaimed_slab += pages;
        memcg_uncharge_slab(page, order, s);