
mm/slub.c: replace kmem_cache->cpu_partial with wrapped APIs
author    chenqiwu <chenqiwu@xiaomi.com>
          Thu, 2 Apr 2020 04:04:19 +0000 (21:04 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Thu, 2 Apr 2020 16:35:26 +0000 (09:35 -0700)
There are slub_cpu_partial() and slub_set_cpu_partial() APIs that wrap
accesses to kmem_cache->cpu_partial.  This patch uses those two APIs to
replace the remaining open-coded uses of kmem_cache->cpu_partial in the
slub code.

Signed-off-by: chenqiwu <chenqiwu@xiaomi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Reviewed-by: Andrew Morton <akpm@linux-foundation.org>
Cc: Christoph Lameter <cl@linux.com>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: David Rientjes <rientjes@google.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Link: http://lkml.kernel.org/r/1582079562-17980-1-git-send-email-qiwuchen55@gmail.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
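For context, the wrapper APIs this patch switches to live in
include/linux/slub_def.h. Their definitions at the time of this commit
are roughly the following (a sketch; exact whitespace and comments may
differ between kernel versions):

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)	\
({					\
	slub_cpu_partial(s) = (n);	\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif /* CONFIG_SLUB_CPU_PARTIAL */
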
mm/slub.c

index db0f657..fc911c2 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2282,7 +2282,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
                if (oldpage) {
                        pobjects = oldpage->pobjects;
                        pages = oldpage->pages;
-                       if (drain && pobjects > s->cpu_partial) {
+                       if (drain && pobjects > slub_cpu_partial(s)) {
                                unsigned long flags;
                                /*
                                 * partial array is full. Move the existing
@@ -2307,7 +2307,7 @@ static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
 
        } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
                                                                != oldpage);
-       if (unlikely(!s->cpu_partial)) {
+       if (unlikely(!slub_cpu_partial(s))) {
                unsigned long flags;
 
                local_irq_save(flags);
@@ -3512,15 +3512,15 @@ static void set_cpu_partial(struct kmem_cache *s)
         *    50% to keep some capacity around for frees.
         */
        if (!kmem_cache_has_cpu_partial(s))
-               s->cpu_partial = 0;
+               slub_set_cpu_partial(s, 0);
        else if (s->size >= PAGE_SIZE)
-               s->cpu_partial = 2;
+               slub_set_cpu_partial(s, 2);
        else if (s->size >= 1024)
-               s->cpu_partial = 6;
+               slub_set_cpu_partial(s, 6);
        else if (s->size >= 256)
-               s->cpu_partial = 13;
+               slub_set_cpu_partial(s, 13);
        else
-               s->cpu_partial = 30;
+               slub_set_cpu_partial(s, 30);
 #endif
 }
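
The point of the wrappers is that struct kmem_cache only carries a
cpu_partial field when CONFIG_SLUB_CPU_PARTIAL is enabled:
slub_cpu_partial() then reads the field (otherwise it evaluates to the
constant 0), and slub_set_cpu_partial() stores the value (otherwise it
expands to nothing). A hypothetical helper (illustration only, not part
of this patch) shows how shared code can compile either way without
#ifdef guards:

/*
 * Illustration only: clamp the per-cpu partial target. With
 * CONFIG_SLUB_CPU_PARTIAL=n, slub_cpu_partial(s) reads as 0 and
 * slub_set_cpu_partial(s, max) is a no-op, so this compiles (and
 * folds away) even though s has no ->cpu_partial field.
 */
static inline void clamp_cpu_partial(struct kmem_cache *s, unsigned int max)
{
	if (slub_cpu_partial(s) > max)
		slub_set_cpu_partial(s, max);
}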