struct kmem_cache_node {
spinlock_t list_lock; /* Protect partial list and nr_partial */
unsigned long nr_partial;
- unsigned long min_partial;
struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
atomic_long_t nr_slabs;
void (*ctor)(void *);
int inuse; /* Offset to metadata */
int align; /* Alignment */
+ unsigned long min_partial;
const char *name; /* Name (only for display!) */
struct list_head list; /* List of slab caches */
#ifdef CONFIG_SLUB_DEBUG
#define KMALLOC_SHIFT_LOW ilog2(KMALLOC_MIN_SIZE)
/*
+ * Maximum kmalloc object size handled by SLUB. Larger object allocations
+ * are passed through to the page allocator. The page allocator "fastpath"
+ * is relatively slow, so this value needs to be high enough that
+ * performance-critical objects are still allocated through the SLUB fastpath.
+ *
+ * This should be dropped to PAGE_SIZE / 2 once the page allocator
+ * "fastpath" becomes competitive with the slab allocator fastpaths.
+ */
+#define SLUB_MAX_SIZE (2 * PAGE_SIZE)
+
+#define SLUB_PAGE_SHIFT (PAGE_SHIFT + 2)
+
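To make the new constants concrete, here is the arithmetic under the assumption of 4 KiB pages (PAGE_SHIFT == 12); other page sizes scale accordingly:

/* Illustration only, assuming PAGE_SHIFT == 12 (4 KiB pages):
 *
 *   SLUB_MAX_SIZE   = 2 * 4096 = 8192
 *   SLUB_PAGE_SHIFT = 12 + 2   = 14
 *
 * kmalloc_caches[] then has slots 0..13, the largest being the
 * 2^13 = 8192-byte cache, so only requests above 8 KiB fall
 * through to the page allocator.
 */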
+/*
* We keep the general caches in an array of slab caches that are used for
* 2^x bytes of allocations.
*/
-extern struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1];
+extern struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT];
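For a sense of the indexing (a sketch; the real mapping lives in kmalloc_index() in slub_def.h, which also carries 96- and 192-byte special cases in slots 1 and 2): slot i otherwise serves allocations of up to 2^i bytes.

/* Sketch of the size-to-slot mapping:
 *
 *   kmalloc(100, ...)  -> kmalloc_caches[7]   (128-byte cache)
 *   kmalloc(4096, ...) -> kmalloc_caches[12]  (4096-byte cache)
 *   kmalloc(8192, ...) -> kmalloc_caches[13]  (new top slot)
 *   kmalloc(8193, ...) -> kmalloc_large(), i.e. the page allocator
 */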
/*
* Sorry that the following has to be that ugly but some versions of GCC
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
if (__builtin_constant_p(size)) {
- if (size > PAGE_SIZE)
+ if (size > SLUB_MAX_SIZE)
return kmalloc_large(size, flags);
if (!(flags & SLUB_DMA)) {
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
if (__builtin_constant_p(size) &&
- size <= PAGE_SIZE && !(flags & SLUB_DMA)) {
+ size <= SLUB_MAX_SIZE && !(flags & SLUB_DMA)) {
struct kmem_cache *s = kmalloc_slab(size);
if (!s)
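The __builtin_constant_p() test is what makes the SLUB_MAX_SIZE comparison free in the common case: for a literal size the compiler folds the branch and cache lookup away. A hedged sketch of the two call shapes (compute_len() is a hypothetical stand-in for a runtime value):

	size_t len = compute_len();		/* hypothetical runtime value */

	void *a = kmalloc(64, GFP_KERNEL);	/* constant size: resolved at
						 * compile time straight to the
						 * 64-byte cache */
	void *b = kmalloc(len, GFP_KERNEL);	/* variable size: takes the
						 * out-of-line __kmalloc() path */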
static LIST_HEAD(free_slob_large);
/*
- * slob_page: True for all slob pages (false for bigblock pages)
+ * is_slob_page: True for all slob pages (false for bigblock pages)
*/
-static inline int slob_page(struct slob_page *sp)
+static inline int is_slob_page(struct slob_page *sp)
{
return PageSlobPage((struct page *)sp);
}
__ClearPageSlobPage((struct page *)sp);
}
+static inline struct slob_page *slob_page(const void *addr)
+{
+ return (struct slob_page *)virt_to_page(addr);
+}
+
/*
* slob_page_free: true for pages on free_slob_pages list.
*/
return !((unsigned long)slob_next(s) & ~PAGE_MASK);
}
-static void *slob_new_page(gfp_t gfp, int order, int node)
+static void *slob_new_pages(gfp_t gfp, int order, int node)
{
void *page;
return page_address(page);
}
+static void slob_free_pages(void *b, int order)
+{
+ free_pages((unsigned long)b, order);
+}
+
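The body trimmed by the hunk presumably carries over from the old slob_new_page() unchanged; a sketch of the allocation side under that assumption:

static void *slob_new_pages(gfp_t gfp, int order, int node)
{
	void *page;

#ifdef CONFIG_NUMA
	if (node != -1)
		page = alloc_pages_node(node, gfp, order);
	else
#endif
		page = alloc_pages(gfp, order);

	if (!page)
		return NULL;
	return page_address(page);
}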
/*
* Allocate a slob block within a given slob_page sp.
*/
static void *slob_page_alloc(struct slob_page *sp, size_t size, int align)
{
- slob_t *prev, *cur, *aligned = 0;
+ slob_t *prev, *cur, *aligned = NULL;
int delta = 0, units = SLOB_UNITS(size);
for (prev = NULL, cur = sp->free; ; prev = cur, cur = slob_next(cur)) {
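SLOB_UNITS() converts a byte count into slob bookkeeping units; assuming the usual definitions in slob.c (SLOB_UNIT == sizeof(slob_t), a 2-byte unit when PAGE_SIZE fits in an s16):

/* Sketch of the arithmetic, assuming SLOB_UNIT == 2:
 *
 *   SLOB_UNITS(size) == (size + SLOB_UNIT - 1) / SLOB_UNIT
 *   SLOB_UNITS(100)  == 101 / 2 == 50 units
 *   SLOB_UNITS(1)    ==   2 / 2 ==  1 unit
 */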
/* Not enough space: must allocate a new page */
if (!b) {
- b = slob_new_page(gfp & ~__GFP_ZERO, 0, node);
+ b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
if (!b)
- return 0;
- sp = (struct slob_page *)virt_to_page(b);
+ return NULL;
+ sp = slob_page(b);
set_slob_page(sp);
spin_lock_irqsave(&slob_lock, flags);
return;
BUG_ON(!size);
- sp = (struct slob_page *)virt_to_page(block);
+ sp = slob_page(block);
units = SLOB_UNITS(size);
spin_lock_irqsave(&slob_lock, flags);
/* Go directly to page allocator. Do not pass slob allocator */
if (slob_page_free(sp))
clear_slob_page_free(sp);
+ spin_unlock_irqrestore(&slob_lock, flags);
clear_slob_page(sp);
free_slob_page(sp);
free_page((unsigned long)b);
- goto out;
+ return;
}
if (!slob_page_free(sp)) {
} else {
void *ret;
- ret = slob_new_page(gfp | __GFP_COMP, get_order(size), node);
+ ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node);
if (ret) {
struct page *page;
page = virt_to_page(ret);
if (unlikely(ZERO_OR_NULL_PTR(block)))
return;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
slob_free(m, *m + align);
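The bare *m works because slob's kmalloc path plants the request size in a small header just below the pointer it hands out; paraphrasing the small-object case of __kmalloc_node() in slob.c, the allocation side is roughly:

/* Sketch of the matching allocation (small-object case):
 *
 *	m = slob_alloc(size + align, gfp, align, node);
 *	if (!m)
 *		return NULL;
 *	*m = size;			// header word read back as *m above
 *	return (void *)m + align;	// caller sees the aligned payload
 */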
if (unlikely(block == ZERO_SIZE_PTR))
return 0;
- sp = (struct slob_page *)virt_to_page(block);
- if (slob_page(sp)) {
+ sp = slob_page(block);
+ if (is_slob_page(sp)) {
int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
unsigned int *m = (unsigned int *)(block - align);
return SLOB_UNITS(*m) * SLOB_UNIT;
} else
return sp->page.private;
}
+EXPORT_SYMBOL(ksize);
struct kmem_cache {
unsigned int size, align;
if (c->size < PAGE_SIZE)
b = slob_alloc(c->size, flags, c->align, node);
else
- b = slob_new_page(flags, get_order(c->size), node);
+ b = slob_new_pages(flags, get_order(c->size), node);
if (c->ctor)
c->ctor(b);
if (size < PAGE_SIZE)
slob_free(b, size);
else
- free_pages((unsigned long)b, get_order(size));
+ slob_free_pages(b, get_order(size));
}
static void kmem_rcu_free(struct rcu_head *head)
static void set_track(struct kmem_cache *s, void *object,
enum track_item alloc, unsigned long addr)
{
- struct track *p;
-
- if (s->offset)
- p = object + s->offset + sizeof(void *);
- else
- p = object + s->inuse;
-
- p += alloc;
+ struct track *p = get_track(s, object, alloc);
if (addr) {
p->addr = addr;
p->cpu = smp_processor_id();
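get_track() already exists in slub.c and performs exactly the offset computation the deleted lines duplicated; its shape is essentially:

static struct track *get_track(struct kmem_cache *s, void *object,
			enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}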
n = get_node(s, zone_to_nid(zone));
if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
- n->nr_partial > n->min_partial) {
+ n->nr_partial > s->min_partial) {
page = get_partial_node(n);
if (page)
return page;
slab_unlock(page);
} else {
stat(c, DEACTIVATE_EMPTY);
- if (n->nr_partial < n->min_partial) {
+ if (n->nr_partial < s->min_partial) {
/*
* Adding an empty slab to the partial slabs in order
* to avoid page allocator overhead. This slab needs
c = get_cpu_slab(s, smp_processor_id());
debug_check_no_locks_freed(object, c->objsize);
if (!(s->flags & SLAB_DEBUG_OBJECTS))
- debug_check_no_obj_freed(object, s->objsize);
+ debug_check_no_obj_freed(object, c->objsize);
if (likely(page == c->page && c->node >= 0)) {
object[c->offset] = c->freelist;
c->freelist = object;
int order;
int min_objects;
int fraction;
+ int max_objects;
/*
* Attempt to find best configuration for a slab. This
min_objects = slub_min_objects;
if (!min_objects)
min_objects = 4 * (fls(nr_cpu_ids) + 1);
+ max_objects = (PAGE_SIZE << slub_max_order)/size;
+ min_objects = min(min_objects, max_objects);
+
while (min_objects > 1) {
fraction = 16;
while (fraction >= 4) {
return order;
fraction /= 2;
}
- min_objects /= 2;
+ min_objects--;
}
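Worked through with assumed numbers (PAGE_SIZE == 4096, slub_max_order == 3, a 3000-byte object, 16 possible CPUs):

/*
 *   min_objects = 4 * (fls(16) + 1) = 4 * 6 = 24
 *   max_objects = (4096 << 3) / 3000       = 10
 *   min_objects = min(24, 10)              = 10
 *
 * Without the cap, the loop would start at 24 objects, a target no
 * allowed order can meet; stepping min_objects down by one (rather
 * than halving) also tries 9, 8, ... instead of jumping straight
 * from 10 to 5.
 */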
/*
init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
{
n->nr_partial = 0;
-
- /*
- * The larger the object size is, the more pages we want on the partial
- * list to avoid pounding the page allocator excessively.
- */
- n->min_partial = ilog2(s->size);
- if (n->min_partial < MIN_PARTIAL)
- n->min_partial = MIN_PARTIAL;
- else if (n->min_partial > MAX_PARTIAL)
- n->min_partial = MAX_PARTIAL;
-
spin_lock_init(&n->list_lock);
INIT_LIST_HEAD(&n->partial);
#ifdef CONFIG_SLUB_DEBUG
static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
{
if (c < per_cpu(kmem_cache_cpu, cpu) ||
- c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
+ c >= per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
kfree(c);
return;
}
}
#endif
+static void set_min_partial(struct kmem_cache *s, unsigned long min)
+{
+ if (min < MIN_PARTIAL)
+ min = MIN_PARTIAL;
+ else if (min > MAX_PARTIAL)
+ min = MAX_PARTIAL;
+ s->min_partial = min;
+}
+
/*
* calculate_sizes() determines the order and the distribution of data within
* a slab object.
if (!calculate_sizes(s, -1))
goto error;
+ /*
+ * The larger the object size is, the more pages we want on the partial
+ * list to avoid pounding the page allocator excessively.
+ */
+ set_min_partial(s, ilog2(s->size));
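Worked through, assuming MIN_PARTIAL == 5 and MAX_PARTIAL == 10 (the clamp bounds in slub.c of this vintage):

/*
 *   size ==    32: ilog2(32)    ==  5 -> min_partial = 5 (== MIN_PARTIAL)
 *   size ==   192: ilog2(192)   ==  7 -> min_partial = 7
 *   size == 16384: ilog2(16384) == 14 -> clamped to 10 (== MAX_PARTIAL)
 */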
s->refcount = 1;
#ifdef CONFIG_NUMA
s->remote_node_defrag_ratio = 1000;
* Kmalloc subsystem
*******************************************************************/
-struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
+struct kmem_cache kmalloc_caches[SLUB_PAGE_SHIFT] __cacheline_aligned;
EXPORT_SYMBOL(kmalloc_caches);
static int __init setup_slub_min_order(char *str)
}
#ifdef CONFIG_ZONE_DMA
-static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
+static struct kmem_cache *kmalloc_caches_dma[SLUB_PAGE_SHIFT];
static void sysfs_add_func(struct work_struct *w)
{
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, flags);
s = get_slab(size, flags);
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large_node(size, flags, node);
s = get_slab(size, flags);
*/
return s->size;
}
+EXPORT_SYMBOL(ksize);
void kfree(const void *x)
{
caches++;
}
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++) {
create_kmalloc_cache(&kmalloc_caches[i],
"kmalloc", 1 << i, GFP_KERNEL);
caches++;
slab_state = UP;
/* Provide the correct kmalloc names now that the caches are up */
- for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
+ for (i = KMALLOC_SHIFT_LOW; i < SLUB_PAGE_SHIFT; i++)
kmalloc_caches[i].name =
kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large(size, gfpflags);
s = get_slab(size, gfpflags);
{
struct kmem_cache *s;
- if (unlikely(size > PAGE_SIZE))
+ if (unlikely(size > SLUB_MAX_SIZE))
return kmalloc_large_node(size, gfpflags, node);
s = get_slab(size, gfpflags);
}
SLAB_ATTR(order);
+static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
+{
+ return sprintf(buf, "%lu\n", s->min_partial);
+}
+
+static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
+ size_t length)
+{
+ unsigned long min;
+ int err;
+
+ err = strict_strtoul(buf, 10, &min);
+ if (err)
+ return err;
+
+ set_min_partial(s, min);
+ return length;
+}
+SLAB_ATTR(min_partial);
+
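This turns the old per-node heuristic into a per-cache tunable; the attribute appears under the existing SLUB sysfs layout, and because writes go through set_min_partial() an out-of-range value is silently clamped into [MIN_PARTIAL, MAX_PARTIAL].

/* Usage sketch (values illustrative; they depend on the cache):
 *
 *   reading  /sys/kernel/slab/kmalloc-64/min_partial  reports s->min_partial
 *   writing  /sys/kernel/slab/kmalloc-64/min_partial  calls set_min_partial()
 */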
static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
if (s->ctor) {
&object_size_attr.attr,
&objs_per_slab_attr.attr,
&order_attr.attr,
+ &min_partial_attr.attr,
&objects_attr.attr,
&objects_partial_attr.attr,
&total_objects_attr.attr,