2 * Simple NUMA memory policy for the Linux kernel.
4 * Copyright 2003,2004 Andi Kleen, SuSE Labs.
5 * (C) Copyright 2005 Christoph Lameter, Silicon Graphics, Inc.
6 * Subject to the GNU Public License, version 2.
8 * NUMA policy allows the user to give hints in which node(s) memory should
11 * Support four policies per VMA and per process:
13 * The VMA policy has priority over the process policy for a page fault.
15 * interleave Allocate memory interleaved over a set of nodes,
16 * with normal fallback if it fails.
17 * For VMA based allocations this interleaves based on the
18 * offset into the backing object or offset into the mapping
19 * for anonymous memory. For process policy a process counter
22 * bind Only allocate memory on a specific set of nodes,
24 * FIXME: memory is allocated starting with the first node
25 * to the last. It would be better if bind would truly restrict
26 * the allocation to memory nodes instead
28 * preferred Try a specific node first before normal fallback.
29 * As a special case NUMA_NO_NODE here means do the allocation
30 * on the local CPU. This is normally identical to default,
31 * but useful to set in a VMA when you have a non default
34 * default Allocate on the local node first, or when on a VMA
35 * use the process policy. This is what Linux always did
36 * in a NUMA aware kernel and still does by, ahem, default.
38 * The process policy is applied for most non interrupt memory allocations
39 * in that process' context. Interrupts ignore the policies and always
40 * try to allocate on the local CPU. The VMA policy is only applied for memory
41 * allocations for a VMA in the VM.
43 * Currently there are a few corner cases in swapping where the policy
44 * is not applied, but the majority should be handled. When process policy
45 * is used it is not remembered over swap outs/swap ins.
47 * Only the highest zone in the zone hierarchy gets policied. Allocations
48 * requesting a lower zone just use default policy. This implies that
49 * on systems with highmem, kernel lowmem allocations don't get policied.
50 * Same with GFP_DMA allocations.
52 * For shmfs/tmpfs/hugetlbfs shared memory the policy is shared between
53 * all users and remembered even when nobody has memory mapped.
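 *
 * A rough userspace sketch of how these modes are requested (illustrative
 * only: <numaif.h> comes from libnuma, and the mask values, addr and len
 * below are placeholders rather than anything defined in this file):
 *
 *	#include <numaif.h>
 *
 *	unsigned long nodes01 = 0x3;	// nodes 0 and 1
 *	unsigned long node0 = 0x1;	// node 0 only
 *
 *	// interleave future allocations of this process over nodes 0 and 1
 *	set_mempolicy(MPOL_INTERLEAVE, &nodes01, 8 * sizeof(nodes01));
 *
 *	// restrict the existing mapping [addr, addr + len) to node 0
 *	mbind(addr, len, MPOL_BIND, &node0, 8 * sizeof(node0), 0);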
57 fix mmap readahead to honour policy and enable policy for any page cache
59 statistics for bigpages
60 global policy for page cache? currently it uses process policy. Requires
62 handle mremap for shared memory (currently ignored for the policy)
64 make bind policy root only? It can trigger oom much faster and the
65 kernel does not always handle that gracefully.
68 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
70 #include <linux/mempolicy.h>
72 #include <linux/highmem.h>
73 #include <linux/hugetlb.h>
74 #include <linux/kernel.h>
75 #include <linux/sched.h>
76 #include <linux/nodemask.h>
77 #include <linux/cpuset.h>
78 #include <linux/slab.h>
79 #include <linux/string.h>
80 #include <linux/export.h>
81 #include <linux/nsproxy.h>
82 #include <linux/interrupt.h>
83 #include <linux/init.h>
84 #include <linux/compat.h>
85 #include <linux/swap.h>
86 #include <linux/seq_file.h>
87 #include <linux/proc_fs.h>
88 #include <linux/migrate.h>
89 #include <linux/ksm.h>
90 #include <linux/rmap.h>
91 #include <linux/security.h>
92 #include <linux/syscalls.h>
93 #include <linux/ctype.h>
94 #include <linux/mm_inline.h>
95 #include <linux/mmu_notifier.h>
96 #include <linux/printk.h>
98 #include <asm/tlbflush.h>
99 #include <asm/uaccess.h>
101 #include "internal.h"
104 #define MPOL_MF_DISCONTIG_OK (MPOL_MF_INTERNAL << 0) /* Skip checks for continuous vmas */
105 #define MPOL_MF_INVERT (MPOL_MF_INTERNAL << 1) /* Invert check for nodemask */
107 static struct kmem_cache *policy_cache;
108 static struct kmem_cache *sn_cache;
110 /* Highest zone. A specific allocation for a zone below that is not
112 enum zone_type policy_zone = 0;
115 * run-time system-wide default policy => local allocation
117 static struct mempolicy default_policy = {
118 .refcnt = ATOMIC_INIT(1), /* never free it */
119 .mode = MPOL_PREFERRED,
120 .flags = MPOL_F_LOCAL,
123 static struct mempolicy preferred_node_policy[MAX_NUMNODES];
125 struct mempolicy *get_task_policy(struct task_struct *p)
127 struct mempolicy *pol = p->mempolicy;
133 node = numa_node_id();
134 if (node != NUMA_NO_NODE) {
135 pol = &preferred_node_policy[node];
136 /* preferred_node_policy is not initialised early in boot */
141 return &default_policy;
144 static const struct mempolicy_operations {
145 int (*create)(struct mempolicy *pol, const nodemask_t *nodes);
147 * If read-side task has no lock to protect task->mempolicy, write-side
148 * task will rebind the task->mempolicy in two steps. The first step is
149 * setting all the newly allowed nodes, and the second step is cleaning all the
150 * disallowed nodes. In this way, we can avoid finding no node to alloc
152 * If we have a lock to protect task->mempolicy in read-side, we do
156 * MPOL_REBIND_ONCE - do rebind work at once
157 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
158 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
160 void (*rebind)(struct mempolicy *pol, const nodemask_t *nodes,
161 enum mpol_rebind_step step);
162 } mpol_ops[MPOL_MAX];
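/*
 * Illustrative walk-through of the two-step rebind described above (not part
 * of the original source): with pol->v.nodes = {0,1} and the cpuset's mems
 * changing from {0,1} to {2,3}, MPOL_REBIND_STEP1 remaps the old nodes and
 * ORs the result in, leaving {0,1,2,3}, so a concurrent reader always sees
 * at least one allowed node; MPOL_REBIND_STEP2 then drops the disallowed
 * nodes, leaving {2,3}.
 */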
164 static inline int mpol_store_user_nodemask(const struct mempolicy *pol)
166 return pol->flags & MPOL_MODE_FLAGS;
169 static void mpol_relative_nodemask(nodemask_t *ret, const nodemask_t *orig,
170 const nodemask_t *rel)
173 nodes_fold(tmp, *orig, nodes_weight(*rel));
174 nodes_onto(*ret, tmp, *rel);
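/*
 * Worked example for the relative-nodes computation above (illustrative, not
 * from the original source): with a user-supplied relative mask of {0,2} and
 * a cpuset mems_allowed of {4,5,6}, nodes_fold() keeps {0,2} (both bits are
 * below the weight 3) and nodes_onto() maps them onto the 0th and 2nd set
 * bits of the cpuset mask, yielding the physical nodes {4,6}.
 */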
177 static int mpol_new_interleave(struct mempolicy *pol, const nodemask_t *nodes)
179 if (nodes_empty(*nodes))
181 pol->v.nodes = *nodes;
185 static int mpol_new_preferred(struct mempolicy *pol, const nodemask_t *nodes)
188 pol->flags |= MPOL_F_LOCAL; /* local allocation */
189 else if (nodes_empty(*nodes))
190 return -EINVAL; /* no allowed nodes */
192 pol->v.preferred_node = first_node(*nodes);
196 static int mpol_new_bind(struct mempolicy *pol, const nodemask_t *nodes)
198 if (nodes_empty(*nodes))
200 pol->v.nodes = *nodes;
205 * mpol_set_nodemask is called after mpol_new() to set up the nodemask, if
206 * any, for the new policy. mpol_new() has already validated the nodes
207 * parameter with respect to the policy mode and flags. But, we need to
208 * handle an empty nodemask with MPOL_PREFERRED here.
210 * Must be called holding task's alloc_lock to protect task's mems_allowed
211 * and mempolicy. May also be called holding the mmap_semaphore for write.
213 static int mpol_set_nodemask(struct mempolicy *pol,
214 const nodemask_t *nodes, struct nodemask_scratch *nsc)
218 /* if mode is MPOL_DEFAULT, pol is NULL. This is right. */
222 nodes_and(nsc->mask1,
223 cpuset_current_mems_allowed, node_states[N_MEMORY]);
226 if (pol->mode == MPOL_PREFERRED && nodes_empty(*nodes))
227 nodes = NULL; /* explicit local allocation */
229 if (pol->flags & MPOL_F_RELATIVE_NODES)
230 mpol_relative_nodemask(&nsc->mask2, nodes, &nsc->mask1);
232 nodes_and(nsc->mask2, *nodes, nsc->mask1);
234 if (mpol_store_user_nodemask(pol))
235 pol->w.user_nodemask = *nodes;
237 pol->w.cpuset_mems_allowed =
238 cpuset_current_mems_allowed;
242 ret = mpol_ops[pol->mode].create(pol, &nsc->mask2);
244 ret = mpol_ops[pol->mode].create(pol, NULL);
249 * This function just creates a new policy, does some checks and simple
250 * initialization. You must invoke mpol_set_nodemask() to set nodes.
252 static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
255 struct mempolicy *policy;
257 pr_debug("setting mode %d flags %d nodes[0] %lx\n",
258 mode, flags, nodes ? nodes_addr(*nodes)[0] : NUMA_NO_NODE);
260 if (mode == MPOL_DEFAULT) {
261 if (nodes && !nodes_empty(*nodes))
262 return ERR_PTR(-EINVAL);
268 * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
269 * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
270 * All other modes require a valid pointer to a non-empty nodemask.
272 if (mode == MPOL_PREFERRED) {
273 if (nodes_empty(*nodes)) {
274 if (((flags & MPOL_F_STATIC_NODES) ||
275 (flags & MPOL_F_RELATIVE_NODES)))
276 return ERR_PTR(-EINVAL);
278 } else if (mode == MPOL_LOCAL) {
279 if (!nodes_empty(*nodes))
280 return ERR_PTR(-EINVAL);
281 mode = MPOL_PREFERRED;
282 } else if (nodes_empty(*nodes))
283 return ERR_PTR(-EINVAL);
284 policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
286 return ERR_PTR(-ENOMEM);
287 atomic_set(&policy->refcnt, 1);
289 policy->flags = flags;
294 /* Slow path of a mpol destructor. */
295 void __mpol_put(struct mempolicy *p)
297 if (!atomic_dec_and_test(&p->refcnt))
299 kmem_cache_free(policy_cache, p);
302 static void mpol_rebind_default(struct mempolicy *pol, const nodemask_t *nodes,
303 enum mpol_rebind_step step)
309 * MPOL_REBIND_ONCE - do rebind work at once
310 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
311 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
313 static void mpol_rebind_nodemask(struct mempolicy *pol, const nodemask_t *nodes,
314 enum mpol_rebind_step step)
318 if (pol->flags & MPOL_F_STATIC_NODES)
319 nodes_and(tmp, pol->w.user_nodemask, *nodes);
320 else if (pol->flags & MPOL_F_RELATIVE_NODES)
321 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
324 * if step == 1, we use ->w.cpuset_mems_allowed to cache the
327 if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP1) {
328 nodes_remap(tmp, pol->v.nodes,
329 pol->w.cpuset_mems_allowed, *nodes);
330 pol->w.cpuset_mems_allowed = step ? tmp : *nodes;
331 } else if (step == MPOL_REBIND_STEP2) {
332 tmp = pol->w.cpuset_mems_allowed;
333 pol->w.cpuset_mems_allowed = *nodes;
338 if (nodes_empty(tmp))
341 if (step == MPOL_REBIND_STEP1)
342 nodes_or(pol->v.nodes, pol->v.nodes, tmp);
343 else if (step == MPOL_REBIND_ONCE || step == MPOL_REBIND_STEP2)
348 if (!node_isset(current->il_next, tmp)) {
349 current->il_next = next_node_in(current->il_next, tmp);
350 if (current->il_next >= MAX_NUMNODES)
351 current->il_next = numa_node_id();
355 static void mpol_rebind_preferred(struct mempolicy *pol,
356 const nodemask_t *nodes,
357 enum mpol_rebind_step step)
361 if (pol->flags & MPOL_F_STATIC_NODES) {
362 int node = first_node(pol->w.user_nodemask);
364 if (node_isset(node, *nodes)) {
365 pol->v.preferred_node = node;
366 pol->flags &= ~MPOL_F_LOCAL;
368 pol->flags |= MPOL_F_LOCAL;
369 } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
370 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
371 pol->v.preferred_node = first_node(tmp);
372 } else if (!(pol->flags & MPOL_F_LOCAL)) {
373 pol->v.preferred_node = node_remap(pol->v.preferred_node,
374 pol->w.cpuset_mems_allowed,
376 pol->w.cpuset_mems_allowed = *nodes;
381 * mpol_rebind_policy - Migrate a policy to a different set of nodes
383 * If read-side task has no lock to protect task->mempolicy, write-side
384 * task will rebind the task->mempolicy in two steps. The first step is
385 * setting all the newly allowed nodes, and the second step is cleaning all the
386 * disallowed nodes. In this way, we can avoid finding no node to alloc
388 * If we have a lock to protect task->mempolicy in read-side, we do
392 * MPOL_REBIND_ONCE - do rebind work at once
393 * MPOL_REBIND_STEP1 - set all the newly allowed nodes
394 * MPOL_REBIND_STEP2 - clean all the disallowed nodes
396 static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
397 enum mpol_rebind_step step)
401 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
402 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
405 if (step == MPOL_REBIND_STEP1 && (pol->flags & MPOL_F_REBINDING))
408 if (step == MPOL_REBIND_STEP2 && !(pol->flags & MPOL_F_REBINDING))
411 if (step == MPOL_REBIND_STEP1)
412 pol->flags |= MPOL_F_REBINDING;
413 else if (step == MPOL_REBIND_STEP2)
414 pol->flags &= ~MPOL_F_REBINDING;
415 else if (step >= MPOL_REBIND_NSTEP)
418 mpol_ops[pol->mode].rebind(pol, newmask, step);
422 * Wrapper for mpol_rebind_policy() that just requires task
423 * pointer, and updates task mempolicy.
425 * Called with task's alloc_lock held.
428 void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new,
429 enum mpol_rebind_step step)
431 mpol_rebind_policy(tsk->mempolicy, new, step);
435 * Rebind each vma in mm to new nodemask.
437 * Call holding a reference to mm. Takes mm->mmap_sem during call.
440 void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
442 struct vm_area_struct *vma;
444 down_write(&mm->mmap_sem);
445 for (vma = mm->mmap; vma; vma = vma->vm_next)
446 mpol_rebind_policy(vma->vm_policy, new, MPOL_REBIND_ONCE);
447 up_write(&mm->mmap_sem);
450 static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
452 .rebind = mpol_rebind_default,
454 [MPOL_INTERLEAVE] = {
455 .create = mpol_new_interleave,
456 .rebind = mpol_rebind_nodemask,
459 .create = mpol_new_preferred,
460 .rebind = mpol_rebind_preferred,
463 .create = mpol_new_bind,
464 .rebind = mpol_rebind_nodemask,
468 static void migrate_page_add(struct page *page, struct list_head *pagelist,
469 unsigned long flags);
472 struct list_head *pagelist;
475 struct vm_area_struct *prev;
479 * Scan through pages checking if pages follow certain conditions,
480 * and move them to the pagelist if they do.
482 static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
483 unsigned long end, struct mm_walk *walk)
485 struct vm_area_struct *vma = walk->vma;
487 struct queue_pages *qp = walk->private;
488 unsigned long flags = qp->flags;
493 if (pmd_trans_huge(*pmd)) {
494 ptl = pmd_lock(walk->mm, pmd);
495 if (pmd_trans_huge(*pmd)) {
496 page = pmd_page(*pmd);
497 if (is_huge_zero_page(page)) {
499 split_huge_pmd(vma, pmd, addr);
504 ret = split_huge_page(page);
515 if (pmd_trans_unstable(pmd))
518 pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
519 for (; addr != end; pte++, addr += PAGE_SIZE) {
520 if (!pte_present(*pte))
522 page = vm_normal_page(vma, addr, *pte);
526 * vm_normal_page() filters out zero pages, but there might
527 * still be PageReserved pages to skip, perhaps in a VDSO.
529 if (PageReserved(page))
531 nid = page_to_nid(page);
532 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
534 if (PageTransCompound(page)) {
536 pte_unmap_unlock(pte, ptl);
538 ret = split_huge_page(page);
541 /* Failed to split -- skip. */
543 pte = pte_offset_map_lock(walk->mm, pmd,
550 migrate_page_add(page, qp->pagelist, flags);
552 pte_unmap_unlock(pte - 1, ptl);
557 static int queue_pages_hugetlb(pte_t *pte, unsigned long hmask,
558 unsigned long addr, unsigned long end,
559 struct mm_walk *walk)
561 #ifdef CONFIG_HUGETLB_PAGE
562 struct queue_pages *qp = walk->private;
563 unsigned long flags = qp->flags;
569 ptl = huge_pte_lock(hstate_vma(walk->vma), walk->mm, pte);
570 entry = huge_ptep_get(pte);
571 if (!pte_present(entry))
573 page = pte_page(entry);
574 nid = page_to_nid(page);
575 if (node_isset(nid, *qp->nmask) == !!(flags & MPOL_MF_INVERT))
577 /* With MPOL_MF_MOVE, we migrate only unshared hugepage. */
578 if (flags & (MPOL_MF_MOVE_ALL) ||
579 (flags & MPOL_MF_MOVE && page_mapcount(page) == 1))
580 isolate_huge_page(page, qp->pagelist);
589 #ifdef CONFIG_NUMA_BALANCING
591 * This is used to mark a range of virtual addresses to be inaccessible.
592 * These are later cleared by a NUMA hinting fault. Depending on these
593 * faults, pages may be migrated for better NUMA placement.
595 * This is assuming that NUMA faults are handled using PROT_NONE. If
596 * an architecture makes a different choice, it will need further
597 * changes to the core.
599 unsigned long change_prot_numa(struct vm_area_struct *vma,
600 unsigned long addr, unsigned long end)
604 nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
606 count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
611 static unsigned long change_prot_numa(struct vm_area_struct *vma,
612 unsigned long addr, unsigned long end)
616 #endif /* CONFIG_NUMA_BALANCING */
618 static int queue_pages_test_walk(unsigned long start, unsigned long end,
619 struct mm_walk *walk)
621 struct vm_area_struct *vma = walk->vma;
622 struct queue_pages *qp = walk->private;
623 unsigned long endvma = vma->vm_end;
624 unsigned long flags = qp->flags;
626 if (!vma_migratable(vma))
631 if (vma->vm_start > start)
632 start = vma->vm_start;
634 if (!(flags & MPOL_MF_DISCONTIG_OK)) {
635 if (!vma->vm_next && vma->vm_end < end)
637 if (qp->prev && qp->prev->vm_end < vma->vm_start)
643 if (flags & MPOL_MF_LAZY) {
644 /* Similar to task_numa_work, skip inaccessible VMAs */
645 if (!is_vm_hugetlb_page(vma) &&
646 (vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)) &&
647 !(vma->vm_flags & VM_MIXEDMAP))
648 change_prot_numa(vma, start, endvma);
652 /* queue pages from current vma */
653 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL))
659 * Walk through page tables and collect pages to be migrated.
661 * If pages found in a given range are on a set of nodes (determined by
662 * @nodes and @flags), they are isolated and queued to the pagelist, which
663 * is passed via @private.
666 queue_pages_range(struct mm_struct *mm, unsigned long start, unsigned long end,
667 nodemask_t *nodes, unsigned long flags,
668 struct list_head *pagelist)
670 struct queue_pages qp = {
671 .pagelist = pagelist,
676 struct mm_walk queue_pages_walk = {
677 .hugetlb_entry = queue_pages_hugetlb,
678 .pmd_entry = queue_pages_pte_range,
679 .test_walk = queue_pages_test_walk,
684 return walk_page_range(start, end, &queue_pages_walk);
688 * Apply policy to a single VMA
689 * This must be called with the mmap_sem held for writing.
691 static int vma_replace_policy(struct vm_area_struct *vma,
692 struct mempolicy *pol)
695 struct mempolicy *old;
696 struct mempolicy *new;
698 pr_debug("vma %lx-%lx/%lx vm_ops %p vm_file %p set_policy %p\n",
699 vma->vm_start, vma->vm_end, vma->vm_pgoff,
700 vma->vm_ops, vma->vm_file,
701 vma->vm_ops ? vma->vm_ops->set_policy : NULL);
707 if (vma->vm_ops && vma->vm_ops->set_policy) {
708 err = vma->vm_ops->set_policy(vma, new);
713 old = vma->vm_policy;
714 vma->vm_policy = new; /* protected by mmap_sem */
723 /* Step 2: apply policy to a range and do splits. */
724 static int mbind_range(struct mm_struct *mm, unsigned long start,
725 unsigned long end, struct mempolicy *new_pol)
727 struct vm_area_struct *next;
728 struct vm_area_struct *prev;
729 struct vm_area_struct *vma;
732 unsigned long vmstart;
735 vma = find_vma(mm, start);
736 if (!vma || vma->vm_start > start)
740 if (start > vma->vm_start)
743 for (; vma && vma->vm_start < end; prev = vma, vma = next) {
745 vmstart = max(start, vma->vm_start);
746 vmend = min(end, vma->vm_end);
748 if (mpol_equal(vma_policy(vma), new_pol))
751 pgoff = vma->vm_pgoff +
752 ((vmstart - vma->vm_start) >> PAGE_SHIFT);
753 prev = vma_merge(mm, prev, vmstart, vmend, vma->vm_flags,
754 vma->anon_vma, vma->vm_file, pgoff,
755 new_pol, vma->vm_userfaultfd_ctx);
759 if (mpol_equal(vma_policy(vma), new_pol))
761 /* vma_merge() joined vma && vma->next, case 8 */
764 if (vma->vm_start != vmstart) {
765 err = split_vma(vma->vm_mm, vma, vmstart, 1);
769 if (vma->vm_end != vmend) {
770 err = split_vma(vma->vm_mm, vma, vmend, 0);
775 err = vma_replace_policy(vma, new_pol);
784 /* Set the process memory policy */
785 static long do_set_mempolicy(unsigned short mode, unsigned short flags,
788 struct mempolicy *new, *old;
789 NODEMASK_SCRATCH(scratch);
795 new = mpol_new(mode, flags, nodes);
802 ret = mpol_set_nodemask(new, nodes, scratch);
804 task_unlock(current);
808 old = current->mempolicy;
809 current->mempolicy = new;
810 if (new && new->mode == MPOL_INTERLEAVE &&
811 nodes_weight(new->v.nodes))
812 current->il_next = first_node(new->v.nodes);
813 task_unlock(current);
817 NODEMASK_SCRATCH_FREE(scratch);
822 * Return nodemask for policy for get_mempolicy() query
824 * Called with task's alloc_lock held
826 static void get_policy_nodemask(struct mempolicy *p, nodemask_t *nodes)
829 if (p == &default_policy)
835 case MPOL_INTERLEAVE:
839 if (!(p->flags & MPOL_F_LOCAL))
840 node_set(p->v.preferred_node, *nodes);
841 /* else return empty node mask for local allocation */
848 static int lookup_node(unsigned long addr)
853 err = get_user_pages(addr & PAGE_MASK, 1, 0, &p, NULL);
855 err = page_to_nid(p);
861 /* Retrieve NUMA policy */
862 static long do_get_mempolicy(int *policy, nodemask_t *nmask,
863 unsigned long addr, unsigned long flags)
866 struct mm_struct *mm = current->mm;
867 struct vm_area_struct *vma = NULL;
868 struct mempolicy *pol = current->mempolicy;
871 ~(unsigned long)(MPOL_F_NODE|MPOL_F_ADDR|MPOL_F_MEMS_ALLOWED))
874 if (flags & MPOL_F_MEMS_ALLOWED) {
875 if (flags & (MPOL_F_NODE|MPOL_F_ADDR))
877 *policy = 0; /* just so it's initialized */
879 *nmask = cpuset_current_mems_allowed;
880 task_unlock(current);
884 if (flags & MPOL_F_ADDR) {
886 * Do NOT fall back to task policy if the
887 * vma/shared policy at addr is NULL. We
888 * want to return MPOL_DEFAULT in this case.
890 down_read(&mm->mmap_sem);
891 vma = find_vma_intersection(mm, addr, addr+1);
893 up_read(&mm->mmap_sem);
896 if (vma->vm_ops && vma->vm_ops->get_policy)
897 pol = vma->vm_ops->get_policy(vma, addr);
899 pol = vma->vm_policy;
904 pol = &default_policy; /* indicates default behavior */
906 if (flags & MPOL_F_NODE) {
907 if (flags & MPOL_F_ADDR) {
908 err = lookup_node(addr);
912 } else if (pol == current->mempolicy &&
913 pol->mode == MPOL_INTERLEAVE) {
914 *policy = current->il_next;
920 *policy = pol == &default_policy ? MPOL_DEFAULT :
923 * Internal mempolicy flags must be masked off before exposing
924 * the policy to userspace.
926 *policy |= (pol->flags & MPOL_MODE_FLAGS);
931 if (mpol_store_user_nodemask(pol)) {
932 *nmask = pol->w.user_nodemask;
935 get_policy_nodemask(pol, nmask);
936 task_unlock(current);
943 up_read(&current->mm->mmap_sem);
947 #ifdef CONFIG_MIGRATION
951 static void migrate_page_add(struct page *page, struct list_head *pagelist,
955 * Avoid migrating a page that is shared with others.
957 if ((flags & MPOL_MF_MOVE_ALL) || page_mapcount(page) == 1) {
958 if (!isolate_lru_page(page)) {
959 list_add_tail(&page->lru, pagelist);
960 inc_node_page_state(page, NR_ISOLATED_ANON +
961 page_is_file_cache(page));
966 static struct page *new_node_page(struct page *page, unsigned long node, int **x)
969 return alloc_huge_page_node(page_hstate(compound_head(page)),
972 return __alloc_pages_node(node, GFP_HIGHUSER_MOVABLE |
977 * Migrate pages from one node to a target node.
978 * Returns error or the number of pages not migrated.
980 static int migrate_to_node(struct mm_struct *mm, int source, int dest,
988 node_set(source, nmask);
991 * This does not "check" the range but isolates all pages that
992 * need migration. Between passing in the full user address
993 * space range and MPOL_MF_DISCONTIG_OK, this call cannot fail.
995 VM_BUG_ON(!(flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)));
996 queue_pages_range(mm, mm->mmap->vm_start, mm->task_size, &nmask,
997 flags | MPOL_MF_DISCONTIG_OK, &pagelist);
999 if (!list_empty(&pagelist)) {
1000 err = migrate_pages(&pagelist, new_node_page, NULL, dest,
1001 MIGRATE_SYNC, MR_SYSCALL);
1003 putback_movable_pages(&pagelist);
1010 * Move pages between the two nodesets so as to preserve the physical
1011 * layout as much as possible.
1013 * Returns the number of pages that could not be moved.
1015 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1016 const nodemask_t *to, int flags)
1022 err = migrate_prep();
1026 down_read(&mm->mmap_sem);
1029 * Find a 'source' bit set in 'tmp' whose corresponding 'dest'
1030 * bit in 'to' is not also set in 'tmp'. Clear the found 'source'
1031 * bit in 'tmp', and return that <source, dest> pair for migration.
1032 * The pair of nodemasks 'to' and 'from' define the map.
1034 * If no pair of bits is found that way, fallback to picking some
1035 * pair of 'source' and 'dest' bits that are not the same. If the
1036 * 'source' and 'dest' bits are the same, this represents a node
1037 * that will be migrating to itself, so no pages need move.
1039 * If no bits are left in 'tmp', or if all remaining bits left
1040 * in 'tmp' correspond to the same bit in 'to', return false
1041 * (nothing left to migrate).
1043 * This lets us pick a pair of nodes to migrate between, such that
1044 * if possible the dest node is not already occupied by some other
1045 * source node, minimizing the risk of overloading the memory on a
1046 * node that would happen if we migrated incoming memory to a node
1047 * before migrating outgoing memory from that same node.
1049 * A single scan of tmp is sufficient. As we go, we remember the
1050 * most recent <s, d> pair that moved (s != d). If we find a pair
1051 * that not only moved, but what's better, moved to an empty slot
1052 * (d is not set in tmp), then we break out then, with that pair.
1054 * Otherwise, when we finish scanning tmp, we at least have the
1054 * most recent <s, d> pair that moved. If we get all the way through
1055 * the scan of tmp without finding any node that moved, much less
1056 * moved to an empty node, then there is nothing left worth migrating.
1060 while (!nodes_empty(tmp)) {
1062 int source = NUMA_NO_NODE;
1065 for_each_node_mask(s, tmp) {
1068 * do_migrate_pages() tries to maintain the relative
1069 * node relationship of the pages established between
1070 * threads and memory areas.
1072 * However if the number of source nodes is not equal to
1073 * the number of destination nodes we cannot preserve
1074 * this node relative relationship. In that case, skip
1075 * copying memory from a node that is in the destination
1078 * Example: [2,3,4] -> [3,4,5] moves everything.
1079 * [0-7] -> [3,4,5] moves only 0,1,2,6,7.
1082 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1083 (node_isset(s, *to)))
1086 d = node_remap(s, *from, *to);
1090 source = s; /* Node moved. Memorize */
1093 /* dest not in remaining from nodes? */
1094 if (!node_isset(dest, tmp))
1097 if (source == NUMA_NO_NODE)
1100 node_clear(source, tmp);
1101 err = migrate_to_node(mm, source, dest, flags);
1107 up_read(&mm->mmap_sem);
1115 * Allocate a new page for page migration based on vma policy.
1116 * Start by assuming the page is mapped by the same vma that contains @start.
1117 * Search forward from there, if not. N.B., this assumes that the
1118 * list of pages handed to migrate_pages()--which is how we get here--
1119 * is in virtual address order.
1121 static struct page *new_page(struct page *page, unsigned long start, int **x)
1123 struct vm_area_struct *vma;
1124 unsigned long uninitialized_var(address);
1126 vma = find_vma(current->mm, start);
1128 address = page_address_in_vma(page, vma);
1129 if (address != -EFAULT)
1134 if (PageHuge(page)) {
1136 return alloc_huge_page_noerr(vma, address, 1);
1139 * if !vma, alloc_page_vma() will use task or system default policy
1141 return alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
1145 static void migrate_page_add(struct page *page, struct list_head *pagelist,
1146 unsigned long flags)
1150 int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1151 const nodemask_t *to, int flags)
1156 static struct page *new_page(struct page *page, unsigned long start, int **x)
1162 static long do_mbind(unsigned long start, unsigned long len,
1163 unsigned short mode, unsigned short mode_flags,
1164 nodemask_t *nmask, unsigned long flags)
1166 struct mm_struct *mm = current->mm;
1167 struct mempolicy *new;
1170 LIST_HEAD(pagelist);
1172 if (flags & ~(unsigned long)MPOL_MF_VALID)
1174 if ((flags & MPOL_MF_MOVE_ALL) && !capable(CAP_SYS_NICE))
1177 if (start & ~PAGE_MASK)
1180 if (mode == MPOL_DEFAULT)
1181 flags &= ~MPOL_MF_STRICT;
1183 len = (len + PAGE_SIZE - 1) & PAGE_MASK;
1191 new = mpol_new(mode, mode_flags, nmask);
1193 return PTR_ERR(new);
1195 if (flags & MPOL_MF_LAZY)
1196 new->flags |= MPOL_F_MOF;
1199 * If we are using the default policy then operation
1200 * on discontinuous address spaces is okay after all
1203 flags |= MPOL_MF_DISCONTIG_OK;
1205 pr_debug("mbind %lx-%lx mode:%d flags:%d nodes:%lx\n",
1206 start, start + len, mode, mode_flags,
1207 nmask ? nodes_addr(*nmask)[0] : NUMA_NO_NODE);
1209 if (flags & (MPOL_MF_MOVE | MPOL_MF_MOVE_ALL)) {
1211 err = migrate_prep();
1216 NODEMASK_SCRATCH(scratch);
1218 down_write(&mm->mmap_sem);
1220 err = mpol_set_nodemask(new, nmask, scratch);
1221 task_unlock(current);
1223 up_write(&mm->mmap_sem);
1226 NODEMASK_SCRATCH_FREE(scratch);
1231 err = queue_pages_range(mm, start, end, nmask,
1232 flags | MPOL_MF_INVERT, &pagelist);
1234 err = mbind_range(mm, start, end, new);
1239 if (!list_empty(&pagelist)) {
1240 WARN_ON_ONCE(flags & MPOL_MF_LAZY);
1241 nr_failed = migrate_pages(&pagelist, new_page, NULL,
1242 start, MIGRATE_SYNC, MR_MEMPOLICY_MBIND);
1244 putback_movable_pages(&pagelist);
1247 if (nr_failed && (flags & MPOL_MF_STRICT))
1250 putback_movable_pages(&pagelist);
1252 up_write(&mm->mmap_sem);
1259 * User space interface with variable sized bitmaps for nodelists.
1262 /* Copy a node mask from user space. */
1263 static int get_nodes(nodemask_t *nodes, const unsigned long __user *nmask,
1264 unsigned long maxnode)
1268 unsigned long nlongs;
1269 unsigned long endmask;
1272 nodes_clear(*nodes);
1273 if (maxnode == 0 || !nmask)
1275 if (maxnode > PAGE_SIZE*BITS_PER_BYTE)
1278 nlongs = BITS_TO_LONGS(maxnode);
1279 if ((maxnode % BITS_PER_LONG) == 0)
1282 endmask = (1UL << (maxnode % BITS_PER_LONG)) - 1;
1285 * When the user specifies more nodes than supported, just check
1286 * whether the unsupported part is all zero.
1288 * If maxnode has more longs than MAX_NUMNODES, check
1289 * the bits in that area first, and then go on to check
1290 * the remaining bits, which are equal to or bigger than MAX_NUMNODES.
1291 * Otherwise, just check bits [MAX_NUMNODES, maxnode).
1293 if (nlongs > BITS_TO_LONGS(MAX_NUMNODES)) {
1294 if (nlongs > PAGE_SIZE/sizeof(long))
1296 for (k = BITS_TO_LONGS(MAX_NUMNODES); k < nlongs; k++) {
1297 if (get_user(t, nmask + k))
1299 if (k == nlongs - 1) {
1305 nlongs = BITS_TO_LONGS(MAX_NUMNODES);
1309 if (maxnode > MAX_NUMNODES && MAX_NUMNODES % BITS_PER_LONG != 0) {
1310 unsigned long valid_mask = endmask;
1312 valid_mask &= ~((1UL << (MAX_NUMNODES % BITS_PER_LONG)) - 1);
1313 if (get_user(t, nmask + nlongs - 1))
1319 if (copy_from_user(nodes_addr(*nodes), nmask, nlongs*sizeof(unsigned long)))
1321 nodes_addr(*nodes)[nlongs-1] &= endmask;
1325 /* Copy a kernel node mask to user space */
1326 static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
1329 unsigned long copy = ALIGN(maxnode-1, 64) / 8;
1330 const int nbytes = BITS_TO_LONGS(MAX_NUMNODES) * sizeof(long);
1332 if (copy > nbytes) {
1333 if (copy > PAGE_SIZE)
1335 if (clear_user((char __user *)mask + nbytes, copy - nbytes))
1339 return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
1342 SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
1343 unsigned long, mode, const unsigned long __user *, nmask,
1344 unsigned long, maxnode, unsigned, flags)
1348 unsigned short mode_flags;
1350 mode_flags = mode & MPOL_MODE_FLAGS;
1351 mode &= ~MPOL_MODE_FLAGS;
1352 if (mode >= MPOL_MAX)
1354 if ((mode_flags & MPOL_F_STATIC_NODES) &&
1355 (mode_flags & MPOL_F_RELATIVE_NODES))
1357 err = get_nodes(&nodes, nmask, maxnode);
1360 return do_mbind(start, len, mode, mode_flags, &nodes, flags);
1363 /* Set the process memory policy */
1364 SYSCALL_DEFINE3(set_mempolicy, int, mode, const unsigned long __user *, nmask,
1365 unsigned long, maxnode)
1369 unsigned short flags;
1371 flags = mode & MPOL_MODE_FLAGS;
1372 mode &= ~MPOL_MODE_FLAGS;
1373 if ((unsigned int)mode >= MPOL_MAX)
1375 if ((flags & MPOL_F_STATIC_NODES) && (flags & MPOL_F_RELATIVE_NODES))
1377 err = get_nodes(&nodes, nmask, maxnode);
1380 return do_set_mempolicy(mode, flags, &nodes);
1383 SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
1384 const unsigned long __user *, old_nodes,
1385 const unsigned long __user *, new_nodes)
1387 const struct cred *cred = current_cred(), *tcred;
1388 struct mm_struct *mm = NULL;
1389 struct task_struct *task;
1390 nodemask_t task_nodes;
1394 NODEMASK_SCRATCH(scratch);
1399 old = &scratch->mask1;
1400 new = &scratch->mask2;
1402 err = get_nodes(old, old_nodes, maxnode);
1406 err = get_nodes(new, new_nodes, maxnode);
1410 /* Find the mm_struct */
1412 task = pid ? find_task_by_vpid(pid) : current;
1418 get_task_struct(task);
1423 * Check if this process has the right to modify the specified
1424 * process. The right exists if the process has administrative
1425 * capabilities, superuser privileges or the same
1426 * userid as the target process.
1428 tcred = __task_cred(task);
1429 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
1430 !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
1431 !capable(CAP_SYS_NICE)) {
1438 task_nodes = cpuset_mems_allowed(task);
1439 /* Is the user allowed to access the target nodes? */
1440 if (!nodes_subset(*new, task_nodes) && !capable(CAP_SYS_NICE)) {
1445 task_nodes = cpuset_mems_allowed(current);
1446 nodes_and(*new, *new, task_nodes);
1447 if (nodes_empty(*new))
1450 nodes_and(*new, *new, node_states[N_MEMORY]);
1451 if (nodes_empty(*new))
1454 err = security_task_movememory(task);
1458 mm = get_task_mm(task);
1459 put_task_struct(task);
1466 err = do_migrate_pages(mm, old, new,
1467 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
1471 NODEMASK_SCRATCH_FREE(scratch);
1476 put_task_struct(task);
1482 /* Retrieve NUMA policy */
1483 SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1484 unsigned long __user *, nmask, unsigned long, maxnode,
1485 unsigned long, addr, unsigned long, flags)
1488 int uninitialized_var(pval);
1491 if (nmask != NULL && maxnode < MAX_NUMNODES)
1494 err = do_get_mempolicy(&pval, &nodes, addr, flags);
1499 if (policy && put_user(pval, policy))
1503 err = copy_nodes_to_user(nmask, maxnode, &nodes);
1508 #ifdef CONFIG_COMPAT
1510 COMPAT_SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
1511 compat_ulong_t __user *, nmask,
1512 compat_ulong_t, maxnode,
1513 compat_ulong_t, addr, compat_ulong_t, flags)
1516 unsigned long __user *nm = NULL;
1517 unsigned long nr_bits, alloc_size;
1518 DECLARE_BITMAP(bm, MAX_NUMNODES);
1520 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1521 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1524 nm = compat_alloc_user_space(alloc_size);
1526 err = sys_get_mempolicy(policy, nm, nr_bits+1, addr, flags);
1528 if (!err && nmask) {
1529 unsigned long copy_size;
1530 copy_size = min_t(unsigned long, sizeof(bm), alloc_size);
1531 err = copy_from_user(bm, nm, copy_size);
1532 /* ensure entire bitmap is zeroed */
1533 err |= clear_user(nmask, ALIGN(maxnode-1, 8) / 8);
1534 err |= compat_put_bitmap(nmask, bm, nr_bits);
1540 COMPAT_SYSCALL_DEFINE3(set_mempolicy, int, mode, compat_ulong_t __user *, nmask,
1541 compat_ulong_t, maxnode)
1543 unsigned long __user *nm = NULL;
1544 unsigned long nr_bits, alloc_size;
1545 DECLARE_BITMAP(bm, MAX_NUMNODES);
1547 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1548 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1551 if (compat_get_bitmap(bm, nmask, nr_bits))
1553 nm = compat_alloc_user_space(alloc_size);
1554 if (copy_to_user(nm, bm, alloc_size))
1558 return sys_set_mempolicy(mode, nm, nr_bits+1);
1561 COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
1562 compat_ulong_t, mode, compat_ulong_t __user *, nmask,
1563 compat_ulong_t, maxnode, compat_ulong_t, flags)
1565 unsigned long __user *nm = NULL;
1566 unsigned long nr_bits, alloc_size;
1569 nr_bits = min_t(unsigned long, maxnode-1, MAX_NUMNODES);
1570 alloc_size = ALIGN(nr_bits, BITS_PER_LONG) / 8;
1573 if (compat_get_bitmap(nodes_addr(bm), nmask, nr_bits))
1575 nm = compat_alloc_user_space(alloc_size);
1576 if (copy_to_user(nm, nodes_addr(bm), alloc_size))
1580 return sys_mbind(start, len, mode, nm, nr_bits+1, flags);
1585 struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
1588 struct mempolicy *pol = NULL;
1591 if (vma->vm_ops && vma->vm_ops->get_policy) {
1592 pol = vma->vm_ops->get_policy(vma, addr);
1593 } else if (vma->vm_policy) {
1594 pol = vma->vm_policy;
1597 * shmem_alloc_page() passes MPOL_F_SHARED policy with
1598 * a pseudo vma whose vma->vm_ops=NULL. Take a reference
1599 * count on these policies which will be dropped by
1600 * mpol_cond_put() later
1602 if (mpol_needs_cond_ref(pol))
1611 * get_vma_policy(@vma, @addr)
1612 * @vma: virtual memory area whose policy is sought
1613 * @addr: address in @vma for shared policy lookup
1615 * Returns effective policy for a VMA at specified address.
1616 * Falls back to current->mempolicy or system default policy, as necessary.
1617 * Shared policies [those marked as MPOL_F_SHARED] require an extra reference
1618 * count--added by the get_policy() vm_op, as appropriate--to protect against
1619 * freeing by another task. It is the caller's responsibility to free the
1620 * extra reference for shared policies.
1622 static struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
1625 struct mempolicy *pol = __get_vma_policy(vma, addr);
1628 pol = get_task_policy(current);
1633 bool vma_policy_mof(struct vm_area_struct *vma)
1635 struct mempolicy *pol;
1637 if (vma->vm_ops && vma->vm_ops->get_policy) {
1640 pol = vma->vm_ops->get_policy(vma, vma->vm_start);
1641 if (pol && (pol->flags & MPOL_F_MOF))
1648 pol = vma->vm_policy;
1650 pol = get_task_policy(current);
1652 return pol->flags & MPOL_F_MOF;
1655 static int apply_policy_zone(struct mempolicy *policy, enum zone_type zone)
1657 enum zone_type dynamic_policy_zone = policy_zone;
1659 BUG_ON(dynamic_policy_zone == ZONE_MOVABLE);
1662 * If policy->v.nodes has movable memory only,
1663 * we apply policy only when gfp_zone(gfp) == ZONE_MOVABLE.
1665 * policy->v.nodes is intersected with node_states[N_MEMORY],
1666 * so if the following test fails, it implies
1667 * policy->v.nodes has movable memory only.
1669 if (!nodes_intersects(policy->v.nodes, node_states[N_HIGH_MEMORY]))
1670 dynamic_policy_zone = ZONE_MOVABLE;
1672 return zone >= dynamic_policy_zone;
1676 * Return a nodemask representing a mempolicy for filtering nodes for
1679 static nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy)
1681 /* Lower zones don't get a nodemask applied for MPOL_BIND */
1682 if (unlikely(policy->mode == MPOL_BIND) &&
1683 apply_policy_zone(policy, gfp_zone(gfp)) &&
1684 cpuset_nodemask_valid_mems_allowed(&policy->v.nodes))
1685 return &policy->v.nodes;
1690 /* Return a zonelist indicated by gfp for node representing a mempolicy */
1691 static struct zonelist *policy_zonelist(gfp_t gfp, struct mempolicy *policy,
1694 switch (policy->mode) {
1695 case MPOL_PREFERRED:
1696 if (!(policy->flags & MPOL_F_LOCAL))
1697 nd = policy->v.preferred_node;
1701 * Normally, MPOL_BIND allocations are node-local within the
1702 * allowed nodemask. However, if __GFP_THISNODE is set and the
1703 * current node isn't part of the mask, we use the zonelist for
1704 * the first node in the mask instead.
1706 if (unlikely(gfp & __GFP_THISNODE) &&
1707 unlikely(!node_isset(nd, policy->v.nodes)))
1708 nd = first_node(policy->v.nodes);
1713 return node_zonelist(nd, gfp);
1716 /* Do dynamic interleaving for a process */
1717 static unsigned interleave_nodes(struct mempolicy *policy)
1720 struct task_struct *me = current;
1723 next = next_node_in(nid, policy->v.nodes);
1724 if (next < MAX_NUMNODES)
1730 * Depending on the memory policy provide a node from which to allocate the
1733 unsigned int mempolicy_slab_node(void)
1735 struct mempolicy *policy;
1736 int node = numa_mem_id();
1741 policy = current->mempolicy;
1742 if (!policy || policy->flags & MPOL_F_LOCAL)
1745 switch (policy->mode) {
1746 case MPOL_PREFERRED:
1748 * handled MPOL_F_LOCAL above
1750 return policy->v.preferred_node;
1752 case MPOL_INTERLEAVE:
1753 return interleave_nodes(policy);
1759 * Follow bind policy behavior and start allocation at the
1762 struct zonelist *zonelist;
1763 enum zone_type highest_zoneidx = gfp_zone(GFP_KERNEL);
1764 zonelist = &NODE_DATA(node)->node_zonelists[ZONELIST_FALLBACK];
1765 z = first_zones_zonelist(zonelist, highest_zoneidx,
1767 return z->zone ? z->zone->node : node;
1776 * Do static interleaving for a VMA with known offset @n. Returns the n'th
1777 * node in pol->v.nodes (starting from n=0), wrapping around if n exceeds the
1778 * number of present nodes.
1780 static unsigned offset_il_node(struct mempolicy *pol,
1781 struct vm_area_struct *vma, unsigned long n)
1783 unsigned nnodes = nodes_weight(pol->v.nodes);
1789 return numa_node_id();
1790 target = (unsigned int)n % nnodes;
1791 nid = first_node(pol->v.nodes);
1792 for (i = 0; i < target; i++)
1793 nid = next_node(nid, pol->v.nodes);
1797 /* Determine a node number for interleave */
1798 static inline unsigned interleave_nid(struct mempolicy *pol,
1799 struct vm_area_struct *vma, unsigned long addr, int shift)
1805 * for small pages, there is no difference between
1806 * shift and PAGE_SHIFT, so the bit-shift is safe.
1807 * for huge pages, since vm_pgoff is in units of small
1808 * pages, we need to shift off the always 0 bits to get
1811 BUG_ON(shift < PAGE_SHIFT);
1812 off = vma->vm_pgoff >> (shift - PAGE_SHIFT);
1813 off += (addr - vma->vm_start) >> shift;
1814 return offset_il_node(pol, vma, off);
1816 return interleave_nodes(pol);
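/*
 * Worked example (illustrative, not from the original source): for an
 * interleave policy over nodes {0,1,2,3} on a mapping with vm_pgoff == 0,
 * a fault at vma->vm_start + 5 * PAGE_SIZE gives off == 5, so
 * offset_il_node() steps 5 % 4 == 1 node past the first node in the mask
 * and returns node 1; the same offset always maps to the same node.
 */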
1819 #ifdef CONFIG_HUGETLBFS
1821 * huge_zonelist(@vma, @addr, @gfp_flags, @mpol)
1822 * @vma: virtual memory area whose policy is sought
1823 * @addr: address in @vma for shared policy lookup and interleave policy
1824 * @gfp_flags: for requested zone
1825 * @mpol: pointer to mempolicy pointer for reference counted mempolicy
1826 * @nodemask: pointer to nodemask pointer for MPOL_BIND nodemask
1828 * Returns a zonelist suitable for a huge page allocation and a pointer
1829 * to the struct mempolicy for conditional unref after allocation.
1830 * If the effective policy is 'BIND', returns a pointer to the mempolicy's
1831 * @nodemask for filtering the zonelist.
1833 * Must be protected by read_mems_allowed_begin()
1835 struct zonelist *huge_zonelist(struct vm_area_struct *vma, unsigned long addr,
1836 gfp_t gfp_flags, struct mempolicy **mpol,
1837 nodemask_t **nodemask)
1839 struct zonelist *zl;
1841 *mpol = get_vma_policy(vma, addr);
1842 *nodemask = NULL; /* assume !MPOL_BIND */
1844 if (unlikely((*mpol)->mode == MPOL_INTERLEAVE)) {
1845 zl = node_zonelist(interleave_nid(*mpol, vma, addr,
1846 huge_page_shift(hstate_vma(vma))), gfp_flags);
1848 zl = policy_zonelist(gfp_flags, *mpol, numa_node_id());
1849 if ((*mpol)->mode == MPOL_BIND)
1850 *nodemask = &(*mpol)->v.nodes;
1856 * init_nodemask_of_mempolicy
1858 * If the current task's mempolicy is "default" [NULL], return 'false'
1859 * to indicate default policy. Otherwise, extract the policy nodemask
1860 * for 'bind' or 'interleave' policy into the argument nodemask, or
1861 * initialize the argument nodemask to contain the single node for
1862 * 'preferred' or 'local' policy and return 'true' to indicate presence
1863 * of non-default mempolicy.
1865 * We don't bother with reference counting the mempolicy [mpol_get/put]
1866 * because the current task is examining its own mempolicy and a task's
1867 * mempolicy is only ever changed by the task itself.
1869 * N.B., it is the caller's responsibility to free a returned nodemask.
1871 bool init_nodemask_of_mempolicy(nodemask_t *mask)
1873 struct mempolicy *mempolicy;
1876 if (!(mask && current->mempolicy))
1880 mempolicy = current->mempolicy;
1881 switch (mempolicy->mode) {
1882 case MPOL_PREFERRED:
1883 if (mempolicy->flags & MPOL_F_LOCAL)
1884 nid = numa_node_id();
1886 nid = mempolicy->v.preferred_node;
1887 init_nodemask_of_node(mask, nid);
1892 case MPOL_INTERLEAVE:
1893 *mask = mempolicy->v.nodes;
1899 task_unlock(current);
1906 * mempolicy_nodemask_intersects
1908 * If tsk's mempolicy is "default" [NULL], return 'true' to indicate default
1909 * policy. Otherwise, check for intersection between mask and the policy
1910 * nodemask for 'bind' or 'interleave' policy. For 'preferred' or 'local'
1911 * policy, always return true since it may allocate elsewhere on fallback.
1913 * Takes task_lock(tsk) to prevent freeing of its mempolicy.
1915 bool mempolicy_nodemask_intersects(struct task_struct *tsk,
1916 const nodemask_t *mask)
1918 struct mempolicy *mempolicy;
1924 mempolicy = tsk->mempolicy;
1928 switch (mempolicy->mode) {
1929 case MPOL_PREFERRED:
1931 * MPOL_PREFERRED and MPOL_F_LOCAL are only preferred nodes to
1932 * allocate from; they may fall back to other nodes when OOM.
1933 * Thus, it's possible for tsk to have allocated memory from
1938 case MPOL_INTERLEAVE:
1939 ret = nodes_intersects(mempolicy->v.nodes, *mask);
1949 /* Allocate a page in interleaved policy.
1950 Own path because it needs to do special accounting. */
1951 static struct page *alloc_page_interleave(gfp_t gfp, unsigned order,
1954 struct zonelist *zl;
1957 zl = node_zonelist(nid, gfp);
1958 page = __alloc_pages(gfp, order, zl);
1959 if (page && page_zone(page) == zonelist_zone(&zl->_zonerefs[0]))
1960 inc_zone_page_state(page, NUMA_INTERLEAVE_HIT);
1965 * alloc_pages_vma - Allocate a page for a VMA.
1968 * %GFP_USER user allocation.
1969 * %GFP_KERNEL kernel allocations,
1970 * %GFP_HIGHMEM highmem/user allocations,
1971 * %GFP_FS allocation should not call back into a file system.
1972 * %GFP_ATOMIC don't sleep.
1974 * @order: Order of the GFP allocation.
1975 * @vma: Pointer to VMA or NULL if not available.
1976 * @addr: Virtual Address of the allocation. Must be inside the VMA.
1977 * @node: Which node to prefer for allocation (modulo policy).
1978 * @hugepage: for hugepages try only the preferred node if possible
1980 * This function allocates a page from the kernel page pool and applies
1981 * a NUMA policy associated with the VMA or the current process.
1982 * When VMA is not NULL caller must hold down_read on the mmap_sem of the
1983 * mm_struct of the VMA to prevent it from going away. Should be used for
1984 * all allocations for pages that will be mapped into user space. Returns
1985 * NULL when no page can be allocated.
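 *
 *	A minimal call-site sketch (illustrative only; real callers such as
 *	the anonymous fault path pass their own gfp mask and fault address,
 *	and fault_addr below is a placeholder):
 *
 *		page = alloc_pages_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, 0,
 *				       vma, fault_addr, numa_node_id(), false);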
1988 alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
1989 unsigned long addr, int node, bool hugepage)
1991 struct mempolicy *pol;
1993 unsigned int cpuset_mems_cookie;
1994 struct zonelist *zl;
1998 pol = get_vma_policy(vma, addr);
1999 cpuset_mems_cookie = read_mems_allowed_begin();
2001 if (pol->mode == MPOL_INTERLEAVE) {
2004 nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
2006 page = alloc_page_interleave(gfp, order, nid);
2010 if (unlikely(IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) && hugepage)) {
2011 int hpage_node = node;
2014 * For hugepage allocation and non-interleave policy which
2015 * allows the current node (or other explicitly preferred
2016 * node) we only try to allocate from the current/preferred
2017 * node and don't fall back to other nodes, as the cost of
2018 * remote accesses would likely offset THP benefits.
2020 * If the policy is interleave, or does not allow the current
2021 * node in its nodemask, we allocate the standard way.
2023 if (pol->mode == MPOL_PREFERRED &&
2024 !(pol->flags & MPOL_F_LOCAL))
2025 hpage_node = pol->v.preferred_node;
2027 nmask = policy_nodemask(gfp, pol);
2028 if (!nmask || node_isset(hpage_node, *nmask)) {
2030 page = __alloc_pages_node(hpage_node,
2031 gfp | __GFP_THISNODE, order);
2036 nmask = policy_nodemask(gfp, pol);
2037 zl = policy_zonelist(gfp, pol, node);
2038 page = __alloc_pages_nodemask(gfp, order, zl, nmask);
2041 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2047 * alloc_pages_current - Allocate pages.
2050 * %GFP_USER user allocation,
2051 * %GFP_KERNEL kernel allocation,
2052 * %GFP_HIGHMEM highmem allocation,
2053 * %GFP_FS don't call back into a file system.
2054 * %GFP_ATOMIC don't sleep.
2055 * @order: Power of two of allocation size in pages. 0 is a single page.
2057 * Allocate a page from the kernel page pool. When not in
2058 * interrupt context, apply the current process' NUMA policy.
2059 * Returns NULL when no page can be allocated.
2061 * Don't call cpuset_update_task_memory_state() unless
2062 * 1) it's ok to take cpuset_sem (can WAIT), and
2063 * 2) allocating for current task (not interrupt).
2065 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
2067 struct mempolicy *pol = &default_policy;
2069 unsigned int cpuset_mems_cookie;
2071 if (!in_interrupt() && !(gfp & __GFP_THISNODE))
2072 pol = get_task_policy(current);
2075 cpuset_mems_cookie = read_mems_allowed_begin();
2078 * No reference counting needed for current->mempolicy
2079 * nor system default_policy
2081 if (pol->mode == MPOL_INTERLEAVE)
2082 page = alloc_page_interleave(gfp, order, interleave_nodes(pol));
2084 page = __alloc_pages_nodemask(gfp, order,
2085 policy_zonelist(gfp, pol, numa_node_id()),
2086 policy_nodemask(gfp, pol));
2088 if (unlikely(!page && read_mems_allowed_retry(cpuset_mems_cookie)))
2093 EXPORT_SYMBOL(alloc_pages_current);
2095 int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
2097 struct mempolicy *pol = mpol_dup(vma_policy(src));
2100 return PTR_ERR(pol);
2101 dst->vm_policy = pol;
2106 * If mpol_dup() sees current->cpuset == cpuset_being_rebound, then it
2107 * rebinds the mempolicy it's copying by calling mpol_rebind_policy()
2108 * with the mems_allowed returned by cpuset_mems_allowed(). This
2109 * keeps mempolicies cpuset relative after its cpuset moves. See
2110 * further kernel/cpuset.c update_nodemask().
2112 * current's mempolicy may be rebound by another task (the task that changes
2113 * cpuset's mems), so we needn't do rebind work for the current task.
2116 /* Slow path of a mempolicy duplicate */
2117 struct mempolicy *__mpol_dup(struct mempolicy *old)
2119 struct mempolicy *new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2122 return ERR_PTR(-ENOMEM);
2124 /* task's mempolicy is protected by alloc_lock */
2125 if (old == current->mempolicy) {
2128 task_unlock(current);
2132 if (current_cpuset_is_being_rebound()) {
2133 nodemask_t mems = cpuset_mems_allowed(current);
2134 if (new->flags & MPOL_F_REBINDING)
2135 mpol_rebind_policy(new, &mems, MPOL_REBIND_STEP2);
2137 mpol_rebind_policy(new, &mems, MPOL_REBIND_ONCE);
2139 atomic_set(&new->refcnt, 1);
2143 /* Slow path of a mempolicy comparison */
2144 bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
2148 if (a->mode != b->mode)
2150 if (a->flags != b->flags)
2152 if (mpol_store_user_nodemask(a))
2153 if (!nodes_equal(a->w.user_nodemask, b->w.user_nodemask))
2159 case MPOL_INTERLEAVE:
2160 return !!nodes_equal(a->v.nodes, b->v.nodes);
2161 case MPOL_PREFERRED:
2162 /* a's ->flags is the same as b's */
2163 if (a->flags & MPOL_F_LOCAL)
2165 return a->v.preferred_node == b->v.preferred_node;
2173 * Shared memory backing store policy support.
2175 * Remember policies even when nobody has shared memory mapped.
2176 * The policies are kept in a red-black tree linked from the inode.
2177 * They are protected by the sp->lock rwlock, which should be held
2178 * for any accesses to the tree.
2182 * lookup first element intersecting start-end. Caller holds sp->lock for
2183 * reading or for writing
2185 static struct sp_node *
2186 sp_lookup(struct shared_policy *sp, unsigned long start, unsigned long end)
2188 struct rb_node *n = sp->root.rb_node;
2191 struct sp_node *p = rb_entry(n, struct sp_node, nd);
2193 if (start >= p->end)
2195 else if (end <= p->start)
2203 struct sp_node *w = NULL;
2204 struct rb_node *prev = rb_prev(n);
2207 w = rb_entry(prev, struct sp_node, nd);
2208 if (w->end <= start)
2212 return rb_entry(n, struct sp_node, nd);
2216 * Insert a new shared policy into the list. Caller holds sp->lock for
2219 static void sp_insert(struct shared_policy *sp, struct sp_node *new)
2221 struct rb_node **p = &sp->root.rb_node;
2222 struct rb_node *parent = NULL;
2227 nd = rb_entry(parent, struct sp_node, nd);
2228 if (new->start < nd->start)
2230 else if (new->end > nd->end)
2231 p = &(*p)->rb_right;
2235 rb_link_node(&new->nd, parent, p);
2236 rb_insert_color(&new->nd, &sp->root);
2237 pr_debug("inserting %lx-%lx: %d\n", new->start, new->end,
2238 new->policy ? new->policy->mode : 0);
2241 /* Find shared policy intersecting idx */
2243 mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
2245 struct mempolicy *pol = NULL;
2248 if (!sp->root.rb_node)
2250 read_lock(&sp->lock);
2251 sn = sp_lookup(sp, idx, idx+1);
2253 mpol_get(sn->policy);
2256 read_unlock(&sp->lock);
2260 static void sp_free(struct sp_node *n)
2262 mpol_put(n->policy);
2263 kmem_cache_free(sn_cache, n);
2267 * mpol_misplaced - check whether current page node is valid in policy
2269 * @page: page to be checked
2270 * @vma: vm area where page mapped
2271 * @addr: virtual address where page mapped
2273 * Lookup current policy node id for vma,addr and "compare to" page's
2277 * -1 - not misplaced, page is in the right node
2278 * node - node id where the page should be
2280 * Policy determination "mimics" alloc_page_vma().
2281 * Called from fault path where we know the vma and faulting address.
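 *
 * A rough caller sketch (illustrative; the real NUMA-hinting fault path does
 * additional bookkeeping around this call):
 *
 *	target_nid = mpol_misplaced(page, vma, addr);
 *	if (target_nid != -1)
 *		... queue the page for migration towards target_nid ...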
2283 int mpol_misplaced(struct page *page, struct vm_area_struct *vma, unsigned long addr)
2285 struct mempolicy *pol;
2287 int curnid = page_to_nid(page);
2288 unsigned long pgoff;
2289 int thiscpu = raw_smp_processor_id();
2290 int thisnid = cpu_to_node(thiscpu);
2296 pol = get_vma_policy(vma, addr);
2297 if (!(pol->flags & MPOL_F_MOF))
2300 switch (pol->mode) {
2301 case MPOL_INTERLEAVE:
2302 BUG_ON(addr >= vma->vm_end);
2303 BUG_ON(addr < vma->vm_start);
2305 pgoff = vma->vm_pgoff;
2306 pgoff += (addr - vma->vm_start) >> PAGE_SHIFT;
2307 polnid = offset_il_node(pol, vma, pgoff);
2310 case MPOL_PREFERRED:
2311 if (pol->flags & MPOL_F_LOCAL)
2312 polnid = numa_node_id();
2314 polnid = pol->v.preferred_node;
2320 * allows binding to multiple nodes.
2321 * use current page if in policy nodemask,
2322 * else select nearest allowed node, if any.
2323 * If no allowed nodes, use current [!misplaced].
2325 if (node_isset(curnid, pol->v.nodes))
2327 z = first_zones_zonelist(
2328 node_zonelist(numa_node_id(), GFP_HIGHUSER),
2329 gfp_zone(GFP_HIGHUSER),
2331 polnid = z->zone->node;
2338 /* Migrate the page towards the node whose CPU is referencing it */
2339 if (pol->flags & MPOL_F_MORON) {
2342 if (!should_numa_migrate_memory(current, page, curnid, thiscpu))
2346 if (curnid != polnid)
2355 * Drop the (possibly final) reference to task->mempolicy. It needs to be
2356 * dropped after task->mempolicy is set to NULL so that any allocation done as
2357 * part of its kmem_cache_free(), such as by KASAN, doesn't reference a freed
2360 void mpol_put_task_policy(struct task_struct *task)
2362 struct mempolicy *pol;
2365 pol = task->mempolicy;
2366 task->mempolicy = NULL;
2371 static void sp_delete(struct shared_policy *sp, struct sp_node *n)
2373 pr_debug("deleting %lx-l%lx\n", n->start, n->end);
2374 rb_erase(&n->nd, &sp->root);
2378 static void sp_node_init(struct sp_node *node, unsigned long start,
2379 unsigned long end, struct mempolicy *pol)
2381 node->start = start;
2386 static struct sp_node *sp_alloc(unsigned long start, unsigned long end,
2387 struct mempolicy *pol)
2390 struct mempolicy *newpol;
2392 n = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2396 newpol = mpol_dup(pol);
2397 if (IS_ERR(newpol)) {
2398 kmem_cache_free(sn_cache, n);
2401 newpol->flags |= MPOL_F_SHARED;
2402 sp_node_init(n, start, end, newpol);
2407 /* Replace a policy range. */
2408 static int shared_policy_replace(struct shared_policy *sp, unsigned long start,
2409 unsigned long end, struct sp_node *new)
2412 struct sp_node *n_new = NULL;
2413 struct mempolicy *mpol_new = NULL;
2417 write_lock(&sp->lock);
2418 n = sp_lookup(sp, start, end);
2419 /* Take care of old policies in the same range. */
2420 while (n && n->start < end) {
2421 struct rb_node *next = rb_next(&n->nd);
2422 if (n->start >= start) {
2428 /* Old policy spanning whole new range. */
2433 *mpol_new = *n->policy;
2434 atomic_set(&mpol_new->refcnt, 1);
2435 sp_node_init(n_new, end, n->end, mpol_new);
2437 sp_insert(sp, n_new);
2446 n = rb_entry(next, struct sp_node, nd);
2450 write_unlock(&sp->lock);
2457 kmem_cache_free(sn_cache, n_new);
2462 write_unlock(&sp->lock);
2464 n_new = kmem_cache_alloc(sn_cache, GFP_KERNEL);
2467 mpol_new = kmem_cache_alloc(policy_cache, GFP_KERNEL);
2474 * mpol_shared_policy_init - initialize shared policy for inode
2475 * @sp: pointer to inode shared policy
2476 * @mpol: struct mempolicy to install
2478 * Install non-NULL @mpol in inode's shared policy rb-tree.
2479 * On entry, the current task has a reference on a non-NULL @mpol.
2480 * This must be released on exit.
2481 * This is called during get_inode() calls, so we can use GFP_KERNEL.
2483 void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol)
2487 sp->root = RB_ROOT; /* empty tree == default mempolicy */
2488 rwlock_init(&sp->lock);
2491 struct vm_area_struct pvma;
2492 struct mempolicy *new;
2493 NODEMASK_SCRATCH(scratch);
2497 /* contextualize the tmpfs mount point mempolicy */
2498 new = mpol_new(mpol->mode, mpol->flags, &mpol->w.user_nodemask);
2500 goto free_scratch; /* no valid nodemask intersection */
2503 ret = mpol_set_nodemask(new, &mpol->w.user_nodemask, scratch);
2504 task_unlock(current);
2508 /* Create pseudo-vma that contains just the policy */
2509 memset(&pvma, 0, sizeof(struct vm_area_struct));
2510 pvma.vm_end = TASK_SIZE; /* policy covers entire file */
2511 mpol_set_shared_policy(sp, &pvma, new); /* adds ref */
2514 mpol_put(new); /* drop initial ref */
2516 NODEMASK_SCRATCH_FREE(scratch);
2518 mpol_put(mpol); /* drop our incoming ref on sb mpol */
2522 int mpol_set_shared_policy(struct shared_policy *info,
2523 struct vm_area_struct *vma, struct mempolicy *npol)
2526 struct sp_node *new = NULL;
2527 unsigned long sz = vma_pages(vma);
2529 pr_debug("set_shared_policy %lx sz %lu %d %d %lx\n",
2531 sz, npol ? npol->mode : -1,
2532 npol ? npol->flags : -1,
2533 npol ? nodes_addr(npol->v.nodes)[0] : NUMA_NO_NODE);
2536 new = sp_alloc(vma->vm_pgoff, vma->vm_pgoff + sz, npol);
2540 err = shared_policy_replace(info, vma->vm_pgoff, vma->vm_pgoff+sz, new);
2546 /* Free a backing policy store on inode delete. */
2547 void mpol_free_shared_policy(struct shared_policy *p)
2550 struct rb_node *next;
2552 if (!p->root.rb_node)
2554 write_lock(&p->lock);
2555 next = rb_first(&p->root);
2557 n = rb_entry(next, struct sp_node, nd);
2558 next = rb_next(&n->nd);
2561 write_unlock(&p->lock);
2564 #ifdef CONFIG_NUMA_BALANCING
2565 static int __initdata numabalancing_override;
2567 static void __init check_numabalancing_enable(void)
2569 bool numabalancing_default = false;
2571 if (IS_ENABLED(CONFIG_NUMA_BALANCING_DEFAULT_ENABLED))
2572 numabalancing_default = true;
2574 /* Parsed by setup_numabalancing. override == 1 enables, -1 disables */
2575 if (numabalancing_override)
2576 set_numabalancing_state(numabalancing_override == 1);
2578 if (num_online_nodes() > 1 && !numabalancing_override) {
2579 pr_info("%s automatic NUMA balancing. Configure with numa_balancing= or the kernel.numa_balancing sysctl\n",
2580 numabalancing_default ? "Enabling" : "Disabling");
2581 set_numabalancing_state(numabalancing_default);
2585 static int __init setup_numabalancing(char *str)
2591 if (!strcmp(str, "enable")) {
2592 numabalancing_override = 1;
2594 } else if (!strcmp(str, "disable")) {
2595 numabalancing_override = -1;
2600 pr_warn("Unable to parse numa_balancing=\n");
2604 __setup("numa_balancing=", setup_numabalancing);
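/*
 * Usage note (derived from the parsing above, not a new interface):
 * automatic NUMA balancing can be forced on or off at boot with
 * "numa_balancing=enable" or "numa_balancing=disable" on the kernel
 * command line, or toggled at runtime via the kernel.numa_balancing
 * sysctl, e.g. "sysctl kernel.numa_balancing=1".
 */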
2606 static inline void __init check_numabalancing_enable(void)
2609 #endif /* CONFIG_NUMA_BALANCING */
2611 /* assumes fs == KERNEL_DS */
2612 void __init numa_policy_init(void)
2614 nodemask_t interleave_nodes;
2615 unsigned long largest = 0;
2616 int nid, prefer = 0;
2618 policy_cache = kmem_cache_create("numa_policy",
2619 sizeof(struct mempolicy),
2620 0, SLAB_PANIC, NULL);
2622 sn_cache = kmem_cache_create("shared_policy_node",
2623 sizeof(struct sp_node),
2624 0, SLAB_PANIC, NULL);
2626 for_each_node(nid) {
2627 preferred_node_policy[nid] = (struct mempolicy) {
2628 .refcnt = ATOMIC_INIT(1),
2629 .mode = MPOL_PREFERRED,
2630 .flags = MPOL_F_MOF | MPOL_F_MORON,
2631 .v = { .preferred_node = nid, },
2636 * Set interleaving policy for system init. Interleaving is only
2637 * enabled across suitably sized nodes (default is >= 16MB); if all
2638 * nodes are smaller than that, fall back to the largest node.
2640 nodes_clear(interleave_nodes);
2641 for_each_node_state(nid, N_MEMORY) {
2642 unsigned long total_pages = node_present_pages(nid);
2644 /* Preserve the largest node */
2645 if (largest < total_pages) {
2646 largest = total_pages;
2650 /* Interleave this node? */
2651 if ((total_pages << PAGE_SHIFT) >= (16 << 20))
2652 node_set(nid, interleave_nodes);
2655 /* All too small, use the largest */
2656 if (unlikely(nodes_empty(interleave_nodes)))
2657 node_set(prefer, interleave_nodes);
2659 if (do_set_mempolicy(MPOL_INTERLEAVE, 0, &interleave_nodes))
2660 pr_err("%s: interleaving failed\n", __func__);
2662 check_numabalancing_enable();
2665 /* Reset policy of current process to default */
2666 void numa_default_policy(void)
2668 do_set_mempolicy(MPOL_DEFAULT, 0, NULL);
2672 * Parse and format mempolicy from/to strings
2676 * "local" is implemented internally by MPOL_PREFERRED with the MPOL_F_LOCAL flag.
2678 static const char * const policy_modes[] =
2680 [MPOL_DEFAULT] = "default",
2681 [MPOL_PREFERRED] = "prefer",
2682 [MPOL_BIND] = "bind",
2683 [MPOL_INTERLEAVE] = "interleave",
2684 [MPOL_LOCAL] = "local",
2690 * mpol_parse_str - parse string to mempolicy, for tmpfs mpol mount option.
2691 * @str: string containing mempolicy to parse
2692 * @mpol: pointer to struct mempolicy pointer, returned on success.
2695 * <mode>[=<flags>][:<nodelist>]
2697 * On success, returns 0, else 1
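*
* Illustrative inputs (examples assumed from the format above, not an
* exhaustive list):
*	"bind:0-3"			MPOL_BIND over nodes 0-3
*	"interleave=relative:0,2"	MPOL_INTERLEAVE with MPOL_F_RELATIVE_NODES
*	"prefer:1"			MPOL_PREFERRED, preferred node 1
*	"local"				MPOL_PREFERRED with MPOL_F_LOCAL (no nodelist)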
2699 int mpol_parse_str(char *str, struct mempolicy **mpol)
2701 struct mempolicy *new = NULL;
2702 unsigned short mode;
2703 unsigned short mode_flags;
2705 char *nodelist = strchr(str, ':');
2706 char *flags = strchr(str, '=');
2710 /* NUL-terminate mode or flags string */
2712 if (nodelist_parse(nodelist, nodes))
2714 if (!nodes_subset(nodes, node_states[N_MEMORY]))
2720 *flags++ = '\0'; /* terminate mode string */
2722 for (mode = 0; mode < MPOL_MAX; mode++) {
2723 if (!strcmp(str, policy_modes[mode])) {
2727 if (mode >= MPOL_MAX)
2731 case MPOL_PREFERRED:
2733 * Insist on a nodelist of one node only
2736 char *rest = nodelist;
2737 while (isdigit(*rest))
2743 case MPOL_INTERLEAVE:
2745 * Default to online nodes with memory if no nodelist
2748 nodes = node_states[N_MEMORY];
2752 * Don't allow a nodelist; mpol_new() checks flags
2756 mode = MPOL_PREFERRED;
2760 * Insist on an empty nodelist
2767 * Insist on a nodelist
2776 * Currently, we only support two mutually exclusive
2779 if (!strcmp(flags, "static"))
2780 mode_flags |= MPOL_F_STATIC_NODES;
2781 else if (!strcmp(flags, "relative"))
2782 mode_flags |= MPOL_F_RELATIVE_NODES;
2787 new = mpol_new(mode, mode_flags, &nodes);
2792 * Save nodes for mpol_to_str() to show the tmpfs mount options
2793 * for /proc/mounts, /proc/pid/mounts and /proc/pid/mountinfo.
2795 if (mode != MPOL_PREFERRED)
2796 new->v.nodes = nodes;
2798 new->v.preferred_node = first_node(nodes);
2800 new->flags |= MPOL_F_LOCAL;
2803 * Save nodes for contextualization: this will be used to "clone"
2804 * the mempolicy in a specific context [cpuset] at a later time.
2806 new->w.user_nodemask = nodes;
2811 /* Restore string for error message */
2820 #endif /* CONFIG_TMPFS */
2823 * mpol_to_str - format a mempolicy structure for printing
2824 * @buffer: to contain formatted mempolicy string
2825 * @maxlen: length of @buffer
2826 * @pol: pointer to mempolicy to be formatted
2828 * Convert @pol into a string. If @buffer is too short, truncate the string.
2829 * Recommend a @maxlen of at least 32: enough for the longest mode ("interleave"),
2830 * the longest flag ("relative"), and at least a few node ids.
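*
* Example output (a sketch of the shapes produced below): "default",
* "prefer:1", "bind=static:0-3", "interleave:0-7" or "local"; any
* nodemask is appended as ":<nodelist>" by the final scnprintf().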
2832 void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol)
2835 nodemask_t nodes = NODE_MASK_NONE;
2836 unsigned short mode = MPOL_DEFAULT;
2837 unsigned short flags = 0;
2839 if (pol && pol != &default_policy && !(pol->flags & MPOL_F_MORON)) {
2847 case MPOL_PREFERRED:
2848 if (flags & MPOL_F_LOCAL)
2851 node_set(pol->v.preferred_node, nodes);
2854 case MPOL_INTERLEAVE:
2855 nodes = pol->v.nodes;
2859 snprintf(p, maxlen, "unknown");
2863 p += snprintf(p, maxlen, "%s", policy_modes[mode]);
2865 if (flags & MPOL_MODE_FLAGS) {
2866 p += snprintf(p, buffer + maxlen - p, "=");
2869 * Currently, the only defined flags are mutually exclusive
2871 if (flags & MPOL_F_STATIC_NODES)
2872 p += snprintf(p, buffer + maxlen - p, "static");
2873 else if (flags & MPOL_F_RELATIVE_NODES)
2874 p += snprintf(p, buffer + maxlen - p, "relative");
2877 if (!nodes_empty(nodes))
2878 p += scnprintf(p, buffer + maxlen - p, ":%*pbl",
2879 nodemask_pr_args(&nodes));