/*
 *      mm/mremap.c
 *
 *      (C) Copyright 1996 Linus Torvalds
 *
 *      Address space accounting code   <alan@lxorguk.ukuu.org.uk>
 *      (C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/swapops.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>
#include <linux/uaccess.h>
#include <linux/mm-arch-hooks.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

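/*
 * Walk the source page tables down to the pmd entry covering @addr.
 * Returns NULL if any level is missing or bad, in which case there is
 * nothing to move for this range.
 */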
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return NULL;

        return pmd;
}

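/*
 * Allocate the page-table levels needed to hold a pmd entry for the
 * destination address.  Returns the pmd, or NULL if allocation failed.
 */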
static pmd_t *alloc_new_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
                            unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        VM_BUG_ON(pmd_trans_huge(*pmd));

        return pmd;
}

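/*
 * Take/drop the rmap locks (i_mmap_rwsem for file mappings, the anon_vma
 * lock for anonymous ones) so that rmap walkers see a consistent view
 * while ptes are being moved.  See the comment in move_ptes() for when
 * this is actually needed.
 */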
static void take_rmap_locks(struct vm_area_struct *vma)
{
        if (vma->vm_file)
                i_mmap_lock_write(vma->vm_file->f_mapping);
        if (vma->anon_vma)
                anon_vma_lock_write(vma->anon_vma);
}

static void drop_rmap_locks(struct vm_area_struct *vma)
{
        if (vma->anon_vma)
                anon_vma_unlock_write(vma->anon_vma);
        if (vma->vm_file)
                i_mmap_unlock_write(vma->vm_file->f_mapping);
}

static pte_t move_soft_dirty_pte(pte_t pte)
{
        /*
         * Set soft dirty bit so we can notice
         * in userspace the ptes were moved.
         */
#ifdef CONFIG_MEM_SOFT_DIRTY
        if (pte_present(pte))
                pte = pte_mksoft_dirty(pte);
        else if (is_swap_pte(pte))
                pte = pte_swp_mksoft_dirty(pte);
#endif
        return pte;
}

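/*
 * Move the ptes mapping [old_addr, old_end) under old_pmd over to new_pmd
 * at new_addr, one page at a time, holding both page-table locks.  Dirty
 * ptes force an immediate TLB flush before the old lock is dropped;
 * otherwise *need_flush is set and the caller flushes once at the end.
 */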
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
{
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
        bool force_flush = false;
        unsigned long len = old_end - old_addr;

        /*
         * When need_rmap_locks is true, we take the i_mmap_rwsem and anon_vma
         * locks to ensure that rmap will always observe either the old or the
         * new ptes. This is the easiest way to avoid races with
         * truncate_pagecache(), page migration, etc...
         *
         * When need_rmap_locks is false, we use other ways to avoid
         * such races:
         *
         * - During exec() shift_arg_pages(), we use a specially tagged vma
         *   which rmap call sites look for using is_vma_temporary_stack().
         *
         * - During mremap(), new_vma is often known to be placed after vma
         *   in rmap traversal order. This ensures rmap will always observe
         *   either the old pte, or the new pte, or both (the page table locks
         *   serialize access to individual ptes, but only rmap traversal
         *   order guarantees that we won't miss both the old and new ptes).
         */
        if (need_rmap_locks)
                take_rmap_locks(vma);

        /*
         * We don't have to worry about the ordering of src and dst
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        new_pte = pte_offset_map(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
        flush_tlb_batched_pending(vma->vm_mm);
        arch_enter_lazy_mmu_mode();

        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;

                pte = ptep_get_and_clear(mm, old_addr, old_pte);
                /*
                 * If we are remapping a dirty PTE, make sure
                 * to flush TLB before we drop the PTL for the
                 * old PTE or we may race with page_mkclean().
                 *
                 * This check has to be done after we removed the
                 * old PTE from page tables or another thread may
                 * dirty it after the check and before the removal.
                 */
                if (pte_present(pte) && pte_dirty(pte))
                        force_flush = true;
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                pte = move_soft_dirty_pte(pte);
                set_pte_at(mm, new_addr, new_pte, pte);
        }

        arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap(new_pte - 1);
        if (force_flush)
                flush_tlb_range(vma, old_end - len, old_end);
        else
                *need_flush = true;
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (need_rmap_locks)
                drop_rmap_locks(vma);
}

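/*
 * Cap the amount of work done per move_ptes() call: move_page_tables()
 * calls cond_resched() each iteration, so limiting the extent bounds the
 * scheduling latency of a large mremap().
 */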
#define LATENCY_LIMIT   (64 * PAGE_SIZE)

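/*
 * Move page tables for [old_addr, old_addr + len) over to new_addr, one
 * pmd range at a time, using move_huge_pmd() for huge pmds and move_ptes()
 * for the rest.  Returns how many bytes were actually moved, which can be
 * less than len if allocating a destination pmd or pte fails.
 */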
unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len,
                bool need_rmap_locks)
{
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;
        bool need_flush = false;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */

        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);

        mmun_start = old_addr;
        mmun_end   = old_end;
        mmu_notifier_invalidate_range_start(vma->vm_mm, mmun_start, mmun_end);

        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
                next = (old_addr + PMD_SIZE) & PMD_MASK;
                /* even if next overflowed, extent below will be ok */
                extent = next - old_addr;
                if (extent > old_end - old_addr)
                        extent = old_end - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
                new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
                if (!new_pmd)
                        break;
                if (pmd_trans_huge(*old_pmd)) {
                        if (extent == HPAGE_PMD_SIZE) {
                                bool moved;
                                /* See comment in move_ptes() */
                                if (need_rmap_locks)
                                        take_rmap_locks(vma);
                                moved = move_huge_pmd(vma, old_addr, new_addr,
                                                    old_end, old_pmd, new_pmd,
                                                    &need_flush);
                                if (need_rmap_locks)
                                        drop_rmap_locks(vma);
                                if (moved)
                                        continue;
                        }
                        split_huge_pmd(vma, old_pmd, old_addr);
                        if (pmd_trans_unstable(old_pmd))
                                continue;
                }
                if (pte_alloc(new_vma->vm_mm, new_pmd, new_addr))
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
                        extent = next - new_addr;
                if (extent > LATENCY_LIMIT)
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
                          new_pmd, new_addr, need_rmap_locks, &need_flush);
        }
        if (need_flush)
                flush_tlb_range(vma, old_end - len, old_addr);

        mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);

        return len + old_addr - old_end;        /* how much done */
}

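/*
 * Move an existing mapping from old_addr to new_addr: set up new_vma with
 * copy_vma(), move the page tables across, then unmap the old range.  On
 * failure the already-moved entries are moved back and the new area is
 * unmapped instead.  Returns the new address, or an error value.
 */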
static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr, bool *locked)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;
        int err;
        bool need_rmap_locks;

        /*
         * We'd prefer to avoid failure later on in do_munmap:
         * which may split one vma into three before unmapping.
         */
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;

        /*
         * Advise KSM to break any KSM pages in the area to be moved:
         * it would be confusing if they were to turn up at the new
         * location, where they happen to coincide with different KSM
         * pages recently unmapped.  But leave vma->vm_flags as it was,
         * so KSM can come around to merge on vma and new_vma afterwards.
         */
        err = ksm_madvise(vma, old_addr, old_addr + old_len,
                                                MADV_UNMERGEABLE, &vm_flags);
        if (err)
                return err;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff,
                           &need_rmap_locks);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len,
                                     need_rmap_locks);
        if (moved_len < old_len) {
                err = -ENOMEM;
        } else if (vma->vm_ops && vma->vm_ops->mremap) {
                err = vma->vm_ops->mremap(new_vma);
        }

        if (unlikely(err)) {
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len,
                                 true);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = err;
        } else {
                arch_remap(mm, old_addr, old_addr + old_len,
                           new_addr, new_addr + new_len);
        }

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
                        split = 1;
        }

        /*
         * If we failed to move page tables we still do total_vm increment
         * since do_munmap() will decrement it by old_len == new_len.
         *
         * Since total_vm is about to be raised artificially high for a
         * moment, we need to restore high watermark afterwards: if stats
         * are taken meanwhile, total_vm and hiwater_vm appear too high.
         * If this were a serious issue, we'd add a flag to do_munmap().
         */
        hiwater_vm = mm->hiwater_vm;
        vm_stat_account(mm, vma->vm_flags, new_len >> PAGE_SHIFT);

        /* Tell pfnmap has moved from this vma */
        if (unlikely(vma->vm_flags & VM_PFNMAP))
                untrack_pfn_moved(vma);

        if (do_munmap(mm, old_addr, old_len) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                vm_unacct_memory(excess >> PAGE_SHIFT);
                excess = 0;
        }
        mm->hiwater_vm = hiwater_vm;

        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
                vma->vm_flags |= VM_ACCOUNT;
                if (split)
                        vma->vm_next->vm_flags |= VM_ACCOUNT;
        }

        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += new_len >> PAGE_SHIFT;
                *locked = true;
        }

        return new_addr;
}

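/*
 * Look up and sanity-check the vma covering [addr, addr + old_len), and
 * verify that it may grow to new_len (mlock limits, may_expand_vm(),
 * VM_ACCOUNT charging).  Returns the vma or an ERR_PTR(); *p is set to
 * the number of pages charged when accounting applies.
 */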
static struct vm_area_struct *vma_to_resize(unsigned long addr,
        unsigned long old_len, unsigned long new_len, unsigned long *p)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = find_vma(mm, addr);
        unsigned long pgoff;

        if (!vma || vma->vm_start > addr)
                return ERR_PTR(-EFAULT);

        if (is_vm_hugetlb_page(vma))
                return ERR_PTR(-EINVAL);

        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                return ERR_PTR(-EFAULT);

        if (new_len == old_len)
                return vma;

        /* Need to be careful about a growing mapping */
        pgoff = (addr - vma->vm_start) >> PAGE_SHIFT;
        pgoff += vma->vm_pgoff;
        if (pgoff + (new_len >> PAGE_SHIFT) < pgoff)
                return ERR_PTR(-EINVAL);

        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP))
                return ERR_PTR(-EFAULT);

        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = mm->locked_vm << PAGE_SHIFT;
                lock_limit = rlimit(RLIMIT_MEMLOCK);
                locked += new_len - old_len;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        return ERR_PTR(-EAGAIN);
        }

        if (!may_expand_vm(mm, vma->vm_flags,
                                (new_len - old_len) >> PAGE_SHIFT))
                return ERR_PTR(-ENOMEM);

        if (vma->vm_flags & VM_ACCOUNT) {
                unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
                if (security_vm_enough_memory_mm(mm, charged))
                        return ERR_PTR(-ENOMEM);
                *p = charged;
        }

        return vma;
}

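/*
 * Handle the MREMAP_FIXED case: validate the fixed destination, unmap
 * whatever is currently there, shrink the source if needed, and then move
 * the vma to new_addr with move_vma().
 */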
static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
                unsigned long new_addr, unsigned long new_len, bool *locked)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        unsigned long map_flags;

        if (offset_in_page(new_addr))
                goto out;

        if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                goto out;

        /* Ensure the old/new locations do not overlap */
        if (addr + old_len > new_addr && new_addr + new_len > addr)
                goto out;

        ret = do_munmap(mm, new_addr, new_len);
        if (ret)
                goto out;

        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                old_len = new_len;
        }

        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        map_flags = MAP_FIXED;
        if (vma->vm_flags & VM_MAYSHARE)
                map_flags |= MAP_SHARED;

        ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
                                ((addr - vma->vm_start) >> PAGE_SHIFT),
                                map_flags);
        if (offset_in_page(ret))
                goto out1;

        ret = move_vma(vma, addr, old_len, new_len, new_addr, locked);
        if (!(offset_in_page(ret)))
                goto out;
out1:
        vm_unacct_memory(charged);

out:
        return ret;
}

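/*
 * Can this vma be grown in place by delta bytes, i.e. is the address range
 * immediately after it still free?
 */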
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
        unsigned long end = vma->vm_end + delta;
        if (end < vma->vm_end) /* overflow */
                return 0;
        if (vma->vm_next && vma->vm_next->vm_start < end) /* intersection */
                return 0;
        if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
                              0, MAP_FIXED) & ~PAGE_MASK)
                return 0;
        return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
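/*
 * For reference, the userspace wrapper (see mremap(2)) is roughly:
 *
 *      void *mremap(void *old_address, size_t old_size,
 *                   size_t new_size, int flags, void *new_address);
 *
 * where the final new_address argument is only used with MREMAP_FIXED.
 */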
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        bool locked = false;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                return ret;

        if (flags & MREMAP_FIXED && !(flags & MREMAP_MAYMOVE))
                return ret;

        if (offset_in_page(addr))
                return ret;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /*
         * We allow a zero old-len as a special case
         * for DOS-emu "duplicate shm area" thing. But
         * a zero new-len is nonsensical.
         */
        if (!new_len)
                return ret;

        if (down_write_killable(&current->mm->mmap_sem))
                return -EINTR;

        if (flags & MREMAP_FIXED) {
                ret = mremap_to(addr, old_len, new_addr, new_len,
                                &locked);
                goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         * do_munmap does all the needed commit accounting
         */
        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                goto out;
        }

        /*
         * Ok, we need to grow..
         */
        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        /*
         * old_len exactly to the end of the area..
         */
        if (old_len == vma->vm_end - addr) {
                /* can we just expand the current mapping? */
                if (vma_expandable(vma, new_len - old_len)) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;

                        if (vma_adjust(vma, vma->vm_start, addr + new_len,
                                       vma->vm_pgoff, NULL)) {
                                ret = -ENOMEM;
                                goto out;
                        }

                        vm_stat_account(mm, vma->vm_flags, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
                                locked = true;
                                new_addr = addr;
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                unsigned long map_flags = 0;
                if (vma->vm_flags & VM_MAYSHARE)
                        map_flags |= MAP_SHARED;

                new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
                                        vma->vm_pgoff +
                                        ((addr - vma->vm_start) >> PAGE_SHIFT),
                                        map_flags);
                if (offset_in_page(new_addr)) {
                        ret = new_addr;
                        goto out;
                }

                ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
        }
out:
        if (offset_in_page(ret)) {
                vm_unacct_memory(charged);
                locked = false;
        }
        up_write(&current->mm->mmap_sem);
        if (locked && new_len > old_len)
                mm_populate(new_addr + old_len, new_len - old_len);
        return ret;
}