diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ec9f11d..d514eeb 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -37,6 +37,7 @@
 #include <linux/ratelimit.h>
 #include <linux/kthread.h>
 #include <linux/init.h>
+#include <linux/mmu_notifier.h>
 
 #include <asm/tlb.h>
 #include "internal.h"
@@ -491,6 +492,21 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
        }
 
        /*
+        * If the mm has notifiers then we would need to invalidate them around
+        * unmap_page_range and that is risky because notifiers can sleep and
+        * what they do is basically nondeterministic.  So let's have a short
+        * sleep to give the oom victim some more time.
+        * TODO: we really want to get rid of this ugly hack and make sure that
+        * notifiers cannot block for an unbounded amount of time and add
+        * mmu_notifier_invalidate_range_{start,end} around unmap_page_range
+        */
+       if (mm_has_notifiers(mm)) {
+               up_read(&mm->mmap_sem);
+               schedule_timeout_idle(HZ);
+               goto unlock_oom;
+       }
+
+       /*
         * increase mm_users only after we know we will reap something so
         * that the mmput_async is called only when we have reaped something
         * and delayed __mmput doesn't matter that much
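The hunk above opts to back off rather than invalidate: if the mm has notifiers, the reaper drops mmap_sem, sleeps for a second, and retries later. For illustration, a sketch of what the TODO is asking for, using the mmu_notifier invalidate API as it exists in this 4.x tree; this is not part of the patch, and it is only safe once notifiers are guaranteed not to block:

        /* Hypothetical resolution of the TODO above; illustration only. */
        mmu_notifier_invalidate_range_start(mm, vma->vm_start, vma->vm_end);
        unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end, &details);
        mmu_notifier_invalidate_range_end(mm, vma->vm_start, vma->vm_end);
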
@@ -508,7 +524,6 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
         */
        set_bit(MMF_UNSTABLE, &mm->flags);
 
-       tlb_gather_mmu(&tlb, mm, 0, -1);
        for (vma = mm->mmap ; vma; vma = vma->vm_next) {
                if (is_vm_hugetlb_page(vma))
                        continue;
@@ -530,11 +545,13 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm)
                 * we do not want to block exit_mmap by keeping mm ref
                 * count elevated without a good reason.
                 */
-               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED))
+               if (vma_is_anonymous(vma) || !(vma->vm_flags & VM_SHARED)) {
+                       tlb_gather_mmu(&tlb, mm, vma->vm_start, vma->vm_end);
                        unmap_page_range(&tlb, vma, vma->vm_start, vma->vm_end,
                                         &details);
+                       tlb_finish_mmu(&tlb, vma->vm_start, vma->vm_end);
+               }
        }
-       tlb_finish_mmu(&tlb, 0, -1);
        pr_info("oom_reaper: reaped process %d (%s), now anon-rss:%lukB, file-rss:%lukB, shmem-rss:%lukB\n",
                        task_pid_nr(tsk), tsk->comm,
                        K(get_mm_counter(mm, MM_ANONPAGES)),
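Before this change a single mmu_gather spanned the entire address space (tlb_gather_mmu(&tlb, mm, 0, -1) before the loop, tlb_finish_mmu(&tlb, 0, -1) after it), so the TLB flush could be deferred until every VMA had been unmapped, leaving a window in which another thread could still reach already-freed pages through stale TLB entries. Gathering and finishing per VMA closes that window. A minimal sketch of the 4.x mmu_gather lifecycle the hunk relies on, with start/end standing in for vma->vm_start/vma->vm_end:

        struct mmu_gather tlb;

        tlb_gather_mmu(&tlb, mm, start, end);   /* begin batching for [start, end) */
        unmap_page_range(&tlb, vma, start, end, &details); /* clear PTEs, queue pages */
        tlb_finish_mmu(&tlb, start, end);       /* flush TLBs, then free queued pages */
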
@@ -609,8 +626,8 @@ static void wake_oom_reaper(struct task_struct *tsk)
        if (!oom_reaper_th)
                return;
 
-       /* tsk is already queued? */
-       if (tsk == oom_reaper_list || tsk->oom_reaper_list)
+       /* mm is already queued? */
+       if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &tsk->signal->oom_mm->flags))
                return;
 
        get_task_struct(tsk);
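The old pointer test (tsk == oom_reaper_list || tsk->oom_reaper_list) is neither atomic nor keyed on what actually gets reaped, so the same victim could be enqueued a second time and corrupt the singly linked reaper list. test_and_set_bit() sets the bit and returns its previous value in one atomic step, so exactly one caller proceeds per mm. The guard pattern in isolation (a sketch, not taken from this patch):

        /* Atomically returns the old bit value: only the first caller sees 0. */
        if (test_and_set_bit(MMF_OOM_REAP_QUEUED, &mm->flags))
                return;         /* already queued by someone else */
        /* bit was clear and is now set: enqueue exactly once */
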
@@ -844,6 +861,13 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
         * still freeing memory.
         */
        read_lock(&tasklist_lock);
+
+       /*
+        * The task 'p' might have already exited before reaching here. The
+        * final put_task_struct() would then free task_struct 'p' while the
+        * loop below still accesses its fields, so take an extra reference.
+        */
+       get_task_struct(p);
        for_each_thread(p, t) {
                list_for_each_entry(child, &t->children, sibling) {
                        unsigned int child_points;
@@ -863,6 +887,7 @@ static void oom_kill_process(struct oom_control *oc, const char *message)
                        }
                }
        }
+       put_task_struct(p);
        read_unlock(&tasklist_lock);
 
        p = find_lock_task_mm(victim);
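
for_each_thread() dereferences 'p' on every iteration, but nothing here guaranteed that 'p' stayed allocated: if the task exited and the last reference was dropped elsewhere, the walk would read freed memory. The added get_task_struct()/put_task_struct() pair pins the task_struct for the duration of the loop. The pinning pattern in general form (a sketch, assuming a task pointer reached under tasklist_lock):

        read_lock(&tasklist_lock);
        get_task_struct(p);             /* pin: cannot be freed while we hold a ref */
        /* ... walk p's threads and their children ... */
        put_task_struct(p);             /* unpin: may free p if this was the last ref */
        read_unlock(&tasklist_lock);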