mm: migrate: lock buffers before migrate_page_move_mapping()

diff --git a/mm/migrate.c b/mm/migrate.c
index f7e4bfd..e0bc03e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -327,16 +327,13 @@ void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep,
 
        /*
         * Once page cache replacement of page migration started, page_count
-        * *must* be zero. And, we don't want to call wait_on_page_locked()
-        * against a page without get_page().
-        * So, we use get_page_unless_zero(), here. Even failed, page fault
-        * will occur again.
+        * is zero; but we must not call put_and_wait_on_page_locked() without
+        * a ref. Use get_page_unless_zero(), and just fault again if it fails.
         */
        if (!get_page_unless_zero(page))
                goto out;
        pte_unmap_unlock(ptep, ptl);
-       wait_on_page_locked(page);
-       put_page(page);
+       put_and_wait_on_page_locked(page);
        return;
 out:
        pte_unmap_unlock(ptep, ptl);
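
The reason the ordering matters is visible later in this same patch: the migrating side only freezes and replaces the page when its reference count is exactly what it expects, so an extra reference held by a task sleeping in wait_on_page_locked() makes that check fail and forces migration to retry. For reference, the check, reproduced from the migrate_page_move_mapping() hunk below:

	if (page_count(page) != expected_count || xas_load(&xas) != page) {
		xas_unlock_irq(&xas);
		return -EAGAIN;	/* an extra ref, e.g. a sleeping waiter, means retry */
	}

put_and_wait_on_page_locked() (added by a companion mm/filemap.c change, not shown in this diff) drops the caller's reference before actually sleeping, so the waiter no longer pins the page while migration is in progress.
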
@@ -370,8 +367,7 @@ void pmd_migration_entry_wait(struct mm_struct *mm, pmd_t *pmd)
        if (!get_page_unless_zero(page))
                goto unlock;
        spin_unlock(ptl);
-       wait_on_page_locked(page);
-       put_page(page);
+       put_and_wait_on_page_locked(page);
        return;
 unlock:
        spin_unlock(ptl);
@@ -428,6 +424,22 @@ static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
 }
 #endif /* CONFIG_BLOCK */
 
+static int expected_page_refs(struct page *page)
+{
+       int expected_count = 1;
+
+       /*
+        * Device public or private pages have an extra refcount as they are
+        * ZONE_DEVICE pages.
+        */
+       expected_count += is_device_private_page(page);
+       expected_count += is_device_public_page(page);
+       if (page_mapping(page))
+               expected_count += hpage_nr_pages(page) + page_has_private(page);
+
+       return expected_count;
+}
+
 /*
  * Replace the page in the mapping.
  *
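
For concreteness, a worked example of the count this helper computes; the interpretation of the base reference is an assumption taken from the surrounding code, not spelled out in the patch:

	/*
	 * Order-0 page-cache page with buffer heads attached, no extra_count
	 * from the caller:
	 *
	 *   1				reference held by the migration caller
	 *				(presumably the isolation reference)
	 * + is_device_private_page()	0
	 * + is_device_public_page()	0
	 * + hpage_nr_pages()		1, the page cache's own reference
	 * + page_has_private()		1, the buffer_head reference
	 *   ----------------------------------------------------------------
	 *   expected_count		3
	 *
	 * Any higher page_count() means someone else still holds the page,
	 * and the callers back off with -EAGAIN rather than freeze it.
	 */
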
@@ -444,14 +456,7 @@ int migrate_page_move_mapping(struct address_space *mapping,
        XA_STATE(xas, &mapping->i_pages, page_index(page));
        struct zone *oldzone, *newzone;
        int dirty;
-       int expected_count = 1 + extra_count;
-
-       /*
-        * Device public or private pages have an extra refcount as they are
-        * ZONE_DEVICE pages.
-        */
-       expected_count += is_device_private_page(page);
-       expected_count += is_device_public_page(page);
+       int expected_count = expected_page_refs(page) + extra_count;
 
        if (!mapping) {
                /* Anonymous page without mapping */
@@ -471,8 +476,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
        newzone = page_zone(newpage);
 
        xas_lock_irq(&xas);
-
-       expected_count += hpage_nr_pages(page) + page_has_private(page);
        if (page_count(page) != expected_count || xas_load(&xas) != page) {
                xas_unlock_irq(&xas);
                return -EAGAIN;
@@ -484,20 +487,6 @@ int migrate_page_move_mapping(struct address_space *mapping,
        }
 
        /*
-        * In the async migration case of moving a page with buffers, lock the
-        * buffers using trylock before the mapping is moved. If the mapping
-        * was moved, we later failed to lock the buffers and could not move
-        * the mapping back due to an elevated page count, we would have to
-        * block waiting on other references to be dropped.
-        */
-       if (mode == MIGRATE_ASYNC && head &&
-                       !buffer_migrate_lock_buffers(head, mode)) {
-               page_ref_unfreeze(page, expected_count);
-               xas_unlock_irq(&xas);
-               return -EAGAIN;
-       }
-
-       /*
         * Now we know that no one else is looking at the page:
         * no turning back from here.
         */
@@ -772,24 +761,23 @@ int buffer_migrate_page(struct address_space *mapping,
 {
        struct buffer_head *bh, *head;
        int rc;
+       int expected_count;
 
        if (!page_has_buffers(page))
                return migrate_page(mapping, newpage, page, mode);
 
-       head = page_buffers(page);
+       /* Check whether page does not have extra refs before we do more work */
+       expected_count = expected_page_refs(page);
+       if (page_count(page) != expected_count)
+               return -EAGAIN;
 
-       rc = migrate_page_move_mapping(mapping, newpage, page, head, mode, 0);
+       head = page_buffers(page);
+       if (!buffer_migrate_lock_buffers(head, mode))
+               return -EAGAIN;
 
+       rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
        if (rc != MIGRATEPAGE_SUCCESS)
-               return rc;
-
-       /*
-        * In the async case, migrate_page_move_mapping locked the buffers
-        * with an IRQ-safe spinlock held. In the sync case, the buffers
-        * need to be locked now
-        */
-       if (mode != MIGRATE_ASYNC)
-               BUG_ON(!buffer_migrate_lock_buffers(head, mode));
+               goto unlock_buffers;
 
        ClearPagePrivate(page);
        set_page_private(newpage, page_private(page));
@@ -811,6 +799,8 @@ int buffer_migrate_page(struct address_space *mapping,
        else
                migrate_page_states(newpage, page);
 
+       rc = MIGRATEPAGE_SUCCESS;
+unlock_buffers:
        bh = head;
        do {
                unlock_buffer(bh);
@@ -819,7 +809,7 @@ int buffer_migrate_page(struct address_space *mapping,
 
        } while (bh != head);
 
-       return MIGRATEPAGE_SUCCESS;
+       return rc;
 }
 EXPORT_SYMBOL(buffer_migrate_page);
 #endif
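
After this change buffer_migrate_page() takes every buffer lock up front, in both async and sync mode, and only then calls migrate_page_move_mapping() (now with head == NULL, so the mapping code no longer deals with buffers at all). buffer_migrate_lock_buffers() itself is not shown in this diff; only its closing brace and #endif appear in the hunk context above. Judging from its callers, it is assumed to walk the buffer ring and lock each buffer, using trylock in MIGRATE_ASYNC mode so an async migration never blocks on buffer I/O. A sketch under that assumption, not the file's actual implementation:

	static bool buffer_migrate_lock_buffers_sketch(struct buffer_head *head,
						       enum migrate_mode mode)
	{
		struct buffer_head *bh = head, *failed_bh;

		do {
			if (!trylock_buffer(bh)) {
				if (mode == MIGRATE_ASYNC)
					goto unlock;	/* async: must not block */
				lock_buffer(bh);	/* sync: wait for the lock */
			}
			bh = bh->b_this_page;
		} while (bh != head);
		return true;

	unlock:
		/* undo the buffer locks taken before the one that failed */
		failed_bh = bh;
		bh = head;
		while (bh != failed_bh) {
			unlock_buffer(bh);
			bh = bh->b_this_page;
		}
		return false;
	}
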
@@ -2303,6 +2293,7 @@ next:
  */
 static void migrate_vma_collect(struct migrate_vma *migrate)
 {
+       struct mmu_notifier_range range;
        struct mm_walk mm_walk;
 
        mm_walk.pmd_entry = migrate_vma_collect_pmd;
@@ -2314,13 +2305,11 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
        mm_walk.mm = migrate->vma->vm_mm;
        mm_walk.private = migrate;
 
-       mmu_notifier_invalidate_range_start(mm_walk.mm,
-                                           migrate->start,
-                                           migrate->end);
+       mmu_notifier_range_init(&range, mm_walk.mm, migrate->start,
+                               migrate->end);
+       mmu_notifier_invalidate_range_start(&range);
        walk_page_range(migrate->start, migrate->end, &mm_walk);
-       mmu_notifier_invalidate_range_end(mm_walk.mm,
-                                         migrate->start,
-                                         migrate->end);
+       mmu_notifier_invalidate_range_end(&range);
 
        migrate->end = migrate->start + (migrate->npages << PAGE_SHIFT);
 }
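
The shape of this conversion, repeated in the remaining hunks, is that the (mm, start, end) triple formerly passed to both notifier calls now lives in a single on-stack struct mmu_notifier_range, initialised once and handed to the start/end pair symmetrically. A condensed sketch of the pattern as it appears in this tree (the mm/start/end names and the critical section are placeholders):

	struct mmu_notifier_range range;

	mmu_notifier_range_init(&range, mm, start, end);
	mmu_notifier_invalidate_range_start(&range);
	/* ... modify the page tables covering [start, end) ... */
	mmu_notifier_invalidate_range_end(&range);
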
@@ -2701,9 +2690,8 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
 {
        const unsigned long npages = migrate->npages;
        const unsigned long start = migrate->start;
-       struct vm_area_struct *vma = migrate->vma;
-       struct mm_struct *mm = vma->vm_mm;
-       unsigned long addr, i, mmu_start;
+       struct mmu_notifier_range range;
+       unsigned long addr, i;
        bool notified = false;
 
        for (i = 0, addr = start; i < npages; addr += PAGE_SIZE, i++) {
@@ -2722,11 +2710,12 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
                                continue;
                        }
                        if (!notified) {
-                               mmu_start = addr;
                                notified = true;
-                               mmu_notifier_invalidate_range_start(mm,
-                                                               mmu_start,
-                                                               migrate->end);
+
+                               mmu_notifier_range_init(&range,
+                                                       migrate->vma->vm_mm,
+                                                       addr, migrate->end);
+                               mmu_notifier_invalidate_range_start(&range);
                        }
                        migrate_vma_insert_page(migrate, addr, newpage,
                                                &migrate->src[i],
@@ -2767,8 +2756,7 @@ static void migrate_vma_pages(struct migrate_vma *migrate)
         * did already call it.
         */
        if (notified)
-               mmu_notifier_invalidate_range_only_end(mm, mmu_start,
-                                                      migrate->end);
+               mmu_notifier_invalidate_range_only_end(&range);
 }
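
migrate_vma_pages() uses a deferred variant of the same pattern: the notifier window is opened only once the loop actually has to insert a page, and it is closed with the _only_end variant, which (per the "did already call it" comment above) skips the extra ->invalidate_range() callback that has already been issued by that point. Condensed from the hunks above:

	struct mmu_notifier_range range;
	bool notified = false;

	/* inside the per-page loop, before the first page-table change: */
	if (!notified) {
		notified = true;
		mmu_notifier_range_init(&range, migrate->vma->vm_mm,
					addr, migrate->end);
		mmu_notifier_invalidate_range_start(&range);
	}

	/* once, after the loop: */
	if (notified)
		mmu_notifier_invalidate_range_only_end(&range);
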
 
 /*