diff --git a/mm/memory.c b/mm/memory.c
index 53bd9e5..6666bc2 100644
@@ -1306,22 +1306,34 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
  * Parameter block passed down to zap_pte_range in exceptional cases.
  */
 struct zap_details {
-       struct address_space *zap_mapping;      /* Check page->mapping if set */
        struct folio *single_folio;     /* Locked folio to be unmapped */
+       bool even_cows;                 /* Zap COWed private pages too? */
 };
 
-/*
- * We set details->zap_mapping when we want to unmap shared but keep private
- * pages. Return true if skip zapping this page, false otherwise.
- */
-static inline bool
-zap_skip_check_mapping(struct zap_details *details, struct page *page)
+/* Whether we should zap all COWed (private) pages too */
+static inline bool should_zap_cows(struct zap_details *details)
 {
-       if (!details || !page)
-               return false;
+       /* By default, zap all pages */
+       if (!details)
+               return true;
 
-       return details->zap_mapping &&
-               (details->zap_mapping != page_rmapping(page));
+       /* Or, we zap COWed pages only if the caller wants to */
+       return details->even_cows;
+}
+
+/* Decides whether we should zap this page with the page pointer specified */
+static inline bool should_zap_page(struct zap_details *details, struct page *page)
+{
+       /* If we can make a decision without *page.. */
+       if (should_zap_cows(details))
+               return true;
+
+       /* E.g. the caller passes NULL for the case of a zero page */
+       if (!page)
+               return true;
+
+       /* Otherwise we should only zap non-anon pages */
+       return !PageAnon(page);
 }
 
 static unsigned long zap_pte_range(struct mmu_gather *tlb,
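
For reference, a minimal userspace model of the new decision logic (illustrative only: the *_model names and the page_present/page_is_anon booleans are stand-ins for struct page and PageAnon(), not kernel interfaces). With no zap_details everything is zapped; with even_cows set everything is still zapped; otherwise private anon (COWed) pages are skipped, while file-backed pages and the no-page case (e.g. the zero page) are not.

/* zap_decision_demo.c - sketch mirroring should_zap_cows()/should_zap_page() */
#include <stdbool.h>
#include <stdio.h>

struct zap_details_model {
	bool even_cows;			/* zap COWed private pages too? */
};

static bool should_zap_cows_model(const struct zap_details_model *details)
{
	if (!details)			/* by default, zap all pages */
		return true;
	return details->even_cows;	/* otherwise only if the caller asks */
}

static bool should_zap_page_model(const struct zap_details_model *details,
				  bool page_present, bool page_is_anon)
{
	if (should_zap_cows_model(details))
		return true;		/* decision possible without the page */
	if (!page_present)
		return true;		/* no page pointer, e.g. the zero page */
	return !page_is_anon;		/* keep private anon (COWed) pages */
}

int main(void)
{
	struct zap_details_model keep_cows = { .even_cows = false };

	printf("file page zapped: %d\n", should_zap_page_model(&keep_cows, true, false));
	printf("anon COW kept:    %d\n", !should_zap_page_model(&keep_cows, true, true));
	printf("no details, anon: %d\n", should_zap_page_model(NULL, true, true));
	return 0;
}
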
@@ -1346,6 +1358,8 @@ again:
        arch_enter_lazy_mmu_mode();
        do {
                pte_t ptent = *pte;
+               struct page *page;
+
                if (pte_none(ptent))
                        continue;
 
@@ -1353,10 +1367,8 @@ again:
                        break;
 
                if (pte_present(ptent)) {
-                       struct page *page;
-
                        page = vm_normal_page(vma, addr, ptent);
-                       if (unlikely(zap_skip_check_mapping(details, page)))
+                       if (unlikely(!should_zap_page(details, page)))
                                continue;
                        ptent = ptep_get_and_clear_full(mm, addr, pte,
                                                        tlb->fullmm);
@@ -1388,32 +1400,32 @@ again:
                entry = pte_to_swp_entry(ptent);
                if (is_device_private_entry(entry) ||
                    is_device_exclusive_entry(entry)) {
-                       struct page *page = pfn_swap_entry_to_page(entry);
-
-                       if (unlikely(zap_skip_check_mapping(details, page)))
+                       page = pfn_swap_entry_to_page(entry);
+                       if (unlikely(!should_zap_page(details, page)))
                                continue;
-                       pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
                        rss[mm_counter(page)]--;
                        if (is_device_private_entry(entry))
                                page_remove_rmap(page, vma, false);
                        put_page(page);
-                       continue;
-               }
-
-               /* If details->check_mapping, we leave swap entries. */
-               if (unlikely(details))
-                       continue;
-
-               if (!non_swap_entry(entry))
+               } else if (!non_swap_entry(entry)) {
+                       /* Genuine swap entry, hence a private anon page */
+                       if (!should_zap_cows(details))
+                               continue;
                        rss[MM_SWAPENTS]--;
-               else if (is_migration_entry(entry)) {
-                       struct page *page;
-
+                       if (unlikely(!free_swap_and_cache(entry)))
+                               print_bad_pte(vma, addr, ptent, NULL);
+               } else if (is_migration_entry(entry)) {
                        page = pfn_swap_entry_to_page(entry);
+                       if (!should_zap_page(details, page))
+                               continue;
                        rss[mm_counter(page)]--;
+               } else if (is_hwpoison_entry(entry)) {
+                       if (!should_zap_cows(details))
+                               continue;
+               } else {
+                       /* We should have covered all the swap entry types */
+                       WARN_ON_ONCE(1);
                }
-               if (unlikely(!free_swap_and_cache(entry)))
-                       print_bad_pte(vma, addr, ptent, NULL);
                pte_clear_not_present_full(mm, addr, pte, tlb->fullmm);
        } while (pte++, addr += PAGE_SIZE, addr != end);
 
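Taken together, the three hunks above make every non-present PTE type explicit (device private/exclusive, genuine swap, migration, hwpoison) and funnel all of them into the single pte_clear_not_present_full() at the bottom, with WARN_ON_ONCE() catching anything unclassified. A compressed userspace model of that dispatch (the enum and the two booleans are stand-ins for the is_*_entry() checks and the should_zap_*() helpers, not kernel code):

/* swap_zap_demo.c - returns true when the PTE would be cleared; false models `continue` */
#include <stdbool.h>
#include <stdio.h>

enum entry_kind { ENTRY_DEVICE, ENTRY_SWAP, ENTRY_MIGRATION, ENTRY_HWPOISON };

static bool zap_nonpresent_model(enum entry_kind kind, bool zap_cows, bool zap_page)
{
	switch (kind) {
	case ENTRY_DEVICE:	return zap_page;  /* device private/exclusive: the page decides */
	case ENTRY_SWAP:	return zap_cows;  /* genuine swap entry = private anon data */
	case ENTRY_MIGRATION:	return zap_page;  /* the target page decides */
	case ENTRY_HWPOISON:	return zap_cows;  /* poison markers follow the cows rule */
	default:
		fprintf(stderr, "unexpected entry type\n");  /* WARN_ON_ONCE(1) analogue */
		return true;	/* still swept up by the common clear at the end */
	}
}

int main(void)
{
	/* a truncate-style caller (even_cows == false) keeps genuine swap entries ... */
	printf("swap entry kept: %d\n", !zap_nonpresent_model(ENTRY_SWAP, false, false));
	/* ... while a plain munmap (no details) clears them and drops MM_SWAPENTS */
	printf("swap entry gone: %d\n", zap_nonpresent_model(ENTRY_SWAP, true, true));
	return 0;
}
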
@@ -1700,7 +1712,7 @@ static void zap_page_range_single(struct vm_area_struct *vma, unsigned long addr
 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
                unsigned long size)
 {
-       if (address < vma->vm_start || address + size > vma->vm_end ||
+       if (!range_in_vma(vma, address, address + size) ||
                        !(vma->vm_flags & VM_PFNMAP))
                return;
 
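The open-coded bounds test in zap_vma_ptes() is replaced by range_in_vma(); for a non-NULL vma and a non-wrapping address + size the two checks accept exactly the same ranges. A quick userspace check of that equivalence (struct and function names here are models, not the mm definitions):

/* range_in_vma_demo.c - old vs. new bounds test */
#include <assert.h>
#include <stdbool.h>

struct vma_model { unsigned long vm_start, vm_end; };

static bool range_in_vma_model(const struct vma_model *vma,
			       unsigned long start, unsigned long end)
{
	return vma && vma->vm_start <= start && end <= vma->vm_end;
}

int main(void)
{
	struct vma_model vma = { .vm_start = 0x1000, .vm_end = 0x5000 };

	for (unsigned long addr = 0; addr < 0x8000; addr += 0x800)
		for (unsigned long size = 0; size < 0x4000; size += 0x800) {
			bool old_ok = !(addr < vma.vm_start || addr + size > vma.vm_end);
			bool new_ok = range_in_vma_model(&vma, addr, addr + size);
			assert(old_ok == new_ok);
		}
	return 0;
}
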
@@ -3324,12 +3336,8 @@ static inline void unmap_mapping_range_tree(struct rb_root_cached *root,
        vma_interval_tree_foreach(vma, root, first_index, last_index) {
                vba = vma->vm_pgoff;
                vea = vba + vma_pages(vma) - 1;
-               zba = first_index;
-               if (zba < vba)
-                       zba = vba;
-               zea = last_index;
-               if (zea > vea)
-                       zea = vea;
+               zba = max(first_index, vba);
+               zea = min(last_index, vea);
 
                unmap_mapping_range_vma(vma,
                        ((zba - vba) << PAGE_SHIFT) + vma->vm_start,
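
Behaviour here is unchanged: the requested range of file pages is clamped to the pages this VMA actually maps, then converted back to virtual addresses. A small worked example of the arithmetic (userspace sketch, 4 KiB pages assumed; the struct below only models the fields used):

/* pgoff_clamp_demo.c - clamp file-page indices to a VMA and map them to addresses */
#include <stdio.h>

#define PAGE_SHIFT_MODEL 12

struct vma_model {
	unsigned long vm_start;		/* first mapped virtual address */
	unsigned long vm_pgoff;		/* file page index of vm_start */
	unsigned long nr_pages;		/* vma_pages() analogue */
};

int main(void)
{
	struct vma_model vma = { .vm_start = 0x700000000000UL, .vm_pgoff = 8, .nr_pages = 16 };
	unsigned long first_index = 4, last_index = 11;	/* file pages to unmap */

	unsigned long vba = vma.vm_pgoff;
	unsigned long vea = vba + vma.nr_pages - 1;
	unsigned long zba = first_index > vba ? first_index : vba;	/* max() */
	unsigned long zea = last_index < vea ? last_index : vea;	/* min() */

	printf("zap virtual range [%#lx, %#lx)\n",
	       ((zba - vba) << PAGE_SHIFT_MODEL) + vma.vm_start,
	       ((zea - vba + 1) << PAGE_SHIFT_MODEL) + vma.vm_start);
	return 0;
}
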
@@ -3361,7 +3369,7 @@ void unmap_mapping_folio(struct folio *folio)
        first_index = folio->index;
        last_index = folio->index + folio_nr_pages(folio) - 1;
 
-       details.zap_mapping = mapping;
+       details.even_cows = false;
        details.single_folio = folio;
 
        i_mmap_lock_write(mapping);
@@ -3390,7 +3398,7 @@ void unmap_mapping_pages(struct address_space *mapping, pgoff_t start,
        pgoff_t first_index = start;
        pgoff_t last_index = start + nr - 1;
 
-       details.zap_mapping = even_cows ? NULL : mapping;
+       details.even_cows = even_cows;
        if (last_index < first_index)
                last_index = ULONG_MAX;
 
@@ -3855,11 +3863,16 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
                return ret;
 
        if (unlikely(PageHWPoison(vmf->page))) {
-               if (ret & VM_FAULT_LOCKED)
+               vm_fault_t poisonret = VM_FAULT_HWPOISON;
+               if (ret & VM_FAULT_LOCKED) {
+                       /* Retry if a clean page was removed from the cache. */
+                       if (invalidate_inode_page(vmf->page))
+                               poisonret = 0;
                        unlock_page(vmf->page);
+               }
                put_page(vmf->page);
                vmf->page = NULL;
-               return VM_FAULT_HWPOISON;
+               return poisonret;
        }
 
        if (unlikely(!(ret & VM_FAULT_LOCKED)))
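
The hwpoison path in __do_fault() now tries invalidate_inode_page() on a still-locked poisoned page; if the clean copy could be dropped from the page cache the fault returns 0 and is retried with a fresh page, otherwise it still fails with VM_FAULT_HWPOISON. A compact model of just that return-value decision (the flag value below is a placeholder, not the kernel's bit):

/* hwpoison_ret_demo.c - models only the poisonret selection from the hunk above */
#include <stdbool.h>
#include <stdio.h>

#define VM_FAULT_HWPOISON_MODEL	0x10	/* placeholder value */

static int poison_fault_result(bool page_locked, bool invalidated)
{
	int poisonret = VM_FAULT_HWPOISON_MODEL;

	if (page_locked && invalidated)
		poisonret = 0;		/* clean page dropped: let the fault retry */
	return poisonret;
}

int main(void)
{
	printf("locked, clean copy dropped -> %#x (retry)\n", poison_fault_result(true, true));
	printf("locked, still in use       -> %#x\n", poison_fault_result(true, false));
	printf("not locked by the caller   -> %#x\n", poison_fault_result(false, false));
	return 0;
}
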
@@ -4607,6 +4620,7 @@ static vm_fault_t __handle_mm_fault(struct vm_area_struct *vma,
        struct vm_fault vmf = {
                .vma = vma,
                .address = address & PAGE_MASK,
+               .real_address = address,
                .flags = flags,
                .pgoff = linear_page_index(vma, address),
                .gfp_mask = __get_fault_gfp_mask(vma),
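
The new .real_address field keeps the exact faulting address next to the page-aligned .address, so the untruncated value is no longer lost at fault setup. The relationship is just a mask (4 KiB pages assumed in this sketch; PAGE_MASK_MODEL is a stand-in for the kernel's PAGE_MASK):

/* real_address_demo.c - what the two vm_fault fields hold */
#include <stdio.h>

#define PAGE_MASK_MODEL	(~0xfffUL)

int main(void)
{
	unsigned long address = 0x7f2a3b4c5d6eUL;	/* arbitrary faulting address */

	printf(".address      = %#lx\n", address & PAGE_MASK_MODEL);	/* page-aligned */
	printf(".real_address = %#lx\n", address);			/* untruncated */
	return 0;
}
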
@@ -5241,14 +5255,6 @@ void print_vma_addr(char *prefix, unsigned long ip)
 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
 void __might_fault(const char *file, int line)
 {
-       /*
-        * Some code (nfs/sunrpc) uses socket ops on kernel memory while
-        * holding the mmap_lock, this is safe because kernel memory doesn't
-        * get paged out, therefore we'll never actually fault, and the
-        * below annotations will generate false positives.
-        */
-       if (uaccess_kernel())
-               return;
        if (pagefault_disabled())
                return;
        __might_sleep(file, line);
@@ -5429,6 +5435,8 @@ long copy_huge_page_from_user(struct page *dst_page,
                if (rc)
                        break;
 
+               flush_dcache_page(subpage);
+
                cond_resched();
        }
        return ret_val;