* try_to_unmap - try to remove all page table mappings to a page
* @page: the page to get unmapped
* @flags: action and flags
+ * @vma: target vma for reclaim
*
* Tries to remove all the page table entries which are mapping this
* page, used in the pageout path. Caller must hold the page lock.
+ * If @vma is not NULL, this function tries to remove @page only from @vma,
+ * without walking all the VMAs that map @page.
* Return values are:
*
* SWAP_SUCCESS - we succeeded in removing all mappings
* SWAP_FAIL - the page is unswappable
* SWAP_MLOCK - page is mlocked.
*/
-int try_to_unmap(struct page *page, enum ttu_flags flags)
+int try_to_unmap(struct page *page, enum ttu_flags flags,
+ struct vm_area_struct *vma)
{
int ret;
struct rmap_walk_control rwc = {
.arg = (void *)flags,
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
+ .target_vma = vma,
};
VM_BUG_ON_PAGE(!PageHuge(page) && PageTransHuge(page), page);
.arg = (void *)TTU_MUNLOCK,
.done = page_not_mapped,
.anon_lock = page_lock_anon_vma_read,
+ .target_vma = NULL,
};
struct anon_vma_chain *avc;
int ret = SWAP_AGAIN;
+ if (rwc->target_vma) {
+ unsigned long address = vma_address(page, rwc->target_vma);
+ return rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+ }
+
anon_vma = rmap_walk_anon_lock(page, rwc);
if (!anon_vma)
return ret;
struct address_space *mapping = page->mapping;
pgoff_t pgoff;
struct vm_area_struct *vma;
+ unsigned long address;
int ret = SWAP_AGAIN;
/*
pgoff = page_to_pgoff(page);
i_mmap_lock_read(mapping);
+ if (rwc->target_vma) {
+ address = vma_address(page, rwc->target_vma);
+ ret = rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+ goto done;
+ }
+
vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
unsigned long address = vma_address(page, vma);