@paranlee · Last active May 27, 2021 17:23
PageAnon(page) at try_to_unmap_one
/*
 * v5.10.37
 * https://elixir.bootlin.com/linux/v5.10.37/source/mm/rmap.c#L1617
 *
 * @arg: enum ttu_flags will be passed to this argument
 */
static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
                             unsigned long address, void *arg)
{
    struct mm_struct *mm = vma->vm_mm;
    struct page_vma_mapped_walk pvmw = {
        .page = page,
        .vma = vma,
        .address = address,
    };
    pte_t pteval;
    struct page *subpage;
    bool ret = true;
    struct mmu_notifier_range range;
    enum ttu_flags flags = (enum ttu_flags)(long)arg;

    /*
     * ... elided: early flag checks, plus mmu_notifier_range_init() and
     * mmu_notifier_invalidate_range_start() on range ...
     */
    while (page_vma_mapped_walk(&pvmw)) {
        /*
         * ... elided: cases that continue the walk; the elided code
         * also computes subpage and clears the PTE, saving its old
         * value in pteval ...
         */
        if (PageHuge(page) && !PageAnon(page)) {
            /* ... elided: hugetlb-specific handling ... */
        } else if (PageAnon(page)) {
            swp_entry_t entry = { .val = page_private(subpage) };
            pte_t swp_pte;

            /*
             * Store the swap location in the pte.
             * See handle_pte_fault() ...
             */
            if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) {
                WARN_ON_ONCE(1);
                ret = false;
                /* We have to invalidate as we cleared the pte */
                mmu_notifier_invalidate_range(mm, address,
                                              address + PAGE_SIZE);
                page_vma_mapped_walk_done(&pvmw);
                break;
            }
            /* MADV_FREE page check */
            if (!PageSwapBacked(page)) {
                if (!PageDirty(page)) {
                    /* Invalidate as we cleared the pte */
                    mmu_notifier_invalidate_range(mm, address,
                                                  address + PAGE_SIZE);
                    dec_mm_counter(mm, MM_ANONPAGES);
                    goto discard;
                }

                /*
                 * If the page was redirtied, it cannot be
                 * discarded. Remap the page to the page table.
                 */
                set_pte_at(mm, address, pvmw.pte, pteval);
                SetPageSwapBacked(page);
                ret = false;
                page_vma_mapped_walk_done(&pvmw);
                break;
            }
            if (swap_duplicate(entry) < 0) {
                set_pte_at(mm, address, pvmw.pte, pteval);
                ret = false;
                page_vma_mapped_walk_done(&pvmw);
                break;
            }
            if (arch_unmap_one(mm, vma, address, pteval) < 0) {
                set_pte_at(mm, address, pvmw.pte, pteval);
                ret = false;
                page_vma_mapped_walk_done(&pvmw);
                break;
            }
            if (list_empty(&mm->mmlist)) {
                spin_lock(&mmlist_lock);
                if (list_empty(&mm->mmlist))
                    list_add(&mm->mmlist, &init_mm.mmlist);
                spin_unlock(&mmlist_lock);
            }
            dec_mm_counter(mm, MM_ANONPAGES);
            inc_mm_counter(mm, MM_SWAPENTS);

            swp_pte = swp_entry_to_pte(entry);
            if (pte_soft_dirty(pteval))
                swp_pte = pte_swp_mksoft_dirty(swp_pte);
            if (pte_uffd_wp(pteval))
                swp_pte = pte_swp_mkuffd_wp(swp_pte);
            set_pte_at(mm, address, pvmw.pte, swp_pte);
            /* Invalidate as we cleared the pte */
            mmu_notifier_invalidate_range(mm, address,
                                          address + PAGE_SIZE);
        } else {
            /*
             * This is a locked file-backed page, thus it cannot
             * be removed from the page cache and replaced by a new
             * page before mmu_notifier_invalidate_range_end, so no
             * concurrent thread can update its page table to point
             * at a new page while a device is still using this one.
             *
             * See Documentation/vm/mmu_notifier.rst
             */
            dec_mm_counter(mm, mm_counter_file(page));
        }
discard:
        /*
         * No need to call mmu_notifier_invalidate_range(); it has been
         * done above for all cases requiring it to happen under the
         * page table lock, before mmu_notifier_invalidate_range_end().
         *
         * See Documentation/vm/mmu_notifier.rst
         */
        page_remove_rmap(subpage, PageHuge(page));
        put_page(page);
    }

    mmu_notifier_invalidate_range_end(&range);

    return ret;
}
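
The MADV_FREE branch above is the main addition on the v5.10 side: a lazily freed anonymous page is not swap-backed, so a clean one is simply discarded, while a redirtied one is remapped and made swap-backed again. A minimal userspace sketch of that contract (a hedged illustration, not part of the gist's kernel code, assuming Linux 4.5+ where madvise(2) accepts MADV_FREE):

/*
 * Sketch of the MADV_FREE contract: after madvise(MADV_FREE), a clean
 * anonymous page may be dropped by reclaim instead of being swapped,
 * but writing to it again ("redirtying", the set_pte_at()/
 * SetPageSwapBacked() path above) makes it a normal page again.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
    size_t len = 4096 * 16;
    char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED) {
        perror("mmap");
        return 1;
    }

    memset(buf, 0xaa, len);          /* dirty the pages */

    /* Tell the kernel the contents are disposable: under memory
     * pressure these pages can be freed instead of swapped out. */
    if (madvise(buf, len, MADV_FREE) != 0) {
        perror("madvise(MADV_FREE)");
        return 1;
    }

    /* Writing again redirties the range; the kernel must keep the
     * new data from here on. */
    buf[0] = 1;

    printf("first byte after redirty: %d\n", buf[0]);
    munmap(buf, len);
    return 0;
}

If reclaim races with that last write, it is exactly the redirty branch above that remaps the page instead of discarding it.
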
/*
 * v2.6.7
 * https://elixir.bootlin.com/linux/v2.6.7/source/mm/rmap.c#L424
 *
 * Subfunctions of try_to_unmap: try_to_unmap_one called
 * repeatedly from either try_to_unmap_anon or try_to_unmap_file.
 */
static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma)
{
    struct mm_struct *mm = vma->vm_mm;
    unsigned long address;
    pgd_t *pgd;
    pmd_t *pmd;
    pte_t *pte;
    pte_t pteval;
    int ret = SWAP_AGAIN;

    if (!mm->rss)
        goto out;
    address = vma_address(page, vma);
    if (address == -EFAULT)
        goto out;

    /*
     * We need the page_table_lock to protect us from page faults,
     * munmap, fork, etc...
     */
    if (!spin_trylock(&mm->page_table_lock))
        goto out;

    pgd = pgd_offset(mm, address);
    if (!pgd_present(*pgd))
        goto out_unlock;

    pmd = pmd_offset(pgd, address);
    if (!pmd_present(*pmd))
        goto out_unlock;

    pte = pte_offset_map(pmd, address);
    if (!pte_present(*pte))
        goto out_unmap;

    if (page_to_pfn(page) != pte_pfn(*pte))
        goto out_unmap;
    /*
     * If the page is mlock()d, we cannot swap it out.
     * If it's recently referenced (perhaps page_referenced
     * skipped over this mm) then we should reactivate it.
     */
    if ((vma->vm_flags & (VM_LOCKED|VM_RESERVED)) ||
        ptep_test_and_clear_young(pte)) {
        ret = SWAP_FAIL;
        goto out_unmap;
    }

    /*
     * Don't pull an anonymous page out from under get_user_pages.
     * GUP carefully breaks COW and raises the page count (while holding
     * page_table_lock, as we have here) to make sure that the page
     * cannot be freed. If we unmap that page here, a user write
     * access to the virtual address will bring back the page, but
     * its raised count will (ironically) be taken to mean it's not
     * an exclusive swap page; do_wp_page will replace it with a copy
     * page, and the user never gets to see the data GUP was holding
     * the original page for.
     */
    if (PageSwapCache(page) &&
        page_count(page) != page->mapcount + 2) {
        ret = SWAP_FAIL;
        goto out_unmap;
    }
    /* Nuke the page table entry. */
    flush_cache_page(vma, address);
    pteval = ptep_clear_flush(vma, address, pte);

    /* Move the dirty bit to the physical page now the pte is gone. */
    if (pte_dirty(pteval))
        set_page_dirty(page);

    if (PageAnon(page)) {
        swp_entry_t entry = { .val = page->private };
        /*
         * Store the swap location in the pte.
         * See handle_pte_fault() ...
         */
        BUG_ON(!PageSwapCache(page));
        swap_duplicate(entry);
        set_pte(pte, swp_entry_to_pte(entry));
        BUG_ON(pte_file(*pte));
    }

    mm->rss--;
    BUG_ON(!page->mapcount);
    page->mapcount--;
    page_cache_release(page);

out_unmap:
    pte_unmap(pte);
out_unlock:
    spin_unlock(&mm->page_table_lock);
out:
    return ret;
}
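
In both versions, the PageAnon path ends with the PTE holding a swap entry built by swp_entry_to_pte() instead of a physical address. That state is visible from userspace through /proc/pid/pagemap. The sketch below is a hedged illustration based on the format in Documentation/admin-guide/mm/pagemap.rst (bit 63 = present, bit 62 = swapped; for swapped pages bits 0-4 hold the swap type and bits 5-54 the swap offset); it is not part of the gist's kernel code:

/*
 * Decode one /proc/self/pagemap entry for a given address. If reclaim
 * has run try_to_unmap_one() on the page and it went to swap, bit 62
 * is set and the entry carries the swap type/offset stored in the PTE.
 */
#include <fcntl.h>
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int show_pagemap(void *addr)
{
    long page_size = sysconf(_SC_PAGESIZE);
    uint64_t entry;
    off_t off = ((uintptr_t)addr / page_size) * sizeof(entry);
    int fd = open("/proc/self/pagemap", O_RDONLY);

    if (fd < 0) {
        perror("open");
        return -1;
    }
    if (pread(fd, &entry, sizeof(entry), off) != (ssize_t)sizeof(entry)) {
        perror("pread");
        close(fd);
        return -1;
    }
    close(fd);

    if (entry & (1ULL << 63)) {                     /* present in RAM */
        printf("%p: present, pfn=0x%" PRIx64 "\n",
               addr, entry & ((1ULL << 55) - 1));
    } else if (entry & (1ULL << 62)) {              /* swap entry PTE */
        printf("%p: swapped, type=%" PRIu64 ", offset=0x%" PRIx64 "\n",
               addr, entry & 0x1f, (entry >> 5) & ((1ULL << 50) - 1));
    } else {
        printf("%p: not present\n", addr);
    }
    return 0;
}

int main(void)
{
    int x = 42;                     /* a stack page, normally present */
    return show_pagemap(&x);
}

Note that on modern kernels unprivileged readers see the PFN and swap type/offset fields as zero (they require CAP_SYS_ADMIN); the present and swapped flag bits remain readable for the process's own mappings.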