mm: Enhance per process reclaim to consider shared pages

Some pages can be shared by several processes (e.g. libc). In that
case it is wasteful to reclaim them at the first attempt.

This patch makes the VM keep such pages in memory until the last task
mapping them tries to reclaim them, so a shared page is actually
reclaimed only after every task sharing it has gone through reclaim.

This feature doesn't handle non-linear mappings on ramfs because
handling them is very time-consuming, doesn't guarantee any reclaim,
and isn't a common case.
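
As a rough illustration of the intended semantics (this sketch is not
part of the patch; the helper name is made up and it assumes the
three-argument try_to_unmap() introduced below), reclaiming a page on
behalf of a single vma only zaps that vma's mapping; the page itself
becomes freeable only when that was the last mapping left:

#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/rmap.h>

/* Illustrative only: zap @page's mapping in @vma; true when no mapping remains. */
static bool zap_page_from_vma(struct page *page, struct vm_area_struct *vma)
{
	bool last_mapping_gone;

	lock_page(page);		/* try_to_unmap() needs the page lock */
	last_mapping_gone = try_to_unmap(page, TTU_IGNORE_ACCESS, vma);
	unlock_page(page);

	/* Shared pages return false here and stay in memory for now. */
	return last_mapping_gone;
}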

Change-Id: I7e5f34f2e947f5db6d405867fe2ad34863ca40f7
Signed-off-by: Sangseok Lee <sangseok.lee@lge.com>
Signed-off-by: Minchan Kim <minchan@kernel.org>
Patch-mainline: linux-mm @ 9 May 2013 16:21:27
[vinmenon@codeaurora.org: merge conflict fixes + fix for ksm]
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
parent bf4eaa44f7
commit 6aee86262a
9 changed files with 49 additions and 13 deletions

View File

@@ -1746,7 +1746,7 @@ static int reclaim_pte_range(pmd_t *pmd, unsigned long addr,
 			break;
 	}
 	pte_unmap_unlock(pte - 1, ptl);
-	reclaim_pages_from_list(&page_list);
+	reclaim_pages_from_list(&page_list, vma);
 	if (addr != end)
 		goto cont;

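For context, a simplified sketch of the kind of caller this hunk sits
in (the process-reclaim PTE walker). The helper name and most details
below are illustrative; only the final reclaim_pages_from_list() call
mirrors the change above:

/* Illustrative sketch of a process-reclaim PTE walker (not the in-tree code). */
static int reclaim_pte_range_sketch(pmd_t *pmd, unsigned long addr,
				    unsigned long end, struct mm_walk *walk)
{
	struct vm_area_struct *vma = walk->vma;
	LIST_HEAD(page_list);
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		struct page *page;

		if (!pte_present(*pte))
			continue;

		page = vm_normal_page(vma, addr, *pte);
		if (!page)
			continue;

		/* Pull the page off the LRU so shrink_page_list() can see it. */
		if (isolate_lru_page(page))
			continue;
		list_add(&page->lru, &page_list);
	}
	pte_unmap_unlock(pte - 1, ptl);

	/* Pass the vma so shared pages are only zapped here, not reclaimed. */
	reclaim_pages_from_list(&page_list, vma);
	return 0;
}
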
View File

@@ -14,7 +14,8 @@
 extern int isolate_lru_page(struct page *page);
 extern void putback_lru_page(struct page *page);
-extern unsigned long reclaim_pages_from_list(struct list_head *page_list);
+extern unsigned long reclaim_pages_from_list(struct list_head *page_list,
+					struct vm_area_struct *vma);
 /*
  * The anon_vma heads a list of private "related" vmas, to scan if
@@ -199,7 +200,8 @@ static inline void page_dup_rmap(struct page *page, bool compound)
 int page_referenced(struct page *, int is_locked,
 			struct mem_cgroup *memcg, unsigned long *vm_flags);
-bool try_to_unmap(struct page *, enum ttu_flags flags);
+bool try_to_unmap(struct page *page, enum ttu_flags flags,
+			struct vm_area_struct *vma);
 /* Avoid racy checks */
 #define PVMW_SYNC		(1 << 0)
@@ -265,6 +267,7 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
  */
 struct rmap_walk_control {
 	void *arg;
+	struct vm_area_struct *target_vma;
 	/*
 	 * Return false if page table scanning in rmap_walk should be stopped.
 	 * Otherwise, return true.
@@ -293,7 +296,7 @@ static inline int page_referenced(struct page *page, int is_locked,
 	return 0;
 }
-#define try_to_unmap(page, refs) false
+#define try_to_unmap(page, refs, vma) false
 static inline int page_mkclean(struct page *page)
 {

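The new rmap_walk_control::target_vma field is what carries the
restriction through the reverse-map walk. A hedged sketch of how a
walker might use it (the wrapper below is hypothetical; only the field
and rmap_walk() itself come from this header):

/* Hypothetical wrapper: run @rmap_one against @page's mapping in @vma only. */
static void rmap_walk_one_vma(struct page *page, struct vm_area_struct *vma,
			      bool (*rmap_one)(struct page *page,
					       struct vm_area_struct *vma,
					       unsigned long addr, void *arg),
			      void *arg)
{
	struct rmap_walk_control rwc = {
		.rmap_one = rmap_one,
		.arg = arg,
		/* a non-NULL target_vma short-circuits the full vma scan */
		.target_vma = vma,
	};

	rmap_walk(page, &rwc);
}
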
View File

@@ -2419,7 +2419,7 @@ static void unmap_page(struct page *page)
 	if (PageAnon(page))
 		ttu_flags |= TTU_SPLIT_FREEZE;
-	unmap_success = try_to_unmap(page, ttu_flags);
+	unmap_success = try_to_unmap(page, ttu_flags, NULL);
 	VM_BUG_ON_PAGE(!unmap_success, page);

View File

@@ -2604,6 +2604,7 @@ void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc)
 	stable_node = page_stable_node(page);
 	if (!stable_node)
 		return;
 again:
 	hlist_for_each_entry(rmap_item, &stable_node->hlist, hlist) {
 		struct anon_vma *anon_vma = rmap_item->anon_vma;

View File

@@ -1030,7 +1030,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (kill)
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
-	unmap_success = try_to_unmap(hpage, ttu);
+	unmap_success = try_to_unmap(hpage, ttu, NULL);
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));

View File

@@ -1374,7 +1374,9 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
 			if (WARN_ON(PageLRU(page)))
 				isolate_lru_page(page);
 			if (page_mapped(page))
-				try_to_unmap(page, TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS);
+				try_to_unmap(page,
+					TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS,
+					NULL);
 			continue;
 		}

View File

@@ -1108,7 +1108,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
 		VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma,
 				page);
 		try_to_unmap(page,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
 		page_was_mapped = 1;
 	}
@@ -1334,7 +1334,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
 	if (page_mapped(hpage)) {
 		try_to_unmap(hpage,
-			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
+			TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS, NULL);
 		page_was_mapped = 1;
 	}
@@ -2565,7 +2565,7 @@ static void migrate_vma_unmap(struct migrate_vma *migrate)
 			continue;
 		if (page_mapped(page)) {
-			try_to_unmap(page, flags);
+			try_to_unmap(page, flags, NULL);
 			if (page_mapped(page))
 				goto restore;
 		}

View File

@@ -1699,19 +1699,24 @@ static int page_mapcount_is_zero(struct page *page)
  * try_to_unmap - try to remove all page table mappings to a page
  * @page: the page to get unmapped
  * @flags: action and flags
+ * @vma: target vma for reclaim
  *
  * Tries to remove all the page table entries which are mapping this
  * page, used in the pageout path. Caller must hold the page lock.
+ * If @vma is not NULL, this function tries to remove @page only from @vma,
+ * without scanning all the vmas that map @page.
  *
  * If unmap is successful, return true. Otherwise, false.
  */
-bool try_to_unmap(struct page *page, enum ttu_flags flags)
+bool try_to_unmap(struct page *page, enum ttu_flags flags,
+				struct vm_area_struct *vma)
 {
 	struct rmap_walk_control rwc = {
 		.rmap_one = try_to_unmap_one,
 		.arg = (void *)flags,
 		.done = page_mapcount_is_zero,
 		.anon_lock = page_lock_anon_vma_read,
+		.target_vma = vma,
 	};
 	/*
@@ -1755,6 +1760,7 @@ void try_to_munlock(struct page *page)
 		.arg = (void *)TTU_MUNLOCK,
 		.done = page_not_mapped,
 		.anon_lock = page_lock_anon_vma_read,
+		.target_vma = NULL,
 	};
@@ -1816,6 +1822,13 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 	pgoff_t pgoff_start, pgoff_end;
 	struct anon_vma_chain *avc;
+	if (rwc->target_vma) {
+		unsigned long address = vma_address(page, rwc->target_vma);
+		rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+		return;
+	}
 	if (locked) {
 		anon_vma = page_anon_vma(page);
 		/* anon_vma disappear under us? */
@@ -1823,6 +1836,7 @@ static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc,
 	} else {
 		anon_vma = rmap_walk_anon_lock(page, rwc);
 	}
 	if (!anon_vma)
 		return;
@@ -1867,6 +1881,7 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 	struct address_space *mapping = page_mapping(page);
 	pgoff_t pgoff_start, pgoff_end;
 	struct vm_area_struct *vma;
+	unsigned long address;
 	/*
 	 * The page lock not only makes sure that page->mapping cannot
@@ -1883,6 +1898,13 @@ static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc,
 	pgoff_end = pgoff_start + hpage_nr_pages(page) - 1;
 	if (!locked)
 		i_mmap_lock_read(mapping);
+	if (rwc->target_vma) {
+		address = vma_address(page, rwc->target_vma);
+		rwc->rmap_one(page, rwc->target_vma, address, rwc->arg);
+		goto done;
+	}
 	vma_interval_tree_foreach(vma, &mapping->i_mmap,
 			pgoff_start, pgoff_end) {
 		unsigned long address = vma_address(page, vma);

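Both new branches above resolve the page's virtual address inside the
single target vma with vma_address(). Roughly, that helper (the in-tree
version lives in mm/internal.h and also handles compound pages and
range clamping) boils down to:

/* Simplified sketch of what vma_address() computes. */
static unsigned long vma_address_sketch(struct page *page,
					struct vm_area_struct *vma)
{
	pgoff_t pgoff = page_to_pgoff(page);

	/* the page's offset in the mapping, translated into @vma's range */
	return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}
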
View File

@@ -131,6 +131,12 @@ struct scan_control {
 	/* for recording the reclaimed slab by now */
 	struct reclaim_state reclaim_state;
+	/*
+	 * Reclaim pages from a vma. If the page is shared by other tasks
+	 * it is zapped from the vma without reclaim, so it ends up
+	 * remaining in memory until the last task zaps it.
+	 */
+	struct vm_area_struct *target_vma;
 };
 #ifdef ARCH_HAS_PREFETCH
@@ -1356,7 +1362,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
 			if (unlikely(PageTransHuge(page)))
 				flags |= TTU_SPLIT_HUGE_PMD;
-			if (!try_to_unmap(page, flags)) {
+			if (!try_to_unmap(page, flags, sc->target_vma)) {
 				stat->nr_unmap_fail += nr_pages;
 				goto activate_locked;
 			}
@@ -1577,7 +1583,8 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
 }
 #ifdef CONFIG_PROCESS_RECLAIM
-unsigned long reclaim_pages_from_list(struct list_head *page_list)
+unsigned long reclaim_pages_from_list(struct list_head *page_list,
+					struct vm_area_struct *vma)
 {
 	struct scan_control sc = {
 		.gfp_mask = GFP_KERNEL,
@@ -1585,6 +1592,7 @@ unsigned long reclaim_pages_from_list(struct list_head *page_list)
 		.may_writepage = 1,
 		.may_unmap = 1,
 		.may_swap = 1,
+		.target_vma = vma,
 	};
 	unsigned long nr_reclaimed;
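
Taken together with the shrink_page_list() hunk above, this is what
keeps shared pages resident: try_to_unmap() reports success only when
the page ends up unmapped everywhere, so zapping just sc->target_vma's
PTE of a shared page still counts as a failure and the page is kept.
An illustrative condensation of that decision (not literal kernel code;
it assumes the vmscan.c context above):

/* Illustrative only: the effective policy for a mapped page on the list. */
static bool keep_page_for_now(struct page *page, enum ttu_flags flags,
			      struct scan_control *sc)
{
	if (!page_mapped(page))
		return false;	/* nothing maps it; normal reclaim proceeds */

	/*
	 * With sc->target_vma set, only that vma's PTE is zapped.  A page
	 * still mapped by other tasks makes try_to_unmap() return false,
	 * so it is activated and stays in memory until its last mapper
	 * comes through here.
	 */
	return !try_to_unmap(page, flags, sc->target_vma);
}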