ANDROID: mm: allow fast reclaim of shmem pages

Certain use cases that use shmem pages require the
inactive pages to be reclaimed as soon as possible
to reduce system memory pressure. Provide an option
to move these pages to the tail of the inactive list
for faster reclaim.

Bug: 187798288
Change-Id: Ic5142b714d99a487aadbc2866be448e772f39b8a
Signed-off-by: Vinayak Menon <vinmenon@codeaurora.org>
Author: Vinayak Menon, 2021-03-18 16:49:18 +05:30
Committed by: Suren Baghdasaryan
commit 9975da5f43, parent 13af062abf
4 changed files with 52 additions and 0 deletions
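
The new hook exported from mm/shmem.c is a thin wrapper, so an in-kernel user only needs the struct page. A minimal sketch of a hypothetical caller (the function below and its name are illustrative, not part of this change):

#include <linux/mm.h>
#include <linux/shmem_fs.h>

/*
 * Hypothetical caller: a driver that is done with a shmem-backed page
 * and wants it reclaimed ahead of other inactive pages. The move only
 * takes effect if the page is currently on the LRU; the caller is
 * assumed to hold its own reference, which it drops afterwards.
 */
static void example_done_with_shmem_page(struct page *page)
{
	shmem_mark_page_lazyfree(page);	/* queue move to tail of inactive list */
	put_page(page);			/* drop this caller's reference */
}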

--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h

@@ -85,6 +85,7 @@ extern bool shmem_huge_enabled(struct vm_area_struct *vma);
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 						pgoff_t start, pgoff_t end);
+extern void shmem_mark_page_lazyfree(struct page *page);
 
 /* Flag allocation requirements to shmem_getpage */
 enum sgp_type {

--- a/include/linux/swap.h
+++ b/include/linux/swap.h

@@ -363,6 +363,7 @@ extern void rotate_reclaimable_page(struct page *page);
 extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
+extern void mark_page_lazyfree_movetail(struct page *page);
 extern void swap_setup(void);
 
 extern void __lru_cache_add_inactive_or_unevictable(struct page *page,

--- a/mm/shmem.c
+++ b/mm/shmem.c

@@ -4284,3 +4284,9 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 #endif
 }
 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
+
+void shmem_mark_page_lazyfree(struct page *page)
+{
+	mark_page_lazyfree_movetail(page);
+}
+EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);

--- a/mm/swap.c
+++ b/mm/swap.c

@@ -65,6 +65,7 @@ struct lru_pvecs {
 	struct pagevec lru_deactivate_file;
 	struct pagevec lru_deactivate;
 	struct pagevec lru_lazyfree;
+	struct pagevec lru_lazyfree_movetail;
 #ifdef CONFIG_SMP
 	struct pagevec activate_page;
 #endif
@@ -630,6 +631,21 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 	}
 }
 
+static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
+			void *arg)
+{
+	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
+	    !PageSwapCache(page)) {
+		bool active = PageActive(page);
+
+		del_page_from_lru_list(page, lruvec,
+				       LRU_INACTIVE_ANON + active);
+		ClearPageActive(page);
+		ClearPageReferenced(page);
+		add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
+	}
+}
+
 /*
  * Drain pages out of the cpu's pagevecs.
  * Either "cpu" is the current CPU, and preemption has already been
@@ -665,6 +681,10 @@ void lru_add_drain_cpu(int cpu)
 	if (pagevec_count(pvec))
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
+	pvec = &per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu);
+	if (pagevec_count(pvec))
+		pagevec_lru_move_fn(pvec, lru_lazyfree_movetail_fn, NULL);
+
 	activate_page_drain(cpu);
 	invalidate_bh_lrus_cpu(cpu);
 }
@@ -742,6 +762,29 @@ void mark_page_lazyfree(struct page *page)
 	}
 }
 
+/**
+ * mark_page_lazyfree_movetail - make a swapbacked page lazyfree
+ * @page: page to deactivate
+ *
+ * mark_page_lazyfree_movetail() moves @page to the tail of the inactive
+ * file list. This is done to accelerate the reclaim of @page.
+ */
+void mark_page_lazyfree_movetail(struct page *page)
+{
+	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
+	    !PageSwapCache(page)) {
+		struct pagevec *pvec;
+
+		local_lock(&lru_pvecs.lock);
+		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree_movetail);
+		get_page(page);
+		if (pagevec_add_and_need_flush(pvec, page))
+			pagevec_lru_move_fn(pvec,
+					lru_lazyfree_movetail_fn, NULL);
+		local_unlock(&lru_pvecs.lock);
+	}
+}
+
 void lru_add_drain(void)
 {
 	local_lock(&lru_pvecs.lock);
@@ -854,6 +897,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
+		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu)) ||
 		    need_activate_page_drain(cpu) ||
 		    has_bh_in_lru(cpu, NULL)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
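
Like the existing lazyfree path, the move is batched in a per-CPU pagevec and only becomes visible on the LRU once that pagevec is drained. A caller that needs the reordering to take effect immediately could follow the mark with a drain; a sketch under that assumption (example_mark_and_drain is illustrative, not part of this patch):

#include <linux/swap.h>

/*
 * Illustrative only: force the batched move to complete on this CPU by
 * draining the local pagevecs; lru_add_drain_cpu() now also flushes
 * lru_pvecs.lru_lazyfree_movetail.
 */
static void example_mark_and_drain(struct page *page)
{
	mark_page_lazyfree_movetail(page);
	lru_add_drain();
}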