From 9975da5f43bb72a37faf3981709fff766ab4ca15 Mon Sep 17 00:00:00 2001
From: Vinayak Menon
Date: Thu, 18 Mar 2021 16:49:18 +0530
Subject: [PATCH] ANDROID: mm: allow fast reclaim of shmem pages

Certain use cases that use shmem pages require the inactive pages to be
reclaimed as soon as possible to reduce system memory pressure. Provide
an option to move these pages to the tail of the inactive list for
faster reclaim.

Bug: 187798288
Change-Id: Ic5142b714d99a487aadbc2866be448e772f39b8a
Signed-off-by: Vinayak Menon
---
 include/linux/shmem_fs.h |  1 +
 include/linux/swap.h     |  1 +
 mm/shmem.c               |  6 ++++++
 mm/swap.c                | 44 ++++++++++++++++++++++++++++++++++++++++
 4 files changed, 52 insertions(+)

diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 17e9969db499..51b1dcfb5022 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -85,6 +85,7 @@ extern bool shmem_huge_enabled(struct vm_area_struct *vma);
 extern unsigned long shmem_swap_usage(struct vm_area_struct *vma);
 extern unsigned long shmem_partial_swap_usage(struct address_space *mapping,
 						pgoff_t start, pgoff_t end);
+extern void shmem_mark_page_lazyfree(struct page *page);
 
 /* Flag allocation requirements to shmem_getpage */
 enum sgp_type {
diff --git a/include/linux/swap.h b/include/linux/swap.h
index e68ae91643f3..8ed2f6e51ae7 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -363,6 +363,7 @@ extern void rotate_reclaimable_page(struct page *page);
 extern void deactivate_file_page(struct page *page);
 extern void deactivate_page(struct page *page);
 extern void mark_page_lazyfree(struct page *page);
+extern void mark_page_lazyfree_movetail(struct page *page);
 extern void swap_setup(void);
 
 extern void __lru_cache_add_inactive_or_unevictable(struct page *page,
diff --git a/mm/shmem.c b/mm/shmem.c
index d8f45f30b656..a4b5012b1267 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -4284,3 +4284,9 @@ struct page *shmem_read_mapping_page_gfp(struct address_space *mapping,
 #endif
 }
 EXPORT_SYMBOL_GPL(shmem_read_mapping_page_gfp);
+
+void shmem_mark_page_lazyfree(struct page *page)
+{
+	mark_page_lazyfree_movetail(page);
+}
+EXPORT_SYMBOL_GPL(shmem_mark_page_lazyfree);
diff --git a/mm/swap.c b/mm/swap.c
index b6c5e44e49f0..171213a64202 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -65,6 +65,7 @@ struct lru_pvecs {
 	struct pagevec lru_deactivate_file;
 	struct pagevec lru_deactivate;
 	struct pagevec lru_lazyfree;
+	struct pagevec lru_lazyfree_movetail;
 #ifdef CONFIG_SMP
 	struct pagevec activate_page;
 #endif
@@ -630,6 +631,21 @@ static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
 	}
 }
 
+static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec,
+				void *arg)
+{
+	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
+			!PageSwapCache(page)) {
+		bool active = PageActive(page);
+
+		del_page_from_lru_list(page, lruvec,
+					LRU_INACTIVE_ANON + active);
+		ClearPageActive(page);
+		ClearPageReferenced(page);
+		add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE);
+	}
+}
+
 /*
  * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
@@ -665,6 +681,10 @@ void lru_add_drain_cpu(int cpu)
 	if (pagevec_count(pvec))
 		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
 
+	pvec = &per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu);
+	if (pagevec_count(pvec))
+		pagevec_lru_move_fn(pvec, lru_lazyfree_movetail_fn, NULL);
+
 	activate_page_drain(cpu);
 	invalidate_bh_lrus_cpu(cpu);
 }
@@ -742,6 +762,29 @@ void mark_page_lazyfree(struct page *page)
 	}
 }
 
+/**
+ * mark_page_lazyfree_movetail - make a swapbacked page lazyfree
+ * @page: page to deactivate
+ *
+ * mark_page_lazyfree_movetail() moves @page to the tail of inactive file list.
+ * This is done to accelerate the reclaim of @page.
+ */
+void mark_page_lazyfree_movetail(struct page *page)
+{
+	if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) &&
+			!PageSwapCache(page)) {
+		struct pagevec *pvec;
+
+		local_lock(&lru_pvecs.lock);
+		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree_movetail);
+		get_page(page);
+		if (pagevec_add_and_need_flush(pvec, page))
+			pagevec_lru_move_fn(pvec,
+					lru_lazyfree_movetail_fn, NULL);
+		local_unlock(&lru_pvecs.lock);
+	}
+}
+
 void lru_add_drain(void)
 {
 	local_lock(&lru_pvecs.lock);
@@ -854,6 +897,7 @@ inline void __lru_add_drain_all(bool force_all_cpus)
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
 		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
+		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree_movetail, cpu)) ||
 		    need_activate_page_drain(cpu) ||
 		    has_bh_in_lru(cpu, NULL)) {
 			INIT_WORK(work, lru_add_drain_per_cpu);
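
Note (not part of the patch): the change exports shmem_mark_page_lazyfree() so that shmem users can push cold, swap-backed pages to the tail of the inactive file LRU, where reclaim scans first. The sketch below shows how a hypothetical kernel client holding a shmem-backed file might call it once a buffer becomes disposable; my_buf_release_pages(), its parameters, and the find_get_page() walk are illustrative assumptions, not part of this change.

/*
 * Illustrative only (assumed caller, not from the patch): walk the pages
 * of a shmem-backed mapping and ask for them to be moved to the tail of
 * the inactive file LRU so reclaim finds them first.
 */
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/shmem_fs.h>

static void my_buf_release_pages(struct file *shmem_file, pgoff_t nr_pages)
{
	struct address_space *mapping = shmem_file->f_mapping;
	pgoff_t i;

	for (i = 0; i < nr_pages; i++) {
		struct page *page = find_get_page(mapping, i);

		if (!page)
			continue;
		/*
		 * New export from this patch; batches the page into the
		 * per-CPU lru_lazyfree_movetail pagevec.
		 */
		shmem_mark_page_lazyfree(page);
		put_page(page);
	}
}

Because the move is batched through a per-CPU pagevec, a page may not reach the LRU tail until that pagevec fills (pagevec_add_and_need_flush()) or lru_add_drain() runs on the same CPU.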