From 77ae3e7bb8cef4b25cc0d8a9e75905001b55407f Mon Sep 17 00:00:00 2001
From: Suren Baghdasaryan
Date: Fri, 30 Jun 2023 14:19:52 -0700
Subject: [PATCH] FROMGIT: swap: remove remnants of polling from
 read_swap_cache_async

Patch series "Per-VMA lock support for swap and userfaults", v7.

When per-VMA locks were introduced in [1] several types of page faults
would still fall back to mmap_lock to keep the patchset simple. Among
them are swap and userfault pages. The main reason for skipping those
cases was the fact that mmap_lock could be dropped while handling these
faults and that required additional logic to be implemented. Implement
the mechanism to allow per-VMA locks to be dropped for these cases.

First, change handle_mm_fault to drop per-VMA locks when returning
VM_FAULT_RETRY or VM_FAULT_COMPLETED to be consistent with the way
mmap_lock is handled. Then change folio_lock_or_retry to accept vm_fault
and return vm_fault_t, which simplifies later patches. Finally, allow
swap and uffd page faults to be handled under per-VMA locks by dropping
the per-VMA lock and retrying, the same way it's done under mmap_lock.
Naturally, once the VMA lock is dropped that VMA should be assumed
unstable and can't be used.

This patch (of 6):

Commit [1] introduced IO polling support during swapin to reduce swap
read latency for block devices that can be polled. However, later commit
[2] removed polling support. Therefore it seems safe to remove the
do_poll parameter in read_swap_cache_async and always call swap_readpage
with synchronous=false, waiting for IO completion in folio_lock_or_retry.

[1] commit 23955622ff8d ("swap: add block io poll in swapin path")
[2] commit 9650b453a3d4 ("block: ignore RWF_HIPRI hint for sync dio")

Link: https://lkml.kernel.org/r/20230630211957.1341547-1-surenb@google.com
Link: https://lkml.kernel.org/r/20230630211957.1341547-2-surenb@google.com
Signed-off-by: Suren Baghdasaryan
Suggested-by: "Huang, Ying"
Reviewed-by: "Huang, Ying"
Reviewed-by: Christoph Hellwig
Cc: Alistair Popple
Cc: Al Viro
Cc: Christian Brauner
Cc: David Hildenbrand
Cc: David Howells
Cc: Davidlohr Bueso
Cc: Hillf Danton
Cc: Hugh Dickins
Cc: Jan Kara
Cc: Johannes Weiner
Cc: Josef Bacik
Cc: Laurent Dufour
Cc: Liam R. Howlett
Cc: Lorenzo Stoakes
Cc: Matthew Wilcox
Cc: Michal Hocko
Cc: Michel Lespinasse
Cc: Minchan Kim
Cc: Pavel Tatashin
Cc: Peter Xu
Cc: Punit Agrawal
Cc: Vlastimil Babka
Cc: Yu Zhao
Signed-off-by: Andrew Morton
(cherry picked from commit 4296c6a817b421061d6e0b9c654c7d4d5a038a5b
 https: //git.kernel.org/pub/scm/linux/kernel/git/akpm/mm.git mm-unstable)
Bug: 161210518
Change-Id: I3d647ba4d6093f4e3db2c4ff759e5ce59b45b0e1
Signed-off-by: Suren Baghdasaryan
---
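For reviewers, a rough sketch of the interface after this change, as
implied by the hunks below (illustrative only; indentation and context
lines may differ from the target tree):

	struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
					   struct vm_area_struct *vma, unsigned long addr,
					   struct swap_iocb **plug);

	/*
	 * Inside read_swap_cache_async(): once a fresh swap cache page has
	 * been allocated, the read is now always submitted without waiting.
	 */
	if (page_was_allocated)
		swap_readpage(retpage, false, plug);
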
 mm/madvise.c    |  4 ++--
 mm/swap.h       |  1 -
 mm/swap_state.c | 12 +++++-------
 3 files changed, 7 insertions(+), 10 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index f49a62a35827..42c5a65e1c2d 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -223,7 +223,7 @@ static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
 
 		trace_android_vh_madvise_swapin_walk_pmd_entry(entry);
 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
-					     vma, index, false, &splug);
+					     vma, index, &splug);
 		if (page)
 			put_page(page);
 	}
@@ -259,7 +259,7 @@ static void force_shm_swapin_readahead(struct vm_area_struct *vma,
 		rcu_read_unlock();
 
 		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
-					     NULL, 0, false, &splug);
+					     NULL, 0, &splug);
 		if (page)
 			put_page(page);
 
diff --git a/mm/swap.h b/mm/swap.h
index cc08c459c619..9ad061576192 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -46,7 +46,6 @@ struct page *find_get_incore_page(struct address_space *mapping,
 		pgoff_t index);
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				   struct vm_area_struct *vma, unsigned long addr,
-				   bool do_poll,
 				   struct swap_iocb **plug);
 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				     struct vm_area_struct *vma,
diff --git a/mm/swap_state.c b/mm/swap_state.c
index 438d0676c5be..3e7db8ea40f3 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -515,15 +515,14 @@ struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
  */
 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
 				   struct vm_area_struct *vma,
-				   unsigned long addr, bool do_poll,
-				   struct swap_iocb **plug)
+				   unsigned long addr, struct swap_iocb **plug)
 {
 	bool page_was_allocated;
 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
 			vma, addr, &page_was_allocated);
 
 	if (page_was_allocated)
-		swap_readpage(retpage, do_poll, plug);
+		swap_readpage(retpage, false, plug);
 
 	return retpage;
 }
@@ -618,7 +617,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	struct swap_info_struct *si = swp_swap_info(entry);
 	struct blk_plug plug;
 	struct swap_iocb *splug = NULL;
-	bool do_poll = true, page_allocated;
+	bool page_allocated;
 	struct vm_area_struct *vma = vmf->vma;
 	unsigned long addr = vmf->address;
 
@@ -626,7 +625,6 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	if (!mask)
 		goto skip;
 
-	do_poll = false;
 	/* Read a page_cluster sized and aligned cluster around offset. */
 	start_offset = offset & ~mask;
 	end_offset = offset | mask;
@@ -658,7 +656,7 @@ struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
 	lru_add_drain();	/* Push any new pages onto the LRU now */
 skip:
 	/* The page was likely read above, so no need for plugging here */
-	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
+	return read_swap_cache_async(entry, gfp_mask, vma, addr, NULL);
 }
 
 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
@@ -832,7 +830,7 @@ static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
 skip:
 	/* The page was likely read above, so no need for plugging here */
 	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
-				     ra_info.win == 1, NULL);
+				     NULL);
 }
 
 /**
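
Below the diff, for reviewers: a minimal sketch of the consumer pattern
the changelog relies on. Names are taken from current mm code, and
lock_page() here stands in for the folio_lock_or_retry() path mentioned
above; the exact call chain in do_swap_page() differs.

	/*
	 * swap_readpage(page, false, plug) only submits the bio; the swap
	 * cache page stays locked until the read completion unlocks it, so
	 * a caller that needs the data simply waits on the page lock.
	 */
	page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE, vma, addr, NULL);
	if (page) {
		lock_page(page);	/* sleeps until the swap read completes */
		/* PageUptodate(page) then tells whether the read succeeded */
		unlock_page(page);
		put_page(page);
	}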