mm/shmem: fix race in shmem_undo_range w/THP
commit 55ac8bbe358bdd2f3c044c12f249fd22d48fe015 upstream.
Split folios during the second loop of shmem_undo_range. It's not
sufficient to only split folios when dealing with partial pages, since
it's possible for a THP to be faulted in after that point. Calling
truncate_inode_folio in that situation can result in throwing away data
outside of the range being targeted.
[akpm@linux-foundation.org: tidy up comment layout]
Link: https://lkml.kernel.org/r/20230418084031.3439795-1-stevensd@google.com
Fixes: b9a8a4195c ("truncate,shmem: Handle truncates that split large folios")
Signed-off-by: David Stevens <stevensd@chromium.org>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Suleiman Souhlal <suleiman@google.com>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
This commit is contained in:
parent
8ec07b0620
commit
da9b7c651c
19
mm/shmem.c
19
mm/shmem.c
@@ -1024,7 +1024,24 @@ static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
 			}
 			VM_BUG_ON_FOLIO(folio_test_writeback(folio),
 					folio);
-			truncate_inode_folio(mapping, folio);
+
+			if (!folio_test_large(folio)) {
+				truncate_inode_folio(mapping, folio);
+			} else if (truncate_inode_partial_folio(folio, lstart, lend)) {
+				/*
+				 * If we split a page, reset the loop so
+				 * that we pick up the new sub pages.
+				 * Otherwise the THP was entirely
+				 * dropped or the target range was
+				 * zeroed, so just continue the loop as
+				 * is.
+				 */
+				if (!folio_test_large(folio)) {
+					folio_unlock(folio);
+					index = start;
+					break;
+				}
+			}
 		}
 		index = folio->index + folio_nr_pages(folio) - 1;
 		folio_unlock(folio);
Loading…
Reference in New Issue
Block a user