zsmalloc: fix races between modifications of fullness and isolated
[ Upstream commit 4b5d1e47b69426c0f7491d97d73ad0152d02d437 ]
We have lately encountered many kernel exceptions of VM_BUG_ON(zspage->isolated ==
0) in dec_zspage_isolation() and BUG_ON(!pages[1]) in zs_unmap_object().
This issue only occurs when migration and reclamation run concurrently.
With our memory stress test, we can reproduce this issue several times
a day. We have no idea why no one else has hit it; we only switched to
a kernel version containing this defect a few months ago.
Since fullness and isolated share the same unsigned int, modifications to
them must be protected by the same lock.
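
For reference, the packed layout in question, abridged from struct zspage
in mm/zsmalloc.c (surrounding members elided; see the file for the actual
*_BITS widths, which vary across kernel versions):

	/*
	 * fullness and isolated are bitfields packed into a single
	 * unsigned int, so writing either field is a non-atomic
	 * read-modify-write of the whole word.  When one writer holds
	 * pool->lock and the other holds only the per-zspage migrate
	 * write lock, the two RMWs can interleave and corrupt both
	 * fields.
	 */
	struct zspage {
		struct {
			unsigned int huge:HUGE_BITS;
			unsigned int fullness:FULLNESS_BITS;
			unsigned int class:CLASS_BITS + 1;
			unsigned int isolated:ISOLATED_BITS;
			unsigned int magic:MAGIC_VAL_BITS;
		};
		/* ... inuse, freeobj, first_page, list, pool, lock ... */
	};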
[andrew.yang@mediatek.com: move comment]
Link: https://lkml.kernel.org/r/20230727062910.6337-1-andrew.yang@mediatek.com
Link: https://lkml.kernel.org/r/20230721063705.11455-1-andrew.yang@mediatek.com
Fixes: c4549b8711 ("zsmalloc: remove zspage isolation for migration")
Signed-off-by: Andrew Yang <andrew.yang@mediatek.com>
Reviewed-by: Sergey Senozhatsky <senozhatsky@chromium.org>
Cc: AngeloGioacchino Del Regno <angelogioacchino.delregno@collabora.com>
Cc: Matthias Brugger <matthias.bgg@gmail.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Cc: <stable@vger.kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Sasha Levin <sashal@kernel.org>
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -1816,6 +1816,7 @@ static void replace_sub_page(struct size_class *class, struct zspage *zspage,
 
 static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 {
+	struct zs_pool *pool;
 	struct zspage *zspage;
 
 	/*
@@ -1826,9 +1827,10 @@ static bool zs_page_isolate(struct page *page, isolate_mode_t mode)
 	VM_BUG_ON_PAGE(PageIsolated(page), page);
 
 	zspage = get_zspage(page);
-	migrate_write_lock(zspage);
+	pool = zspage->pool;
+	spin_lock(&pool->lock);
 	inc_zspage_isolation(zspage);
-	migrate_write_unlock(zspage);
+	spin_unlock(&pool->lock);
 
 	return true;
 }
@@ -1895,12 +1897,12 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 	kunmap_atomic(s_addr);
 
 	replace_sub_page(class, zspage, newpage, page);
+	dec_zspage_isolation(zspage);
 	/*
 	 * Since we complete the data copy and set up new zspage structure,
 	 * it's okay to release the pool's lock.
 	 */
 	spin_unlock(&pool->lock);
-	dec_zspage_isolation(zspage);
 	migrate_write_unlock(zspage);
 
 	get_page(newpage);
@@ -1917,15 +1919,17 @@ static int zs_page_migrate(struct page *newpage, struct page *page,
 
 static void zs_page_putback(struct page *page)
 {
+	struct zs_pool *pool;
 	struct zspage *zspage;
 
 	VM_BUG_ON_PAGE(!PageMovable(page), page);
 	VM_BUG_ON_PAGE(!PageIsolated(page), page);
 
 	zspage = get_zspage(page);
-	migrate_write_lock(zspage);
+	pool = zspage->pool;
+	spin_lock(&pool->lock);
 	dec_zspage_isolation(zspage);
-	migrate_write_unlock(zspage);
+	spin_unlock(&pool->lock);
 }
 
 static const struct movable_operations zsmalloc_mops = {
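
The race class the patch closes is easy to demonstrate outside the
kernel. Below is a hypothetical userspace sketch (illustrative only;
the struct and names are made up, not kernel code): two threads doing
unsynchronized read-modify-writes on adjacent bitfields of one word
lose updates, which is how an update of isolated under one lock could
corrupt fullness updated under another.

	/* bitfield_race.c - build: gcc -O2 -pthread bitfield_race.c */
	#include <pthread.h>
	#include <stdio.h>

	static struct {
		unsigned int fullness:2;	/* stand-in for zspage->fullness */
		unsigned int isolated:3;	/* stand-in for zspage->isolated */
	} flags;

	/* Compiler barrier so the racy RMWs are not optimized away. */
	#define barrier() asm volatile("" ::: "memory")

	static void *toggle_isolated(void *unused)
	{
		(void)unused;
		for (long i = 0; i < 10000000; i++) {
			flags.isolated++;	/* RMW of the shared word */
			flags.isolated--;
			barrier();
		}
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, toggle_isolated, NULL);
		for (long i = 0; i < 10000000; i++) {
			flags.fullness ^= 1;	/* RMW of the same word */
			barrier();
		}
		pthread_join(t, NULL);

		/* Both fields should end at 0; anything else is a lost
		 * update, the analogue of zspage->isolated tripping
		 * VM_BUG_ON(zspage->isolated == 0). */
		printf("fullness=%u isolated=%u\n", flags.fullness, flags.isolated);
		return 0;
	}

Taking pool->lock around both fields' updates, as the patch does,
serializes the read-modify-writes and removes the corruption.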